Commit e017db9

build_generator: Remove unnecessary truncation (#1032)
As of #1017, the prompt truncation process has been implemented directly in the OpenAI LLM models. This makes the additional truncation in the build_generator LLM agent unnecessary and potentially confusing. This PR removes the redundant second truncation from the LLM agent.

Signed-off-by: Arthur Chan <[email protected]>
1 parent ed0d947 commit e017db9
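
For context, the model-side truncation that #1017 introduced works roughly along these lines (a minimal sketch; the function name, the character-based limit, and the tail-keeping behavior are assumptions for illustration, not the actual implementation in the OpenAI LLM models):

# Hypothetical sketch of the truncation now handled inside the LLM model
# layer; names and tail-keeping behavior are illustrative assumptions.
MAX_INPUT_TOKEN = 25000  # wired up from constants.MAX_PROMPT_LENGTH


def truncate_prompt(text: str, limit: int = MAX_INPUT_TOKEN) -> str:
  # Keep the tail of an over-long prompt, where the newest context sits.
  if len(text) <= limit:
    return text
  return text[-limit:]

Because this single cap sits at the model boundary, callers such as the build_generator agent can pass full prompt text without budgeting for length themselves.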

File tree

3 files changed: +4 -6 lines changed


experimental/build_generator/constants.py (+2)
@@ -22,6 +22,8 @@
 MODEL_VERTEX = 'vertex'
 MODELS = [MODEL_GPT_35_TURBO, MODEL_VERTEX]
 
+MAX_PROMPT_LENGTH = 25000
+
 # Common -l<lib> to required package mapping for Dockerfile installation
 LIBRARY_PACKAGE_MAP = {
     "z": "zlib1g-dev",

experimental/build_generator/llm_agent.py (+1 -5)
@@ -29,7 +29,6 @@
 from tool.base_tool import BaseTool
 from tool.container_tool import ProjectContainerTool
 
-MAX_PROMPT_LENGTH = 20000
 SAMPLE_HEADERS_COUNT = 30
 MAX_DISCOVERY_ROUND = 100
 INTROSPECTOR_OSS_FUZZ_DIR = '/src/inspector'
@@ -207,10 +206,7 @@ def _container_handle_conclusion(self, cur_round: int, response: str,
       retry = retry.replace('{FUZZER_NAME}', self.harness_name)
     else:
       retry = templates.LLM_RETRY.replace('{BASH_RESULT}', self.last_result)
-
-    # Refine prompt text to max prompt count and add to prompt
-    length = min(len(retry), (MAX_PROMPT_LENGTH - len(prompt.gettext())))
-    prompt.add_problem(retry[-length:])
+    prompt.add_problem(retry)
 
     # Store build result
     build_result.compiles = False
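
The removed block pre-trimmed the retry text against the agent-local MAX_PROMPT_LENGTH before the model applied its own cap, so the same prompt could be cut at two different lengths. A standalone before/after illustration (hypothetical values; assumes the model keeps the tail of an over-long prompt):

# Hypothetical comparison of the old double cut vs. the new single cut;
# not code from the repository.
AGENT_LIMIT = 20000  # old llm_agent.MAX_PROMPT_LENGTH, removed by this PR
MODEL_LIMIT = 25000  # constants.MAX_PROMPT_LENGTH, enforced by the model

prompt_so_far = 'y' * 4000
retry = 'x' * 30000

# Before: the agent cut the retry text first, then the model cut again.
length = min(len(retry), AGENT_LIMIT - len(prompt_so_far))
old_prompt = (prompt_so_far + retry[-length:])[-MODEL_LIMIT:]

# After: the agent appends everything and only the model truncates.
new_prompt = (prompt_so_far + retry)[-MODEL_LIMIT:]

print(len(old_prompt), len(new_prompt))  # 20000 25000

With one limit enforced in one place, the effective prompt size no longer depends on two constants that can silently drift apart.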

experimental/build_generator/runner.py (+1 -1)
@@ -425,7 +425,7 @@ def run_agent(target_repositories: List[str], args: argparse.Namespace):
       temperature=0.4,
       temperature_list=[],
   )
-  llm.MAX_INPUT_TOKEN = llm_agent.MAX_PROMPT_LENGTH
+  llm.MAX_INPUT_TOKEN = constants.MAX_PROMPT_LENGTH
 
   logger.info('Agent: %s.', llm_agent_ctr.__name__)
   agent = llm_agent_ctr(trial=1,
