Skip to content

Commit ca4e7e9

Browse files
authored
fix: support seed and fix absolute path (microsoft#278)
* fix: support seed and fix absolute path
* Absolute path
* lint
1 parent 145e2f7 commit ca4e7e9

File tree

3 files changed

+19
-5
lines changed

3 files changed

+19
-5
lines changed

rdagent/components/coder/factor_coder/CoSTEER/evaluators.py

Lines changed: 1 addition & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -505,6 +505,7 @@ def evaluate(
505505
user_prompt=user_prompt,
506506
system_prompt=system_prompt,
507507
json_mode=True,
508+
seed=attempts, # in case of useless retrying when cache enabled.
508509
),
509510
)
510511
final_decision = final_evaluation_dict["final_decision"]

rdagent/components/coder/factor_coder/factor.py

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -89,7 +89,7 @@ def __init__(
8989

9090
@staticmethod
9191
def link_data_to_workspace(data_path: Path, workspace_path: Path):
92-
data_path = Path(data_path)
92+
data_path = Path(data_path).absolute() # in case of relative path that will be invalid when we change cwd.
9393
workspace_path = Path(workspace_path)
9494
for data_file_path in data_path.iterdir():
9595
workspace_data_file_path = workspace_path / data_file_path.name

rdagent/oai/llm_utils.py

Lines changed: 17 additions & 4 deletions
Original file line number | Diff line number | Diff line change
@@ -13,7 +13,7 @@
1313
import uuid
1414
from copy import deepcopy
1515
from pathlib import Path
16-
from typing import Any
16+
from typing import Any, Optional
1717

1818
import numpy as np
1919
import tiktoken
@@ -401,7 +401,10 @@ def build_messages(
401401
*,
402402
shrink_multiple_break: bool = False,
403403
) -> list[dict]:
404-
"""build the messages to avoid implementing several redundant lines of code"""
404+
"""
405+
build the messages to avoid implementing several redundant lines of code
406+
407+
"""
405408
if former_messages is None:
406409
former_messages = []
407410
# shrink multiple break will recursively remove multiple breaks(more than 2)
@@ -440,7 +443,10 @@ def build_messages_and_create_chat_completion(
440443
if former_messages is None:
441444
former_messages = []
442445
messages = self.build_messages(
443-
user_prompt, system_prompt, former_messages, shrink_multiple_break=shrink_multiple_break
446+
user_prompt,
447+
system_prompt,
448+
former_messages,
449+
shrink_multiple_break=shrink_multiple_break,
444450
)
445451
return self._try_create_chat_completion_or_embedding(
446452
messages=messages,
@@ -567,14 +573,21 @@ def _create_chat_completion_inner_function( # noqa: C901, PLR0912, PLR0915
567573
*,
568574
json_mode: bool = False,
569575
add_json_in_prompt: bool = False,
576+
seed: Optional[int] = None,
570577
) -> str:
578+
"""
579+
seed : Optional[int]
580+
When retrying with cache enabled, it will keep returning the same results.
581+
To make retries useful, we need to enable a seed.
582+
This seed is different from `self.chat_seed` for GPT. It is for the local cache mechanism enabled by RD-Agent locally.
583+
"""
571584
# TODO: we can add this function back to avoid so much `self.cfg.log_llm_chat_content`
572585
if self.cfg.log_llm_chat_content:
573586
logger.info(self._build_log_messages(messages), tag="llm_messages")
574587
# TODO: fail to use loguru adaptor due to stream response
575588
input_content_json = json.dumps(messages)
576589
input_content_json = (
577-
chat_cache_prefix + input_content_json
590+
chat_cache_prefix + input_content_json + f"<seed={seed}/>"
578591
) # FIXME this is a hack to make sure the cache represents the round index
579592
if self.use_chat_cache:
580593
cache_result = self.cache.chat_get(input_content_json)

0 commit comments

Comments (0)