Skip to content

Commit 1c2c6d7

Browse files
committed
.gitignoreにcontexts/を追加
LMStudioClientとTaskHandlerでの空レスポンスチェックと関数呼び出し処理を改善
1 parent 7357881 commit 1c2c6d7

3 files changed

Lines changed: 89 additions & 9 deletions

File tree

.gitignore

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,7 @@
11
node_modules/
22
gitlab/
33
.vscode/
4+
contexts/
45
.env
56
*.pyc
67
*.log

clients/lmstudio_client.py

Lines changed: 17 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -69,20 +69,21 @@ def send_user_message(self, message: str) -> None:
6969
self.chat.add_user_message(message)
7070

7171
def send_function_result(self, name: str, result: object) -> None:
72-
"""関数の実行結果を送信する(LM Studioでは未対応).
72+
"""関数の実行結果を送信する.
7373
7474
Args:
7575
name: 関数名
7676
result: 実行結果
7777
78-
Raises:
79-
NotImplementedError: LM Studioは関数呼び出しをサポートしていない
80-
8178
"""
82-
msg = "LMStudio does not support function calls. Use OpenAI compatible call instead."
83-
raise NotImplementedError(
84-
msg,
85-
)
79+
if self.message_store:
80+
# File-based mode: add result as user message
81+
output_message = f"output: {result}"
82+
self.message_store.add_message("user", output_message)
83+
else:
84+
# Legacy mode: LM Studio does not support function calls
85+
msg = "LMStudio does not support function calls. Use OpenAI compatible call instead."
86+
raise NotImplementedError(msg)
8687

8788
def get_response(self) -> str:
8889
"""LLMからの応答を取得する.
@@ -91,10 +92,16 @@ def get_response(self) -> str:
9192
LLMからの応答テキスト
9293
9394
"""
95+
import logging
96+
logger = logging.getLogger(__name__)
97+
9498
if self.message_store:
9599
# File-based mode: need to manually create chat from messages
96100
chat = lms.Chat()
97101
messages = self._load_messages_from_file()
102+
103+
logger.debug("メッセージ数: %d", len(messages))
104+
98105
for msg in messages:
99106
role = msg.get("role")
100107
content = msg.get("content", "")
@@ -106,6 +113,8 @@ def get_response(self) -> str:
106113
chat.add_assistant_response(content)
107114

108115
result = self.model.respond(chat)
116+
logger.debug("LMStudio応答型: %s, 内容: %s", type(result), str(result)[:100])
117+
109118
self.message_store.add_message("assistant", str(result))
110119
else:
111120
# Legacy mode

handlers/task_handler.py

Lines changed: 71 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -576,15 +576,24 @@ def _process_llm_interaction_with_client(
576576
# LLMからレスポンスを取得
577577
resp, functions = llm_client.get_response()
578578
self.logger.info("LLM応答: %s", resp)
579+
580+
# 空レスポンスのチェック
581+
if not resp or not resp.strip():
582+
self.logger.error("LLMから空の応答が返されました")
583+
if count >= MAX_JSON_PARSE_ERRORS:
584+
task.comment("LLM応答エラーでスキップ")
585+
return True
586+
return False
579587

580588
# レスポンスの前処理
581589
resp_clean = self._process_think_tags(task, resp)
590+
self.logger.debug("think処理後のレスポンス: %s", resp_clean)
582591

583592
# JSON応答の解析
584593
try:
585594
data = self._extract_json(resp_clean)
586595
except Exception:
587-
self.logger.exception("LLM応答JSONパース失敗")
596+
self.logger.exception("LLM応答JSONパース失敗 (応答内容: %s)", resp_clean[:200])
588597
if count >= MAX_JSON_PARSE_ERRORS:
589598
task.comment("LLM応答エラーでスキップ")
590599
return True
@@ -627,6 +636,67 @@ def _process_response_data_with_context(
627636
task.comment(str(data["plan"]))
628637
llm_client.send_user_message(str(data["plan"]))
629638

639+
# function_call形式の処理 (OpenAI/LMStudio互換)
640+
if "function_call" in data:
641+
func_call = data["function_call"]
642+
tool_name = func_call["name"]
643+
args_str = func_call.get("arguments", "{}")
644+
645+
# argumentsが文字列の場合はパース
646+
if isinstance(args_str, str):
647+
args = json.loads(args_str) if args_str else {}
648+
else:
649+
args = args_str
650+
651+
args = self.sanitize_arguments(args)
652+
mcp_server, tool_func = tool_name.split("_", 1)
653+
654+
start_time = time.time()
655+
try:
656+
output = self.mcp_clients[mcp_server].call_tool(tool_func, args)
657+
duration_ms = (time.time() - start_time) * 1000
658+
659+
# Record tool execution
660+
tool_store.add_tool_call(
661+
tool_name=tool_name,
662+
args=args,
663+
result=output,
664+
status="success",
665+
duration_ms=duration_ms,
666+
)
667+
context_manager.update_statistics(tool_calls=1)
668+
669+
# Send result to LLM
670+
llm_client.send_function_result(tool_name, output)
671+
672+
# Reset error count on success
673+
if error_state.get("last_tool") == tool_name:
674+
error_state["tool_error_count"] = 0
675+
676+
except Exception as e:
677+
duration_ms = (time.time() - start_time) * 1000
678+
error_msg = str(e)
679+
680+
# Record tool error
681+
tool_store.add_tool_call(
682+
tool_name=tool_name,
683+
args=args,
684+
result=None,
685+
status="error",
686+
duration_ms=duration_ms,
687+
error=error_msg,
688+
)
689+
context_manager.update_statistics(tool_calls=1)
690+
691+
self.logger.exception("ツール呼び出しエラー: %s", error_msg)
692+
llm_client.send_function_result(tool_name, {"error": error_msg})
693+
694+
# Update error state
695+
self._update_error_count(tool_name, error_state)
696+
if error_state.get("tool_error_count", 0) >= MAX_CONSECUTIVE_TOOL_ERRORS:
697+
task.comment(f"同じツール({tool_name})で3回連続エラーが発生したため処理を中止します。")
698+
return True
699+
630700
if "call_tool" in data:
631701
# ツール呼び出し処理
632702
for tool_call in data["call_tool"]:

0 commit comments

Comments (0)