diff --git a/.jules/bolt.md b/.jules/bolt.md new file mode 100644 index 0000000..23f0a63 --- /dev/null +++ b/.jules/bolt.md @@ -0,0 +1,3 @@ +## 2024-04-10 - [Concurrent LLM Tool Calls] +**Learning:** In the `RetrievalPipeline` and `CodeRetrievalPipeline`, the execution of LLM tool calls was originally a sequential loop `for tc in ai_response.tool_calls: ... await self._execute_tool(...)`. This sequential execution was a significant bottleneck because each tool call (like searching Pinecone or Neo4j, or fetching files) had to wait for the previous one to finish. +**Action:** Replaced sequential execution with `asyncio.gather(*(_process_tool_call(tc) for tc in ai_response.tool_calls))`. I learned that wrapping the processing logic in an internal helper function ensures we can properly track metrics (like timing) and safely reconstruct the shared records list in order after the concurrent work completes. This concurrent model also correctly flattened sublists for `_search_symbols` and `_search_files` when doing a full organization repository search. 
diff --git a/src/pipelines/code_retrieval.py b/src/pipelines/code_retrieval.py index 69e4abc..7c88bd6 100644 --- a/src/pipelines/code_retrieval.py +++ b/src/pipelines/code_retrieval.py @@ -26,7 +26,8 @@ from __future__ import annotations import logging -from typing import Any, Callable, Dict, List, Optional +import asyncio +from typing import Any, Callable, Dict, List, Optional, Tuple from langchain_core.language_models import BaseChatModel from langchain_core.messages import AIMessage, HumanMessage, SystemMessage, ToolMessage @@ -37,7 +38,6 @@ from src.scanner.code_store import CodeStore from src.schemas.code import ( annotations_namespace, - directories_namespace, files_namespace, snippets_namespace, symbols_namespace, @@ -375,26 +375,29 @@ async def run( turn_records: List[SourceRecord] = [] only_read_tools = True - for tc in ai_response.tool_calls: + + async def _process_tool_call(tc: Dict[str, Any]) -> Tuple[str, str, List[SourceRecord], float]: tool_name = tc["name"] tool_args = tc["args"] tool_id = tc["id"] - t1 = _time.perf_counter() records = await self._execute_tool( - tool_name, tool_args, repo=repo, top_k=top_k, - user_id=user_id, + tool_name, tool_args, repo=repo, top_k=top_k, user_id=user_id ) tool_ms = (_time.perf_counter() - t1) * 1000 logger.info(" Tool: %s(%s) → %d results (%.0fms)", tool_name, tool_args, len(records), tool_ms) + return tool_name, tool_id, records, tool_ms + + results = await asyncio.gather( + *(_process_tool_call(tc) for tc in ai_response.tool_calls) + ) + + for tool_name, tool_id, records, _ in results: turn_records.extend(records) sources.extend(records) - - # Track if this turn ONLY used read tools (no search) normalized = tool_name.lower().replace("_", "") if normalized not in ("readfilecode", "readsymbolcode", "getfilecontext"): only_read_tools = False - tool_result_text = self._format_tool_results(records) messages.append( ToolMessage(content=tool_result_text, tool_call_id=tool_id) @@ -471,19 +474,23 @@ async def 
run_stream( if ai_response.tool_calls: yield json.dumps({"type": "status", "content": f"Running {len(ai_response.tool_calls)} search tool(s)..."}) + "\n" - for tc in ai_response.tool_calls: + + async def _process_tool_call(tc: Dict[str, Any]) -> Tuple[str, str, List[SourceRecord]]: tool_name = tc["name"] tool_args = tc["args"] tool_id = tc["id"] - logger.info(" Tool: %s(%s)", tool_name, tool_args) - records = await self._execute_tool( - tool_name, tool_args, repo=repo, top_k=top_k, - user_id=user_id, + tool_name, tool_args, repo=repo, top_k=top_k, user_id=user_id ) - sources.extend(records) + return tool_name, tool_id, records + results = await asyncio.gather( + *(_process_tool_call(tc) for tc in ai_response.tool_calls) + ) + + for tool_name, tool_id, records in results: + sources.extend(records) tool_result_text = self._format_tool_results(records) tool_messages.append( ToolMessage(content=tool_result_text, tool_call_id=tool_id) @@ -589,15 +596,17 @@ async def _search_symbols( ) -> List[SourceRecord]: if not repo: logger.warning("search_symbols called without repo — searching all repos") - results = [] - for r in self.repos: - results.extend(await self._search_namespace( + results = await asyncio.gather(*( + self._search_namespace( namespace=symbols_namespace(self.org_id, r), query=query, domain="symbol", top_k=top_k, - )) - return results[:top_k] + ) for r in self.repos + )) + # Flatten the list of lists + flat_results = [item for sublist in results for item in sublist] + return flat_results[:top_k] return await self._search_namespace( namespace=symbols_namespace(self.org_id, repo), @@ -612,15 +621,17 @@ async def _search_files( self, query: str, repo: str, top_k: int = 10, ) -> List[SourceRecord]: if not repo: - results = [] - for r in self.repos: - results.extend(await self._search_namespace( + results = await asyncio.gather(*( + self._search_namespace( namespace=files_namespace(self.org_id, r), query=query, domain="file", top_k=top_k, - )) - return 
results[:top_k] + ) for r in self.repos + )) + # Flatten the list of lists + flat_results = [item for sublist in results for item in sublist] + return flat_results[:top_k] return await self._search_namespace( namespace=files_namespace(self.org_id, repo), diff --git a/src/pipelines/retrieval.py b/src/pipelines/retrieval.py index d54cc0d..cbbc585 100644 --- a/src/pipelines/retrieval.py +++ b/src/pipelines/retrieval.py @@ -21,8 +21,8 @@ from __future__ import annotations import logging -import os -from typing import Any, Callable, Dict, List, Optional +import asyncio +from typing import Any, Callable, Dict, List, Optional, Tuple from dotenv import load_dotenv from langchain_core.language_models import BaseChatModel @@ -176,25 +176,26 @@ async def run( tool_messages: List[ToolMessage] = [] if ai_response.tool_calls: - called_tools = set() - for tc in ai_response.tool_calls: + + async def _process_tool_call(tc: Dict[str, Any]) -> Tuple[str, str, List[SourceRecord]]: tool_name = tc["name"] tool_args = tc["args"] tool_id = tc["id"] - logger.info(" Tool call: %s(%s)", tool_name, tool_args) + records = await self._execute_tool(tool_name, tool_args, user_id, top_k) + return tool_name, tool_id, records - records = await self._execute_tool( - tool_name, tool_args, user_id, top_k, - ) - sources.extend(records) + called_tools = set() + results = await asyncio.gather( + *(_process_tool_call(tc) for tc in ai_response.tool_calls) + ) - # Build ToolMessage for the LLM + for tool_name, tool_id, records in results: + sources.extend(records) tool_result_text = self._format_tool_results(records) tool_messages.append( ToolMessage(content=tool_result_text, tool_call_id=tool_id) ) - called_tools.add(tool_name.lower().replace("_", "")) # Auto-add summary context when only profile or temporal was requested @@ -351,7 +352,6 @@ async def _search_temporal( top_k: int = 3, ) -> List[SourceRecord]: """Semantic search over temporal events in Neo4j.""" - import asyncio from functools import partial 
loop = asyncio.get_running_loop()