Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 3 additions & 0 deletions .jules/bolt.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
## 2024-04-10 - [Concurrent LLM Tool Calls]
**Learning:** In the `RetrievalPipeline` and `CodeRetrievalPipeline`, the execution of LLM tool calls was originally a sequential loop `for tc in ai_response.tool_calls: ... await self._execute_tool(...)`. This sequential execution was a significant bottleneck because each tool call (like searching Pinecone or Neo4j, or fetching files) had to wait for the previous one to finish.
**Action:** Replaced sequential execution with `asyncio.gather(*(_process_tool_call(tc) for tc in ai_response.tool_calls))`. I learned that wrapping the processing logic in an internal helper function lets us properly track per-call metrics (like timing) and safely rebuild the shared records list in a sequential pass after the concurrent work completes. This concurrent model also correctly flattens the sublists returned by `_search_symbols` and `_search_files` when performing a full organization-wide repository search.
61 changes: 36 additions & 25 deletions src/pipelines/code_retrieval.py
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,8 @@
from __future__ import annotations

import logging
from typing import Any, Callable, Dict, List, Optional
import asyncio
from typing import Any, Callable, Dict, List, Optional, Tuple

from langchain_core.language_models import BaseChatModel
from langchain_core.messages import AIMessage, HumanMessage, SystemMessage, ToolMessage
Expand All @@ -37,7 +38,6 @@
from src.scanner.code_store import CodeStore
from src.schemas.code import (
annotations_namespace,
directories_namespace,
files_namespace,
snippets_namespace,
symbols_namespace,
Expand Down Expand Up @@ -375,26 +375,29 @@ async def run(
turn_records: List[SourceRecord] = []
only_read_tools = True

for tc in ai_response.tool_calls:

async def _process_tool_call(tc: Dict[str, Any]) -> Tuple[str, str, List[SourceRecord], float]:
tool_name = tc["name"]
tool_args = tc["args"]
tool_id = tc["id"]

t1 = _time.perf_counter()
records = await self._execute_tool(
tool_name, tool_args, repo=repo, top_k=top_k,
user_id=user_id,
tool_name, tool_args, repo=repo, top_k=top_k, user_id=user_id
)
tool_ms = (_time.perf_counter() - t1) * 1000
logger.info(" Tool: %s(%s) → %d results (%.0fms)", tool_name, tool_args, len(records), tool_ms)
return tool_name, tool_id, records, tool_ms

results = await asyncio.gather(
*(_process_tool_call(tc) for tc in ai_response.tool_calls)
)

for tool_name, tool_id, records, _ in results:
turn_records.extend(records)
sources.extend(records)

# Track if this turn ONLY used read tools (no search)
normalized = tool_name.lower().replace("_", "")
if normalized not in ("readfilecode", "readsymbolcode", "getfilecontext"):
only_read_tools = False

tool_result_text = self._format_tool_results(records)
messages.append(
ToolMessage(content=tool_result_text, tool_call_id=tool_id)
Expand Down Expand Up @@ -471,19 +474,23 @@ async def run_stream(
if ai_response.tool_calls:
yield json.dumps({"type": "status", "content": f"Running {len(ai_response.tool_calls)} search tool(s)..."}) + "\n"

for tc in ai_response.tool_calls:

async def _process_tool_call(tc: Dict[str, Any]) -> Tuple[str, str, List[SourceRecord]]:
tool_name = tc["name"]
tool_args = tc["args"]
tool_id = tc["id"]

logger.info(" Tool: %s(%s)", tool_name, tool_args)

records = await self._execute_tool(
tool_name, tool_args, repo=repo, top_k=top_k,
user_id=user_id,
tool_name, tool_args, repo=repo, top_k=top_k, user_id=user_id
)
sources.extend(records)
return tool_name, tool_id, records

results = await asyncio.gather(
*(_process_tool_call(tc) for tc in ai_response.tool_calls)
)

for tool_name, tool_id, records in results:
sources.extend(records)
tool_result_text = self._format_tool_results(records)
tool_messages.append(
ToolMessage(content=tool_result_text, tool_call_id=tool_id)
Expand Down Expand Up @@ -589,15 +596,17 @@ async def _search_symbols(
) -> List[SourceRecord]:
if not repo:
logger.warning("search_symbols called without repo — searching all repos")
results = []
for r in self.repos:
results.extend(await self._search_namespace(
results = await asyncio.gather(*(
self._search_namespace(
namespace=symbols_namespace(self.org_id, r),
query=query,
domain="symbol",
top_k=top_k,
))
return results[:top_k]
) for r in self.repos
))
# Flatten the list of lists
flat_results = [item for sublist in results for item in sublist]
return flat_results[:top_k]

return await self._search_namespace(
namespace=symbols_namespace(self.org_id, repo),
Expand All @@ -612,15 +621,17 @@ async def _search_files(
self, query: str, repo: str, top_k: int = 10,
) -> List[SourceRecord]:
if not repo:
results = []
for r in self.repos:
results.extend(await self._search_namespace(
results = await asyncio.gather(*(
self._search_namespace(
namespace=files_namespace(self.org_id, r),
query=query,
domain="file",
top_k=top_k,
))
return results[:top_k]
) for r in self.repos
))
# Flatten the list of lists
flat_results = [item for sublist in results for item in sublist]
return flat_results[:top_k]

return await self._search_namespace(
namespace=files_namespace(self.org_id, repo),
Expand Down
24 changes: 12 additions & 12 deletions src/pipelines/retrieval.py
Original file line number Diff line number Diff line change
Expand Up @@ -21,8 +21,8 @@
from __future__ import annotations

import logging
import os
from typing import Any, Callable, Dict, List, Optional
import asyncio
from typing import Any, Callable, Dict, List, Optional, Tuple

from dotenv import load_dotenv
from langchain_core.language_models import BaseChatModel
Expand Down Expand Up @@ -176,25 +176,26 @@ async def run(
tool_messages: List[ToolMessage] = []

if ai_response.tool_calls:
called_tools = set()
for tc in ai_response.tool_calls:

async def _process_tool_call(tc: Dict[str, Any]) -> Tuple[str, str, List[SourceRecord]]:
tool_name = tc["name"]
tool_args = tc["args"]
tool_id = tc["id"]

logger.info(" Tool call: %s(%s)", tool_name, tool_args)
records = await self._execute_tool(tool_name, tool_args, user_id, top_k)
return tool_name, tool_id, records

records = await self._execute_tool(
tool_name, tool_args, user_id, top_k,
)
sources.extend(records)
called_tools = set()
results = await asyncio.gather(
*(_process_tool_call(tc) for tc in ai_response.tool_calls)
)

# Build ToolMessage for the LLM
for tool_name, tool_id, records in results:
sources.extend(records)
tool_result_text = self._format_tool_results(records)
tool_messages.append(
ToolMessage(content=tool_result_text, tool_call_id=tool_id)
)

called_tools.add(tool_name.lower().replace("_", ""))

# Auto-add summary context when only profile or temporal was requested
Expand Down Expand Up @@ -351,7 +352,6 @@ async def _search_temporal(
top_k: int = 3,
) -> List[SourceRecord]:
"""Semantic search over temporal events in Neo4j."""
import asyncio
from functools import partial

loop = asyncio.get_running_loop()
Expand Down