Skip to content
Merged
Show file tree
Hide file tree
Changes from 1 commit
Commits
Show all changes
21 commits
Select commit Hold shift + click to select a range
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Prev Previous commit
Next Next commit
Moved all Anthropic-specific logic to the Anthropic class
  • Loading branch information
vizsatiz committed Nov 15, 2025
commit a898e58c1d606ee6a03a7cd179c93574ee7713dd
35 changes: 35 additions & 0 deletions flo_ai/flo_ai/llm/anthropic_llm.py
Original file line number Diff line number Diff line change
Expand Up @@ -240,3 +240,38 @@ def format_tools_for_llm(self, tools: List['Tool']) -> List[Dict[str, Any]]:
def format_image_in_message(self, image: ImageMessageContent) -> str:
    """Format an image for inclusion in a message.

    Image inputs are not yet supported for the Anthropic LLM; this
    always raises NotImplementedError.
    """
    raise NotImplementedError('Not implemented image for LLM Anthropic')

def get_assistant_message_for_tool_call(
    self, response: Dict[str, Any]
) -> Optional[Any]:
    """Return the assistant message content to replay for a tool call.

    Claude responses expose a ``raw_content`` field that holds the
    tool_use blocks; when present it is handed back unchanged so the
    conversation history keeps the tool-call structure. Any other
    response shape yields ``None``, telling the caller to fall back
    to plain text content.
    """
    if not isinstance(response, dict):
        return None
    return response.get('raw_content')

def get_tool_use_id(self, function_call: Dict[str, Any]) -> Optional[str]:
    """Fetch the ``id`` Claude attached to a tool_use block, if any.

    The id must be echoed back in the matching tool_result message so
    Claude can pair the result with its originating call; function
    calls carrying no id yield ``None``.
    """
    try:
        return function_call['id']
    except KeyError:
        return None

def format_function_result_message(
    self, function_name: str, content: str, tool_use_id: Optional[str] = None
) -> Dict[str, Any]:
    """Build the function-result message to append to the history.

    The message always carries role/name/content; when Claude supplied
    a tool_use id, it is included so the result can be matched to the
    originating tool_use block.
    """
    result: Dict[str, Any] = dict(
        role='function',
        name=function_name,
        content=content,
    )
    if tool_use_id:
        result['tool_use_id'] = tool_use_id
    return result
39 changes: 37 additions & 2 deletions flo_ai/flo_ai/llm/base_llm.py
Original file line number Diff line number Diff line change
Expand Up @@ -36,12 +36,13 @@ async def stream(
async def get_function_call(
self, response: Dict[str, Any]
) -> Optional[Dict[str, Any]]:
"""Extract function call information from LLM response"""
if hasattr(response, 'function_call') and response.function_call:
result = {
'name': response.function_call.name,
'arguments': response.function_call.arguments,
}
# Include ID if available (for Claude's tool_use tracking)
# Include ID if available (LLM-specific)
if hasattr(response.function_call, 'id'):
result['id'] = response.function_call.id
return result
Expand All @@ -50,12 +51,46 @@ async def get_function_call(
'name': response['function_call']['name'],
'arguments': response['function_call']['arguments'],
}
# Include ID if available (for Claude's tool_use tracking)
# Include ID if available (LLM-specific)
if 'id' in response['function_call']:
result['id'] = response['function_call']['id']
return result
return None

def get_assistant_message_for_tool_call(
    self, response: Dict[str, Any]
) -> Optional[Any]:
    """Hook for provider-specific assistant content on tool calls.

    The base implementation has no special formatting and always
    returns ``None``, which signals the caller to use the default
    text-content extraction. Providers that need to preserve extra
    structure (e.g. Anthropic's tool_use blocks) override this.
    """
    # Default behaviour: no provider-specific payload to replay.
    return None

def get_tool_use_id(self, function_call: Dict[str, Any]) -> Optional[str]:
    """Return the provider-assigned id of a tool call, if one exists.

    Providers that track tool invocations attach an ``id`` key to the
    function-call dict; when it is absent, ``None`` is returned.
    Subclasses may override this for provider-specific id handling.
    """
    return function_call.get('id', None)

def format_function_result_message(
    self, function_name: str, content: str, tool_use_id: Optional[str] = None
) -> Dict[str, Any]:
    """Assemble the message that reports a tool/function result.

    Always includes role, name, and content; appends ``tool_use_id``
    only when a truthy id is provided. Subclasses override this when
    the provider expects a different result-message shape.
    """
    fields = [
        ('role', 'function'),
        ('name', function_name),
        ('content', content),
    ]
    if tool_use_id:
        fields.append(('tool_use_id', tool_use_id))
    return dict(fields)

@abstractmethod
def get_message_content(self, response: Dict[str, Any]) -> str:
"""Extract message content from response"""
Expand Down
33 changes: 15 additions & 18 deletions flo_ai/flo_ai/models/agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -280,19 +280,21 @@ async def _run_with_tools(
continue
break

# If there's a function call, add the assistant's response with raw content
# This is required for Claude's tool use flow
raw_content = response.get('raw_content')
if raw_content:
# For Claude, use the raw content which includes tool_use blocks
# If there's a function call, add the assistant's response
# LLM-specific implementations handle special formatting (e.g., Claude's raw_content)
assistant_message_content = (
self.llm.get_assistant_message_for_tool_call(response)
)
if assistant_message_content:
# LLM returned special formatting (e.g., Claude's raw_content)
messages.append(
{
'role': self.act_as,
'content': raw_content,
'content': assistant_message_content,
}
)
else:
# For other LLMs, use text content
# Use default text content extraction
assistant_text = self.llm.get_message_content(response)
if assistant_text:
messages.append(
Expand All @@ -305,9 +307,8 @@ async def _run_with_tools(
# Execute the tool
try:
function_name = function_call['name']
tool_use_id = function_call.get(
'id', 'unknown'
) # Get the tool_use_id from Claude
# Get tool_use_id if available (LLM-specific, e.g., Claude)
tool_use_id = self.llm.get_tool_use_id(function_call)
if isinstance(function_call['arguments'], str):
function_args = json.loads(function_call['arguments'])
else:
Expand Down Expand Up @@ -356,15 +357,11 @@ async def _run_with_tools(
)

# Add the function response to messages for context
# Include tool_use_id for Claude's tool result format
messages.append(
{
'role': MessageType.FUNCTION,
'name': function_name,
'content': str(function_response),
'tool_use_id': tool_use_id,
}
# LLM-specific implementations format the message appropriately
function_result_msg = self.llm.format_function_result_message(
function_name, str(function_response), tool_use_id
)
messages.append(function_result_msg)

except (json.JSONDecodeError, KeyError, ToolExecutionError) as e:
# Record tool call failure
Expand Down