Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,10 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0

### Added

- Add async Anthropic message stream wrappers and manager wrappers, with wrapper
tests ([#4346](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/4346))
- `AsyncMessagesStreamWrapper` for async message stream telemetry
- `AsyncMessagesStreamManagerWrapper` for async `Messages.stream()` telemetry
- Add sync streaming support for `Messages.create(stream=True)` and `Messages.stream()`
([#4155](https://github.com/open-telemetry/opentelemetry-python-contrib/pull/4155))
- `StreamWrapper` for handling `Messages.create(stream=True)` telemetry
Expand All @@ -22,4 +26,3 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
- Captures response attributes: `gen_ai.response.id`, `gen_ai.response.model`, `gen_ai.response.finish_reasons`, `gen_ai.usage.input_tokens`, `gen_ai.usage.output_tokens`
- Error handling with `error.type` attribute
- Minimum supported anthropic version is 0.16.0 (SDK uses modern `anthropic.resources.messages` module structure introduced in this version)

Original file line number Diff line number Diff line change
Expand Up @@ -29,6 +29,7 @@
)
from opentelemetry.util.genai.types import (
InputMessage,
LLMInvocation,
MessagePart,
OutputMessage,
)
Expand Down Expand Up @@ -153,6 +154,41 @@ def get_output_messages_from_message(
]


def set_invocation_response_attributes(
    invocation: LLMInvocation,
    message: Message | None,
    capture_content: bool,
) -> None:
    """Copy response metadata from an Anthropic *message* onto *invocation*.

    Does nothing when *message* is ``None``. Populates the response model
    name, response id, finish reasons, and token usage (including cache
    creation/read token counts when present). Output message content is
    copied only when *capture_content* is ``True``.
    """
    if message is None:
        return

    model_name = message.model
    if model_name:
        invocation.response_model_name = model_name

    response_id = message.id
    if response_id:
        invocation.response_id = response_id

    reason = normalize_finish_reason(message.stop_reason)
    if reason:
        invocation.finish_reasons = [reason]

    usage = message.usage
    if usage:
        tokens = extract_usage_tokens(usage)
        invocation.input_tokens = tokens.input_tokens
        invocation.output_tokens = tokens.output_tokens
        cache_creation = tokens.cache_creation_input_tokens
        if cache_creation is not None:
            invocation.attributes[
                GEN_AI_USAGE_CACHE_CREATION_INPUT_TOKENS
            ] = cache_creation
        cache_read = tokens.cache_read_input_tokens
        if cache_read is not None:
            invocation.attributes[
                GEN_AI_USAGE_CACHE_READ_INPUT_TOKENS
            ] = cache_read

    if capture_content:
        invocation.output_messages = get_output_messages_from_message(message)


def extract_params( # pylint: disable=too-many-locals
*,
max_tokens: int | None = None,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,8 @@

"""Patching functions for Anthropic instrumentation."""

from __future__ import annotations

import logging
from typing import TYPE_CHECKING, Any, Callable, Union, cast

Expand Down Expand Up @@ -56,7 +58,7 @@ def messages_create(
Union[
"AnthropicMessage",
"AnthropicStream[RawMessageStreamEvent]",
MessagesStreamWrapper,
MessagesStreamWrapper[None],
],
]:
"""Wrap the `create` method of the `Messages` class to trace it."""
Expand All @@ -76,7 +78,7 @@ def traced_method(
) -> Union[
"AnthropicMessage",
"AnthropicStream[RawMessageStreamEvent]",
MessagesStreamWrapper,
MessagesStreamWrapper[None],
]:
params = extract_params(*args, **kwargs)
attributes = get_llm_request_attributes(params, instance)
Expand Down Expand Up @@ -121,13 +123,6 @@ def traced_method(
raise

return cast(
Callable[
...,
Union[
"AnthropicMessage",
"AnthropicStream[RawMessageStreamEvent]",
MessagesStreamWrapper,
],
],
'Callable[..., Union["AnthropicMessage", "AnthropicStream[RawMessageStreamEvent]", MessagesStreamWrapper[None]]]',
traced_method,
)
Loading
Loading