6 changes: 6 additions & 0 deletions sentry_sdk/consts.py
@@ -465,6 +465,12 @@ class SPANDATA:
Example: "ResearchAssistant"
"""

GEN_AI_CONVERSATION_ID = "gen_ai.conversation.id"
"""
The unique identifier for the conversation/thread with the AI model.
Example: "conv_abc123"
"""

GEN_AI_CHOICE = "gen_ai.choice"
"""
The model's response message.
16 changes: 16 additions & 0 deletions sentry_sdk/integrations/openai_agents/patches/runner.py
@@ -2,6 +2,7 @@
from functools import wraps

import sentry_sdk
from sentry_sdk.consts import SPANDATA
from sentry_sdk.integrations import DidNotEnable
from sentry_sdk.utils import capture_internal_exceptions, reraise

@@ -34,7 +35,13 @@ async def wrapper(*args: "Any", **kwargs: "Any") -> "Any":
with sentry_sdk.isolation_scope():
# Clone agent because agent invocation spans are attached per run.
agent = args[0].clone()

with agent_workflow_span(agent):
# Set conversation ID on workflow span early so it's captured even on errors
conversation_id = kwargs.get("conversation_id")
if conversation_id:
agent._sentry_conversation_id = conversation_id

Missing conversation_id on workflow span in async runner

Medium Severity

In _create_run_wrapper, the comment says "Set conversation ID on workflow span early" but the code only stores conversation_id on the agent object (agent._sentry_conversation_id). Unlike the streaming version _create_run_streamed_wrapper, there's no call to set_data(SPANDATA.GEN_AI_CONVERSATION_ID, conversation_id) on the workflow span itself. The workflow span (transaction) will be missing the gen_ai.conversation.id field when using Runner.run().

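A minimal sketch of the missing piece, assuming the context manager returned by agent_workflow_span yields the span object when entered (the workflow_span variable name is illustrative, not taken from the diff):

with agent_workflow_span(agent) as workflow_span:
    # Keep stashing the ID on the cloned agent for the child spans, and
    # additionally attach it to the workflow span itself, mirroring the
    # streaming wrapper, so Runner.run() transactions carry it too.
    conversation_id = kwargs.get("conversation_id")
    if conversation_id:
        agent._sentry_conversation_id = conversation_id
        workflow_span.set_data(SPANDATA.GEN_AI_CONVERSATION_ID, conversation_id)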

Comment on lines 37 to +43

Bug: The async wrapper _create_run_wrapper fails to set the conversation_id on the workflow span, unlike its synchronous counterpart, leading to missing telemetry data.
Severity: MEDIUM

Suggested Fix

Modify the _create_run_wrapper function to get a handle on the span created by agent_workflow_span. Then, call workflow_span.set_data(SPANDATA.GEN_AI_CONVERSATION_ID, conversation_id) within the if conversation_id: block, similar to the implementation in the synchronous wrapper.

Prompt for AI Agent
Review the code at the location below. A potential bug has been identified by an AI agent. Verify if this is a real issue. If it is, propose a fix; if not, explain why it's not valid.

Location: sentry_sdk/integrations/openai_agents/patches/runner.py#L37-L43

Potential issue: In the async wrapper function `_create_run_wrapper`, the `conversation_id` is set on `agent._sentry_conversation_id` but is not attached to the workflow span itself. This is inconsistent with the synchronous wrapper `_create_run_streamed_wrapper`, which explicitly calls `workflow_span.set_data()` to attach the ID. The comment in the async function, "Set conversation ID on workflow span early", indicates the intent was to set it on the span. As a result, when async agent workflows are executed, the root workflow span will be missing the `gen_ai.conversation.id` attribute, leading to inconsistent observability data between async and sync execution paths.



Comment on lines 37 to +44

Bug: The async wrapper for _create_run_wrapper fails to set the conversation_id on the root workflow_span, unlike the sync implementation, causing inconsistent tracing data.
Severity: MEDIUM

Suggested Fix

In the async wrapper within _create_run_wrapper, after setting agent._sentry_conversation_id, add a call to workflow_span.set_data(SPANDATA.GEN_AI_CONVERSATION_ID, conversation_id) to ensure the root span also has the conversation ID, matching the sync wrapper's behavior.

Prompt for AI Agent
Review the code at the location below. A potential bug has been identified by an AI agent. Verify if this is a real issue. If it is, propose a fix; if not, explain why it's not valid.

Location: sentry_sdk/integrations/openai_agents/patches/runner.py#L37-L44

Potential issue: In the async implementation of `_create_run_wrapper`, the `conversation_id` is assigned to `agent._sentry_conversation_id`, but it is not set on the root `workflow_span` itself. This is inconsistent with the sync/streaming implementation, which does set the ID on the root span. As a result, the root transaction span for asynchronous agent runs will be missing the `gen_ai.conversation.id` attribute. This leads to an incomplete tracing experience, as the comment "Set conversation ID on workflow span early" suggests the intention was to capture it on the span.


args = (agent, *args[1:])
try:
run_result = await original_func(*args, **kwargs)
@@ -91,10 +98,19 @@ def wrapper(*args: "Any", **kwargs: "Any") -> "Any":
# Clone agent because agent invocation spans are attached per run.
agent = args[0].clone()

# Capture conversation_id from kwargs if provided
conversation_id = kwargs.get("conversation_id")
if conversation_id:
agent._sentry_conversation_id = conversation_id

# Start workflow span immediately (before run_streamed returns)
workflow_span = agent_workflow_span(agent)
workflow_span.__enter__()

# Set conversation ID on workflow span early so it's captured even on errors
if conversation_id:
workflow_span.set_data(SPANDATA.GEN_AI_CONVERSATION_ID, conversation_id)

# Store span on agent for cleanup
agent._sentry_workflow_span = workflow_span

7 changes: 7 additions & 0 deletions sentry_sdk/integrations/openai_agents/spans/ai_client.py
@@ -46,6 +46,7 @@ def update_ai_client_span(
span: "sentry_sdk.tracing.Span",
response: "Any",
response_model: "Optional[str]" = None,
agent: "Optional[Agent]" = None,
) -> None:
"""Update AI client span with response data (works for streaming and non-streaming)."""
if hasattr(response, "usage") and response.usage:
@@ -59,3 +60,9 @@
span.set_data(SPANDATA.GEN_AI_RESPONSE_MODEL, response_model)
elif hasattr(response, "model") and response.model:
span.set_data(SPANDATA.GEN_AI_RESPONSE_MODEL, str(response.model))

# Set conversation ID from agent if available
if agent:
conv_id = getattr(agent, "_sentry_conversation_id", None)
if conv_id:
span.set_data(SPANDATA.GEN_AI_CONVERSATION_ID, conv_id)

Callers don't pass agent to update_ai_client_span

Medium Severity

The update_ai_client_span function was modified to accept a new agent parameter (defaulting to None) for setting conversation_id on AI client spans. However, the callers in models.py at lines 128 and 179 were not updated to pass the agent argument. Since agent defaults to None, the conversation_id condition if agent: is never true, and gen_ai.conversation.id is never set on AI client spans.

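A hedged sketch of the corresponding call-site change; the surrounding code in models.py is not part of this diff, so the local variable names used here (span, response, response_model, agent) are assumptions:

# Hypothetical call site in models.py, with the agent now forwarded:
update_ai_client_span(span, response, response_model, agent=agent)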

5 changes: 5 additions & 0 deletions sentry_sdk/integrations/openai_agents/spans/execute_tool.py
@@ -51,3 +51,8 @@ def update_execute_tool_span(

if should_send_default_pii():
span.set_data(SPANDATA.GEN_AI_TOOL_OUTPUT, result)

# Add conversation ID from agent
conv_id = getattr(agent, "_sentry_conversation_id", None)
if conv_id:
span.set_data(SPANDATA.GEN_AI_CONVERSATION_ID, conv_id)

Duplicated pattern for setting conversation ID on spans

Low Severity

The same 3-line pattern for extracting and setting _sentry_conversation_id from an agent onto a span is repeated 4 times across execute_tool.py, handoff.py, invoke_agent.py, and ai_client.py. This pattern should be extracted to a utility function in utils.py (e.g., _set_conversation_id(span, agent)), following the existing pattern of _set_agent_data, _set_usage_data, etc.

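A minimal sketch of the suggested helper, following the naming of the existing _set_agent_data / _set_usage_data utilities; placing it in utils.py and the exact signature are assumptions:

from sentry_sdk.consts import SPANDATA


def _set_conversation_id(span: "sentry_sdk.tracing.Span", agent: "Any") -> None:
    # Copy the conversation ID stashed on the agent (if any) onto the span.
    conv_id = getattr(agent, "_sentry_conversation_id", None)
    if conv_id:
        span.set_data(SPANDATA.GEN_AI_CONVERSATION_ID, conv_id)

Each of the four call sites above would then collapse to a single _set_conversation_id(span, agent) call.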

5 changes: 5 additions & 0 deletions sentry_sdk/integrations/openai_agents/spans/handoff.py
@@ -18,3 +18,8 @@ def handoff_span(
origin=SPAN_ORIGIN,
) as span:
span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "handoff")

# Add conversation ID from agent
conv_id = getattr(from_agent, "_sentry_conversation_id", None)
if conv_id:
span.set_data(SPANDATA.GEN_AI_CONVERSATION_ID, conv_id)
5 changes: 5 additions & 0 deletions sentry_sdk/integrations/openai_agents/spans/invoke_agent.py
@@ -95,6 +95,11 @@ def update_invoke_agent_span(
span, SPANDATA.GEN_AI_RESPONSE_TEXT, output, unpack=False
)

# Add conversation ID from agent
conv_id = getattr(agent, "_sentry_conversation_id", None)
if conv_id:
span.set_data(SPANDATA.GEN_AI_CONVERSATION_ID, conv_id)

span.__exit__(None, None, None)
delattr(context, "_sentry_agent_span")

179 changes: 179 additions & 0 deletions tests/integrations/openai_agents/test_openai_agents.py
@@ -2710,3 +2710,182 @@ def mock_get_model(agent, run_config):

# Verify streaming flag is set
assert chat_span._data.get(SPANDATA.GEN_AI_RESPONSE_STREAMING) is True


@pytest.mark.asyncio
async def test_conversation_id_on_all_spans(
sentry_init, capture_events, test_agent, mock_model_response
):
"""
Test that gen_ai.conversation.id is set on all AI-related spans when passed to Runner.run().
"""

with patch.dict(os.environ, {"OPENAI_API_KEY": "test-key"}):
with patch(
"agents.models.openai_responses.OpenAIResponsesModel.get_response"
) as mock_get_response:
mock_get_response.return_value = mock_model_response

sentry_init(
integrations=[OpenAIAgentsIntegration()],
traces_sample_rate=1.0,
)

events = capture_events()

result = await agents.Runner.run(
test_agent,
"Test input",
run_config=test_run_config,
conversation_id="conv_test_123",
)

assert result is not None

(transaction,) = events
spans = transaction["spans"]
invoke_agent_span, ai_client_span = spans

# Verify workflow span (transaction) has conversation_id
assert (
transaction["contexts"]["trace"]["data"]["gen_ai.conversation.id"]
== "conv_test_123"
)

# Verify invoke_agent span has conversation_id
assert invoke_agent_span["data"]["gen_ai.conversation.id"] == "conv_test_123"

# Verify ai_client span has conversation_id
assert ai_client_span["data"]["gen_ai.conversation.id"] == "conv_test_123"


@pytest.mark.asyncio
async def test_conversation_id_on_tool_span(sentry_init, capture_events, test_agent):
"""
Test that gen_ai.conversation.id is set on tool execution spans when passed to Runner.run().
"""

@agents.function_tool
def simple_tool(message: str) -> str:
"""A simple tool"""
return f"Result: {message}"

agent_with_tool = test_agent.clone(tools=[simple_tool])

with patch.dict(os.environ, {"OPENAI_API_KEY": "test-key"}):
with patch(
"agents.models.openai_responses.OpenAIResponsesModel.get_response"
) as mock_get_response:
tool_call = ResponseFunctionToolCall(
id="call_123",
call_id="call_123",
name="simple_tool",
type="function_call",
arguments='{"message": "hello"}',
)

tool_response = ModelResponse(
output=[tool_call],
usage=Usage(
requests=1, input_tokens=10, output_tokens=5, total_tokens=15
),
response_id="resp_tool_456",
)

final_response = ModelResponse(
output=[
ResponseOutputMessage(
id="msg_final",
type="message",
status="completed",
content=[
ResponseOutputText(
text="Done",
type="output_text",
annotations=[],
)
],
role="assistant",
)
],
usage=Usage(
requests=1, input_tokens=15, output_tokens=10, total_tokens=25
),
response_id="resp_final_789",
)

mock_get_response.side_effect = [tool_response, final_response]

sentry_init(
integrations=[OpenAIAgentsIntegration()],
traces_sample_rate=1.0,
)

events = capture_events()

await agents.Runner.run(
agent_with_tool,
"Use the tool",
run_config=test_run_config,
conversation_id="conv_tool_test_456",
)

(transaction,) = events
spans = transaction["spans"]

# Find the tool span
tool_span = None
for span in spans:
if span.get("description", "").startswith("execute_tool"):
tool_span = span
break

assert tool_span is not None
# Tool span should have the conversation_id passed to Runner.run()
assert tool_span["data"]["gen_ai.conversation.id"] == "conv_tool_test_456"

# Workflow span (transaction) should have the same conversation_id
assert (
transaction["contexts"]["trace"]["data"]["gen_ai.conversation.id"]
== "conv_tool_test_456"
)


@pytest.mark.asyncio
async def test_no_conversation_id_when_not_provided(
sentry_init, capture_events, test_agent, mock_model_response
):
"""
Test that gen_ai.conversation.id is not set when not passed to Runner.run().
"""

with patch.dict(os.environ, {"OPENAI_API_KEY": "test-key"}):
with patch(
"agents.models.openai_responses.OpenAIResponsesModel.get_response"
) as mock_get_response:
mock_get_response.return_value = mock_model_response

sentry_init(
integrations=[OpenAIAgentsIntegration()],
traces_sample_rate=1.0,
)

events = capture_events()

# Don't pass conversation_id
result = await agents.Runner.run(
test_agent, "Test input", run_config=test_run_config
)

assert result is not None

(transaction,) = events
spans = transaction["spans"]
invoke_agent_span, ai_client_span = spans

# Verify conversation_id is NOT set on any spans
assert "gen_ai.conversation.id" not in transaction["contexts"]["trace"].get(
"data", {}
)
assert "gen_ai.conversation.id" not in invoke_agent_span.get("data", {})
assert "gen_ai.conversation.id" not in ai_client_span.get("data", {})