Lightweight framework for building multi-agent workflows with LLMs, supporting handoffs, guardrails, tools, and 100+ LLM providers
The SDK includes built-in distributed tracing that automatically tracks all agent operations, providing visibility into workflow execution, performance, and debugging capabilities. Traces consist of spans for each operation (LLM calls, tool executions, handoffs, etc.) and can be exported to external observability platforms.
Top-level trace object representing an entire workflow.
class Trace:
"""
Trace object for workflow.
Attributes:
- trace_id: str - Unique trace identifier
- name: str - Trace name (workflow name)
- group_id: str | None - Grouping identifier
- metadata: dict[str, Any] | None - Additional metadata
"""
def start(mark_as_current: bool = True) -> None:
"""
Start trace.
Parameters:
- mark_as_current: Set as current active trace
"""
def finish(reset_current: bool = True) -> None:
"""
Finish trace.
Parameters:
- reset_current: Clear current trace reference
"""
def trace(
workflow_name: str,
*,
trace_id: str | None = None,
group_id: str | None = None,
metadata: dict[str, Any] | None = None,
disabled: bool = False
) -> Trace:
"""
Create new trace.
Parameters:
- workflow_name: Name for the trace
- trace_id: Custom trace ID (auto-generated if None)
- group_id: Group identifier for related traces
- metadata: Additional trace metadata
- disabled: Whether tracing is disabled
Returns:
- Trace: Configured trace object
"""Usage example:
from agents.tracing import trace
# Create and start trace
my_trace = trace("customer_support_workflow")
my_trace.start()
try:
# Run agents
result = await Runner.run(agent, "Help request")
finally:
my_trace.finish()
# Or use as context manager (Python 3.10+)
with trace("my_workflow") as t:
    result = await Runner.run(agent, "Input")

Span representing a single operation within a trace.
class Span[TSpanData]:
"""
Span within trace.
Type Parameters:
- TSpanData: Type of span-specific data
Attributes:
- span_id: str - Unique span identifier
- name: str - Span name
- span_data: TSpanData - Span-specific data
- parent_span: Span | None - Parent span for nesting
"""
def start(mark_as_current: bool = True) -> None:
"""
Start span.
Parameters:
- mark_as_current: Set as current active span
"""
def finish(reset_current: bool = True) -> None:
"""
Finish span.
Parameters:
- reset_current: Clear current span reference
"""Functions for creating typed spans for different operations.
def agent_span(
name: str,
*,
handoffs: list = None,
output_type: type = None
) -> Span[AgentSpanData]:
"""
Create agent span.
Parameters:
- name: Span name
- handoffs: Available handoffs
- output_type: Agent output type
Returns:
- Span[AgentSpanData]: Agent span
"""
def function_span(
name: str,
*,
input: Any = None,
output: Any = None
) -> Span[FunctionSpanData]:
"""
Create function span.
Parameters:
- name: Function name
- input: Function input
- output: Function output
Returns:
- Span[FunctionSpanData]: Function span
"""
def generation_span(
name: str,
*,
model: str = None,
input: list = None,
output: list = None,
usage: Any = None
) -> Span[GenerationSpanData]:
"""
Create generation span for LLM calls.
Parameters:
- name: Span name
- model: Model identifier
- input: Input messages
- output: Output messages
- usage: Token usage
Returns:
- Span[GenerationSpanData]: Generation span
"""
def guardrail_span(
name: str,
*,
guardrail_type: str = None,
input: Any = None,
output: Any = None
) -> Span[GuardrailSpanData]:
"""
Create guardrail span.
Parameters:
- name: Guardrail name
- guardrail_type: Type of guardrail
- input: Guardrail input
- output: Guardrail output
Returns:
- Span[GuardrailSpanData]: Guardrail span
"""
def handoff_span(
name: str,
*,
from_agent: str = None,
to_agent: str = None
) -> Span[HandoffSpanData]:
"""
Create handoff span.
Parameters:
- name: Span name
- from_agent: Source agent name
- to_agent: Target agent name
Returns:
- Span[HandoffSpanData]: Handoff span
"""
def mcp_tools_span(
name: str,
*,
server_label: str = None,
tools: list = None
) -> Span[MCPListToolsSpanData]:
"""
Create MCP tools span.
Parameters:
- name: Span name
- server_label: MCP server label
- tools: List of tools
Returns:
- Span[MCPListToolsSpanData]: MCP tools span
"""
def custom_span(
name: str,
*,
custom_data: dict = None
) -> Span[CustomSpanData]:
"""
Create custom span.
Parameters:
- name: Span name
- custom_data: Custom span data
Returns:
- Span[CustomSpanData]: Custom span
"""
def response_span(
name: str,
*,
response_data: dict = None
) -> Span[ResponseSpanData]:
"""
Create response span.
Parameters:
- name: Span name
- response_data: Response data
Returns:
- Span[ResponseSpanData]: Response span
"""
def speech_span(
name: str,
*,
speech_data: dict = None
) -> Span[SpeechSpanData]:
"""Create speech span."""
def speech_group_span(
name: str,
*,
group_data: dict = None
) -> Span[SpeechGroupSpanData]:
"""Create speech group span."""
def transcription_span(
name: str,
*,
transcription_data: dict = None
) -> Span[TranscriptionSpanData]:
"""Create transcription span."""Data structures for different span types.
class SpanData(abc.ABC):
"""
Abstract base class for all span data types.
All span data types inherit from this class and must implement
the export() method and type property.
"""
@abc.abstractmethod
def export() -> dict[str, Any]:
"""
Export span data as dictionary.
Returns:
- dict[str, Any]: Exported span data
"""
@property
@abc.abstractmethod
def type() -> str:
"""
Get span type identifier.
Returns:
- str: Span type string
"""
class AgentSpanData(SpanData):
"""
Span data for agent.
Attributes:
- agent_name: str
- tools: list
- handoffs: list
- output_type: type | None
"""
class CustomSpanData:
"""
Custom span data.
Attributes:
- custom_data: dict
"""
class FunctionSpanData:
"""
Span data for function tool.
Attributes:
- function_name: str
- input: Any
- output: Any
"""
class GenerationSpanData:
"""
Span data for LLM generation.
Attributes:
- model: str
- input: list
- output: list
- usage: Usage
"""
class GuardrailSpanData:
"""
Span data for guardrail.
Attributes:
- guardrail_name: str
- guardrail_type: str
- input: Any
- output: Any
"""
class HandoffSpanData:
"""
Span data for handoff.
Attributes:
- from_agent: str
- to_agent: str
"""
class MCPListToolsSpanData:
"""
Span data for MCP list tools.
Attributes:
- server_label: str
- tools: list
"""
class ResponseSpanData:
"""Span data for response."""
class SpeechSpanData:
"""Span data for speech."""
class SpeechGroupSpanData:
"""Span data for speech group."""
class TranscriptionSpanData:
"""Span data for transcription."""
class SpanError:
"""
Error in span.
Attributes:
- message: str - Error message
- data: dict | None - Error data
"""Functions to access current active trace and span.
def get_current_trace() -> Trace | None:
"""
Get current active trace.
Returns:
- Trace | None: Current trace or None
"""
def get_current_span() -> Span | None:
"""
Get current active span.
Returns:
- Span | None: Current span or None
"""Usage example:
from agents.tracing import get_current_trace, get_current_span, custom_span
# Within agent execution
current_trace = get_current_trace()
if current_trace:
print(f"Running in trace: {current_trace.trace_id}")
# Add custom span
span = custom_span("custom_operation", custom_data={"key": "value"})
span.start()
try:
# Perform operation
result = perform_custom_operation()
finally:
    span.finish()

Interface for custom trace processing and export.
class TracingProcessor:
"""Processor for traces/spans."""
async def on_trace_start(trace: Trace) -> None:
"""
Called when trace starts.
Parameters:
- trace: Trace that started
"""
async def on_trace_end(trace: Trace) -> None:
"""
Called when trace ends.
Parameters:
- trace: Trace that ended
"""
async def on_span_start(span: Span) -> None:
"""
Called when span starts.
Parameters:
- span: Span that started
"""
async def on_span_end(span: Span) -> None:
"""
Called when span ends.
Parameters:
- span: Span that ended
"""Usage example:
from agents.tracing import TracingProcessor
import logging
class LoggingProcessor(TracingProcessor):
"""Log traces and spans."""
async def on_trace_start(self, trace):
logging.info(f"Trace started: {trace.name} ({trace.trace_id})")
async def on_trace_end(self, trace):
logging.info(f"Trace ended: {trace.name}")
async def on_span_start(self, span):
logging.debug(f"Span started: {span.name}")
async def on_span_end(self, span):
logging.debug(f"Span ended: {span.name}")
# Register processor
from agents.tracing import add_trace_processor
add_trace_processor(LoggingProcessor())

Provider for creating and managing traces.
class TraceProvider:
"""Provider for trace creation."""
def create_trace(...) -> Trace:
"""Create new trace."""
def create_span(...) -> Span:
"""Create new span."""
def register_processor(processor: TracingProcessor) -> None:
"""Register trace processor."""
def set_processors(processors: list[TracingProcessor]) -> None:
"""Set trace processors."""
def set_disabled(disabled: bool) -> None:
"""Enable/disable tracing."""
def shutdown() -> None:
"""Shutdown provider."""
class DefaultTraceProvider(TraceProvider):
"""Default trace provider implementation."""Functions for configuring tracing globally.
def add_trace_processor(processor: TracingProcessor) -> None:
"""
Add trace processor.
Parameters:
- processor: Processor to add
"""
def set_trace_processors(processors: list[TracingProcessor]) -> None:
"""
Set trace processors, replacing existing.
Parameters:
- processors: List of processors
"""
def set_tracing_disabled(disabled: bool) -> None:
"""
Globally enable/disable tracing.
Parameters:
- disabled: Whether to disable tracing
"""
def set_tracing_export_api_key(api_key: str) -> None:
"""
Set API key for trace export.
Parameters:
- api_key: API key for export service
"""
def set_trace_provider(provider: TraceProvider) -> None:
"""
Set trace provider.
Parameters:
- provider: Trace provider to use
"""
def get_trace_provider() -> TraceProvider:
"""
Get trace provider.
Returns:
- TraceProvider: Current trace provider
"""
def gen_trace_id() -> str:
"""
Generate trace ID.
Returns:
- str: New trace ID
"""
def gen_span_id() -> str:
"""
Generate span ID.
Returns:
- str: New span ID
"""Tracing is enabled by default for all agent runs:
from agents import Agent, Runner
agent = Agent(name="Assistant")
# Automatically traced
result = Runner.run_sync(agent, "Hello")

Disable tracing globally or per-run:
from agents.tracing import set_tracing_disabled
from agents import RunConfig
# Globally disable
set_tracing_disabled(True)
# Per-run disable
config = RunConfig(tracing_disabled=True)
result = Runner.run_sync(agent, "Hello", run_config=config)

Add custom metadata to traces:
from agents import RunConfig
config = RunConfig(
workflow_name="customer_support",
trace_metadata={
"user_id": "user_123",
"session_id": "session_456",
"environment": "production"
},
group_id="support_tickets" # Group related traces
)
result = Runner.run_sync(agent, "Help", run_config=config)

Control whether sensitive data is included in traces:
config = RunConfig(
trace_include_sensitive_data=False # Exclude PII from traces
)

The SDK supports integration with external observability platforms:
Example integration:
# Example: Logfire integration
import logfire
from agents.tracing import add_trace_processor
logfire.configure()
add_trace_processor(logfire.TracingProcessor())
# All agent runs now export to Logfire
result = Runner.run_sync(agent, "Hello")

Best practices:
- Set trace_include_sensitive_data=False to keep PII out of exported traces.
- Use group_id to group related workflows under one grouping identifier.

Install with Tessl CLI
npx tessl i tessl/pypi-openai-agents