Lightweight framework for building multi-agent workflows with LLMs, supporting handoffs, guardrails, tools, and 100+ LLM providers
Run results contain the complete output of agent execution including final output, items, usage statistics, and guardrail results. The SDK provides a comprehensive exception hierarchy for handling errors during agent execution.
Result of completed agent run with all outputs and metadata.
class RunResult:
"""
Result of completed agent run.
Attributes:
- input: str | list[TResponseInputItem] - Original input
- new_items: list[RunItem] - Generated items during run
- raw_responses: list[ModelResponse] - LLM responses
- final_output: Any - Last agent output
- input_guardrail_results: list[InputGuardrailResult] - Input guardrail results
- output_guardrail_results: list[OutputGuardrailResult] - Output guardrail results
- tool_input_guardrail_results: list[ToolInputGuardrailResult] - Tool input results
- tool_output_guardrail_results: list[ToolOutputGuardrailResult] - Tool output results
- context_wrapper: RunContextWrapper - Run context
Properties:
- last_agent: Agent - Last agent that ran
- last_response_id: str | None - Last response ID for continuation
"""
def final_output_as(cls: type[T], raise_if_incorrect_type: bool = True) -> T:
"""
Cast final output to specific type.
Parameters:
- cls: Type to cast to
- raise_if_incorrect_type: Raise error if type mismatch (default: True)
Returns:
- T: Final output cast to type
Raises:
- TypeError: If type mismatch and raise_if_incorrect_type=True
"""
def to_input_list() -> list[TResponseInputItem]:
"""
Create input list for next run.
Useful for manual conversation history management.
Returns:
- list[TResponseInputItem]: Items formatted for next input
"""
def release_agents(release_new_items: bool = True) -> None:
"""
Release agent references for memory management.
Parameters:
- release_new_items: Also release agents from items
"""

Usage example:
from agents import Agent, Runner
from pydantic import BaseModel
class Answer(BaseModel):
answer: str
confidence: float
agent = Agent(
name="Assistant",
output_type=Answer
)
result = Runner.run_sync(agent, "What is 2+2?")
# Access final output
print(result.final_output)
# Type-safe access
answer = result.final_output_as(Answer)
print(f"Answer: {answer.answer}, Confidence: {answer.confidence}")
# Inspect items
for item in result.new_items:
print(item)
# Check usage
for response in result.raw_responses:
print(f"Tokens: {response.usage.total_tokens}")
# Continue conversation
next_input = result.to_input_list() + ["What about 3+3?"]
next_result = Runner.run_sync(agent, next_input)

Result of streaming agent run with progress tracking.
class RunResultStreaming:
"""
Result of streaming agent run.
Attributes:
- current_agent: Agent - Currently running agent
- current_turn: int - Current turn number
- max_turns: int - Maximum turns
- is_complete: bool - Whether run is complete
- trace: Trace | None - Associated trace
"""
async def stream_events() -> AsyncIterator[StreamEvent]:
"""
Stream events from agent execution.
Yields:
- StreamEvent: Events as they occur
Returns:
- AsyncIterator: Event stream
"""
def cancel(mode: Literal["immediate", "after_turn"]) -> None:
"""
Cancel streaming run.
Parameters:
- mode: Cancellation mode
- "immediate": Cancel immediately
- "after_turn": Cancel after current turn completes
"""

Usage example:
import asyncio
async def streaming_example():
result = Runner.run_streamed(agent, "Tell a story")
print(f"Max turns: {result.max_turns}")
async for event in result.stream_events():
if event.type == "raw_response_event":
print(event.data, end='')
if result.current_turn > 5:
result.cancel("immediate")
break
print(f"Completed: {result.is_complete}")
print(f"Final output: {result.final_output}")
asyncio.run(streaming_example())

Token usage information across all LLM calls.
class Usage:
"""
Token usage tracking.
Attributes:
- requests: int - Total requests made
- input_tokens: int - Total input tokens
- input_tokens_details: InputTokensDetails - Detailed input token breakdown
- output_tokens: int - Total output tokens
- output_tokens_details: OutputTokensDetails - Detailed output token breakdown
- total_tokens: int - Total tokens (input + output)
- request_usage_entries: list[RequestUsage] - Per-request breakdown
"""
def add(other: Usage) -> None:
"""
Aggregate another Usage object.
Parameters:
- other: Usage object to add
"""
class RequestUsage:
"""
Usage for single request.
Attributes:
- input_tokens: int - Input tokens for this request
- output_tokens: int - Output tokens for this request
- total_tokens: int - Total tokens for this request
- input_tokens_details: InputTokensDetails - Input token details
- output_tokens_details: OutputTokensDetails - Output token details
"""

Usage example:
result = Runner.run_sync(agent, "Long task with multiple tool calls")
usage = result.raw_responses[0].usage
print(f"Total tokens: {usage.total_tokens}")
print(f"Input tokens: {usage.input_tokens}")
print(f"Output tokens: {usage.output_tokens}")
print(f"Requests: {usage.requests}")
# Per-request breakdown
for entry in usage.request_usage_entries:
print(f"Request: {entry.total_tokens} tokens")
# Aggregate usage across multiple runs
total_usage = Usage()
for response in result.raw_responses:
total_usage.add(response.usage)

Base class for all SDK exceptions.
class AgentsException(Exception):
"""
Base exception for all SDK exceptions.
Attributes:
- run_data: RunErrorDetails | None - Error details from run
"""

Raised when agent reaches maximum turn limit.
class MaxTurnsExceeded(AgentsException):
"""
Raised when max turns exceeded.
Attributes:
- message: str - Error message
"""

Usage example:
from agents import MaxTurnsExceeded
try:
result = Runner.run_sync(
agent,
"Complex task",
max_turns=5
)
except MaxTurnsExceeded as e:
print(f"Agent exceeded max turns: {e.message}")
# Access partial results
if e.run_data:
print(f"Items generated: {len(e.run_data.new_items)}")

Raised for unexpected model behavior.
class ModelBehaviorError(AgentsException):
"""
Raised for unexpected model behavior.
Attributes:
- message: str - Error message
"""

Raised for user-caused errors (invalid configuration, etc.).
class UserError(AgentsException):
"""
Raised for user errors.
Attributes:
- message: str - Error message
"""

Exceptions raised when guardrails trigger.
class InputGuardrailTripwireTriggered(AgentsException):
"""
Raised when input guardrail trips.
Attributes:
- guardrail_result: InputGuardrailResult - Guardrail result with details
"""
class OutputGuardrailTripwireTriggered(AgentsException):
"""
Raised when output guardrail trips.
Attributes:
- guardrail_result: OutputGuardrailResult - Guardrail result with details
"""
class ToolInputGuardrailTripwireTriggered(AgentsException):
"""
Raised when tool input guardrail trips.
Attributes:
- guardrail: ToolInputGuardrail - The guardrail that tripped
- output: ToolGuardrailFunctionOutput - Check output with details
"""
class ToolOutputGuardrailTripwireTriggered(AgentsException):
"""
Raised when tool output guardrail trips.
Attributes:
- guardrail: ToolOutputGuardrail - The guardrail that tripped
- output: ToolGuardrailFunctionOutput - Check output with details
"""

Usage example:
from agents import InputGuardrailTripwireTriggered, OutputGuardrailTripwireTriggered
try:
result = Runner.run_sync(agent, user_input)
except InputGuardrailTripwireTriggered as e:
print(f"Input blocked by: {e.guardrail_result.guardrail.get_name()}")
print(f"Reason: {e.guardrail_result.output.output_info}")
except OutputGuardrailTripwireTriggered as e:
print(f"Output blocked by: {e.guardrail_result.guardrail.get_name()}")
print(f"Reason: {e.guardrail_result.output.output_info}")

Error context from failed runs.
class RunErrorDetails:
"""
Error data from agent run.
Attributes:
- input: str | list[TResponseInputItem] - Original input
- new_items: list[RunItem] - Items generated before error
- raw_responses: list[ModelResponse] - Responses before error
- last_agent: Agent - Last agent that ran
- context_wrapper: RunContextWrapper - Run context
- input_guardrail_results: list[InputGuardrailResult] - Input results
- output_guardrail_results: list[OutputGuardrailResult] - Output results
"""

Usage example:

from agents import AgentsException, MaxTurnsExceeded, ModelBehaviorError
try:
result = Runner.run_sync(agent, user_input)
print(result.final_output)
except MaxTurnsExceeded as e:
print("Agent took too many turns, try simplifying the task")
except ModelBehaviorError as e:
print(f"Model error: {e.message}")
except AgentsException as e:
print(f"Agent error: {e}")
except Exception as e:
print(f"Unexpected error: {e}")

from agents import (
InputGuardrailTripwireTriggered,
OutputGuardrailTripwireTriggered,
ToolInputGuardrailTripwireTriggered,
ToolOutputGuardrailTripwireTriggered
)
try:
result = Runner.run_sync(agent, user_input)
except InputGuardrailTripwireTriggered as e:
# Handle input rejection
return {"error": "Input not allowed", "reason": e.guardrail_result.output.output_info}
except OutputGuardrailTripwireTriggered as e:
# Handle output rejection
return {"error": "Output filtered", "reason": e.guardrail_result.output.output_info}
except (ToolInputGuardrailTripwireTriggered, ToolOutputGuardrailTripwireTriggered) as e:
# Handle tool guardrail
return {"error": "Tool call blocked", "reason": e.output.output_info}

import asyncio
from agents import AgentsException
async def run_with_retry(agent, input, max_retries=3):
"""Run agent with retry logic."""
for attempt in range(max_retries):
try:
return await Runner.run(agent, input)
except MaxTurnsExceeded:
if attempt == max_retries - 1:
raise
# Increase max_turns on retry
max_turns = (attempt + 2) * 10
print(f"Retry {attempt + 1} with max_turns={max_turns}")
except ModelBehaviorError as e:
if attempt == max_retries - 1:
raise
# Wait before retry
await asyncio.sleep(2 ** attempt)

try:
result = Runner.run_sync(agent, "Complex multi-step task")
except MaxTurnsExceeded as e:
# Access partial results
if e.run_data:
print(f"Generated {len(e.run_data.new_items)} items before error")
# Use partial results
for item in e.run_data.new_items:
if isinstance(item, MessageOutputItem):
print(f"Partial output: {item}")

Key takeaways:
- final_output_as() for type-safe output access
- release_agents() for long-running applications

Install with Tessl CLI
npx tessl i tessl/pypi-openai-agents