CtrlK
Blog · Docs · Log in · Get started
Tessl Logo

tessl/pypi-openai-agents

Lightweight framework for building multi-agent workflows with LLMs, supporting handoffs, guardrails, tools, and 100+ LLM providers

Overview
Eval results
Files

docs/lifecycle.md

Lifecycle Hooks

Lifecycle hooks provide callbacks at key points during agent execution, enabling observability, logging, custom metrics, and workflow control. The SDK offers two types of hooks: run-level hooks for workflow-wide events and agent-level hooks for individual agent events.

Capabilities

Run Hooks

Lifecycle hooks for entire agent runs, tracking all agents in a workflow.

class RunHooks[TContext]:
    """
    Lifecycle hooks for agent runs.

    Run-level scope: receives callbacks for every agent, LLM, tool, and
    handoff event across all agents in a workflow (unlike the per-agent
    AgentHooks, which is attached to a single agent).

    Type Parameters:
    - TContext: Type of context object

    All methods are async and optional to override; subclasses implement
    only the events they care about.
    """

    async def on_llm_start(
        context: TContext | None,
        agent: Agent,
        system_prompt: str | None,
        input_items: list[TResponseInputItem]
    ) -> None:
        """
        Called before each LLM call.

        Parameters:
        - context: Context object
        - agent: Current agent
        - system_prompt: System prompt being used (None if the agent has none)
        - input_items: Input items passed to the LLM
        """

    async def on_llm_end(
        context: TContext | None,
        agent: Agent,
        response: ModelResponse
    ) -> None:
        """
        Called after each LLM call.

        Parameters:
        - context: Context object
        - agent: Current agent
        - response: LLM response (includes usage information)
        """

    async def on_agent_start(
        context: TContext | None,
        agent: Agent
    ) -> None:
        """
        Called before an agent is invoked.

        Parameters:
        - context: Context object
        - agent: Agent starting
        """

    async def on_agent_end(
        context: TContext | None,
        agent: Agent,
        output: Any
    ) -> None:
        """
        Called when an agent produces output.

        Parameters:
        - context: Context object
        - agent: Agent ending
        - output: Agent output
        """

    async def on_handoff(
        context: TContext | None,
        from_agent: Agent,
        to_agent: Agent
    ) -> None:
        """
        Called when a handoff between agents occurs.

        Parameters:
        - context: Context object
        - from_agent: Source agent
        - to_agent: Target agent
        """

    async def on_tool_start(
        context: TContext | None,
        agent: Agent,
        tool: Tool
    ) -> None:
        """
        Called before a tool is invoked.

        Parameters:
        - context: Context object
        - agent: Agent using tool
        - tool: Tool being invoked
        """

    async def on_tool_end(
        context: TContext | None,
        agent: Agent,
        tool: Tool,
        result: Any
    ) -> None:
        """
        Called after a tool is invoked.

        Parameters:
        - context: Context object
        - agent: Agent that used tool
        - tool: Tool that was invoked
        - result: Tool result
        """

Usage example:

from agents import RunHooks, Runner
import logging

class LoggingHooks(RunHooks):
    """Log all agent operations."""

    async def on_agent_start(self, context, agent):
        logging.info(f"Agent started: {agent.name}")

    async def on_agent_end(self, context, agent, output):
        logging.info(f"Agent ended: {agent.name}, output: {output}")

    async def on_tool_start(self, context, agent, tool):
        logging.info(f"Tool called: {tool.name}")

    async def on_tool_end(self, context, agent, tool, result):
        logging.info(f"Tool result: {result}")

    async def on_handoff(self, context, from_agent, to_agent):
        logging.info(f"Handoff: {from_agent.name} -> {to_agent.name}")

    async def on_llm_start(self, context, agent, system_prompt, input_items):
        logging.info(f"LLM call for {agent.name}")

    async def on_llm_end(self, context, agent, response):
        logging.info(f"LLM response: {response.usage.total_tokens} tokens")

hooks = LoggingHooks()
result = await Runner.run(agent, "Hello", hooks=hooks)

Agent Hooks

Lifecycle hooks for specific agents.

class AgentHooks[TContext]:
    """
    Lifecycle hooks for a specific agent.

    Agent-level scope: attached to one agent and called only for that
    agent's events (unlike RunHooks, which observes all agents in a run).

    Type Parameters:
    - TContext: Type of context object

    All methods are async and optional to override; subclasses implement
    only the events they care about.
    """

    async def on_start(
        context: TContext | None,
        agent: Agent
    ) -> None:
        """
        Called when this agent starts.

        Parameters:
        - context: Context object
        - agent: This agent
        """

    async def on_end(
        context: TContext | None,
        agent: Agent,
        output: Any
    ) -> None:
        """
        Called when this agent produces output.

        Parameters:
        - context: Context object
        - agent: This agent
        - output: Agent output
        """

    async def on_handoff(
        context: TContext | None,
        agent: Agent,
        source: Agent
    ) -> None:
        """
        Called when control is handed off to this agent.

        Parameters:
        - context: Context object
        - agent: This agent (handoff target)
        - source: Source agent that initiated the handoff
        """

    async def on_tool_start(
        context: TContext | None,
        agent: Agent,
        tool: Tool
    ) -> None:
        """
        Called before a tool is invoked by this agent.

        Parameters:
        - context: Context object
        - agent: This agent
        - tool: Tool being invoked
        """

    async def on_tool_end(
        context: TContext | None,
        agent: Agent,
        tool: Tool,
        result: Any
    ) -> None:
        """
        Called after a tool is invoked by this agent.

        Parameters:
        - context: Context object
        - agent: This agent
        - tool: Tool that was invoked
        - result: Tool result
        """

    async def on_llm_start(
        context: TContext | None,
        agent: Agent,
        system_prompt: str | None,
        input_items: list[TResponseInputItem]
    ) -> None:
        """
        Called before an LLM call for this agent.

        Parameters:
        - context: Context object
        - agent: This agent
        - system_prompt: System prompt (None if the agent has none)
        - input_items: Input items passed to the LLM
        """

    async def on_llm_end(
        context: TContext | None,
        agent: Agent,
        response: ModelResponse
    ) -> None:
        """
        Called after an LLM call for this agent.

        Parameters:
        - context: Context object
        - agent: This agent
        - response: LLM response
        """

Usage example:

from agents import Agent, AgentHooks
import time  # required: the hooks below call time.time()

class SpecialistHooks(AgentHooks):
    """Hooks for specialist agent.

    Measures wall-clock duration of each run of this agent by stashing a
    start timestamp on the run context.
    """

    async def on_start(self, context, agent):
        print(f"{agent.name} is starting work")
        # Initialize resources; assumes the context object accepts
        # arbitrary attributes -- TODO confirm against the context type.
        context.start_time = time.time()

    async def on_end(self, context, agent, output):
        duration = time.time() - context.start_time
        print(f"{agent.name} completed in {duration:.2f}s")

    async def on_tool_start(self, context, agent, tool):
        print(f"Using tool: {tool.name}")

specialist = Agent(
    name="Specialist",
    instructions="Provide expert analysis",
    hooks=SpecialistHooks()
)

Hook Patterns

Metrics Collection

from agents import RunHooks
import time

class MetricsHooks(RunHooks):
    """Collect metrics during agent execution."""

    def __init__(self):
        self.metrics = {
            "llm_calls": 0,
            "tool_calls": 0,
            "handoffs": 0,
            "total_tokens": 0,
            "duration": 0
        }
        self.start_time = None

    async def on_agent_start(self, context, agent):
        self.start_time = time.time()

    async def on_agent_end(self, context, agent, output):
        if self.start_time:
            self.metrics["duration"] = time.time() - self.start_time

    async def on_llm_end(self, context, agent, response):
        self.metrics["llm_calls"] += 1
        self.metrics["total_tokens"] += response.usage.total_tokens

    async def on_tool_end(self, context, agent, tool, result):
        self.metrics["tool_calls"] += 1

    async def on_handoff(self, context, from_agent, to_agent):
        self.metrics["handoffs"] += 1

# Use hooks
hooks = MetricsHooks()
result = await Runner.run(agent, "Task", hooks=hooks)
print(f"Metrics: {hooks.metrics}")

Cost Tracking

class CostTrackingHooks(RunHooks):
    """Track LLM costs during execution."""

    COST_PER_1K_TOKENS = {
        "gpt-4o": {"input": 0.0025, "output": 0.01},
        "gpt-4o-mini": {"input": 0.00015, "output": 0.0006}
    }

    def __init__(self):
        self.total_cost = 0.0

    async def on_llm_end(self, context, agent, response):
        model = agent.model or "gpt-4o"
        costs = self.COST_PER_1K_TOKENS.get(model, {"input": 0, "output": 0})

        input_cost = (response.usage.input_tokens / 1000) * costs["input"]
        output_cost = (response.usage.output_tokens / 1000) * costs["output"]
        self.total_cost += input_cost + output_cost

        print(f"Call cost: ${input_cost + output_cost:.4f}")

hooks = CostTrackingHooks()
result = await Runner.run(agent, "Task", hooks=hooks)
print(f"Total cost: ${hooks.total_cost:.4f}")

Audit Logging

class AuditHooks(RunHooks):
    """Audit log for compliance."""

    def __init__(self, user_id: str):
        self.user_id = user_id
        self.audit_log = []

    async def on_agent_start(self, context, agent):
        self.audit_log.append({
            "timestamp": time.time(),
            "event": "agent_start",
            "agent": agent.name,
            "user": self.user_id
        })

    async def on_tool_start(self, context, agent, tool):
        self.audit_log.append({
            "timestamp": time.time(),
            "event": "tool_call",
            "agent": agent.name,
            "tool": tool.name,
            "user": self.user_id
        })

    async def on_llm_start(self, context, agent, system_prompt, input_items):
        self.audit_log.append({
            "timestamp": time.time(),
            "event": "llm_call",
            "agent": agent.name,
            "user": self.user_id
        })

    def save_audit_log(self, filename):
        import json
        with open(filename, 'w') as f:
            json.dump(self.audit_log, f, indent=2)

hooks = AuditHooks(user_id="user_123")
result = await Runner.run(agent, "Task", hooks=hooks)
hooks.save_audit_log("audit.json")

Error Handling

class ErrorHandlingHooks(RunHooks):
    """Capture and handle errors during execution.

    Collects tool failures and model refusals into self.errors without
    interrupting the run.
    """

    def __init__(self):
        self.errors = []

    async def on_tool_end(self, context, agent, tool, result):
        # Tool failures surface as Exception instances in the result.
        if not isinstance(result, Exception):
            return
        self.errors.append({
            "tool": tool.name,
            "error": str(result),
            "agent": agent.name
        })

    async def on_llm_end(self, context, agent, response):
        # Record any model refusals found among the output items.
        refusal_messages = [
            item.refusal
            for item in response.output
            if hasattr(item, 'refusal') and item.refusal
        ]
        for message in refusal_messages:
            self.errors.append({
                "type": "refusal",
                "message": message,
                "agent": agent.name
            })

hooks = ErrorHandlingHooks()
try:
    result = await Runner.run(agent, "Task", hooks=hooks)
finally:
    if hooks.errors:
        print(f"Errors encountered: {hooks.errors}")

Rate Limiting

import asyncio
import time  # required: on_llm_start calls time.time()
from collections import deque

class RateLimitingHooks(RunHooks):
    """Implement rate limiting for LLM calls.

    Keeps a sliding one-minute window of call timestamps and sleeps
    before the next LLM call once the window is full.
    """

    def __init__(self, max_calls_per_minute=10):
        self.max_calls = max_calls_per_minute
        # Timestamps of recent calls, oldest first.
        self.calls = deque()

    async def on_llm_start(self, context, agent, system_prompt, input_items):
        now = time.time()

        # Drop timestamps that have aged out of the one-minute window.
        while self.calls and now - self.calls[0] > 60:
            self.calls.popleft()

        # Sleep just long enough for the oldest call to age out.
        if len(self.calls) >= self.max_calls:
            wait_time = 60 - (now - self.calls[0])
            print(f"Rate limit reached, waiting {wait_time:.1f}s")
            await asyncio.sleep(wait_time)

        self.calls.append(now)

hooks = RateLimitingHooks(max_calls_per_minute=5)
result = await Runner.run(agent, "Task", hooks=hooks)

Progress Reporting

class ProgressHooks(RunHooks):
    """Report progress during execution.

    Appends a description of each step to self.steps and echoes it with
    a 1-based sequence number.
    """

    def __init__(self):
        self.steps = []

    def _advance(self, step, message):
        # Record the step first so len(self.steps) is the step's number.
        self.steps.append(step)
        print(f"[{len(self.steps)}] {message}")

    async def on_agent_start(self, context, agent):
        self._advance(f"Starting {agent.name}", f"Starting {agent.name}")

    async def on_tool_start(self, context, agent, tool):
        self._advance(f"Calling {tool.name}", f"Calling {tool.name}")

    async def on_handoff(self, context, from_agent, to_agent):
        self._advance(
            f"Handoff: {from_agent.name} -> {to_agent.name}",
            f"Handoff to {to_agent.name}",
        )

    async def on_agent_end(self, context, agent, output):
        self._advance(f"Completed {agent.name}", f"Completed {agent.name}")

hooks = ProgressHooks()
result = await Runner.run(agent, "Task", hooks=hooks)
print(f"Total steps: {len(hooks.steps)}")

Combining Hooks

Use multiple hooks together:

class CombinedHooks(RunHooks):
    """Combine multiple hook behaviors.

    Wraps several specialized hooks and fans each event out to all of
    them in a fixed order.
    """

    def __init__(self):
        self.metrics_hooks = MetricsHooks()
        self.cost_hooks = CostTrackingHooks()
        self.audit_hooks = AuditHooks("user_123")

    async def on_llm_end(self, context, agent, response):
        # Forward the event to every wrapped hook in turn.
        for hook in (self.metrics_hooks, self.cost_hooks, self.audit_hooks):
            await hook.on_llm_end(context, agent, response)

    # Implement other methods similarly...

hooks = CombinedHooks()
result = await Runner.run(agent, "Task", hooks=hooks)

Best Practices

  1. Keep Hooks Lightweight: Avoid heavy operations in hooks to prevent slowing down execution
  2. Error Handling: Handle exceptions in hooks to prevent breaking agent execution
  3. Async Operations: Use async properly for I/O operations in hooks
  4. Selective Overrides: Only override methods you need
  5. State Management: Store state in hook instance variables
  6. Composability: Design hooks to be composable with others
  7. Testing: Test hooks independently from agents
  8. Documentation: Document what each hook does for team members
  9. Performance: Monitor hook performance impact
  10. Logging: Use appropriate log levels in hooks

Install with Tessl CLI

npx tessl i tessl/pypi-openai-agents

docs

core-agents.md

guardrails.md

handoffs.md

index.md

items-streaming.md

lifecycle.md

mcp.md

memory-sessions.md

model-providers.md

realtime.md

results-exceptions.md

tools.md

tracing.md

voice-pipeline.md

tile.json