Lightweight framework for building multi-agent workflows with LLMs, supporting handoffs, guardrails, tools, and 100+ LLM providers
Guardrails provide configurable safety checks for agent inputs, outputs, and tool usage. They enable content filtering, validation, and safety enforcement with tripwire mechanisms that can halt execution when violations are detected.
Validate and check agent inputs before processing.
class InputGuardrail[TContext]:
"""
Input guardrail for checking agent input.
Type Parameters:
- TContext: Type of context object
Attributes:
- guardrail_function: Callable - Guardrail logic
- name: str | None - Guardrail name for identification
- run_in_parallel: bool - Run concurrently with agent
"""
def get_name() -> str:
"""
Get guardrail name.
Returns:
- str: Guardrail name (function name if not specified)
"""
async def run(
agent: Agent,
input: str | list[TResponseInputItem],
context: TContext | None
) -> InputGuardrailResult:
"""
Execute guardrail check.
Parameters:
- agent: Agent receiving input
- input: Input to check
- context: Context object
Returns:
- InputGuardrailResult: Guardrail result
"""
class InputGuardrailResult:
"""
Result of input guardrail.
Attributes:
- guardrail: InputGuardrail - The guardrail that ran
- output: GuardrailFunctionOutput - Check output
"""
def input_guardrail(
func: Callable | None = None,
*,
name: str | None = None,
run_in_parallel: bool = False
):
"""
Decorator to create InputGuardrail from function.
Parameters:
- func: Function to wrap
- name: Custom guardrail name
- run_in_parallel: Run concurrently with agent
Returns:
- InputGuardrail or decorator
"""Usage example:
from agents import Agent, Runner, input_guardrail, GuardrailFunctionOutput
@input_guardrail
def content_filter(input: str) -> GuardrailFunctionOutput:
"""Filter inappropriate content from input."""
if "offensive_word" in input.lower():
return GuardrailFunctionOutput(
output_info={"reason": "Inappropriate content detected"},
tripwire_triggered=True
)
return GuardrailFunctionOutput(
output_info={"status": "clean"},
tripwire_triggered=False
)
@input_guardrail(name="length_check", run_in_parallel=True)
async def check_input_length(input: str) -> GuardrailFunctionOutput:
"""Check if input is too long."""
if len(input) > 1000:
return GuardrailFunctionOutput(
output_info={"length": len(input), "max": 1000},
tripwire_triggered=True
)
return GuardrailFunctionOutput(
output_info={"length": len(input)},
tripwire_triggered=False
)
agent = Agent(
name="Assistant",
instructions="Help users.",
input_guardrails=[content_filter, check_input_length]
)
try:
result = Runner.run_sync(agent, "User input here")
except InputGuardrailTripwireTriggered as e:
print(f"Input blocked: {e.guardrail_result.output.output_info}")Validate and check agent outputs before returning to user.
class OutputGuardrail[TContext]:
"""
Output guardrail for checking agent output.
Type Parameters:
- TContext: Type of context object
Attributes:
- guardrail_function: Callable - Guardrail logic
- name: str | None - Guardrail name for identification
"""
def get_name() -> str:
"""
Get guardrail name.
Returns:
- str: Guardrail name
"""
async def run(
context: TContext | None,
agent: Agent,
agent_output: Any
) -> OutputGuardrailResult:
"""
Execute guardrail check.
Parameters:
- context: Context object
- agent: Agent that produced output
- agent_output: Output to check
Returns:
- OutputGuardrailResult: Guardrail result
"""
class OutputGuardrailResult:
"""
Result of output guardrail.
Attributes:
- guardrail: OutputGuardrail - The guardrail that ran
- agent_output: Any - The checked output
- agent: Agent - Agent that produced output
- output: GuardrailFunctionOutput - Check output
"""
def output_guardrail(
func: Callable | None = None,
*,
name: str | None = None
):
"""
Decorator to create OutputGuardrail from function.
Parameters:
- func: Function to wrap
- name: Custom guardrail name
Returns:
- OutputGuardrail or decorator
"""Usage example:
from agents import Agent, Runner, output_guardrail, GuardrailFunctionOutput
@output_guardrail
def pii_filter(output: str) -> GuardrailFunctionOutput:
"""Filter personally identifiable information from output."""
if contains_email(output) or contains_phone(output):
return GuardrailFunctionOutput(
output_info={"reason": "PII detected in output"},
tripwire_triggered=True
)
return GuardrailFunctionOutput(
output_info={"status": "clean"},
tripwire_triggered=False
)
@output_guardrail(name="quality_check")
async def quality_validator(output: str) -> GuardrailFunctionOutput:
"""Validate output quality."""
if len(output) < 10:
return GuardrailFunctionOutput(
output_info={"reason": "Output too short"},
tripwire_triggered=True
)
return GuardrailFunctionOutput(
output_info={"quality_score": calculate_quality(output)},
tripwire_triggered=False
)
agent = Agent(
name="Assistant",
instructions="Provide helpful responses.",
output_guardrails=[pii_filter, quality_validator]
)
try:
result = Runner.run_sync(agent, "Tell me about yourself")
print(result.final_output)
except OutputGuardrailTripwireTriggered as e:
print(f"Output blocked: {e.guardrail_result.output.output_info}")Result type for guardrail checks.
class GuardrailFunctionOutput:
"""
Output of guardrail function.
Attributes:
- output_info: Any - Information about checks performed
- tripwire_triggered: bool - Whether to halt execution
"""Usage example:
from agents import GuardrailFunctionOutput
def create_guardrail_result(is_safe: bool, details: dict):
"""Helper to create guardrail results."""
return GuardrailFunctionOutput(
output_info=details,
tripwire_triggered=not is_safe
)
@input_guardrail
def safety_check(input: str) -> GuardrailFunctionOutput:
is_safe, details = perform_safety_check(input)
return create_guardrail_result(is_safe, details)

Guardrails for tool inputs before execution.
class ToolInputGuardrail[TContext]:
"""
Guardrail for tool input.
Type Parameters:
- TContext: Type of context object
Attributes:
- guardrail_function: Callable - Guardrail logic
- name: str | None - Guardrail name
"""
def get_name() -> str:
"""
Get guardrail name.
Returns:
- str: Guardrail name
"""
async def run(data: ToolInputGuardrailData) -> ToolGuardrailFunctionOutput:
"""
Execute guardrail check.
Parameters:
- data: Tool input data
Returns:
- ToolGuardrailFunctionOutput: Check result
"""
class ToolInputGuardrailResult:
"""
Result of tool input guardrail.
Attributes:
- guardrail: ToolInputGuardrail - The guardrail that ran
- output: ToolGuardrailFunctionOutput - Check output
"""
class ToolInputGuardrailData:
"""
Data for tool input guardrail.
Attributes:
- context: TContext | None - Context object
- agent: Agent - Current agent
"""
def tool_input_guardrail(
func: Callable | None = None,
*,
name: str | None = None
):
"""
Create ToolInputGuardrail from function.
Parameters:
- func: Function to wrap
- name: Custom guardrail name
Returns:
- ToolInputGuardrail or decorator
"""Usage example:
from agents import function_tool, tool_input_guardrail, ToolGuardrailFunctionOutput
@tool_input_guardrail
def validate_file_path(data) -> ToolGuardrailFunctionOutput:
"""Ensure file operations are within allowed directories."""
tool_input = data.tool_input
if "path" in tool_input:
path = tool_input["path"]
if not is_safe_path(path):
return ToolGuardrailFunctionOutput.reject_content(
message="File path not allowed",
output_info={"path": path}
)
return ToolGuardrailFunctionOutput.allow(
output_info={"validation": "passed"}
)
@function_tool
def read_file(path: str) -> str:
"""Read file contents."""
with open(path) as f:
return f.read()
# Apply guardrail to tool
read_file.tool_input_guardrails = [validate_file_path]
agent = Agent(
name="File Agent",
tools=[read_file]
)

Guardrails for tool outputs after execution.
class ToolOutputGuardrail[TContext]:
"""
Guardrail for tool output.
Type Parameters:
- TContext: Type of context object
Attributes:
- guardrail_function: Callable - Guardrail logic
- name: str | None - Guardrail name
"""
def get_name() -> str:
"""
Get guardrail name.
Returns:
- str: Guardrail name
"""
async def run(data: ToolOutputGuardrailData) -> ToolGuardrailFunctionOutput:
"""
Execute guardrail check.
Parameters:
- data: Tool output data
Returns:
- ToolGuardrailFunctionOutput: Check result
"""
class ToolOutputGuardrailResult:
"""
Result of tool output guardrail.
Attributes:
- guardrail: ToolOutputGuardrail - The guardrail that ran
- output: ToolGuardrailFunctionOutput - Check output
"""
class ToolOutputGuardrailData:
"""
Data for tool output guardrail.
Extends ToolInputGuardrailData with output.
Attributes:
- context: TContext | None - Context object
- agent: Agent - Current agent
- output: Any - Tool output to check
"""
def tool_output_guardrail(
func: Callable | None = None,
*,
name: str | None = None
):
"""
Create ToolOutputGuardrail from function.
Parameters:
- func: Function to wrap
- name: Custom guardrail name
Returns:
- ToolOutputGuardrail or decorator
"""Usage example:
from agents import function_tool, tool_output_guardrail, ToolGuardrailFunctionOutput
@tool_output_guardrail
def sanitize_output(data) -> ToolGuardrailFunctionOutput:
"""Remove sensitive data from tool output."""
output = data.output
if isinstance(output, str) and contains_secrets(output):
return ToolGuardrailFunctionOutput.reject_content(
message="Tool output contains sensitive data",
output_info={"sanitized": True}
)
return ToolGuardrailFunctionOutput.allow(
output_info={"check": "passed"}
)
@function_tool
def fetch_data(query: str) -> dict:
"""Fetch data from API."""
return api_call(query)
# Apply guardrail to tool
fetch_data.tool_output_guardrails = [sanitize_output]
agent = Agent(
name="Data Agent",
tools=[fetch_data]
)

Result type for tool guardrail checks with behavior specification.
class ToolGuardrailFunctionOutput:
"""
Output of tool guardrail function.
Attributes:
- output_info: Any - Check information
- behavior: RejectContentBehavior | RaiseExceptionBehavior | AllowBehavior - Response behavior
"""
@classmethod
def allow(output_info: Any) -> ToolGuardrailFunctionOutput:
"""
Allow normal execution.
Parameters:
- output_info: Information about check
Returns:
- ToolGuardrailFunctionOutput: Allow result
"""
@classmethod
def reject_content(
message: str,
output_info: Any
) -> ToolGuardrailFunctionOutput:
"""
Reject with message to LLM.
Parameters:
- message: Message explaining rejection
- output_info: Information about check
Returns:
- ToolGuardrailFunctionOutput: Reject result
"""
@classmethod
def raise_exception(output_info: Any) -> ToolGuardrailFunctionOutput:
"""
Halt execution with exception.
Parameters:
- output_info: Information about check
Returns:
- ToolGuardrailFunctionOutput: Exception result
"""Behavior types:
class RejectContentBehavior:
"""
Reject tool call behavior.
Attributes:
- type: Literal["reject_content"]
- message: str - Rejection message for LLM
"""
class RaiseExceptionBehavior:
"""
Raise exception behavior.
Attributes:
- type: Literal["raise_exception"]
"""
class AllowBehavior:
"""
Allow normal execution behavior.
Attributes:
- type: Literal["allow"]
"""Usage example:
from agents import tool_input_guardrail, ToolGuardrailFunctionOutput
@tool_input_guardrail
def validate_api_call(data) -> ToolGuardrailFunctionOutput:
"""Validate API calls against rate limits."""
if rate_limit_exceeded():
# Reject with message to LLM
return ToolGuardrailFunctionOutput.reject_content(
message="Rate limit exceeded. Please try again later.",
output_info={"rate_limit": True}
)
if critical_failure():
# Halt execution completely
return ToolGuardrailFunctionOutput.raise_exception(
output_info={"error": "Critical failure"}
)
# Allow execution
return ToolGuardrailFunctionOutput.allow(
output_info={"status": "ok"}
)

Exceptions raised when guardrail tripwires are triggered.
class InputGuardrailTripwireTriggered(AgentsException):
"""
Raised when input guardrail trips.
Attributes:
- guardrail_result: InputGuardrailResult - Guardrail result
"""
class OutputGuardrailTripwireTriggered(AgentsException):
"""
Raised when output guardrail trips.
Attributes:
- guardrail_result: OutputGuardrailResult - Guardrail result
"""
class ToolInputGuardrailTripwireTriggered(AgentsException):
"""
Raised when tool input guardrail trips.
Attributes:
- guardrail: ToolInputGuardrail - The guardrail
- output: ToolGuardrailFunctionOutput - Check output
"""
class ToolOutputGuardrailTripwireTriggered(AgentsException):
"""
Raised when tool output guardrail trips.
Attributes:
- guardrail: ToolOutputGuardrail - The guardrail
- output: ToolGuardrailFunctionOutput - Check output
"""Apply guardrails across all agents in a run.
from agents import Agent, Runner, RunConfig, input_guardrail, output_guardrail
@input_guardrail
def global_input_filter(input: str):
"""Apply to all agents in run."""
...
@output_guardrail
def global_output_filter(output: str):
"""Apply to all agents in run."""
...
config = RunConfig(
input_guardrails=[global_input_filter],
output_guardrails=[global_output_filter]
)
result = Runner.run_sync(agent, "Hello", run_config=config)

Run input guardrails in parallel with agent processing.
from agents import input_guardrail
@input_guardrail(run_in_parallel=True)
async def async_moderation_check(input: str):
"""
Run moderation API call in parallel.
Results checked before returning final output.
"""
result = await moderation_api.check(input)
if result.flagged:
return GuardrailFunctionOutput(
output_info=result.categories,
tripwire_triggered=True
)
return GuardrailFunctionOutput(
output_info={"safe": True},
tripwire_triggered=False
)
agent = Agent(
name="Assistant",
input_guardrails=[async_moderation_check]
)

Best practices:
- Inspect `output_info` for understanding guardrail decisions.
- Use `reject_content` for recoverable issues, `raise_exception` for critical failures.
- Use `run_in_parallel=True` for expensive checks that don't block agent startup.

Install with Tessl CLI:
npx tessl i tessl/pypi-openai-agents