Core agent orchestration for running LLM-powered agents with tools, handoffs, and structured outputs.
The Agent class defines an LLM-powered agent with instructions, tools, and configuration.
/**
 * An LLM-powered agent: a name plus instructions (system prompt), tools,
 * handoffs, and an optional structured output type.
 * Generic over TContext (user context threaded through the run) and
 * TOutput (the parsed final-output type; see outputType).
 */
class Agent<TContext = any, TOutput = any> {
constructor(config: AgentConfiguration<TContext, TOutput>);
/**
 * Create an agent whose output type is inferred automatically from the
 * configured handoffs (union of this agent's output and its handoff targets').
 */
static create<TOutput, Handoffs>(
config: AgentConfiguration
): Agent<any, ResolvedAgentOutput<TOutput, Handoffs>>;
/**
 * Create a copy of this agent, overriding selected configuration fields.
 * The original agent is left unmodified.
 */
clone(config: Partial<AgentConfiguration<TContext, TOutput>>): Agent<TContext, TOutput>;
/**
 * Wrap this agent as a FunctionTool so that other agents can invoke it
 * like an ordinary tool call.
 */
asTool(options?: AgentAsToolOptions): FunctionTool<TContext, Agent>;
/**
 * Resolve the system prompt for the given run context — either the static
 * instructions string or the result of the instructions function.
 */
getSystemPrompt(runContext: RunContext<TContext>): string | Promise<string>;
/**
 * Resolve the prompt template for this agent (OpenAI Responses API only).
 */
getPrompt(runContext: RunContext<TContext>): Prompt | Promise<Prompt>;
/**
 * Fetch the tools currently exposed by the configured MCP servers.
 */
getMcpTools(runContext: RunContext<TContext>): Promise<FunctionTool[]>;
/**
 * Get every tool available to this agent (MCP tools plus function tools).
 */
getAllTools(runContext: RunContext<TContext>): Promise<Tool[]>;
/**
 * Get the handoffs that are currently enabled for this agent.
 */
getEnabledHandoffs(runContext: RunContext<TContext>): Promise<Handoff[]>;
/**
 * Parse the final model output according to the outputType configuration
 * (plain text, JSON schema, Zod schema, or a handoff target).
 */
processFinalOutput(output: AgentOutputItem[]): ResolvedAgentOutput<TOutput>;
/**
 * Serialize this agent's configuration to a JSON-compatible object.
 */
toJSON(): any;
// Properties
/** Agent identifier, as supplied in the configuration (required there) */
name: string;
/** Name of the schema used for structured output */
outputSchemaName: string;
}
/**
 * Configuration accepted by the Agent constructor and Agent.create.
 */
interface AgentConfiguration<TContext = any, TOutput = any> {
/** Agent identifier (required) */
name: string;
/** System prompt: a static string, or a function resolved per run context */
instructions?: string | ((runContext: RunContext<TContext>, agent: Agent) => string | Promise<string>);
/** Prompt template, static or resolved per run context (OpenAI Responses API only) */
prompt?: Prompt | ((runContext: RunContext<TContext>, agent: Agent) => Prompt | Promise<Prompt>);
/** Description shown when this agent is offered as a handoff target */
handoffDescription?: string;
/** Sub-agents this agent can hand off to */
handoffs?: (Agent | Handoff)[];
/** Model to use (defaults to gpt-4.1) */
model?: string | Model;
/** Model tuning parameters (temperature, max tokens, etc.) */
modelSettings?: ModelSettings;
/** Tools available to the agent */
tools?: Tool[];
/** MCP protocol servers whose tools are exposed to the agent */
mcpServers?: MCPServer[];
/** Validation checks applied to the run input */
inputGuardrails?: InputGuardrail[];
/** Validation checks applied to the agent's output */
outputGuardrails?: OutputGuardrail<TOutput>[];
/** Expected output type (text / JSON / Zod schema / handoffs) */
outputType?: AgentOutputType<TOutput>;
/** How the run loop reacts when the model calls a tool */
toolUseBehavior?: ToolUseBehavior;
/** Reset tool choice after a tool is used (default: true) */
resetToolChoice?: boolean;
}
/**
 * Options for Agent.asTool — overrides for the generated tool's metadata.
 */
interface AgentAsToolOptions {
/** Override the generated tool name */
name?: string;
/** Override the generated tool description */
description?: string;
/** Input schema for the tool (defaults to plain text input) */
inputType?: z.ZodType | JsonObjectSchema;
}

Usage Examples:
import { Agent } from '@openai/agents';
// Simple agent
const agent = new Agent({
name: 'Assistant',
instructions: 'You are a helpful assistant',
});
// Agent with dynamic instructions
const agent = new Agent({
name: 'PersonalizedBot',
instructions: (runContext) => {
return `You are helping ${runContext.context.userName}`;
},
});
// Agent with structured output
import { z } from 'zod';
const agent = new Agent({
name: 'DataExtractor',
instructions: 'Extract structured data from text',
outputType: z.object({
name: z.string(),
age: z.number(),
}),
});
// Agent with handoffs
const specialistAgent = new Agent({
name: 'Specialist',
handoffDescription: 'Expert in specialized topics',
instructions: 'You are a specialist',
});
const mainAgent = Agent.create({
name: 'Main',
instructions: 'You are the main agent',
handoffs: [specialistAgent],
});
// Clone agent with modifications
const clonedAgent = agent.clone({
name: 'Modified Assistant',
modelSettings: { temperature: 0.7 },
});

Convenience function to run an agent without creating a Runner instance.
/**
 * Execute an agent with input and return the result.
 * When options.stream is true the call returns a StreamedRunResult
 * (async-iterable of RunStreamEvent); otherwise it resolves to a RunResult.
 * @param agent - The agent to run
 * @param input - Input text, message items, or a RunState to resume from
 * @param options - Run configuration options
 * @returns RunResult (non-streaming) or StreamedRunResult (streaming)
 */
function run<TContext = any, TOutput = any>(
agent: Agent<TContext, TOutput>,
input: string | AgentInputItem[] | RunState,
options?: IndividualRunOptions<TContext>
): Promise<RunResult<TOutput>> | StreamedRunResult<TOutput>;
/**
 * Per-call options accepted by run() and Runner.run.
 */
interface IndividualRunOptions<TContext = any> {
/** User context object passed to tools and guardrails */
context?: TContext;
/** Maximum number of agent turns (default: no limit — NOTE(review): verify; the SDK may apply its own default limit) */
maxTurns?: number;
/** AbortSignal for cancelling the run */
signal?: AbortSignal;
/** Enable streaming (the call then returns a StreamedRunResult) */
stream?: boolean;
/** Session for persistent conversation history */
session?: Session;
/** Override the agent's model for this run */
model?: string | Model;
/** Override the agent's model settings for this run */
modelSettings?: ModelSettings;
/** Custom Runner instance to execute the run with */
runner?: Runner<TContext>;
}

Usage Examples:
import { Agent, run } from '@openai/agents';
// Basic run
const agent = new Agent({
name: 'Assistant',
instructions: 'You are helpful',
});
const result = await run(agent, 'Hello!');
console.log(result.finalOutput);
// Run with context
const result = await run(agent, 'What is my name?', {
context: { userName: 'Alice' },
});
// Run with streaming
const streamResult = run(agent, 'Write a story', {
stream: true,
});
for await (const event of streamResult) {
if (event.type === 'message_output_created') {
console.log(event.content);
}
}
// Run with max turns
const result = await run(agent, 'Complex task', {
maxTurns: 10,
});
// Run with session
import { MemorySession } from '@openai/agents';
const session = new MemorySession();
const result1 = await run(agent, 'My name is Alice', { session });
const result2 = await run(agent, 'What is my name?', { session });

Orchestrates agent execution with configuration, guardrails, tracing, and session management.
/**
 * Orchestrates agent execution with global configuration (model overrides,
 * guardrails, tracing, session handling) shared across runs.
 */
class Runner<TContext = any> {
constructor(config?: RunConfig<TContext>);
/**
 * Execute an agent with input and return the result.
 * Same contract as the top-level run() helper, but uses this Runner's
 * global configuration.
 * @param agent - The agent to run
 * @param input - Input text, message items, or a RunState to resume from
 * @param options - Run configuration options
 * @returns RunResult (non-streaming) or StreamedRunResult (streaming)
 */
run<TOutput = any>(
agent: Agent<TContext, TOutput>,
input: string | AgentInputItem[] | RunState,
options?: IndividualRunOptions<TContext>
): Promise<RunResult<TOutput>> | StreamedRunResult<TOutput>;
}
/**
 * Global configuration applied by a Runner to every run it executes.
 */
interface RunConfig<TContext = any> {
/** Override the model for all agents run through this Runner */
model?: string | Model;
/** Provider used to resolve model names to Model instances */
modelProvider?: ModelProvider;
/** Global model settings override */
modelSettings?: ModelSettings;
/** Global filter applied to handoff input data */
handoffInputFilter?: HandoffInputFilter;
/** Input guardrails applied to every run */
inputGuardrails?: InputGuardrail[];
/** Output guardrails applied to every run */
outputGuardrails?: OutputGuardrail[];
/** Disable tracing entirely */
tracingDisabled?: boolean;
/** Include sensitive data (inputs/outputs) in traces */
traceIncludeSensitiveData?: boolean;
/** Workflow name recorded on traces */
workflowName?: string;
/** Custom trace ID */
traceId?: string;
/** Group ID for correlating multiple traces */
groupId?: string;
/** Additional metadata attached to traces */
traceMetadata?: Record<string, string>;
/** Customize how session history is combined with new input */
sessionInputCallback?: SessionInputCallback;
/** Filter/edit model requests just before each model call */
callModelInputFilter?: CallModelInputFilter;
}
/** Transforms the input data passed to a handoff target before it runs. */
type HandoffInputFilter = (input: HandoffInputData) => HandoffInputData;
/** Customizes how session history items are combined with new input (see RunConfig.sessionInputCallback). */
type SessionInputCallback = (input: AgentInputItem[], session: Session) => Promise<AgentInputItem[]>;
type CallModelInputFilter = (request: ModelRequest, agent: Agent) => ModelRequest | Promise<ModelRequest>;

Usage Examples:
import { Agent, Runner } from '@openai/agents';
// Create runner with global configuration
const runner = new Runner({
modelSettings: {
temperature: 0.7,
maxTokens: 2000,
},
tracingDisabled: false,
workflowName: 'MyWorkflow',
});
const agent = new Agent({
name: 'Assistant',
instructions: 'You are helpful',
});
// Run agent using runner
const result = await runner.run(agent, 'Hello!');
// Runner with global guardrails
import { InputGuardrail } from '@openai/agents';
const profanityGuardrail: InputGuardrail = {
name: 'profanity_check',
execute: async ({ input }) => {
const hasProfanity = false; // Check logic here
return {
tripwireTriggered: hasProfanity,
outputInfo: { filtered: true },
};
},
};
const runner = new Runner({
inputGuardrails: [profanityGuardrail],
});

Result object from non-streaming agent execution.
/**
 * Result of a non-streaming agent execution (the resolved value of run()).
 */
interface RunResult<TOutput = any> {
/** The original input passed to the run */
input: string | AgentInputItem[];
/** Output items produced by the model */
output: AgentOutputItem[];
/** Complete conversation history (input plus generated items) */
history: AgentInputItem[];
/** Run items generated this run, each associated with its agent */
newItems: RunItem[];
/** Raw LLM responses, one per model call */
rawResponses: ModelResponse[];
/** Identifier of the last model response */
lastResponseId?: string;
/** The agent that produced the final output (may differ from the starting agent after handoffs) */
lastAgent?: Agent;
/** Final output parsed according to the agent's outputType; absent if the run was interrupted */
finalOutput?: ResolvedAgentOutput<TOutput>;
/** Pending tool approval requests; non-empty when the run paused for approval */
interruptions: RunToolApprovalItem[];
/** Results of the input guardrails */
inputGuardrailResults: InputGuardrailResult[];
/** Results of the output guardrails */
outputGuardrailResults: OutputGuardrailResult[];
/** Internal run state — pass back to run() to resume (e.g. after approving tools) */
state: RunState;
}
/** Final-output union: plain text, the structured TOutput, or the Agent handed off to. */
type ResolvedAgentOutput<TOutput> =
| string // Text output
| TOutput // Structured output
| Agent; // Handoff output

Usage Examples:
import { Agent, run } from '@openai/agents';
const agent = new Agent({
name: 'Assistant',
instructions: 'You are helpful',
});
const result = await run(agent, 'Hello!');
// Access final output
console.log(result.finalOutput);
// Access conversation history
console.log(result.history);
// Access last agent
console.log(result.lastAgent?.name);
// Check guardrail results
if (result.inputGuardrailResults.some(r => r.tripwireTriggered)) {
console.log('Input guardrail was triggered');
}
// Resume from state
const resumedResult = await run(agent, result.state);

Result object from streaming agent execution with event iteration.
/**
 * Result of a streaming agent execution.
 * Implements AsyncIterable<RunStreamEvent>, so it can be consumed with
 * `for await`; all RunResult fields become available once the stream completes.
 */
interface StreamedRunResult<TOutput = any> extends RunResult<TOutput> {
/** Agent currently executing (updates across handoffs) */
currentAgent?: Agent;
/** Turn number currently in progress */
currentTurn: number;
/** Maximum turns allowed for this run, if set */
maxTurns?: number;
/** True if the stream was cancelled before completion */
cancelled: boolean;
/** Error raised during streaming, if any */
error?: unknown;
/** Promise that resolves when the stream has fully completed */
completed: Promise<void>;
/**
 * Get a ReadableStream of all run events.
 */
toStream(): ReadableStream<RunStreamEvent>;
/**
 * Get a ReadableStream of the text output only.
 */
toTextStream(options?: TextStreamOptions): ReadableStream<string>;
/**
 * Iterate over run events with `for await`.
 */
[Symbol.asyncIterator](): AsyncIterator<RunStreamEvent>;
}
/**
 * Options for StreamedRunResult.toTextStream.
 */
interface TextStreamOptions {
/** Include reasoning content in the text stream */
includeReasoning?: boolean;
}
/** Union of all events emitted while streaming a run. */
type RunStreamEvent =
| RunRawModelStreamEvent
| RunItemStreamEvent
| RunAgentUpdatedStreamEvent;
/** Emitted when a run item (message, tool call, reasoning, handoff, approval request) is produced. */
interface RunItemStreamEvent {
type:
| 'message_output_created'
| 'tool_called'
| 'tool_call_completed'
| 'reasoning_created'
| 'handoff_called'
| 'handoff_completed'
| 'tool_approval_requested';
/** The run item that was produced */
item: RunItem;
/** The agent that produced it */
agent: Agent;
}
/** Emitted when the executing agent changes (e.g. after a handoff). */
interface RunAgentUpdatedStreamEvent {
type: 'agent_updated';
/** The agent now executing */
agent: Agent;
}
}

Usage Examples:
import { Agent, run } from '@openai/agents';
const agent = new Agent({
name: 'Assistant',
instructions: 'You are helpful',
});
// Stream with async iteration
const streamResult = run(agent, 'Write a story', {
stream: true,
});
for await (const event of streamResult) {
if (event.type === 'message_output_created') {
console.log('Message:', event.item.message);
} else if (event.type === 'tool_called') {
console.log('Tool called:', event.item.toolCall.name);
}
}
// Wait for completion
await streamResult.completed;
console.log('Final output:', streamResult.finalOutput);
// Use ReadableStream
const stream = streamResult.toStream();
const reader = stream.getReader();
while (true) {
const { done, value } = await reader.read();
if (done) break;
console.log('Event:', value);
}
// Text-only stream
const textStream = streamResult.toTextStream();
const textReader = textStream.getReader();
while (true) {
const { done, value } = await textReader.read();
if (done) break;
process.stdout.write(value);
}

Context object passed to tools, guardrails, and callbacks.
/**
 * Context passed to tools, guardrails, and callbacks during agent execution.
 * Carries the user-provided context object, usage statistics, and the
 * tool-approval API used to resume interrupted runs.
 */
class RunContext<TContext = any> {
/** User-provided context object (from run options) */
context: TContext;
/** Token usage statistics accumulated over the run */
usage: Usage;
/**
 * Check whether a specific tool call has been approved.
 */
isToolApproved(options: { toolName: string; callId: string }): boolean;
/**
 * Approve a pending tool call; alwaysApprove skips future approval
 * requests for this tool.
 */
approveTool(
approvalItem: RunToolApprovalItem,
options?: { alwaysApprove?: boolean }
): Promise<void>;
/**
 * Reject a pending tool call; alwaysReject auto-rejects future
 * requests for this tool.
 */
rejectTool(
approvalItem: RunToolApprovalItem,
options?: { alwaysReject?: boolean }
): Promise<void>;
/**
 * Serialize the context to a JSON-compatible object.
 */
toJSON(): any;
}

Usage Examples:
import { z } from 'zod';
import { Agent, run, tool } from '@openai/agents';
const approvedTool = tool({
name: 'sensitive_operation',
description: 'Perform sensitive operation',
parameters: z.object({}),
needsApproval: true,
execute: async (input, context) => {
// Access user context
console.log('User:', context.context.userName);
// Check usage
console.log('Tokens used:', context.usage.totalTokens);
return 'Operation completed';
},
});
const agent = new Agent({
name: 'Assistant',
instructions: 'You are helpful',
tools: [approvedTool],
});
const result = await run(agent, 'Do the operation', {
context: { userName: 'Alice' },
});
// Handle approvals
if (result.interruptions.length > 0) {
const approval = result.interruptions[0];
await result.state.runContext.approveTool(approval);
// Resume with approval
const resumed = await run(agent, result.state);
}

Classes representing items in agent execution history.
/**
 * An assistant message produced by the model.
 */
class RunMessageOutputItem {
/** The agent that produced this message */
agent: Agent;
message: AssistantMessageItem;
}
/**
 * A tool invocation requested by the model.
 */
class RunToolCallItem {
/** The agent that requested the call */
agent: Agent;
toolCall: FunctionCallItem | ComputerUseCallItem | ShellCallItem | ApplyPatchCallItem;
}
/**
 * The result of a completed tool invocation.
 */
class RunToolCallOutputItem {
/** The agent whose tool call this result belongs to */
agent: Agent;
toolCallOutput: FunctionCallResultItem | ComputerCallResultItem | ShellCallResultItem | ApplyPatchCallResultItem;
}
/**
 * Reasoning content emitted by the model.
 */
class RunReasoningItem {
/** The agent that produced the reasoning */
agent: Agent;
reasoning: ReasoningItem;
}
/**
 * A handoff invocation (modeled as a function call).
 */
class RunHandoffCallItem {
/** The agent initiating the handoff */
agent: Agent;
handoffCall: FunctionCallItem;
}
/**
 * The result of a completed handoff.
 */
class RunHandoffOutputItem {
/** The agent the handoff originated from */
agent: Agent;
handoffOutput: FunctionCallResultItem;
}
/**
 * A request for human approval of a tool call.
 * `approved` is null while the decision is still pending.
 */
class RunToolApprovalItem {
/** The agent requesting approval */
agent: Agent;
toolCall: FunctionCallItem;
approved: boolean | null;
}
/**
* Extract all text from run items
*/
function extractAllTextOutput(items: (AgentOutputItem | RunItem)[]): string;

Usage Examples:
import { Agent, run, extractAllTextOutput, RunMessageOutputItem, RunToolCallItem, RunToolCallOutputItem } from '@openai/agents';
const agent = new Agent({
name: 'Assistant',
instructions: 'You are helpful',
});
const result = await run(agent, 'Hello!');
// Iterate over run items
for (const item of result.newItems) {
if (item instanceof RunMessageOutputItem) {
console.log('Message:', item.message.content);
} else if (item instanceof RunToolCallItem) {
console.log('Tool call:', item.toolCall.name);
} else if (item instanceof RunToolCallOutputItem) {
console.log('Tool result:', item.toolCallOutput.result);
}
}
// Extract all text
const allText = extractAllTextOutput(result.output);
console.log('All output text:', allText);

The Agent class extends AgentHooks, providing lifecycle event hooks for monitoring agent execution.
/**
 * Event emitter for agent lifecycle events.
 * Agent extends this class, so hooks are registered directly on an agent
 * instance via on()/off().
 */
class AgentHooks<TContext = any, TOutput = any> extends EventEmitterDelegate<AgentHookEvents<TContext, TOutput>> {
/** Register a handler for a lifecycle event */
on<K extends keyof AgentHookEvents>(event: K, handler: (...args: AgentHookEvents[K]) => void): void;
/** Remove a previously registered handler */
off<K extends keyof AgentHookEvents>(event: K, handler: (...args: AgentHookEvents[K]) => void): void;
/** Emit a lifecycle event to registered handlers */
emit<K extends keyof AgentHookEvents>(event: K, ...args: AgentHookEvents[K]): void;
}
/** Map of lifecycle event names to their handler argument tuples. */
type AgentHookEvents<TContext = any, TOutput = any> = {
/** Emitted when the agent starts execution */
agent_start: [context: RunContext<TContext>, agent: Agent<TContext, TOutput>];
/** Emitted when the agent completes execution, with its text output */
agent_end: [context: RunContext<TContext>, output: string];
/** Emitted when the agent hands off to another agent */
agent_handoff: [context: RunContext<TContext>, nextAgent: Agent<any, any>];
/** Emitted when the agent starts executing a tool */
agent_tool_start: [
context: RunContext<TContext>,
tool: Tool<any>,
details: { toolCall: ToolCallItem }
];
/** Emitted when the agent finishes executing a tool, with the tool result */
agent_tool_end: [
context: RunContext<TContext>,
tool: Tool<any>,
result: string,
details: { toolCall: ToolCallItem }
];
};

Usage Examples:
import { Agent, run } from '@openai/agents';
const agent = new Agent({
name: 'MonitoredAgent',
instructions: 'You are a helpful assistant',
});
// Listen to agent start event
agent.on('agent_start', (context, agent) => {
console.log(`Agent ${agent.name} started`);
});
// Listen to agent end event
agent.on('agent_end', (context, output) => {
console.log(`Agent completed with output: ${output}`);
});
// Listen to tool execution
agent.on('agent_tool_start', (context, tool, details) => {
console.log(`Tool ${tool.name} starting`);
});
agent.on('agent_tool_end', (context, tool, result, details) => {
console.log(`Tool ${tool.name} completed with result: ${result}`);
});
// Listen to handoffs
agent.on('agent_handoff', (context, nextAgent) => {
console.log(`Handing off to agent: ${nextAgent.name}`);
});
const result = await run(agent, 'Hello!');