Configure and use different model providers.
Interface for implementing model providers.
/**
 * Model provider interface for resolving model names to implementations
 */
interface ModelProvider {
  /**
   * Get a model instance by name
   * @param modelName - Model identifier (optional, uses default if not provided)
   * @returns Model instance
   */
  getModel(modelName?: string): Promise<Model> | Model;
}

Usage Examples:
import { Model, ModelProvider, setDefaultModelProvider } from '@openai/agents';

// Custom model provider (CustomModel is the implementation shown in the next section)
class CustomModelProvider implements ModelProvider {
  async getModel(modelName?: string): Promise<Model> {
    const name = modelName || 'default-model';
    // Return custom model implementation
    return new CustomModel(name);
  }
}

const provider = new CustomModelProvider();
setDefaultModelProvider(provider);
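Because the name parameter of getModel is optional, a provider can also be called directly to resolve its default model. A minimal sketch using the provider defined above:

// Resolve a model straight from the provider; with no name,
// this provider falls back to 'default-model' as defined above
const resolved = await provider.getModel();

Interface for implementing model adapters.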
/**
 * Model interface for LLM implementations
 */
interface Model {
  /**
   * Get a non-streaming response from the model
   * @param request - Model request with messages and configuration
   * @returns Model response
   */
  getResponse(request: ModelRequest): Promise<ModelResponse>;

  /**
   * Get a streaming response from the model
   * @param request - Model request with messages and configuration
   * @returns Async iterable of stream events
   */
  getStreamedResponse(request: ModelRequest): AsyncIterable<StreamEvent>;
}

interface ModelRequest {
  /** Input messages */
  messages: AgentInputItem[];
  /** Tools available to the model */
  tools?: Tool[];
  /** Model settings */
  settings?: ModelSettings;
  /** Output schema for structured output */
  outputSchema?: JsonObjectSchema;
  /** Additional provider-specific data */
  providerData?: Record<string, any>;
}

interface ModelResponse {
  /** Response ID */
  id: string;
  /** Output items */
  output: AgentOutputItem[];
  /** Token usage */
  usage: Usage;
  /** Stop reason */
  stopReason?: string;
  /** Raw provider response */
  rawResponse?: any;
}

Usage Examples:
import { Model, ModelRequest, ModelResponse, StreamEvent } from '@openai/agents';

// Custom model implementation
class CustomModel implements Model {
  constructor(private modelName: string) {}

  async getResponse(request: ModelRequest): Promise<ModelResponse> {
    // Call your LLM API
    const response = await fetch('https://api.example.com/chat', {
      method: 'POST',
      headers: { 'Content-Type': 'application/json' },
      body: JSON.stringify({
        model: this.modelName,
        messages: request.messages,
        tools: request.tools,
        settings: request.settings,
      }),
    });
    const data = await response.json();
    return {
      id: data.id,
      output: data.output,
      usage: data.usage,
      stopReason: data.stop_reason,
      rawResponse: data,
    };
  }

  async *getStreamedResponse(request: ModelRequest): AsyncIterable<StreamEvent> {
    // Stream from your LLM API
    const response = await fetch('https://api.example.com/chat/stream', {
      method: 'POST',
      headers: { 'Content-Type': 'application/json' },
      body: JSON.stringify({
        model: this.modelName,
        messages: request.messages,
        stream: true,
      }),
    });
    const reader = response.body!.getReader();
    const decoder = new TextDecoder();
    while (true) {
      const { done, value } = await reader.read();
      if (done) break;
      const chunk = decoder.decode(value);
      // Parse your provider's stream framing (e.g., SSE) into stream events;
      // a raw text pass-through is shown here for brevity
      yield { type: 'text_delta', text: chunk };
    }
  }
}

OpenAI model provider implementation.
/**
 * Model provider for OpenAI models
 */
class OpenAIProvider implements ModelProvider {
  constructor(options?: OpenAIProviderOptions);

  /**
   * Get OpenAI model by name
   * @param modelName - Model name (e.g., 'gpt-4o', 'gpt-4-turbo')
   * @returns Model instance
   */
  getModel(modelName?: string): Model;
}

interface OpenAIProviderOptions {
  /** OpenAI API key */
  apiKey?: string;
  /** Base URL for API */
  baseURL?: string;
  /** Organization ID */
  organization?: string;
  /** Project ID */
  project?: string;
  /** Use Responses API instead of Chat Completions */
  useResponses?: boolean;
  /** OpenAI client instance */
  openAIClient?: OpenAI;
}

Usage Examples:
import { OpenAIProvider, setDefaultModelProvider } from '@openai/agents';
import OpenAI from 'openai';

// Basic OpenAI provider
const provider = new OpenAIProvider({
  apiKey: process.env.OPENAI_API_KEY,
});
setDefaultModelProvider(provider);

// With explicit base URL, organization, and project
const customProvider = new OpenAIProvider({
  apiKey: process.env.OPENAI_API_KEY,
  baseURL: 'https://api.openai.com/v1',
  organization: 'org-123',
  project: 'proj-456',
});
setDefaultModelProvider(customProvider);

// With a custom client
const client = new OpenAI({
  apiKey: process.env.OPENAI_API_KEY,
  timeout: 60000,
  maxRetries: 3,
});
const clientProvider = new OpenAIProvider({
  openAIClient: client,
});
setDefaultModelProvider(clientProvider);

// Use the Responses API
const responsesProvider = new OpenAIProvider({
  apiKey: process.env.OPENAI_API_KEY,
  useResponses: true,
});
setDefaultModelProvider(responsesProvider);

Specific OpenAI model implementations.
/**
 * OpenAI Responses API model implementation
 */
class OpenAIResponsesModel implements Model {
  constructor(options: OpenAIModelOptions);
  getResponse(request: ModelRequest): Promise<ModelResponse>;
  getStreamedResponse(request: ModelRequest): AsyncIterable<StreamEvent>;
}

/**
 * OpenAI Chat Completions API model implementation
 */
class OpenAIChatCompletionsModel implements Model {
  constructor(options: OpenAIModelOptions);
  getResponse(request: ModelRequest): Promise<ModelResponse>;
  getStreamedResponse(request: ModelRequest): AsyncIterable<StreamEvent>;
}

interface OpenAIModelOptions {
  /** Model name */
  modelName: string;
  /** OpenAI client */
  client: OpenAI;
  /** API type */
  apiType?: 'chat_completions' | 'responses';
}

Usage Examples:
import { OpenAIChatCompletionsModel, OpenAIResponsesModel } from '@openai/agents';
import OpenAI from 'openai';

const client = new OpenAI({
  apiKey: process.env.OPENAI_API_KEY,
});

// Chat Completions model
const chatModel = new OpenAIChatCompletionsModel({
  modelName: 'gpt-4o',
  client,
});

// Responses API model
const responsesModel = new OpenAIResponsesModel({
  modelName: 'gpt-4o',
  client,
  apiType: 'responses',
});
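Because AgentConfig.model accepts a Model instance as well as a string (see the override section below), these instances can be attached to an agent directly. A minimal sketch, assuming Agent is also imported from '@openai/agents':

// Attach a model instance directly to an agent
const agent = new Agent({
  name: 'DirectModelAgent',
  instructions: 'You are helpful',
  model: chatModel,
});

Model configuration parameters.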
/**
 * Model tuning parameters
 */
interface ModelSettings {
  /** Sampling temperature (0-2) */
  temperature?: number;
  /** Nucleus sampling threshold (0-1) */
  topP?: number;
  /** Frequency penalty (-2 to 2) */
  frequencyPenalty?: number;
  /** Presence penalty (-2 to 2) */
  presencePenalty?: number;
  /** Tool choice strategy */
  toolChoice?: 'auto' | 'required' | 'none' | string;
  /** Allow parallel tool calls */
  parallelToolCalls?: boolean;
  /** Context truncation strategy */
  truncation?: 'auto' | 'disabled';
  /** Maximum tokens to generate */
  maxTokens?: number;
  /** Store conversation for training */
  store?: boolean;
  /** Prompt cache retention */
  promptCacheRetention?: 'in-memory' | '24h' | null;
  /** Reasoning configuration */
  reasoning?: {
    effort?: 'none' | 'minimal' | 'low' | 'medium' | 'high' | null;
    summary?: 'auto' | 'concise' | 'detailed' | null;
  };
  /** Text output configuration */
  text?: {
    verbosity?: 'low' | 'medium' | 'high' | null;
  };
  /** Provider-specific data */
  providerData?: Record<string, any>;
}

Usage Examples:
import { Agent, run, ModelSettings } from '@openai/agents';

// Agent with model settings
const agent = new Agent({
  name: 'TunedAgent',
  instructions: 'You are helpful',
  modelSettings: {
    temperature: 0.7,
    topP: 0.9,
    maxTokens: 2000,
    frequencyPenalty: 0.5,
    presencePenalty: 0.3,
  },
});

// Override settings at runtime
const result = await run(agent, 'Hello', {
  modelSettings: {
    temperature: 0.3, // More focused
  },
});

// Tool choice settings
const toolAgent = new Agent({
  name: 'ToolAgent',
  instructions: 'You use tools',
  tools: [
    /* tools */
  ],
  modelSettings: {
    toolChoice: 'required', // Must use a tool
    parallelToolCalls: true,
  },
});

// Reasoning settings
const reasoningAgent = new Agent({
  name: 'ReasoningAgent',
  instructions: 'Think deeply',
  modelSettings: {
    reasoning: {
      effort: 'high',
      summary: 'detailed',
    },
  },
});

// Prompt caching
const cachedAgent = new Agent({
  name: 'CachedAgent',
  instructions: 'You use prompt caching',
  modelSettings: {
    promptCacheRetention: '24h',
  },
});
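The truncation and text verbosity fields from the interface above are set the same way; a minimal sketch (the agent name is illustrative):

// Truncation and output verbosity
const conciseAgent = new Agent({
  name: 'ConciseAgent',
  instructions: 'Answer briefly',
  modelSettings: {
    truncation: 'auto', // Let the provider truncate overflowing context
    text: {
      verbosity: 'low', // Prefer terse output
    },
  },
});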
// Provider-specific settings
const customAgent = new Agent({
  name: 'CustomAgent',
  instructions: 'You have custom settings',
  modelSettings: {
    providerData: {
      customParameter: 'value',
    },
  },
});

Global configuration functions.
/**
 * Set default model provider
 * @param provider - Model provider instance
 */
function setDefaultModelProvider(provider: ModelProvider): void;

/**
 * Get default model name
 * @returns Default model name
 */
function getDefaultModel(): string;

/**
 * Get default model settings
 * @returns Default settings object
 */
function getDefaultModelSettings(): ModelSettings;

/**
 * Set default OpenAI client
 * @param client - OpenAI client instance
 */
function setDefaultOpenAIClient(client: OpenAI): void;

/**
 * Set default OpenAI API key
 * @param apiKey - API key
 */
function setDefaultOpenAIKey(apiKey: string): void;

/**
 * Set OpenAI API type
 * @param api - API type ('chat_completions' or 'responses')
 */
function setOpenAIAPI(api: 'chat_completions' | 'responses'): void;

Usage Examples:
import {
  OpenAIProvider,
  setDefaultModelProvider,
  setDefaultOpenAIClient,
  setDefaultOpenAIKey,
  setOpenAIAPI,
  getDefaultModel,
  getDefaultModelSettings,
} from '@openai/agents';
import OpenAI from 'openai';

// Set API key
setDefaultOpenAIKey(process.env.OPENAI_API_KEY!);

// Or set a custom client
const client = new OpenAI({
  apiKey: process.env.OPENAI_API_KEY,
  timeout: 30000,
});
setDefaultOpenAIClient(client);

// Set API type
setOpenAIAPI('responses'); // Use Responses API
// or
setOpenAIAPI('chat_completions'); // Use Chat Completions API (default)

// Set a custom provider
const customProvider = new OpenAIProvider({
  apiKey: process.env.OPENAI_API_KEY,
  baseURL: 'https://custom.openai.com/v1',
});
setDefaultModelProvider(customProvider);

// Get defaults
const defaultModel = getDefaultModel();
console.log('Default model:', defaultModel); // 'gpt-4.1'
const defaultSettings = getDefaultModelSettings();
console.log('Default settings:', defaultSettings);

Override models at different levels.
interface AgentConfig {
  /** Model to use (defaults to provider default) */
  model?: string | Model;
}

interface RunnerConfig {
  /** Override all agent models */
  model?: string | Model;
  /** Model provider */
  modelProvider?: ModelProvider;
  /** Global model settings override */
  modelSettings?: ModelSettings;
}

interface RunOptions {
  /** Override model for this run */
  model?: string | Model;
  /** Override model settings for this run */
  modelSettings?: ModelSettings;
}

Usage Examples:
import { Agent, Runner, run } from '@openai/agents';

// Agent-level model
const agent1 = new Agent({
  name: 'GPT4Agent',
  instructions: 'You are helpful',
  model: 'gpt-4o',
});

// Runner-level override (affects all agents)
const runner = new Runner({
  model: 'gpt-4-turbo',
  modelSettings: {
    temperature: 0.5,
  },
});
await runner.run(agent1, 'Hello'); // Uses gpt-4-turbo despite agent config

// Run-level override (highest priority)
await run(agent1, 'Hello', {
  model: 'gpt-4o-mini',
  modelSettings: {
    temperature: 0.9,
  },
});

// Priority order: run > runner > agent > provider default

Use different providers for different agents.
Usage Examples:
import { Agent, run, Runner, OpenAIProvider } from '@openai/agents';

// Create multiple providers (CustomModelProvider is the custom provider defined earlier)
const openaiProvider = new OpenAIProvider({
  apiKey: process.env.OPENAI_API_KEY,
});
const customProvider = new CustomModelProvider();

// Agent with an OpenAI model
const openaiAgent = new Agent({
  name: 'OpenAIAgent',
  instructions: 'You use OpenAI',
  model: openaiProvider.getModel('gpt-4o'),
});

// Agent with a custom model (this provider's getModel is async, so await it)
const customAgent = new Agent({
  name: 'CustomAgent',
  instructions: 'You use custom model',
  model: await customProvider.getModel('custom-model'),
});

// Use different agents
await run(openaiAgent, 'Hello');
await run(customAgent, 'Hello');

// Or use a runner with a specific provider
const runner = new Runner({
  modelProvider: customProvider,
});
await runner.run(openaiAgent, 'Hello'); // Still uses OpenAI (agent-level model wins)
await runner.run(customAgent, 'Hello'); // Uses the custom provider

Filter and modify model requests before execution.
interface RunnerConfig<TContext = any> {
  /**
   * Filter/edit inputs before model call
   * Allows modification of requests before sending to model
   */
  callModelInputFilter?: CallModelInputFilter;
}

type CallModelInputFilter = (
  request: ModelRequest,
  agent: Agent
) => ModelRequest | Promise<ModelRequest>;

Usage Examples:
import { Agent, Runner, ModelRequest } from '@openai/agents';

// Add custom headers or metadata
const runner = new Runner({
  callModelInputFilter: async (request, agent) => {
    return {
      ...request,
      providerData: {
        ...request.providerData,
        customHeader: 'value',
        agentName: agent.name,
      },
    };
  },
});

// Modify temperature based on agent
const dynamicRunner = new Runner({
  callModelInputFilter: async (request, agent) => {
    // More creative for certain agents
    if (agent.name === 'CreativeAgent') {
      return {
        ...request,
        settings: {
          ...request.settings,
          temperature: 0.9,
        },
      };
    }
    return request;
  },
});
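The same hook can bound the size of what is sent to the model; a minimal sketch, assuming a simple keep-the-most-recent-items policy is acceptable for your use case:

// Trim long histories before the model call
const trimmingRunner = new Runner({
  callModelInputFilter: async (request) => {
    const MAX_ITEMS = 20; // Illustrative cutoff
    if (request.messages.length <= MAX_ITEMS) return request;
    return {
      ...request,
      messages: request.messages.slice(-MAX_ITEMS), // Keep the newest items
    };
  },
});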
// Add logging
const loggingRunner = new Runner({
  callModelInputFilter: async (request, agent) => {
    console.log(`Model call from agent: ${agent.name}`);
    console.log(`Messages: ${request.messages.length}`);
    console.log(`Tools: ${request.tools?.length || 0}`);
    return request;
  },
});

const agent = new Agent({
  name: 'Agent',
  instructions: 'You are helpful',
});
await loggingRunner.run(agent, 'Hello');

Track token usage across model calls.
/**
 * Token usage statistics
 */
class Usage {
  /** Input tokens consumed */
  inputTokens: number;
  /** Output tokens generated */
  outputTokens: number;
  /** Total tokens */
  totalTokens: number;

  /** Detailed input token breakdown */
  inputTokenDetails?: {
    audio?: number;
    text?: number;
    cached?: number;
  };

  /** Detailed output token breakdown */
  outputTokenDetails?: {
    audio?: number;
    text?: number;
    reasoning?: number;
  };

  /**
   * Add usage from another instance
   * @param usage - Usage to add
   */
  add(usage: Usage): void;

  /**
   * Convert to JSON
   * @returns JSON representation
   */
  toJSON(): object;
}

Usage Examples:
import { Agent, run, Usage } from '@openai/agents';

const agent = new Agent({
  name: 'Agent',
  instructions: 'You are helpful',
});

const result = await run(agent, 'Tell me a story');

// Access usage from the result
const usage = result.state.runContext.usage;
console.log('Input tokens:', usage.inputTokens);
console.log('Output tokens:', usage.outputTokens);
console.log('Total tokens:', usage.totalTokens);

// Detailed breakdown
if (usage.inputTokenDetails) {
  console.log('Cached tokens:', usage.inputTokenDetails.cached);
  console.log('Text tokens:', usage.inputTokenDetails.text);
}
if (usage.outputTokenDetails) {
  console.log('Reasoning tokens:', usage.outputTokenDetails.reasoning);
}

// Track usage across multiple runs
const totalUsage = new Usage();
for (let i = 0; i < 5; i++) {
  const result = await run(agent, `Query ${i}`);
  totalUsage.add(result.state.runContext.usage);
}
console.log('Total usage:', totalUsage.toJSON());

Handle model-specific errors.
Usage Examples:
import {
  Agent,
  run,
  ModelBehaviorError,
  SystemError,
  UserError,
} from '@openai/agents';

const agent = new Agent({
  name: 'Agent',
  instructions: 'You are helpful',
});

try {
  const result = await run(agent, 'Hello');
} catch (error) {
  if (error instanceof ModelBehaviorError) {
    console.error('Model behaved unexpectedly:', error.message);
    // Retry with different prompt or settings
  } else if (error instanceof SystemError) {
    console.error('System error (API, network, etc.):', error.message);
    // Retry or alert operations
  } else if (error instanceof UserError) {
    console.error('User configuration error:', error.message);
    // Fix configuration
  } else {
    console.error('Unknown error:', error);
  }
}
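For transient failures, a small wrapper around run can encode the "retry or alert" branch above. A minimal sketch; the helper name, attempt count, and backoff policy are illustrative assumptions, and which errors are safe to retry depends on your provider:

// Hypothetical retry helper: retries only SystemError, with exponential backoff
async function runWithRetry(agent: Agent, input: string, maxAttempts = 3) {
  for (let attempt = 1; ; attempt++) {
    try {
      return await run(agent, input);
    } catch (error) {
      if (error instanceof SystemError && attempt < maxAttempts) {
        // Wait 2s, 4s, ... before the next attempt
        await new Promise((resolve) => setTimeout(resolve, 2 ** attempt * 1000));
        continue;
      }
      throw error; // Non-retryable, or out of attempts
    }
  }
}

Additional utility functions for working with models and model configuration.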
/**
 * Get the currently configured default model provider
 * @returns The default model provider instance
 */
function getDefaultModelProvider(): ModelProvider;

/**
 * Check if the default model is GPT-5
 * @returns True if the default model is a GPT-5 variant
 */
function isGpt5Default(): boolean;

/**
 * Check if a model requires reasoning settings
 * @param modelName - Model name to check
 * @returns True if the model requires reasoning configuration
 */
function gpt5ReasoningSettingsRequired(modelName: string): boolean;

/**
 * Environment variable name for setting the default model
 */
const OPENAI_DEFAULT_MODEL_ENV_VARIABLE_NAME: string;

Usage Examples:
import {
  getDefaultModelProvider,
  isGpt5Default,
  gpt5ReasoningSettingsRequired,
  OPENAI_DEFAULT_MODEL_ENV_VARIABLE_NAME,
  Agent,
  run,
} from '@openai/agents';

// Get the current provider
const provider = getDefaultModelProvider();

// Check if using GPT-5 by default
if (isGpt5Default()) {
  console.log('Using GPT-5 model by default');
}

// Check if a specific model needs reasoning settings
const modelName = 'gpt-5-preview';
if (gpt5ReasoningSettingsRequired(modelName)) {
  console.log('This model requires reasoning settings');
  const agent = new Agent({
    name: 'ReasoningAgent',
    instructions: 'You are helpful',
    model: modelName,
    modelSettings: {
      reasoning: {
        effort: 'medium',
        summary: 'concise',
      },
    },
  });
  await run(agent, 'Solve this complex problem...');
}

// Use the environment variable for configuration
console.log(`Set default model with: ${OPENAI_DEFAULT_MODEL_ENV_VARIABLE_NAME}=gpt-4`);
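The exported constant can also be used to read the current override at startup; a short sketch (the fallback message is illustrative):

// Check whether a default-model override is set in the environment
const configuredDefault = process.env[OPENAI_DEFAULT_MODEL_ENV_VARIABLE_NAME];
console.log('Configured default model:', configuredDefault ?? '(none, SDK default applies)');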