Built-in tracing for visualizing and debugging agent runs.
Trace instance for tracking agent runs.
/**
* Trace instance representing a complete agent workflow execution
*/
class Trace {
/** Unique trace identifier */
traceId: string;
/** Group ID for grouping multiple traces */
groupId?: string;
/** Additional metadata */
metadata?: Record<string, any>;
/** Workflow name */
workflowName?: string;
/** Start timestamp */
startTime: number;
/** End timestamp */
endTime?: number;
/** Trace status */
status?: 'success' | 'error';
/** Root span */
rootSpan?: Span;
}
Usage Examples:
import { getCurrentTrace, getOrCreateTrace } from '@openai/agents';
// Get current trace
const trace = getCurrentTrace();
if (trace) {
console.log('Trace ID:', trace.traceId);
console.log('Group ID:', trace.groupId);
console.log('Metadata:', trace.metadata);
}
// Create trace with custom options
const result = await getOrCreateTrace(
async (trace) => {
console.log('Running with trace:', trace.traceId);
// Your code here
return 'result';
},
{
workflowName: 'MyWorkflow',
groupId: 'group-123',
metadata: {
userId: 'user-456',
environment: 'production',
},
}
);
Span instance for tracking individual operations.
/**
* Span instance representing a single operation within a trace
*/
class Span {
/** Unique span identifier */
spanId: string;
/** Span name/label */
name: string;
/** Start timestamp */
startTime: number;
/** End timestamp */
endTime?: number;
/** Span status */
status?: 'success' | 'error';
/** Parent span ID */
parentSpanId?: string;
/** Span attributes */
attributes?: Record<string, any>;
/** Events within span */
events?: SpanEvent[];
}
interface SpanEvent {
/** Event name */
name: string;
/** Event timestamp */
timestamp: number;
/** Event attributes */
attributes?: Record<string, any>;
}
Usage Examples:
import { getCurrentSpan } from '@openai/agents';
// Get current span
const span = getCurrentSpan();
if (span) {
console.log('Span ID:', span.spanId);
console.log('Span name:', span.name);
console.log('Parent:', span.parentSpanId);
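// Inspect span events and attributes as well (a sketch; assumes the SDK has
// populated `events` and `attributes` on the active span)
for (const event of span.events ?? []) {
  console.log(`Event ${event.name} at ${event.timestamp}`, event.attributes);
}
console.log('Attributes:', span.attributes);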
}
Functions for working with traces.
/**
* Get the current active trace
* @returns Current trace or undefined
*/
function getCurrentTrace(): Trace | undefined;
/**
* Get the current active span
* @returns Current span or undefined
*/
function getCurrentSpan(): Span | undefined;
/**
* Run code within a trace context
* @param fn - Function to execute with trace
* @param options - Trace configuration
* @returns Function result
*/
function getOrCreateTrace<T>(
fn: (trace: Trace) => T | Promise<T>,
options?: TraceOptions
): Promise<T>;
/**
* Run code within a specific trace
* @param trace - Trace to use
* @param fn - Function to execute
* @returns Function result
*/
function withTrace<T>(
trace: Trace,
fn: () => T | Promise<T>
): Promise<T>;
/**
* Generate a unique trace ID
* @returns Trace ID string
*/
function generateTraceId(): string;
/**
* Generate a unique span ID
* @returns Span ID string
*/
function generateSpanId(): string;
/**
* Generate a unique group ID
* @returns Group ID string
*/
function generateGroupId(): string;
interface TraceOptions {
/** Workflow name */
workflowName?: string;
/** Custom trace ID */
traceId?: string;
/** Group ID for trace grouping */
groupId?: string;
/** Additional metadata */
metadata?: Record<string, any>;
}
Usage Examples:
import {
getOrCreateTrace,
withTrace,
generateTraceId,
generateSpanId,
generateGroupId,
getCurrentTrace,
getCurrentSpan,
} from '@openai/agents';
// Create trace with custom ID
const customTraceId = generateTraceId();
const result1 = await getOrCreateTrace(
async (trace) => {
console.log('Custom trace ID:', trace.traceId);
return 'result';
},
{ traceId: customTraceId }
);
// Create trace with group
const groupId = generateGroupId();
await getOrCreateTrace(
async (trace) => {
console.log('Grouped trace:', trace.traceId);
},
{
groupId,
workflowName: 'BatchProcess',
metadata: { batchId: 'batch-1' },
}
);
// Run multiple traces in same group
for (let i = 0; i < 5; i++) {
await getOrCreateTrace(
async (trace) => {
console.log(`Processing item ${i}`);
},
{
groupId,
workflowName: 'BatchProcess',
metadata: { itemIndex: i },
}
);
}
// Use existing trace
const trace = getCurrentTrace();
if (trace) {
await withTrace(trace, async () => {
console.log('Running with existing trace');
});
}
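// Capture a trace and reuse it later with withTrace (a sketch; relies on
// getOrCreateTrace resolving to the callback's return value, per the
// signature above)
const savedTrace = await getOrCreateTrace(async (t) => {
  console.log('Initial work in trace:', t.traceId);
  return t;
});
await withTrace(savedTrace, async () => {
  console.log('Follow-up work in the same trace');
});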
// Generate IDs
const traceId = generateTraceId();
const spanId = generateSpanId();
console.log('Generated IDs:', { traceId, spanId });
Configure tracing behavior globally or per-runner.
interface RunnerConfig<TContext = any> {
/** Disable tracing */
tracingDisabled?: boolean;
/** Include sensitive data in traces */
traceIncludeSensitiveData?: boolean;
/** Trace workflow name */
workflowName?: string;
/** Custom trace ID */
traceId?: string;
/** Group ID for trace grouping */
groupId?: string;
/** Additional trace metadata */
traceMetadata?: Record<string, string>;
}
/**
* Enable or disable tracing globally
* @param disabled - True to disable tracing
*/
function setTracingDisabled(disabled: boolean): void;
Usage Examples:
import { Agent, Runner, run, setTracingDisabled } from '@openai/agents';
// Disable tracing globally
setTracingDisabled(true);
// Enable tracing globally
setTracingDisabled(false);
// Configure tracing per runner
const runner = new Runner({
tracingDisabled: false,
traceIncludeSensitiveData: false, // Don't include sensitive data
workflowName: 'CustomerSupport',
groupId: 'support-session-123',
traceMetadata: {
customerId: 'cust-456',
region: 'us-east',
},
});
const agent = new Agent({
name: 'SupportAgent',
instructions: 'You are helpful',
});
await runner.run(agent, 'Help me');
// Trace with custom ID
const runner2 = new Runner({
traceId: 'custom-trace-123',
workflowName: 'CustomWorkflow',
});
await runner2.run(agent, 'Hello');
Process and export trace data.
/**
* Batch trace processor for efficient export
*/
class BatchTraceProcessor {
constructor(options?: BatchTraceProcessorOptions);
/** Process a trace */
process(trace: Trace): void;
/** Flush pending traces */
flush(): Promise<void>;
/** Shutdown processor */
shutdown(): Promise<void>;
}
interface BatchTraceProcessorOptions {
/** Maximum batch size */
batchSize?: number;
/** Flush interval in milliseconds */
flushIntervalMs?: number;
/** Exporter to use */
exporter?: TraceExporter;
}
/**
* Add a trace processor
* @param processor - Processor to add
*/
function addTraceProcessor(processor: BatchTraceProcessor): void;
/**
* Replace all trace processors
* @param processors - New processor list
*/
function setTraceProcessors(processors: BatchTraceProcessor[]): void;
/**
* Start the trace export loop
*/
function startTraceExportLoop(): void;
Usage Examples:
import {
BatchTraceProcessor,
ConsoleSpanExporter,
addTraceProcessor,
setTraceProcessors,
startTraceExportLoop,
} from '@openai/agents';
// Console exporter for debugging
const consoleExporter = new ConsoleSpanExporter();
const consoleProcessor = new BatchTraceProcessor({
batchSize: 10,
flushIntervalMs: 5000,
exporter: consoleExporter,
});
addTraceProcessor(consoleProcessor);
// Start export loop
startTraceExportLoop();
// Replace all processors
const processor1 = new BatchTraceProcessor({
batchSize: 50,
exporter: consoleExporter,
});
setTraceProcessors([processor1]);
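// Flush buffered traces explicitly (a sketch; useful before a short-lived
// process exits, using the flush() method declared above)
await processor1.flush();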
// Shutdown processors when done
await consoleProcessor.shutdown();
Console-based trace exporter for debugging.
/**
* Exporter that logs traces to console
*/
class ConsoleSpanExporter {
constructor(options?: ConsoleSpanExporterOptions);
/** Export traces */
export(traces: Trace[]): Promise<void>;
/** Shutdown exporter */
shutdown(): Promise<void>;
}
interface ConsoleSpanExporterOptions {
/** Include sensitive data */
includeSensitiveData?: boolean;
/** Pretty print output */
prettyPrint?: boolean;
}
Usage Examples:
import { ConsoleSpanExporter, BatchTraceProcessor, addTraceProcessor } from '@openai/agents';
// Basic console exporter
const exporter = new ConsoleSpanExporter();
const processor = new BatchTraceProcessor({
exporter,
batchSize: 5,
});
addTraceProcessor(processor);
// With options
const prettyExporter = new ConsoleSpanExporter({
includeSensitiveData: false,
prettyPrint: true,
});
const prettyProcessor = new BatchTraceProcessor({
exporter: prettyExporter,
});
addTraceProcessor(prettyProcessor);
Export traces to OpenAI's tracing service.
/**
* Exporter for OpenAI tracing service
*/
class OpenAITracingExporter {
constructor(options?: OpenAITracingExporterOptions);
/** Export traces */
export(traces: Trace[]): Promise<void>;
/** Shutdown exporter */
shutdown(): Promise<void>;
}
interface OpenAITracingExporterOptions {
/** API key for tracing service */
apiKey?: string;
/** Base URL for tracing service */
baseURL?: string;
/** Batch size for uploads */
batchSize?: number;
/** Flush interval in milliseconds */
flushIntervalMs?: number;
}
/**
* Set default OpenAI tracing exporter
* Uses OPENAI_API_KEY from environment
*/
function setDefaultOpenAITracingExporter(): void;
/**
* Set tracing export API key
* @param apiKey - API key for tracing
*/
function setTracingExportApiKey(apiKey: string): void;
Usage Examples:
import {
OpenAITracingExporter,
setDefaultOpenAITracingExporter,
setTracingExportApiKey,
BatchTraceProcessor,
addTraceProcessor,
} from '@openai/agents';
// Use default exporter (from environment)
setDefaultOpenAITracingExporter();
// Or set API key explicitly
setTracingExportApiKey(process.env.OPENAI_API_KEY!);
// Create custom OpenAI exporter
const exporter = new OpenAITracingExporter({
apiKey: process.env.OPENAI_API_KEY,
baseURL: 'https://api.openai.com/v1',
batchSize: 100,
flushIntervalMs: 10000,
});
const processor = new BatchTraceProcessor({
exporter,
});
addTraceProcessor(processor);
// Now all traces will be exported to OpenAI
import { Agent, run } from '@openai/agents';
const agent = new Agent({
name: 'TracedAgent',
instructions: 'You are helpful',
});
const result = await run(agent, 'Hello');
// Trace automatically sent to OpenAI
Access trace data after execution.
Usage Examples:
import { Agent, Runner, getCurrentTrace } from '@openai/agents';
const runner = new Runner({
workflowName: 'MyWorkflow',
traceMetadata: {
userId: 'user-123',
},
});
const agent = new Agent({
name: 'Agent',
instructions: 'You are helpful',
});
// Run and capture trace
await runner.run(agent, 'Hello');
const trace = getCurrentTrace();
if (trace) {
console.log('Trace ID:', trace.traceId);
console.log('Workflow:', trace.workflowName);
console.log('Duration:', trace.endTime! - trace.startTime, 'ms');
console.log('Status:', trace.status);
console.log('Metadata:', trace.metadata);
if (trace.rootSpan) {
console.log('Root span:', trace.rootSpan.name);
console.log('Span events:', trace.rootSpan.events?.length);
}
}
Group related traces together.
Usage Examples:
import { Agent, Runner, generateGroupId } from '@openai/agents';
const groupId = generateGroupId();
// Process multiple items in same group
const items = ['item1', 'item2', 'item3'];
const agent = new Agent({
name: 'ProcessingAgent',
instructions: 'You process items',
});
for (const item of items) {
const runner = new Runner({
workflowName: 'ItemProcessing',
groupId, // Same group for all items
traceMetadata: {
itemId: item,
},
});
await runner.run(agent, `Process ${item}`);
}
// All traces will be grouped together in the tracing UI
console.log('Processed items in group:', groupId);
Use traces for debugging agent behavior.
Usage Examples:
import {
Agent,
Runner,
ConsoleSpanExporter,
BatchTraceProcessor,
addTraceProcessor,
} from '@openai/agents';
// Enable console tracing for debugging
const debugExporter = new ConsoleSpanExporter({
prettyPrint: true,
includeSensitiveData: true, // Include all data for debugging
});
const debugProcessor = new BatchTraceProcessor({
exporter: debugExporter,
batchSize: 1, // Export immediately
flushIntervalMs: 0,
});
addTraceProcessor(debugProcessor);
// Create the agent whose behavior you want to debug
const agent = new Agent({
name: 'BuggyAgent',
instructions: 'You are helpful',
tools: [
/* tools here */
],
});
const runner = new Runner({
workflowName: 'DebugSession',
tracingDisabled: false,
traceIncludeSensitiveData: true,
});
try {
await runner.run(agent, 'Test input');
} catch (error) {
console.error('Agent failed:', error);
// Check console output for detailed trace
}
// Trace will show:
// - All function calls
// - Tool executions
// - Model requests/responses
// - Timing information
// - Error details
Create custom exporters for your infrastructure.
Usage Examples:
import { BatchTraceProcessor, addTraceProcessor, Trace } from '@openai/agents';
// Custom exporter interface
interface TraceExporter {
export(traces: Trace[]): Promise<void>;
shutdown(): Promise<void>;
}
// Custom database exporter
class DatabaseTraceExporter implements TraceExporter {
private db: any; // Your database client
constructor(db: any) {
this.db = db;
}
async export(traces: Trace[]): Promise<void> {
for (const trace of traces) {
await this.db.query(
'INSERT INTO traces (trace_id, workflow_name, start_time, end_time, metadata) VALUES (?, ?, ?, ?, ?)',
[
trace.traceId,
trace.workflowName,
trace.startTime,
trace.endTime,
JSON.stringify(trace.metadata),
]
);
}
}
async shutdown(): Promise<void> {
await this.db.close();
}
}
// Use custom exporter
const db = {}; // Your database connection
const dbExporter = new DatabaseTraceExporter(db);
const processor = new BatchTraceProcessor({
exporter: dbExporter,
batchSize: 50,
flushIntervalMs: 5000,
});
addTraceProcessor(processor);
// Custom metrics exporter
class MetricsTraceExporter implements TraceExporter {
async export(traces: Trace[]): Promise<void> {
for (const trace of traces) {
const duration = trace.endTime! - trace.startTime;
// Send to metrics service
await fetch('https://metrics.example.com/traces', {
method: 'POST',
body: JSON.stringify({
workflow: trace.workflowName,
duration,
status: trace.status,
timestamp: trace.startTime,
}),
});
}
}
async shutdown(): Promise<void> {
// Cleanup
}
}
const metricsExporter = new MetricsTraceExporter();
const metricsProcessor = new BatchTraceProcessor({
exporter: metricsExporter,
});
addTraceProcessor(metricsProcessor);
Recommended patterns for production use.
Usage Examples:
import {
Agent,
Runner,
ConsoleSpanExporter,
OpenAITracingExporter,
BatchTraceProcessor,
setTraceProcessors,
setTracingDisabled,
generateGroupId,
} from '@openai/agents';
// 1. Conditional tracing based on environment
const isProduction = process.env.NODE_ENV === 'production';
const isDevelopment = process.env.NODE_ENV === 'development';
if (isDevelopment) {
// Verbose tracing in development
const devExporter = new ConsoleSpanExporter({
prettyPrint: true,
includeSensitiveData: true,
});
const devProcessor = new BatchTraceProcessor({ exporter: devExporter });
setTraceProcessors([devProcessor]);
} else if (isProduction) {
// Production tracing to OpenAI
const prodExporter = new OpenAITracingExporter({
apiKey: process.env.OPENAI_API_KEY,
batchSize: 100,
flushIntervalMs: 10000,
});
const prodProcessor = new BatchTraceProcessor({ exporter: prodExporter });
setTraceProcessors([prodProcessor]);
} else {
// Disable tracing in test
setTracingDisabled(true);
}
// 2. Sample traces in high-volume scenarios
const shouldTrace = Math.random() < 0.1; // 10% sampling
const runner = new Runner({
tracingDisabled: !shouldTrace,
workflowName: 'HighVolumeWorkflow',
});
// 3. Add contextual metadata
const runner2 = new Runner({
workflowName: 'APIRequest',
traceMetadata: {
userId: 'user-123',
requestId: 'req-456',
endpoint: '/api/chat',
region: 'us-east-1',
},
});
// 4. Group related operations
const batchId = generateGroupId();
const agent = new Agent({ name: 'BatchAgent', instructions: 'You process items' });
const items = [
{ id: 'item-1', prompt: 'Process item 1' },
{ id: 'item-2', prompt: 'Process item 2' },
];
for (const item of items) {
const runner3 = new Runner({
groupId: batchId,
workflowName: 'BatchProcessing',
traceMetadata: {
batchId,
itemId: item.id,
},
});
await runner3.run(agent, item.prompt);
}
// 5. Don't include sensitive data in production
const runner4 = new Runner({
traceIncludeSensitiveData: false, // Never in production
workflowName: 'SecureWorkflow',
});
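// 6. Flush and shut down processors before the process exits so buffered
//    traces are not lost (a sketch; `exitProcessor` stands in for whichever
//    BatchTraceProcessor you registered above)
const exitProcessor = new BatchTraceProcessor({
  exporter: new OpenAITracingExporter({ apiKey: process.env.OPENAI_API_KEY }),
});
setTraceProcessors([exitProcessor]);
process.on('SIGTERM', async () => {
  await exitProcessor.flush();
  await exitProcessor.shutdown();
  process.exit(0);
});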