OpenTelemetry instrumentation for the OpenAI Python library, enabling automatic tracing and observability for OpenAI API calls, including prompts, completions, and embeddings. It integrates seamlessly with the OpenTelemetry ecosystem to provide distributed tracing capabilities for LLM applications.

Install via the Tessl CLI:

```bash
npx @tessl/cli install tessl/pypi-opentelemetry-instrumentation-openai@0.46.0
```

Install the package from PyPI:

```bash
pip install opentelemetry-instrumentation-openai
```

The primary entry point is the instrumentor class:

```python
from opentelemetry.instrumentation.openai import OpenAIInstrumentor
```

Additional imports for advanced usage:
```python
from opentelemetry.instrumentation.openai.shared.config import Config
from opentelemetry.instrumentation.openai.utils import (
    is_openai_v1,
    should_send_prompts,
    TRACELOOP_TRACE_CONTENT,
)
from opentelemetry.instrumentation.openai.shared.event_models import (
    MessageEvent,
    ChoiceEvent,
    ToolCall,
    CompletionMessage,
)
from opentelemetry.instrumentation.openai.shared.event_emitter import (
    emit_event,
    Roles,
)
```

Basic usage:

```python
from opentelemetry.instrumentation.openai import OpenAIInstrumentor
import openai

# Basic instrumentation setup
OpenAIInstrumentor().instrument()

# Now OpenAI calls will be automatically traced
client = openai.OpenAI()
response = client.chat.completions.create(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Hello, world!"}],
)
```

Advanced usage with custom configuration:
```python
async def upload_image(trace_id, span_id, image_name, base64_data):
    # Custom image upload handler
    return f"https://example.com/images/{image_name}"

def exception_handler(error):
    # Custom exception logging
    print(f"OpenAI instrumentation error: {error}")

def get_metrics_attributes():
    # Custom metrics attributes
    return {"service.name": "my-llm-app"}

# Configure the instrumentor with custom settings
instrumentor = OpenAIInstrumentor(
    enrich_assistant=True,
    exception_logger=exception_handler,
    get_common_metrics_attributes=get_metrics_attributes,
    upload_base64_image=upload_image,
    enable_trace_context_propagation=True,
    use_legacy_attributes=False,
)

# tracer_provider, meter_provider, and event_logger_provider are assumed to be
# configured elsewhere (see the integration examples below)
instrumentor.instrument(
    tracer_provider=tracer_provider,
    meter_provider=meter_provider,
    event_logger_provider=event_logger_provider,
)
```

The instrumentation works by wrapping OpenAI API calls at the client level:
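Conceptually, a wrapper intercepts each client method, opens a span, records request attributes, and then delegates to the original call. The sketch below, using the `wrapt` package, is illustrative rather than the library's actual implementation; the wrapped module path follows the OpenAI v1 package layout, and the attribute name is an assumption based on the OpenTelemetry GenAI semantic conventions.

```python
# Illustrative sketch only -- not the instrumentation's actual code.
from wrapt import wrap_function_wrapper
from opentelemetry import trace

tracer = trace.get_tracer(__name__)

def _traced_create(wrapped, instance, args, kwargs):
    # Open a span around the original call and record a request attribute.
    with tracer.start_as_current_span("openai.chat") as span:
        span.set_attribute("gen_ai.request.model", kwargs.get("model", ""))
        return wrapped(*args, **kwargs)

# Wrap the v1 client's chat completion method (module path per OpenAI >= 1.0).
wrap_function_wrapper(
    "openai.resources.chat.completions", "Completions.create", _traced_create
)
```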
Core functionality for setting up and managing the OpenAI instrumentation lifecycle.
```python
class OpenAIInstrumentor(BaseInstrumentor):
    def __init__(
        self,
        enrich_assistant: bool = False,
        exception_logger: Optional[Callable] = None,
        get_common_metrics_attributes: Callable[[], dict] = lambda: {},
        upload_base64_image: Optional[Callable[[str, str, str, str], Coroutine[None, None, str]]] = lambda *args: "",
        enable_trace_context_propagation: bool = True,
        use_legacy_attributes: bool = True,
    ):
        """
        Initialize the OpenAI instrumentor.

        Parameters:
        - enrich_assistant: bool, enable assistant enrichment for additional context
        - exception_logger: Optional[Callable], custom exception logging handler
        - get_common_metrics_attributes: Callable[[], dict], provider for common metrics attributes
        - upload_base64_image: Optional[Callable], handler for base64 image uploads in traces
        - enable_trace_context_propagation: bool, enable trace context propagation
        - use_legacy_attributes: bool, use legacy attribute format (vs. new semantic conventions)
        """

    def instrument(self, **kwargs) -> None:
        """
        Start instrumentation of the OpenAI library.

        Parameters:
        - tracer_provider: OpenTelemetry tracer provider
        - meter_provider: OpenTelemetry meter provider
        - event_logger_provider: OpenTelemetry event logger provider
        """

    def uninstrument(self, **kwargs) -> None:
        """Stop instrumentation and restore original OpenAI functions."""

    def instrumentation_dependencies(self) -> Collection[str]:
        """Return list of required package dependencies."""
```

Global configuration settings that control instrumentation behavior.
```python
class Config:
    enrich_assistant: bool = False
    exception_logger: Optional[Callable] = None
    get_common_metrics_attributes: Callable[[], dict] = lambda: {}
    upload_base64_image: Union[Callable[[str, str, str, str], str], Callable[[str, str, str, str], Coroutine[None, None, str]]]
    enable_trace_context_propagation: bool = True
    use_legacy_attributes: bool = True
    event_logger: Optional[EventLogger] = None
```

Helper functions for version detection, environment configuration, and instrumentation control.
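For example, application or instrumentation code can branch on these helpers; a minimal sketch using the utilities imported earlier:

```python
from opentelemetry.instrumentation.openai.utils import is_openai_v1, should_send_prompts

client_style = "v1 client (openai.OpenAI())" if is_openai_v1() else "legacy module-level API"
print(f"OpenAI client style: {client_style}")

if should_send_prompts():
    print("Prompt/completion content will be recorded (TRACELOOP_TRACE_CONTENT is not disabled).")
```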
```python
def is_openai_v1() -> bool:
    """Check if OpenAI library version is >= 1.0.0."""

def is_reasoning_supported() -> bool:
    """Check if reasoning is supported (OpenAI >= 1.58.0)."""

def is_azure_openai(instance) -> bool:
    """Check if instance is an Azure OpenAI client."""

def is_metrics_enabled() -> bool:
    """Check if metrics collection is enabled via TRACELOOP_METRICS_ENABLED."""

def should_send_prompts() -> bool:
    """Check if prompt content should be traced based on TRACELOOP_TRACE_CONTENT."""

def should_emit_events() -> bool:
    """Check if events should be emitted (non-legacy mode with event logger)."""

async def start_as_current_span_async(tracer, *args, **kwargs):
    """Async context manager for starting spans."""

def dont_throw(func) -> Callable:
    """
    Decorator that wraps functions to log exceptions instead of throwing them.
    Works for both synchronous and asynchronous functions.
    """

def run_async(method) -> None:
    """Run async method in appropriate event loop context."""

# Constants
TRACELOOP_TRACE_CONTENT: str = "TRACELOOP_TRACE_CONTENT"
"""Environment variable name for controlling content tracing."""
```

Data structures for representing AI model interactions and tool calls in structured events.
```python
from typing import Union, List, Optional, Callable, Any, Literal, Coroutine
from typing_extensions import TypedDict
from dataclasses import dataclass
from enum import Enum

class ToolCall(TypedDict):
    """Represents a tool call in the AI model."""
    id: str
    function: dict[str, Any]  # Contains function_name and optional arguments
    type: Literal["function"]

class CompletionMessage(TypedDict):
    """Represents a message in the AI model."""
    content: Any
    role: str = "assistant"

@dataclass
class MessageEvent:
    """Represents an input event for the AI model."""
    content: Any
    role: str = "user"
    tool_calls: Optional[List[ToolCall]] = None

@dataclass
class ChoiceEvent:
    """Represents a completion event for the AI model."""
    index: int
    message: CompletionMessage
    finish_reason: str = "unknown"
    tool_calls: Optional[List[ToolCall]] = None
```

Functions for emitting structured events following OpenTelemetry semantic conventions.
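A hedged usage sketch of the event API (it assumes `instrument()` was called with an `event_logger_provider` and `use_legacy_attributes=False`, as in the advanced setup example above, so that emitted events are actually recorded):

```python
from opentelemetry.instrumentation.openai.shared.event_models import MessageEvent, ChoiceEvent
from opentelemetry.instrumentation.openai.shared.event_emitter import emit_event

# Record the user prompt as an input event.
emit_event(MessageEvent(content="What is OpenTelemetry?", role="user"))

# Record the model's reply as a completion event.
emit_event(
    ChoiceEvent(
        index=0,
        message={"content": "An observability framework.", "role": "assistant"},
        finish_reason="stop",
    )
)
```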
```python
def emit_event(event: Union[MessageEvent, ChoiceEvent]) -> None:
    """
    Emit an event to the OpenTelemetry SDK.

    Parameters:
    - event: MessageEvent or ChoiceEvent to emit

    Raises:
    - TypeError: If event type is not supported
    """

class Roles(Enum):
    """Valid message roles for AI interactions."""
    USER = "user"
    ASSISTANT = "assistant"
    SYSTEM = "system"
    TOOL = "tool"
```

Package version information:

```python
__version__: str = "0.46.2"
```

Control instrumentation behavior through environment variables:
```bash
# Control content logging (default: "true")
TRACELOOP_TRACE_CONTENT="false"    # Disable logging prompts/completions for privacy

# Control metrics collection (default: "true")
TRACELOOP_METRICS_ENABLED="false"  # Disable metrics collection
```

The instrumentation automatically traces the following OpenAI operations:

- `client.chat.completions.create()`
- `client.completions.create()`
- `client.embeddings.create()`
- `client.images.generate()`
- `client.beta.assistants.*`
- `client.beta.threads.*`
- `client.responses.*`
- `openai.ChatCompletion.create()` (legacy, pre-1.0 API)
- `openai.Completion.create()` (legacy, pre-1.0 API)
- `openai.Embedding.create()` (legacy, pre-1.0 API)

By default, the instrumentation logs prompts, completions, and embeddings to span attributes for visibility and debugging. To disable content logging for privacy:
```bash
export TRACELOOP_TRACE_CONTENT=false
```

Metrics collection can be disabled:

```bash
export TRACELOOP_METRICS_ENABLED=false
```

Provide custom exception logging to handle instrumentation errors:
```python
import logging

from opentelemetry.instrumentation.openai import OpenAIInstrumentor

logger = logging.getLogger(__name__)

def custom_exception_handler(error):
    # Log to your preferred logging system
    logger.warning(f"OpenAI instrumentation error: {error}")

instrumentor = OpenAIInstrumentor(exception_logger=custom_exception_handler)
```

Exporting traces to Jaeger:

```python
from opentelemetry import trace
from opentelemetry.exporter.jaeger.thrift import JaegerExporter
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import BatchSpanProcessor
from opentelemetry.instrumentation.openai import OpenAIInstrumentor

# Configure tracing
trace.set_tracer_provider(TracerProvider())
jaeger_exporter = JaegerExporter(
    agent_host_name="localhost",
    agent_port=6831,
)
span_processor = BatchSpanProcessor(jaeger_exporter)
trace.get_tracer_provider().add_span_processor(span_processor)

# Instrument OpenAI
OpenAIInstrumentor().instrument()
```

Collecting metrics with Prometheus:

```python
from opentelemetry import metrics
from opentelemetry.sdk.metrics import MeterProvider
from opentelemetry.exporter.prometheus import PrometheusMetricReader
from opentelemetry.instrumentation.openai import OpenAIInstrumentor
# Configure metrics
metric_reader = PrometheusMetricReader()
metrics.set_meter_provider(MeterProvider(metric_readers=[metric_reader]))
# Instrument with metrics
OpenAIInstrumentor().instrument(meter_provider=metrics.get_meter_provider())
```

Emitting structured events instead of legacy span attributes:

```python
from opentelemetry.sdk._logs import LoggerProvider
from opentelemetry.sdk._events import EventLoggerProvider
from opentelemetry.instrumentation.openai import OpenAIInstrumentor
# Configure event logging
logger_provider = LoggerProvider()
event_logger_provider = EventLoggerProvider(logger_provider)
# Instrument with events (requires use_legacy_attributes=False)
instrumentor = OpenAIInstrumentor(use_legacy_attributes=False)
instrumentor.instrument(event_logger_provider=event_logger_provider)
```
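To verify the setup locally, a minimal sketch that prints spans to the console (assumes `OPENAI_API_KEY` is set; the model name is illustrative):

```python
from opentelemetry import trace
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import ConsoleSpanExporter, SimpleSpanProcessor
from opentelemetry.instrumentation.openai import OpenAIInstrumentor
import openai

provider = TracerProvider()
provider.add_span_processor(SimpleSpanProcessor(ConsoleSpanExporter()))
trace.set_tracer_provider(provider)

OpenAIInstrumentor().instrument(tracer_provider=provider)

client = openai.OpenAI()
client.chat.completions.create(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "ping"}],
)
# A span for the chat completion call should be printed to the console.
```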