OpenTelemetry instrumentation for the Mistral AI client library, enabling automatic tracing and observability.
```bash
npx @tessl/cli install tessl/pypi-opentelemetry-instrumentation-mistralai@0.46.0
```

OpenTelemetry instrumentation for the Mistral AI Python client library, enabling automatic tracing and observability for LLM applications using Mistral AI's APIs. This library instruments the chat completion, chat streaming, and embeddings endpoints to collect telemetry data, including request/response attributes, token usage metrics, and span information.
Install from PyPI:

```bash
pip install opentelemetry-instrumentation-mistralai
```

Requires `mistralai >= 0.2.0, < 1`.

Primary import:

```python
from opentelemetry.instrumentation.mistralai import MistralAiInstrumentor
```

Additional imports for type annotations:
```python
from typing import Collection, Union, Any, Optional, List, Literal
```

Event models and utilities:

```python
from opentelemetry.instrumentation.mistralai.event_models import MessageEvent, ChoiceEvent, ToolCall, CompletionMessage, _FunctionToolCall
from opentelemetry.instrumentation.mistralai.utils import should_send_prompts, should_emit_events, dont_throw, TRACELOOP_TRACE_CONTENT
from opentelemetry.instrumentation.mistralai.config import Config
from opentelemetry.instrumentation.mistralai.event_emitter import emit_event, Roles, VALID_MESSAGE_ROLES, EVENT_ATTRIBUTES
from opentelemetry.instrumentation.mistralai.version import __version__
```

Basic usage — instrument once, then use the Mistral AI client as usual:

```python
from opentelemetry.instrumentation.mistralai import MistralAiInstrumentor
from mistralai.client import MistralClient
# Initialize and instrument
instrumentor = MistralAiInstrumentor()
instrumentor.instrument()
# Use Mistral AI client normally - it will be automatically traced
client = MistralClient(api_key="your-api-key")
# Chat completion will be automatically traced
response = client.chat(
    model="mistral-large-latest",
    messages=[{"role": "user", "content": "Hello, how are you?"}]
)

# Streaming chat will also be traced
for chunk in client.chat_stream(
    model="mistral-large-latest",
    messages=[{"role": "user", "content": "Tell me a story"}]
):
    print(chunk.choices[0].delta.content, end="")

# Embeddings will be traced too
embeddings = client.embeddings(
    model="mistral-embed",
    input=["Hello world", "How are you?"]
)
# Clean up when done (optional)
instrumentor.uninstrument()
```
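The snippet above uses whatever tracer provider is globally configured. To actually export the spans somewhere, set up a provider before instrumenting. A minimal sketch using the standard OpenTelemetry SDK console exporter (the exporter choice is illustrative, not required by this library):

```python
from opentelemetry import trace
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import SimpleSpanProcessor, ConsoleSpanExporter
from opentelemetry.instrumentation.mistralai import MistralAiInstrumentor

# Print finished spans to stdout for quick local inspection
provider = TracerProvider()
provider.add_span_processor(SimpleSpanProcessor(ConsoleSpanExporter()))
trace.set_tracer_provider(provider)

# Pass the provider explicitly; otherwise the global provider is used
MistralAiInstrumentor().instrument(tracer_provider=provider)
```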
Core instrumentor class for enabling and disabling Mistral AI instrumentation:

```python
class MistralAiInstrumentor(BaseInstrumentor):
"""An instrumentor for Mistral AI's client library."""
def __init__(self, exception_logger=None, use_legacy_attributes: bool = True):
"""
Initialize the instrumentor.
Args:
exception_logger: Custom exception logger function (optional)
use_legacy_attributes (bool): Whether to use legacy span attributes vs
new event-based approach (default: True)
"""
def instrumentation_dependencies(self) -> Collection[str]:
"""Returns list of required packages: ["mistralai >= 0.2.0, < 1"]."""
def _instrument(self, **kwargs):
"""
Enable instrumentation (internal method).
Args:
tracer_provider: OpenTelemetry tracer provider (optional)
event_logger_provider: OpenTelemetry event logger provider (optional)
"""
def _uninstrument(self, **kwargs):
"""Disable instrumentation (internal method)."""Data structures for representing AI model events in the new event-based telemetry approach.
Data structures for representing AI model events in the new event-based telemetry approach:

```python
@dataclass
class MessageEvent:
    """Represents an input event for the AI model."""

    content: Any
    role: str = "user"
    tool_calls: Optional[List[ToolCall]] = None


@dataclass
class ChoiceEvent:
    """Represents a completion event for the AI model."""

    index: int
    message: CompletionMessage
    finish_reason: str = "unknown"
    tool_calls: Optional[List[ToolCall]] = None
```
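Constructing these by hand is only needed if you emit events yourself; a minimal sketch using the documented fields:

```python
from opentelemetry.instrumentation.mistralai.event_models import MessageEvent, ChoiceEvent

prompt_event = MessageEvent(content="What is the capital of France?", role="user")

completion_event = ChoiceEvent(
    index=0,
    message={"content": "Paris.", "role": "assistant"},  # a CompletionMessage TypedDict
    finish_reason="stop",
)
```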
Helper functions for controlling instrumentation behavior:

```python
def should_send_prompts() -> bool:
"""
Determines if prompts should be logged based on TRACELOOP_TRACE_CONTENT
environment variable.
Returns:
bool: True if prompts should be sent (default), False otherwise
"""
def should_emit_events() -> bool:
"""
Checks if the instrumentation should emit events (non-legacy mode).
Returns:
bool: True if events should be emitted
"""
def dont_throw(func):
"""
Decorator that wraps functions to log exceptions instead of throwing them.
Args:
func: The function to wrap
Returns:
Wrapper function that catches and logs exceptions
"""Functions for emitting OpenTelemetry events in the new event-based approach.
Functions for emitting OpenTelemetry events in the new event-based approach:

```python
def emit_event(event: Union[MessageEvent, ChoiceEvent], event_logger: Union[EventLogger, None]) -> None:
"""
Emit an event to the OpenTelemetry SDK.
Args:
event: The event to emit (MessageEvent or ChoiceEvent)
event_logger: The OpenTelemetry event logger
Returns:
None
"""
class Roles(Enum):
"""Enum of valid message roles."""
USER = "user"
ASSISTANT = "assistant"
SYSTEM = "system"
TOOL = "tool"
VALID_MESSAGE_ROLES = {"user", "assistant", "system", "tool"}
"""Set of valid roles for naming message events."""
EVENT_ATTRIBUTES = {"gen_ai.system": "mistral_ai"}
"""Default attributes to be used for events."""Configuration options for the instrumentation.
Configuration options for the instrumentation:

```python
class Config:
"""Configuration class for the instrumentation."""
exception_logger = None # Custom exception logger
use_legacy_attributes = True # Whether to use legacy attributesclass _FunctionToolCall(TypedDict):
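Both options can also be set via the `MistralAiInstrumentor` constructor shown earlier; since they are class attributes, setting them directly on `Config` is a sketch-level equivalent (`report_exception` is a placeholder):

```python
from opentelemetry.instrumentation.mistralai.config import Config

def report_exception(exc: Exception) -> None:
    print(f"mistralai instrumentation error: {exc}")  # placeholder sink

Config.exception_logger = report_exception
Config.use_legacy_attributes = False  # switch to the event-based approach
```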
"""Internal type for function tool call details."""
    function_name: str
    arguments: Optional[dict[str, Any]]

class ToolCall(TypedDict):
    """Represents a tool call in the AI model."""

    id: str
    function: _FunctionToolCall
    type: Literal["function"]

class CompletionMessage(TypedDict):
    """Represents a message in the AI model."""

    content: Any
    role: str
```

`TRACELOOP_TRACE_CONTENT`: Controls whether prompts and completions are logged (default: `"true"`). Set to `"false"` to disable content logging for privacy.

Module constants:

```python
TRACELOOP_TRACE_CONTENT = "TRACELOOP_TRACE_CONTENT"
"""Environment variable name for controlling content tracing."""
_instruments = ("mistralai >= 0.2.0, < 1",)
"""Required package dependencies tuple."""
WRAPPED_METHODS = [
    {
        "method": "chat",
        "span_name": "mistralai.chat",
        "streaming": False,
    },
    {
        "method": "chat_stream",
        "span_name": "mistralai.chat",
        "streaming": True,
    },
    {
        "method": "embeddings",
        "span_name": "mistralai.embeddings",
        "streaming": False,
    },
]
"""Configuration for methods to be instrumented."""
VALID_MESSAGE_ROLES = {"user", "assistant", "system", "tool"}
"""Set of valid roles for naming message events."""
EVENT_ATTRIBUTES = {"gen_ai.system": "mistral_ai"}
"""Default attributes to be used for events."""Internal functions used by the instrumentation (advanced usage).
Internal functions used by the instrumentation (advanced usage):

```python
def _llm_request_type_by_method(method_name: str) -> str:
"""
Determine LLM request type based on method name.
Args:
method_name: Name of the method being instrumented
Returns:
str: LLM request type ("chat", "embedding", or "unknown")
"""
def _set_span_attribute(span, name: str, value):
"""
Set a span attribute if value is not None or empty.
Args:
span: OpenTelemetry span object
name: Attribute name
value: Attribute value
"""
def _with_tracer_wrapper(func):
"""
Helper decorator for providing tracer for wrapper functions.
Args:
func: Function to wrap with tracer
Returns:
Wrapped function with tracer access
"""The library automatically instruments these Mistral AI client methods:
The library automatically instruments these Mistral AI client methods:

- `MistralClient.chat`: Synchronous chat completions
- `MistralClient.chat_stream`: Synchronous streaming chat completions
- `MistralClient.embeddings`: Synchronous embeddings
- `MistralAsyncClient.chat`: Asynchronous chat completions
- `MistralAsyncClient.chat_stream`: Asynchronous streaming chat completions
- `MistralAsyncClient.embeddings`: Asynchronous embeddings

By default, this instrumentation logs prompts, completions, and embeddings to span attributes. For privacy reasons, or to reduce trace size, disable content logging:
```bash
export TRACELOOP_TRACE_CONTENT=false
```

Package version:

```python
__version__ = "0.46.2"
```
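The same switch can be flipped from Python using the exported constant; a sketch:

```python
import os

from opentelemetry.instrumentation.mistralai.utils import TRACELOOP_TRACE_CONTENT

# Equivalent to `export TRACELOOP_TRACE_CONTENT=false`; should_send_prompts()
# consults the environment, so set this before making instrumented calls
os.environ[TRACELOOP_TRACE_CONTENT] = "false"
```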