OpenInference instrumentation utilities for tracking application metadata such as sessions, users, and custom metadata using Python context managers
—
Complete TypedDict definitions for all data structures used in OpenInference tracing, ensuring type safety and proper structure validation across the API.
Type definitions for LLM message structures supporting text, images, and tool calls.
class Message(TypedDict, total=False):
    """
    Message structure for LLM interactions.

    Fields:
        role (str): Message role ("user", "assistant", "system", etc.)
        content (str): Text content of the message
        contents (Sequence[MessageContent]): Structured content blocks
        tool_call_id (str): ID of tool call this message responds to
        tool_calls (Sequence[ToolCall]): Tool calls made in this message
    """

    role: str
    content: str
    # Structured alternative to `content`; each block is a text or image part.
    contents: Sequence[MessageContent]
    # Set on tool-result messages to link back to the originating tool call.
    tool_call_id: str
    tool_calls: Sequence[ToolCall]
# Discriminated by the "type" key: "text" -> TextMessageContent, "image" -> ImageMessageContent.
MessageContent = Union[TextMessageContent, ImageMessageContent]
"""Union type for different message content types."""
class TextMessageContent(TypedDict):
    """Text content block in a message."""

    # Discriminator: always the literal string "text".
    type: Literal["text"]
    # The text of this content block.
    text: str
class ImageMessageContent(TypedDict):
    """Image content block in a message."""

    # Discriminator: always the literal string "image".
    type: Literal["image"]
    # The referenced image payload (see `Image`).
    image: Image
class Image(TypedDict, total=False):
    """Image reference structure."""

    # Image location; examples use both http(s) URLs and data URIs.
    url: str

# Usage Example:
# Text message
text_message: Message = {
    "role": "user",
    "content": "Hello, how are you?",
}

# Message with structured content
structured_message: Message = {
    "role": "user",
    "contents": [
        {"type": "text", "text": "What do you see in this image?"},
        {"type": "image", "image": {"url": "data:image/jpeg;base64,..."}},
    ],
}

# Assistant message with tool calls
assistant_message: Message = {
    "role": "assistant",
    "tool_calls": [
        {
            "id": "call_123",
            "function": {
                "name": "get_weather",
                "arguments": '{"city": "San Francisco"}',
            },
        }
    ],
}

# Type definitions for tool calling and function execution.
class Tool(TypedDict, total=False):
"""
Tool definition structure.
Fields:
json_schema (Required[Union[str, Dict[str, Any]]]): JSON schema for tool parameters
"""
json_schema: Required[Union[str, Dict[str, Any]]]
class ToolCall(TypedDict, total=False):
    """
    Tool call structure in messages.

    Fields:
        id (str): Unique identifier for the tool call
        function (ToolCallFunction): Function call details
    """

    id: str
    function: ToolCallFunction
class ToolCallFunction(TypedDict, total=False):
    """
    Function details within a tool call.

    Fields:
        name (str): Function name to call
        arguments (Union[str, Dict[str, Any]]): Function arguments (JSON string or dict)
    """

    name: str
    arguments: Union[str, Dict[str, Any]]

# Usage Example:
# Tool definition
calculator_tool: Tool = {
    "json_schema": {
        "type": "object",
        "properties": {
            "operation": {"type": "string", "enum": ["+", "-", "*", "/"]},
            "a": {"type": "number"},
            "b": {"type": "number"},
        },
        "required": ["operation", "a", "b"],
    }
}

# Tool call in message
tool_call: ToolCall = {
    "id": "call_456",
    "function": {
        "name": "calculator",
        "arguments": {"operation": "+", "a": 10, "b": 5},
    },
}

# Type definitions for LLM token usage tracking.
class TokenCount(TypedDict, total=False):
    """
    Token usage information.

    Fields:
        prompt (int): Number of tokens in the prompt
        completion (int): Number of tokens in the completion
        total (int): Total number of tokens used
        prompt_details (PromptDetails): Detailed prompt token breakdown
    """

    prompt: int
    completion: int
    total: int
    prompt_details: PromptDetails
class PromptDetails(TypedDict, total=False):
    """
    Detailed prompt token usage breakdown.

    Fields:
        audio (int): Tokens used for audio input
        cache_read (int): Tokens read from cache
        cache_write (int): Tokens written to cache
    """

    audio: int
    cache_read: int
    cache_write: int

# Usage Example:
# Simple token count
simple_tokens: TokenCount = {
    "prompt": 50,
    "completion": 25,
    "total": 75,
}

# Detailed token count with cache info
detailed_tokens: TokenCount = {
    "prompt": 100,
    "completion": 50,
    "total": 150,
    "prompt_details": {
        "cache_read": 30,
        "cache_write": 10,
        "audio": 5,
    },
}

# Type definitions for retrieval and reranking operations.
class Document(TypedDict, total=False):
    """
    Document structure for retrieval systems.

    Fields:
        content (str): Document text content
        id (Union[str, int]): Document identifier
        metadata (Union[str, Dict[str, Any]]): Document metadata (dict or JSON string)
        score (float): Relevance or similarity score
    """

    content: str
    id: Union[str, int]
    metadata: Union[str, Dict[str, Any]]
    score: float

# Usage Example:
# Document with metadata dict
doc_with_metadata: Document = {
    "content": "This is the document content...",
    "id": "doc-123",
    "metadata": {
        "source": "database",
        "author": "John Doe",
        "timestamp": "2024-01-01T10:00:00Z",
    },
    "score": 0.95,
}

# Document with JSON metadata string
doc_with_json_metadata: Document = {
    "content": "Another document...",
    "id": 456,
    "metadata": '{"category": "technical", "language": "en"}',
    "score": 0.87,
}

# Type definitions for embedding operations.
class Embedding(TypedDict, total=False):
    """
    Embedding structure.

    Fields:
        text (str): Text that was embedded
        vector (List[float]): Embedding vector values
    """

    text: str
    vector: List[float]

# Usage Example:
# Text embedding
text_embedding: Embedding = {
    "text": "Hello world",
    "vector": [0.1, 0.2, 0.3, -0.1, 0.5],
}

# List of embeddings
embeddings: List[Embedding] = [
    {"text": "First document", "vector": [0.1, 0.2, 0.3]},
    {"text": "Second document", "vector": [0.4, 0.5, 0.6]},
]

# Type aliases for OpenInference-specific enumerations and values.
OpenInferenceSpanKind = Union[
    Literal[
        "agent",
        "chain",
        "embedding",
        "evaluator",
        "guardrail",
        "llm",
        "reranker",
        "retriever",
        "tool",
        "unknown",
    ],
    OpenInferenceSpanKindValues,
]
"""OpenInference span kind type allowing string literals or enum values."""

OpenInferenceMimeType = Union[
    Literal["application/json", "text/plain"],
    OpenInferenceMimeTypeValues,
]
"""MIME type for input/output values."""

OpenInferenceLLMProvider = Union[str, OpenInferenceLLMProviderValues]
"""LLM provider identifier (string or enum value)."""

OpenInferenceLLMSystem = Union[str, OpenInferenceLLMSystemValues]
"""LLM system identifier (string or enum value)."""

# Usage Example:
from openinference.semconv.trace import OpenInferenceSpanKindValues
# Using literal strings (must be lowercase)
span_kind: OpenInferenceSpanKind = "llm"
# Using enum values
span_kind_enum: OpenInferenceSpanKind = OpenInferenceSpanKindValues.LLM
# MIME types (only "application/json" and "text/plain" literals are accepted)
mime_json: OpenInferenceMimeType = "application/json"
mime_text: OpenInferenceMimeType = "text/plain"
# Provider names (free-form string or an OpenInferenceLLMProviderValues member)
provider: OpenInferenceLLMProvider = "openai"
system: OpenInferenceLLMSystem = "gpt"

total=False: All fields are optional.
total=True (default): All fields are required unless marked Optional.
Required[]: Mark specific fields as required in partial TypedDicts.

Many types use Union to support multiple formats:
Union[str, int] for IDs that can be strings or numbers.
Union[str, Dict[str, Any]] for metadata that can be pre-serialized JSON or dicts.
Union[TextMessageContent, ImageMessageContent] for different content types.

Use Sequence instead of List for immutable collections:
All types are designed to work well with mypy and other type checkers:
from typing import TYPE_CHECKING

if TYPE_CHECKING:
    # Imported only for static analysis; avoids a runtime dependency cycle.
    from openinference.instrumentation import Message, TokenCount


def process_llm_response(messages: list[Message], tokens: TokenCount) -> None:
    """Print the role/content of each message and the total token count.

    Args:
        messages: Messages to report; only "role" and "content" keys are read.
        tokens: Token usage; missing "total" defaults to 0.
    """
    # Type checker will validate structure
    for message in messages:
        if "role" in message:
            print(f"Role: {message['role']}")
        if "content" in message:
            print(f"Content: {message['content']}")
    print(f"Used {tokens.get('total', 0)} tokens")

# Install with Tessl CLI
npx tessl i tessl/pypi-openinference-instrumentation