OpenInference instrumentation utilities for tracking application metadata such as sessions, users, and custom metadata using Python context managers
—
Helper functions and utilities for JSON serialization, span context capture, and project management in OpenInference instrumentation.
Safe JSON serialization utility that handles edge cases and ensures non-ASCII characters are preserved.
def safe_json_dumps(obj: Any, **kwargs: Any) -> str:
"""
A convenience wrapper around json.dumps that ensures any object can
be safely encoded without a TypeError and that non-ASCII Unicode
characters are not escaped.
Args:
obj: Object to serialize to JSON
**kwargs: Additional keyword arguments passed to json.dumps
Returns:
str: JSON string representation
"""

Usage Example:
from openinference.instrumentation import safe_json_dumps
from datetime import datetime
# Handles complex objects safely
data = {
"text": "Hello 世界", # Unicode characters preserved
"timestamp": datetime.now(), # Converted to string
"numbers": [1, 2.5, float('inf')], # Special values handled
"nested": {"key": "value"}
}
json_str = safe_json_dumps(data)
print(json_str)
# Output: {"text": "Hello 世界", "timestamp": "2024-01-01 10:00:00.123456", ...}
# Works with custom kwargs
compact_json = safe_json_dumps(data, separators=(',', ':'))
pretty_json = safe_json_dumps(data, indent=2)

Context manager for capturing OpenInference span contexts, useful for annotation and evaluation workflows.
class capture_span_context:
"""
Context manager for capturing OpenInference span context.
Useful for getting span IDs for annotations, evaluations, or feedback.
"""
def __init__(self) -> None: ...
def __enter__(self) -> "capture_span_context": ...
def __exit__(
self,
_exc_type: Optional[Type[BaseException]],
_exc_value: Optional[BaseException],
_traceback: Optional[TracebackType],
) -> None: ...
def get_first_span_id(self) -> Optional[str]:
"""
Returns the first captured span ID, or None if no spans were captured.
This can be useful if the first span is the one that you want to annotate or evaluate.
Returns:
Optional[str]: First span ID as hex string, or None
"""
def get_last_span_id(self) -> Optional[str]:
"""
Returns the last captured span ID, or None if no spans were captured.
This can be useful if the last span is the one that you want to annotate or evaluate.
Returns:
Optional[str]: Last span ID as hex string, or None
"""
def get_span_contexts(self) -> Sequence[SpanContext]:
"""
Returns a sequence of all captured span contexts.
Returns:
Sequence[SpanContext]: All captured span contexts
"""

Usage Example:
from openinference.instrumentation import capture_span_context
import openai
# Capture spans from LLM operations
with capture_span_context() as capture:
response = openai.chat.completions.create(
model="gpt-4",
messages=[{"role": "user", "content": "Hello!"}]
)
# Get the span ID for annotation
span_id = capture.get_last_span_id()
if span_id:
# Use span ID with Phoenix or other annotation systems
phoenix_client.annotations.add_span_annotation(
span_id=span_id,
annotation_name="feedback",
score=0.9,
label="helpful"
)
# Capture multiple spans
with capture_span_context() as capture:
# Multiple operations
embedding = openai.embeddings.create(...)
completion = openai.chat.completions.create(...)
# Get all span contexts
all_contexts = capture.get_span_contexts()
print(f"Captured {len(all_contexts)} spans")
# Get first and last
first_span = capture.get_first_span_id()
last_span = capture.get_last_span_id()

Context manager for dynamically changing the project associated with spans, intended for notebook environments.
class dangerously_using_project:
"""
A context manager that switches the project for all spans created within the context.
This is intended for use in notebook environments where it's useful to be able to change the
project associated with spans on the fly.
Note: This should not be used in production environments or complex OpenTelemetry setups,
as dynamically modifying span resources in this way can lead to unexpected behavior.
Args:
project_name (str): The project name to associate with spans created within the context.
"""
def __init__(self, project_name: str) -> None: ...
def __enter__(self) -> None: ...
def __exit__(
self,
exc_type: Optional[type[BaseException]],
exc_value: Optional[BaseException],
traceback: Optional[types.TracebackType],
) -> None: ...

Usage Example:
from openinference.instrumentation import dangerously_using_project
# Normal spans go to default project
tracer.start_span("normal-operation")
# Temporarily switch project for experimentation
with dangerously_using_project("experiment-project"):
# All spans created here will be associated with "experiment-project"
tracer.start_span("experimental-operation")
llm_call()
# Back to default project
tracer.start_span("another-normal-operation")
# Useful in Jupyter notebooks for organizing experiments
with dangerously_using_project("notebook-session-1"):
# Experiment 1
results_1 = run_experiment()
with dangerously_using_project("notebook-session-2"):
# Experiment 2
results_2 = run_experiment()

Additional helper functions used internally (exported for advanced use cases).
def get_span_id(span: Span) -> str:
"""
Extract span ID as hex string from OpenTelemetry span.
Args:
span (Span): OpenTelemetry span
Returns:
str: Span ID as hex string
"""
def get_trace_id(span: Span) -> str:
"""
Extract trace ID as hex string from OpenTelemetry span.
Args:
span (Span): OpenTelemetry span
Returns:
str: Trace ID as hex string
"""

Usage Example:
from openinference.instrumentation.helpers import get_span_id, get_trace_id
from opentelemetry import trace
# Get current span IDs
current_span = trace.get_current_span()
if current_span:
span_id = get_span_id(current_span)
trace_id = get_trace_id(current_span)
print(f"Current span: {span_id} in trace: {trace_id}")

The safe_json_dumps function provides several safety features: it never raises a TypeError for objects that are not natively JSON-serializable, and it preserves non-ASCII Unicode characters rather than escaping them.
The capture_span_context utility enables capturing the span contexts of instrumented operations, so their span IDs can be used in annotation, evaluation, and feedback workflows.
The dangerously_using_project context manager is useful for switching the project associated with spans on the fly, for example when organizing experiments in notebook environments.
Warning: Only use dangerously_using_project in development or notebook environments. It modifies OpenTelemetry internals and can cause issues in production systems.
from openinference.instrumentation import capture_span_context
import phoenix
with capture_span_context() as capture:
# Your LLM operations
response = llm.generate("Hello world")
# Add human feedback
span_id = capture.get_last_span_id()
if span_id:
phoenix.Client().log_evaluations(
span_ids=[span_id],
evaluations=[
phoenix.Evaluation(
name="helpfulness",
score=0.8,
explanation="Response was helpful"
)
]
)

import pytest
from openinference.instrumentation import capture_span_context
def test_span_creation():
with capture_span_context() as capture:
# Your code that should create spans
my_function_that_creates_spans()
# Verify spans were created
assert len(capture.get_span_contexts()) > 0
assert capture.get_first_span_id() is not None

Install with Tessl CLI
npx tessl i tessl/pypi-openinference-instrumentation