Comprehensive Python SDK for AI application observability and experimentation with OpenTelemetry-based tracing, automatic instrumentation, and dataset management.
Pre-built integrations for popular AI frameworks with automatic instrumentation and minimal configuration required. Enables seamless observability for existing AI applications.
Drop-in replacement for the OpenAI SDK with automatic Langfuse tracing for all API calls.
# Import replacements for automatic tracing
from langfuse.openai import OpenAI, AsyncOpenAI, AzureOpenAI, AsyncAzureOpenAI
from langfuse.openai import openai # Patched openai module
# All standard OpenAI classes with automatic tracing
class OpenAI:
"""Drop-in replacement for openai.OpenAI with automatic Langfuse tracing."""
class AsyncOpenAI:
"""Drop-in replacement for openai.AsyncOpenAI with automatic tracing."""
class AzureOpenAI:
"""Drop-in replacement for openai.AzureOpenAI with automatic tracing."""
class AsyncAzureOpenAI:
"""Drop-in replacement for openai.AsyncAzureOpenAI with automatic tracing."""Features:
Callback handler for comprehensive LangChain application tracing.
class CallbackHandler:
def __init__(self, *, public_key: str = None, secret_key: str = None,
host: str = None, tracing_enabled: bool = True,
environment: str = None, **kwargs):
"""Initialize Langfuse callback handler for LangChain.
Args:
public_key: Langfuse public key (optional if set via env)
secret_key: Langfuse secret key (optional if set via env)
host: Langfuse host URL
tracing_enabled: Enable/disable tracing
environment: Environment tag for traces
**kwargs: Additional configuration options
"""Features:
# Before (standard OpenAI)
# import openai
# client = openai.OpenAI(api_key="your-key")
# After (with Langfuse tracing)
from langfuse.openai import OpenAI
client = OpenAI(
api_key="your-openai-key"
# Langfuse configuration via environment variables:
# LANGFUSE_PUBLIC_KEY, LANGFUSE_SECRET_KEY, LANGFUSE_HOST
)
# All API calls automatically traced
response = client.chat.completions.create(
model="gpt-4",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": "What is the capital of France?"}
],
temperature=0.7,
max_tokens=150
)
print(response.choices[0].message.content)
# Automatically creates:
# - Generation-type span with model="gpt-4"
# - Usage details (prompt_tokens, completion_tokens, total_tokens)
# - Model parameters (temperature=0.7, max_tokens=150)
# - Input (messages) and output (response)

import asyncio
from langfuse.openai import AsyncOpenAI
async def main():
client = AsyncOpenAI(api_key="your-openai-key")
# Async calls automatically traced
response = await client.chat.completions.create(
model="gpt-3.5-turbo",
messages=[{"role": "user", "content": "Hello!"}]
)
print(response.choices[0].message.content)
asyncio.run(main())

from langfuse.openai import OpenAI
client = OpenAI(api_key="your-openai-key")
# Streaming responses automatically traced
stream = client.chat.completions.create(
model="gpt-4",
messages=[{"role": "user", "content": "Tell me a story"}],
stream=True
)
full_response = ""
for chunk in stream:
if chunk.choices[0].delta.content is not None:
content = chunk.choices[0].delta.content
print(content, end="")
full_response += content
# Complete response automatically captured in trace

from langfuse.openai import AzureOpenAI
client = AzureOpenAI(
api_key="your-azure-key",
api_version="2023-12-01-preview",
azure_endpoint="https://your-resource.openai.azure.com/"
)
# Works exactly like OpenAI with automatic tracing
response = client.chat.completions.create(
model="gpt-4", # Your deployment name
messages=[{"role": "user", "content": "Hello Azure!"}]
)

from langchain.chains import LLMChain
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
from langfuse.langchain import CallbackHandler
# Initialize Langfuse callback handler
langfuse_handler = CallbackHandler(
public_key="your-public-key",
secret_key="your-secret-key"
)
# Create LangChain components
llm = OpenAI(temperature=0.7)
prompt = PromptTemplate(
input_variables=["topic"],
template="Write a short poem about {topic}"
)
chain = LLMChain(llm=llm, prompt=prompt)
# Run with Langfuse tracing
result = chain.run(
topic="artificial intelligence",
callbacks=[langfuse_handler]
)
print(result)
# Automatically creates:
# - Chain-type span for the LLMChain
# - Generation-type span for the OpenAI LLM call
# - Proper parent-child relationships
# - Input/output capture at each level

from langchain.agents import initialize_agent, Tool
from langchain.agents import AgentType
from langchain.llms import OpenAI
from langfuse.langchain import CallbackHandler
# Define tools
def search_tool(query):
    """Placeholder search tool for the agent example.

    Echoes the query back in a fixed sentence; swap in a real search
    backend for production use.
    """
    # Your search implementation
    return "Search results for: {}".format(query)
def calculator_tool(expression):
    """Evaluate an arithmetic expression string and return the result as a string.

    Agent tool: *expression* is produced by the LLM at runtime, i.e. it is
    untrusted input.

    SECURITY: eval() executes arbitrary Python, so a crafted expression can run
    arbitrary code. Replace with a restricted evaluator (e.g. an ast-based
    arithmetic parser) before using this outside a demo.
    """
    # Your calculator implementation
    return str(eval(expression))
tools = [
Tool(
name="Search",
func=search_tool,
description="Useful for searching information"
),
Tool(
name="Calculator",
func=calculator_tool,
description="Useful for mathematical calculations"
)
]
# Initialize agent with Langfuse callback
llm = OpenAI(temperature=0)
agent = initialize_agent(
tools,
llm,
agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
verbose=True
)
langfuse_handler = CallbackHandler()
# Run agent with automatic tracing
response = agent.run(
"What is the square root of 144?",
callbacks=[langfuse_handler]
)
# Automatically creates:
# - Agent-type span for the overall agent execution
# - Tool-type spans for each tool call
# - Generation-type spans for LLM reasoning steps
# - Proper hierarchical trace structure

from langfuse import Langfuse
from langfuse.openai import OpenAI
from langfuse.langchain import CallbackHandler
# Initialize Langfuse client for manual tracing
langfuse = Langfuse()
# OpenAI client with automatic tracing
openai_client = OpenAI(api_key="your-key")
# LangChain callback handler
langfuse_handler = CallbackHandler()
@langfuse.observe(as_type="chain")
def complex_workflow(user_query):
    """Workflow combining manual tracing, OpenAI, and LangChain.

    Args:
        user_query: Free-text user question; forwarded to both the direct
            OpenAI call and the LangChain chain.

    Returns:
        The post-processed final result. NOTE(review): produced by
        post_process_response, which is not defined in this file — confirm
        its contract.
    """
    # Step 1: Direct OpenAI call (automatically traced by the patched client)
    initial_response = openai_client.chat.completions.create(
        model="gpt-4",
        messages=[{"role": "user", "content": f"Analyze this query: {user_query}"}]
    )
    analysis = initial_response.choices[0].message.content
    # Step 2: LangChain processing (automatically traced via the callback
    # handler). Imports are deferred until the function actually runs.
    from langchain.chains import LLMChain
    from langchain.prompts import PromptTemplate
    from langchain.llms import OpenAI
    llm = OpenAI()
    prompt = PromptTemplate(
        input_variables=["analysis", "query"],
        template="Based on this analysis: {analysis}\n\nProvide a detailed response to: {query}"
    )
    chain = LLMChain(llm=llm, prompt=prompt)
    detailed_response = chain.run(
        analysis=analysis,
        query=user_query,
        callbacks=[langfuse_handler]
    )
    # Step 3: Manual span for custom processing
    with langfuse.start_as_current_observation(name="post-processing", as_type="span") as span:
        final_result = post_process_response(detailed_response)
        span.update(output=final_result)
    return final_result
# Usage creates comprehensive trace with all integration types
result = complex_workflow("Explain quantum computing")

import os
from langfuse.openai import OpenAI
from langfuse.langchain import CallbackHandler
# Configure via environment variables
os.environ["LANGFUSE_PUBLIC_KEY"] = "your-public-key"
os.environ["LANGFUSE_SECRET_KEY"] = "your-secret-key"
os.environ["LANGFUSE_HOST"] = "https://cloud.langfuse.com"
# OpenAI client automatically picks up Langfuse config
openai_client = OpenAI(api_key="your-openai-key")
# LangChain handler also picks up config
langfuse_handler = CallbackHandler()
# Or configure explicitly
langfuse_handler = CallbackHandler(
public_key="explicit-key",
secret_key="explicit-secret",
host="https://your-langfuse-instance.com",
environment="production",
tracing_enabled=True
)

from langfuse import Langfuse, observe
from langfuse.openai import OpenAI
# Custom wrapper for additional observability
class ObservabilityWrapper:
    """Wraps a Langfuse client plus a traced OpenAI client to attach custom
    scores (latency, quality) on top of the automatic tracing."""

    def __init__(self):
        self.langfuse = Langfuse()
        # langfuse.openai drop-in client: every call is traced automatically.
        self.openai_client = OpenAI()

    @observe(as_type="chain")
    def traced_workflow(self, input_data):
        """Run one traced chat completion and attach custom scores to its span.

        Args:
            input_data: User message content sent to the model.

        Returns:
            The model's reply text.

        Raises:
            Exception: Any error from the OpenAI call propagates unchanged;
                the @observe decorator records it on the trace, so no local
                try/except is needed (the original re-raise-only handler was
                redundant and bound an unused variable).
        """
        # Local import fixes a NameError in the original snippet, where `time`
        # was used without ever being imported.
        import time

        # Custom metric: wall-clock execution time.
        start_time = time.time()

        # OpenAI call (automatically traced as a generation).
        response = self.openai_client.chat.completions.create(
            model="gpt-4",
            messages=[{"role": "user", "content": input_data}]
        )
        result = response.choices[0].message.content

        # Add custom scoring to the span created by @observe.
        current_span = self.langfuse.get_current_observation()
        if current_span:
            execution_time = time.time() - start_time
            current_span.score(
                name="execution_time",
                value=execution_time,
                comment=f"Execution took {execution_time:.2f} seconds"
            )
            # Quality assessment.
            # NOTE(review): assess_quality is not defined in this file — confirm.
            quality_score = assess_quality(result)
            current_span.score(
                name="quality",
                value=quality_score,
                comment="Automated quality assessment"
            )
        return result
# Usage
wrapper = ObservabilityWrapper()
result = wrapper.traced_workflow("What is machine learning?")

# Before: Using standard libraries without tracing
"""
import openai
from langchain.chains import LLMChain
client = openai.OpenAI()
response = client.chat.completions.create(...)
"""
# After: Drop-in replacement with automatic tracing
from langfuse.openai import OpenAI # Just change the import
from langchain.chains import LLMChain
from langfuse.langchain import CallbackHandler
client = OpenAI() # Everything else stays the same
response = client.chat.completions.create(...)
# Add LangChain tracing with callback
langfuse_handler = CallbackHandler()
chain = LLMChain(...)
result = chain.run(..., callbacks=[langfuse_handler])  # Just add callbacks parameter

# 1. Environment-based configuration
import os
class LangfuseConfig:
    """Centralized configuration management."""

    @classmethod
    def setup_environment(cls):
        """Validate that the Langfuse credentials are present in the environment.

        Raises:
            ValueError: If LANGFUSE_PUBLIC_KEY or LANGFUSE_SECRET_KEY is
                missing or empty.
        """
        for name in ("LANGFUSE_PUBLIC_KEY", "LANGFUSE_SECRET_KEY"):
            if not os.getenv(name):
                raise ValueError(f"Missing required environment variable: {name}")

    @classmethod
    def get_callback_handler(cls):
        """Return a CallbackHandler tagged with ENVIRONMENT (default 'development')."""
        cls.setup_environment()
        env_tag = os.getenv("ENVIRONMENT", "development")
        return CallbackHandler(environment=env_tag)
# 2. Conditional tracing for different environments
def get_openai_client():
    """Return an OpenAI client: Langfuse-traced in production, plain otherwise.

    The mode is selected via the ENVIRONMENT variable; the API key is read
    from OPENAI_API_KEY. Imports are deferred so only the chosen client
    library is loaded.
    """
    in_production = os.getenv("ENVIRONMENT") == "production"
    if in_production:
        # Drop-in replacement that traces every call to Langfuse.
        from langfuse.openai import OpenAI
    else:
        # Vanilla client: no tracing outside production.
        import openai
        OpenAI = openai.OpenAI
    return OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
# 3. Integration testing
def test_integrations():
    """Smoke-test that the Langfuse integrations import and construct correctly.

    A quick sanity check only: no API calls are made, so invalid credentials
    still pass.
    """
    from langfuse.openai import OpenAI
    from langfuse.langchain import CallbackHandler
    # Test OpenAI integration: the drop-in client should expose the same
    # surface as openai.OpenAI (e.g. the chat completions namespace).
    client = OpenAI(api_key="test-key")
    assert hasattr(client, 'chat')
    # Test LangChain integration
    handler = CallbackHandler()
    # NOTE(review): callable(handler) asserts the *instance* is callable;
    # LangChain callback handlers are normally passed via callbacks=[...] and
    # may not implement __call__ — confirm this is the intended check.
    assert callable(handler)
    print("All integrations working correctly")
test_integrations()

Install with Tessl CLI
npx tessl i tessl/pypi-langfuse