"""Comprehensive Python SDK for AI application observability and experimentation,
with OpenTelemetry-based tracing, automatic instrumentation, and dataset management.

Prompt template management system supporting both text and chat-based prompts with
variable interpolation, version control, and LangChain integration. Enables
centralized prompt management and experimentation.

Defines the base types for prompt template clients supporting both text and chat
formats.
"""
# Base type alias
PromptClient = Union[TextPromptClient, ChatPromptClient]

# Manages text-based prompt templates with variable interpolation capabilities.
class TextPromptClient:
    """Manages text-based prompt templates with variable interpolation capabilities."""

    def __init__(self, name: str, version: int, config: Dict[str, Any],
                 labels: List[str], tags: List[str], commit_message: Optional[str] = None,
                 prompt: Optional[str] = None):
        """Initialize text prompt client."""

    def compile(self, **kwargs) -> str:
        """Compile prompt template with provided variables.

        Args:
            **kwargs: Variable values for template interpolation

        Returns:
            Compiled prompt string with variables replaced

        Raises:
            ValueError: If required variables are missing
        """

    @property
    def variables(self) -> List[str]:
        """Get list of variable names from template.

        Returns:
            List of variable names found in the prompt template
        """

    def get_langchain_prompt(self) -> Any:
        """Convert prompt to LangChain PromptTemplate format.

        Returns:
            LangChain PromptTemplate instance
        """

    # Attributes
    name: str
    version: int
    config: Dict[str, Any]
    labels: List[str]
    tags: List[str]
    commit_message: Optional[str]
    prompt: str


# Manages chat-based prompt templates supporting message roles and placeholders.
class ChatPromptClient:
    """Manages chat-based prompt templates supporting message roles and placeholders."""

    # NOTE: ChatMessageWithPlaceholdersDict is defined later in this module, so the
    # annotation must be a forward reference (string) to avoid a NameError at import.
    def __init__(self, name: str, version: int, config: Dict[str, Any],
                 labels: List[str], tags: List[str], commit_message: Optional[str] = None,
                 prompt: Optional[List["ChatMessageWithPlaceholdersDict"]] = None):
        """Initialize chat prompt client."""

    def compile(self, **kwargs) -> List[Dict[str, str]]:
        """Compile chat prompt with variables and resolve placeholders.

        Args:
            **kwargs: Variable values and placeholder content

        Returns:
            List of compiled chat messages with roles and content

        Raises:
            ValueError: If required variables or placeholders are missing
        """

    @property
    def variables(self) -> List[str]:
        """Get list of variable names from chat messages.

        Returns:
            List of variable names found across all chat messages
        """

    def get_langchain_prompt(self) -> Any:
        """Convert to LangChain ChatPromptTemplate format.

        Returns:
            LangChain ChatPromptTemplate instance
        """

    # Attributes
    name: str
    version: int
    config: Dict[str, Any]
    labels: List[str]
    tags: List[str]
    commit_message: Optional[str]
    prompt: List["ChatMessageWithPlaceholdersDict"]


# Core methods for managing prompts through the Langfuse client.
class Langfuse:
    """Core methods for managing prompts through the Langfuse client."""

    # Return/parameter types use forward references (strings) so this class does not
    # depend on definition order of the prompt-client and TypedDict types.
    def get_prompt(self, name: str, *, version: Optional[int] = None, label: Optional[str] = None,
                   fallback: Optional[Union[str, List["ChatMessageDict"]]] = None,
                   max_retries: int = 2, fetch_timeout_seconds: int = 3,
                   cache_ttl_seconds: int = 60) -> Union["TextPromptClient", "ChatPromptClient"]:
        """Fetch prompt template by name and version/label.

        Args:
            name: Prompt name
            version: Specific version to fetch (if not provided, gets latest)
            label: Label to fetch (alternative to version)
            fallback: Fallback prompt if fetch fails
            max_retries: Maximum retry attempts
            fetch_timeout_seconds: Request timeout
            cache_ttl_seconds: Cache time-to-live

        Returns:
            TextPromptClient or ChatPromptClient based on prompt type

        Raises:
            Exception: If prompt not found and no fallback provided
        """

    def create_prompt(self, *, name: str, prompt: Union[str, List["ChatMessageDict"]],
                      config: Optional[Dict[str, Any]] = None, labels: Optional[List[str]] = None,
                      tags: Optional[List[str]] = None) -> Union["TextPromptClient", "ChatPromptClient"]:
        """Create new prompt template.

        Args:
            name: Prompt name (must be unique for first version)
            prompt: Prompt content (string for text, list of messages for chat)
            config: Configuration metadata
            labels: Labels for categorization
            tags: Tags for organization

        Returns:
            Created prompt client (TextPromptClient or ChatPromptClient)
        """

    def clear_prompt_cache(self) -> None:
        """Clear local prompt cache to force fresh fetches."""


# Supporting types for chat-based prompts with role and placeholder support.
# Basic chat message structure
class ChatMessageDict(TypedDict):
    role: str     # "system", "user", "assistant", etc.
    content: str  # Message content


# Chat message with placeholder support
class ChatMessagePlaceholderDict(TypedDict):
    role: str
    content: List[Union[str, Dict[str, str]]]  # Mix of text and placeholders
# Union type for message handling
ChatMessageWithPlaceholdersDict = Union[ChatMessageDict, ChatMessagePlaceholderDict]

from langfuse import Langfuse
langfuse = Langfuse()

# Create a text prompt template
text_prompt = langfuse.create_prompt(
    name="qa-prompt",
    prompt="Answer the following question: {question}\n\nContext: {context}",
    labels=["qa", "production"],
    tags=["v1", "context-aware"],
)
print(f"Prompt: {text_prompt.name}")
print(f"Version: {text_prompt.version}")
print(f"Variables: {text_prompt.variables}")  # ["question", "context"]

# Use the prompt
compiled = text_prompt.compile(
    question="What is the capital of France?",
    context="France is a country in Western Europe...",
)
print(compiled)
# Output: "Answer the following question: What is the capital of France?\n\nContext: France is a country in Western Europe..."
# Create a chat prompt template
chat_messages = [
    {"role": "system", "content": "You are a helpful assistant specialized in {domain}."},
    {"role": "user", "content": "Question: {question}"},
    {"role": "assistant", "content": "I'll help you with that {domain} question."},
]
chat_prompt = langfuse.create_prompt(
    name="assistant-chat",
    prompt=chat_messages,
    config={"temperature": 0.7, "max_tokens": 500},
    labels=["assistant", "chat"],
    tags=["conversational"],
)

# Use the chat prompt
compiled_messages = chat_prompt.compile(
    domain="mathematics",
    question="What is the derivative of x^2?",
)
for message in compiled_messages:
    print(f"{message['role']}: {message['content']}")
# Expected output:
# system: You are a helpful assistant specialized in mathematics.
# user: Question: What is the derivative of x^2?
# assistant: I'll help you with that mathematics question.
# Get latest version of a prompt
prompt = langfuse.get_prompt("qa-prompt")

# Get specific version
prompt_v2 = langfuse.get_prompt("qa-prompt", version=2)

# Get by label
production_prompt = langfuse.get_prompt("qa-prompt", label="production")

# With fallback for reliability
prompt = langfuse.get_prompt(
    name="qa-prompt",
    fallback="Answer this question: {question}",
)

# Use retrieved prompt
response = prompt.compile(question="What is AI?")
# Convert Langfuse prompts to LangChain format
langfuse_prompt = langfuse.get_prompt("qa-prompt")
langchain_prompt = langfuse_prompt.get_langchain_prompt()

# Use with LangChain
from langchain.chains import LLMChain
from langchain.llms import OpenAI

llm = OpenAI()
chain = LLMChain(llm=llm, prompt=langchain_prompt)
result = chain.run(
    question="What is machine learning?",
    context="Machine learning is a subset of AI...",
)
# Advanced chat template with placeholders
complex_chat = [
    {
        "role": "system",
        "content": [
            "You are an expert in ",
            {"type": "placeholder", "key": "expertise_area"},
            ". Use the following context: ",
            {"type": "placeholder", "key": "context"},
        ],
    },
    {
        "role": "user",
        "content": "{user_question}",
    },
]
chat_prompt = langfuse.create_prompt(
    name="complex-assistant",
    prompt=complex_chat,
    config={"model": "gpt-4", "temperature": 0.3},
)

# Compile with placeholders and variables
compiled = chat_prompt.compile(
    expertise_area="quantum physics",
    context="Recent developments in quantum computing...",
    user_question="How do quantum computers work?",
)
# Create initial version
v1_prompt = langfuse.create_prompt(
    name="summarizer",
    prompt="Summarize the following text: {text}",
    labels=["v1"],
)

# Create improved version
v2_prompt = langfuse.create_prompt(
    name="summarizer",  # Same name creates new version
    prompt="Provide a concise summary of the following text, highlighting key points: {text}",
    labels=["v2", "improved"],
    tags=["production"],
)

# Compare versions
v1 = langfuse.get_prompt("summarizer", version=1)
v2 = langfuse.get_prompt("summarizer", version=2)
print(f"V1 variables: {v1.variables}")
print(f"V2 variables: {v2.variables}")

# Test both versions
test_text = "Long article about climate change..."
v1_result = v1.compile(text=test_text)
v2_result = v2.compile(text=test_text)
print("V1:", v1_result)
print("V2:", v2_result)
def experiment_with_prompts():
    """Compare different prompt versions in experiments.

    Runs the same evaluation data through tasks pinned to prompt versions 1 and 2
    and returns both experiment results for comparison.
    """
    from langfuse import Evaluation

    # Define task functions for different prompt versions; each fetches a pinned
    # version, compiles it with the item's input, and generates a completion.
    def task_v1(*, item, **kwargs):
        prompt = langfuse.get_prompt("summarizer", version=1)
        compiled = prompt.compile(text=item["input"])
        return llm_generate(compiled)

    def task_v2(*, item, **kwargs):
        prompt = langfuse.get_prompt("summarizer", version=2)
        compiled = prompt.compile(text=item["input"])
        return llm_generate(compiled)

    # Evaluation data
    test_data = [
        {"input": "Long text to summarize...", "expected_output": "Expected summary..."}
    ]

    # Run experiments with both versions
    result_v1 = langfuse.run_experiment(
        name="Summarizer V1 Test",
        data=test_data,
        task=task_v1,
        evaluators=[summary_quality_evaluator],
    )
    result_v2 = langfuse.run_experiment(
        name="Summarizer V2 Test",
        data=test_data,
        task=task_v2,
        evaluators=[summary_quality_evaluator],
    )
    return result_v1, result_v2


experiment_with_prompts()
class PromptManager:
    """Utility class for managing prompts with caching and fallbacks."""

    def __init__(self, langfuse_client):
        self.langfuse = langfuse_client
        # cache maps "name:version" -> (prompt, fetch_timestamp)
        self.cache = {}

    def get_prompt_with_cache(self, name, version=None, cache_duration=300):
        """Get prompt with local caching.

        Returns a cached prompt if it is younger than cache_duration seconds;
        otherwise fetches a fresh one. If the fetch fails, falls back to a stale
        cached copy when available, else re-raises the fetch error.
        """
        cache_key = f"{name}:{version or 'latest'}"
        if cache_key in self.cache:
            prompt, timestamp = self.cache[cache_key]
            if time.time() - timestamp < cache_duration:
                return prompt
        # Fetch fresh prompt
        try:
            prompt = self.langfuse.get_prompt(name, version=version)
            self.cache[cache_key] = (prompt, time.time())
            return prompt
        except Exception:
            # Return cached version if available, even if expired
            if cache_key in self.cache:
                return self.cache[cache_key][0]
            raise  # bare raise preserves the original traceback

    def compile_with_defaults(self, prompt_name, variables, defaults=None):
        """Compile prompt with default variable values.

        Explicit values in `variables` take precedence over `defaults`.

        Raises:
            ValueError: If the prompt requires variables not supplied.
        """
        prompt = self.get_prompt_with_cache(prompt_name)
        # Merge variables over defaults (explicit values win)
        all_vars = (defaults or {}).copy()
        all_vars.update(variables)
        # Check for missing variables before compiling
        missing = set(prompt.variables) - set(all_vars.keys())
        if missing:
            raise ValueError(f"Missing variables: {missing}")
        return prompt.compile(**all_vars)
# Usage
prompt_manager = PromptManager(langfuse)
response = prompt_manager.compile_with_defaults(
    prompt_name="qa-prompt",
    variables={"question": "What is AI?"},
    defaults={"context": "General knowledge base"},
)
def get_appropriate_prompt(user_type, domain):
    """Select prompt based on user context.

    Maps (user_type, domain) pairs to named prompts, falling back to
    "default-prompt" for unknown combinations or fetch failures.
    """
    prompt_mapping = {
        ("expert", "technical"): "expert-technical-prompt",
        ("beginner", "technical"): "beginner-technical-prompt",
        ("expert", "general"): "expert-general-prompt",
        ("beginner", "general"): "beginner-general-prompt",
    }
    prompt_name = prompt_mapping.get((user_type, domain), "default-prompt")
    try:
        return langfuse.get_prompt(prompt_name, label="production")
    except Exception:  # narrow from bare except: don't swallow SystemExit/KeyboardInterrupt
        # Fallback to default
        return langfuse.get_prompt("default-prompt")


# Usage in application
def generate_response(question, user_profile):
    prompt = get_appropriate_prompt(
        user_profile["experience_level"],
        user_profile["domain"],
    )
    compiled = prompt.compile(
        question=question,
        user_level=user_profile["experience_level"],
    )
    return llm_generate(compiled)


# Install with Tessl CLI:
#   npx tessl i tessl/pypi-langfuse