tessl install tessl/pypi-langsmith@0.6.1

Python SDK for LangSmith Observability and Evaluation Platform
Decorators and context managers for automatic tracing of functions and code blocks. LangSmith's tracing system supports both synchronous and asynchronous code, with full support for generators and streaming.
Core functions: traceable, tracing_context, get_tracing_context, get_current_run_tree, set_run_metadata.
Patterns: nested tracing, distributed tracing, streaming with reduce_fn, error handling, and run tree manipulation.
→ Manual Tracing for fine-grained control | → Configuration for global settings
Use the @traceable decorator (this page) for automatic tracing of functions; use manual tracing (manual-tracing.md) when you need fine-grained control over run creation.
The @traceable decorator automatically traces function execution, logging inputs, outputs, timing, and errors.
def traceable(
    run_type: Literal["chain", "llm", "tool", "retriever", "prompt", "embedding", "parser"] = "chain",
    *,
    name: Optional[str] = None,
    metadata: Optional[Mapping[str, Any]] = None,
    tags: Optional[list[str]] = None,
    client: Optional[Client] = None,
    reduce_fn: Optional[Callable[[Sequence], Union[dict, str]]] = None,
    project_name: Optional[str] = None,
    process_inputs: Optional[Callable[[dict], dict]] = None,
    process_outputs: Optional[Callable[..., dict]] = None,
    process_chunk: Optional[Callable] = None,
    _invocation_params_fn: Optional[Callable[[dict], dict]] = None,
    dangerously_allow_filesystem: bool = False,
    enabled: Optional[bool] = None,
) -> Callable:
    """
    Decorator to trace a function with LangSmith.

    Automatically creates a run/span for each function call and logs inputs,
    outputs, and errors. Works with both sync and async functions, and supports
    generators and async generators.

    Parameters:
    - run_type: Type of run (e.g., "chain", "llm", "tool", "retriever", "prompt")
    - name: Optional name for the run (defaults to function name)
    - metadata: Optional metadata to attach to the run
    - tags: Optional list of tags
    - client: Optional LangSmith client for custom settings
    - reduce_fn: Function to reduce generator output (for functions returning generators)
    - project_name: Optional project name to log run to
    - process_inputs: Custom function to serialize/process inputs before logging
    - process_outputs: Custom function to serialize/process outputs before logging
    - process_chunk: Custom function to process streaming chunks
    - _invocation_params_fn: Function to extract invocation parameters
    - dangerously_allow_filesystem: Allow filesystem access for attachments
    - enabled: Whether tracing is enabled (overrides global setting)

    Returns:
        Decorated function that logs traces
    """

from langsmith import traceable
@traceable
def process_data(data: dict) -> dict:
    """Process some data."""
    result = transform(data)
    return {"output": result}

# Call the function - automatically traced
output = process_data({"input": "test"})

@traceable(
    run_type="llm",
    name="GPT-4 Call",
    tags=["production", "v1"],
    metadata={"model": "gpt-4"}
)
def call_llm(prompt: str) -> str:
    """Call an LLM."""
    return llm.invoke(prompt)

@traceable
async def async_process(data: dict) -> dict:
    """Process data asynchronously."""
    result = await async_transform(data)
    return result

# Use with async/await
result = await async_process({"input": "test"})

For generators, use reduce_fn to capture the complete output:
@traceable(reduce_fn=lambda outputs: {"results": list(outputs)})
def stream_results(query: str):
    """Stream results as they're generated."""
    for item in generate_items(query):
        yield item

# Use the generator
for result in stream_results("test query"):
    process(result)
# Full output is logged to LangSmith

@traceable
async def async_stream_results(query: str):
    """Stream results asynchronously."""
    async for item in async_generate_items(query):
        yield item

# Use the async generator
async for result in async_stream_results("query"):
    await process(result)
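The process_chunk parameter from the signature above can transform each streamed chunk before it is recorded, for example to truncate large chunks. A minimal sketch, assuming process_chunk affects only what gets logged (generate_items is the same placeholder used above):

@traceable(process_chunk=lambda chunk: str(chunk)[:200])
def stream_truncated(query: str):
    """Stream items; each logged chunk is truncated to 200 characters."""
    for item in generate_items(query):
        # assumption: process_chunk changes only the logged chunk, not what is yielded
        yield item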
Sanitize or transform inputs and outputs before logging:

@traceable(
    process_inputs=lambda inputs: {k: v for k, v in inputs.items() if k != "api_key"},
    process_outputs=lambda output: {"summary": str(output)[:100]}
)
def sensitive_operation(api_key: str, data: dict) -> str:
    """Operation with sensitive data."""
    return process_with_key(api_key, data)

@traceable(enabled=False)
def sometimes_traced(data: dict) -> dict:
    """Function that can be disabled."""
    return process(data)

# Or conditionally enable
import os

@traceable(enabled=os.getenv("ENV") == "production")
def prod_only_traced(data: dict) -> dict:
    """Only traced in production."""
    return process(data)
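The project_name and client parameters from the signature above route a single function's traces without changing global configuration. A minimal sketch; the project name and API key are placeholders:

from langsmith import Client, traceable

@traceable(
    project_name="experiments",            # placeholder project name
    client=Client(api_key="your-api-key")  # placeholder credentials
)
def scored_step(data: dict) -> dict:
    """Runs are logged to the experiments project via the supplied client."""
    return {"score": len(data)}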
The tracing_context context manager temporarily sets the tracing context for a block of code.

@contextmanager
def tracing_context(
    *,
    project_name: Optional[str] = None,
    tags: Optional[list[str]] = None,
    metadata: Optional[dict[str, Any]] = None,
    parent: Optional[Union[RunTree, Mapping, str, Literal[False]]] = None,
    enabled: Optional[Union[bool, Literal["local"]]] = None,
    client: Optional[Client] = None,
    replicas: Optional[Sequence[WriteReplica]] = None,
    distributed_parent_id: Optional[str] = None,
) -> Generator[None, None, None]:
    """
    Context manager to temporarily set tracing context.

    All traced operations within this context will inherit the specified settings.

    Parameters:
    - project_name: Name of the project to log the run to
    - tags: Tags to add to all runs in this context
    - metadata: Metadata to add to all runs in this context
    - parent: Parent run (RunTree, request headers, or dotted order string); set to False to break the parent chain
    - enabled: Whether tracing is enabled (True/False/"local")
    - client: Client to use for logging
    - replicas: Sequence of WriteReplica dictionaries to send runs to
    - distributed_parent_id: Distributed parent ID for distributed tracing
    """

from langsmith import tracing_context, traceable
@traceable
def my_function():
    return "result"

# All traces in this block go to "my-project"
with tracing_context(project_name="my-project"):
    my_function()

with tracing_context(
    tags=["production", "v2"],
    metadata={"environment": "prod", "region": "us-west"}
):
    my_function()  # Inherits tags and metadata

# Disable completely
with tracing_context(enabled=False):
    my_function()  # Not traced

# Local-only tracing (no network calls)
with tracing_context(enabled="local"):
    my_function()  # Traced locally but not sent

Start a new root trace instead of continuing the current trace tree:

with tracing_context(parent=False):
    my_function()  # Will be a root trace, not a child
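The parent parameter also accepts an existing run or its serialized headers, which attaches new runs to that trace instead of starting a fresh one. A hedged sketch, assuming RunTree.to_headers() is available to serialize the current run for propagation:

from langsmith import traceable, get_current_run_tree, tracing_context

@traceable
def capture_parent() -> dict:
    run = get_current_run_tree()
    return run.to_headers() if run else {}  # assumption: to_headers() serializes the run

headers = capture_parent()
with tracing_context(parent=headers):
    my_function()  # Attached as a child of the captured run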
from langsmith import Client

custom_client = Client(api_key="custom-key")
with tracing_context(client=custom_client):
    my_function()  # Uses custom client

Inner contexts override outer ones:

with tracing_context(project_name="project-a", tags=["tag-a"]):
    with tracing_context(tags=["tag-b"]):
        my_function()  # Logs to "project-a" with tag "tag-b"
get_tracing_context returns the current tracing context as a dictionary.

def get_tracing_context(
    context: Optional[contextvars.Context] = None
) -> dict[str, Any]:
    """
    Get the current tracing context.

    Parameters:
    - context: Optional specific context to read from

    Returns:
        Dictionary with keys: parent, project_name, tags, metadata, enabled,
        client, replicas, distributed_parent_id
    """

from langsmith import get_tracing_context, tracing_context
# Get current context
ctx = get_tracing_context()
print(ctx["project_name"])  # Current project
print(ctx["tags"])  # Current tags
print(ctx["enabled"])  # Whether tracing is enabled
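Because get_tracing_context accepts an explicit contextvars.Context, you can snapshot the current settings and inspect them from elsewhere, for example before handing work to a thread pool. A small sketch:

import contextvars

captured = contextvars.copy_context()  # snapshot of the current context variables
settings = get_tracing_context(captured)
print(settings["project_name"], settings["tags"])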
get_current_run_tree returns the current run tree from the context.

def get_current_run_tree() -> Optional[RunTree]:
    """
    Get the current run tree from the context.

    Returns the current RunTree if inside a traced context, None otherwise.
    Useful for accessing run metadata or manually manipulating the current run.

    Returns:
        The current RunTree if inside a traced context, None otherwise
    """

from langsmith import traceable, get_current_run_tree
@traceable
def my_function():
    # Get current run
    run = get_current_run_tree()
    if run:
        print(f"Run ID: {run.id}")
        print(f"Run name: {run.name}")
        print(f"Project: {run.session_name}")
    return "result"

@traceable
def adaptive_function(input_data):
    run = get_current_run_tree()
    if run:
        # Add tags based on runtime conditions
        if is_important(input_data):
            run.add_tags(["important", "priority"])
        # Add metadata
        run.add_metadata({
            "data_size": len(input_data),
            "processing_mode": "fast"
        })
    return process(input_data)

@traceable
def parent_function():
    return child_function()

@traceable
def child_function():
    run = get_current_run_tree()
    if run and run.parent_run_id:
        print(f"This is a child of {run.parent_run_id}")
    return "child result"
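Outside of a traced context, get_current_run_tree returns None, so shared helpers can call it unconditionally and only annotate the run when one exists:

def shared_helper(data):
    run = get_current_run_tree()  # None when called outside any traced function
    if run is not None:
        run.add_metadata({"helper_called": True})
    return data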
set_run_metadata updates metadata on the current run tree.

def set_run_metadata(**metadata: Any) -> None:
    """
    Update metadata on the current run tree.

    Convenience function for adding metadata to the current run without
    manually getting the run tree.

    Parameters:
    - **metadata: Key-value pairs to add to the current run's metadata
    """

from langsmith import traceable, set_run_metadata
@traceable
def process_request(user_id: str, request_type: str):
    # Add metadata early
    set_run_metadata(
        user_id=user_id,
        request_type=request_type
    )
    # More metadata as we learn more
    if is_premium_user(user_id):
        set_run_metadata(tier="premium")
    result = process()
    # Add result metadata
    set_run_metadata(
        result_size=len(result),
        cache_hit=was_cached
    )
    return result

@traceable
def track_model_usage(prompt: str):
    # Track model information
    set_run_metadata(
        model="gpt-4",
        prompt_tokens=count_tokens(prompt)
    )
    response = call_model(prompt)
    # Add completion metadata
    set_run_metadata(
        completion_tokens=count_tokens(response),
        total_cost=calculate_cost(prompt, response)
    )
    return response

@traceable
def conditional_metadata(debug: bool = False):
    if debug:
        set_run_metadata(
            debug_mode=True,
            verbose_logging=True
        )
    return process()

Nested @traceable calls automatically create parent-child relationships:
from langsmith import traceable

@traceable(run_type="chain")
def orchestrator(inputs: dict):
    # This will be the parent run
    result1 = step1(inputs)
    result2 = step2(result1)
    result3 = step3(result2)
    return result3

@traceable(run_type="tool")
def step1(data: dict):
    # Automatically becomes a child of orchestrator
    return process_step1(data)

@traceable(run_type="tool")
def step2(data: dict):
    return process_step2(data)

@traceable(run_type="tool")
def step3(data: dict):
    return process_step3(data)

Pass trace context between services:
from langsmith import traceable, get_current_run_tree, tracing_context
import requests

# Service A
@traceable
def service_a_handler(request):
    run = get_current_run_tree()
    # Pass trace context to Service B
    response = requests.post(
        "http://service-b/process",
        json={"data": request},
        headers={
            "X-Trace-Id": str(run.trace_id),
            "X-Parent-Id": str(run.id)
        }
    )
    return response.json()

# Service B
@traceable
def service_b_handler(request, headers):
    # Extract trace context
    parent_id = headers.get("X-Parent-Id")
    with tracing_context(distributed_parent_id=parent_id):
        result = process(request)
    return result

Stream responses and log the complete output with reduce_fn:

from langsmith import traceable
@traceable(
    reduce_fn=lambda chunks: {"full_response": "".join(chunks)}
)
def stream_llm_response(prompt: str):
    """Stream LLM response and log full output."""
    for chunk in llm.stream(prompt):
        yield chunk

# Use it
for chunk in stream_llm_response("Tell me a story"):
    print(chunk, end="", flush=True)
# Full response is logged to LangSmith

# Async streaming
@traceable(
    reduce_fn=lambda chunks: {"full_response": "".join(chunks)}
)
async def async_stream_response(prompt: str):
    """Stream async LLM response."""
    async for chunk in llm.astream(prompt):
        yield chunk

Exceptions raised inside traced functions are automatically logged to the run:
from langsmith import traceable

@traceable
def function_with_error():
    try:
        result = risky_operation()
        return result
    except ValueError:
        # The error is automatically logged to the run
        raise
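You can also annotate the current run before re-raising so failures are easy to filter later; a small sketch combining the error handling above with get_current_run_tree:

from langsmith import traceable, get_current_run_tree

@traceable
def validate_and_process(data: dict):
    try:
        return risky_operation()
    except ValueError:
        run = get_current_run_tree()
        if run:
            run.add_tags("validation-error")
            run.add_metadata({"input_keys": list(data)})
        raise  # the exception itself is still logged automatically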
Access and manipulate the current run within a traced function:

from langsmith import traceable, get_current_run_tree

@traceable
def my_function():
    # Get the current RunTree
    run = get_current_run_tree()
    if run:
        # Access run properties
        print(f"Run ID: {run.id}")
        print(f"Run name: {run.name}")
        print(f"Trace ID: {run.trace_id}")
        print(f"Project: {run.session_name}")

        # Modify the run
        run.add_tags("dynamic-tag")
        run.add_metadata({"computed_value": 42})

        # Create a manual child
        child = run.create_child(
            name="Manual Child",
            run_type="tool",
            inputs={"data": "test"}
        )
        child_result = do_child_work()
        child.end(outputs={"result": child_result})
        child.post()
    return "result"

→ Manual Tracing for the trace context manager and RunTree | → Configuration for configure() and global settings