Datadog APM client library providing distributed tracing, continuous profiling, error tracking, test optimization, deployment tracking, code hotspots analysis, and dynamic instrumentation for Python applications.
—
Comprehensive automatic instrumentation for web frameworks, databases, HTTP clients, message queues, AI/ML libraries, and 80+ other popular Python packages through sophisticated monkey-patching. This enables zero-code-change observability for most Python applications.
Selective instrumentation of specific libraries and frameworks for precise control over which components are monitored.
def patch(
# Web Frameworks
django: bool = None,
flask: bool = None,
fastapi: bool = None,
sanic: bool = None,
bottle: bool = None,
molten: bool = None,
pyramid: bool = None,
tornado: bool = None,
aiohttp: bool = None,
# Databases
psycopg: bool = None,
mysql: bool = None,
mysqldb: bool = None,
pymysql: bool = None,
sqlite3: bool = None,
pymongo: bool = None,
mongoengine: bool = None,
cassandra: bool = None,
# Caching
redis: bool = None,
rediscluster: bool = None,
aioredis: bool = None,
aredis: bool = None,
pymemcache: bool = None,
# HTTP Clients
requests: bool = None,
httpx: bool = None,
httplib: bool = None,
urllib3: bool = None,
# AWS Services
boto: bool = None,
botocore: bool = None,
aiobotocore: bool = None,
# Message Queues
celery: bool = None,
kombu: bool = None,
dramatiq: bool = None,
rq: bool = None,
# Search
elasticsearch: bool = None,
algoliasearch: bool = None,
# AI/ML Libraries
openai: bool = None,
anthropic: bool = None,
langchain: bool = None,
langgraph: bool = None,
google_genai: bool = None,
google_generativeai: bool = None,
litellm: bool = None,
# Testing
pytest: bool = None,
pytest_bdd: bool = None,
pytest_benchmark: bool = None,
# Async
asyncio: bool = None,
gevent: bool = None,
# Other
grpc: bool = None,
graphql: bool = None,
jinja2: bool = None,
mako: bool = None,
protobuf: bool = None,
avro: bool = None,
# Configuration
raise_errors: bool = True,
**kwargs
) -> None:
"""
Manually patch specific modules for automatic instrumentation.
Parameters:
- Library-specific boolean flags to enable/disable instrumentation
- raise_errors: Whether to raise exceptions if patching fails (default: True)
- **kwargs: Additional library flags not explicitly listed
Raises:
- ModuleNotFoundException: If a requested module doesn't have instrumentation
- ImportError: If a requested module isn't installed
"""Usage examples:
from ddtrace import patch
# Patch specific components for a web application
patch(
django=True, # Django web framework
psycopg=True, # PostgreSQL database
redis=True, # Redis caching
requests=True, # HTTP client
celery=True # Background tasks
)
# Patch AI/ML components for an AI application
patch(
fastapi=True, # API framework
openai=True, # OpenAI integration
langchain=True, # LangChain framework
pymongo=True # MongoDB database
)
# Minimal patching for microservice
patch(flask=True, requests=True)
# Error handling
try:
patch(nonexistent_library=True)
except ModuleNotFoundException as e:
print(f"Library not supported: {e}")
except ImportError as e:
print(f"Library not installed: {e}")def patch_all(**patch_modules: bool) -> None:
"""
Automatically patch all supported modules (deprecated).
Parameters:
- **patch_modules: Override flags for specific modules
Note: This function is deprecated in favor of patch() and DD_PATCH_MODULES
environment variable. It will be removed in a future version.
"""Configure automatic instrumentation through environment variables for deployment flexibility.
import os
# Enable instrumentation via environment variables
os.environ['DD_PATCH_MODULES'] = 'django:true,redis:true,psycopg:true'
# Or disable specific modules
os.environ['DD_PATCH_MODULES'] = 'django:true,redis:false,celery:true'
# Import ddtrace to apply environment-based patching
import ddtrace.auto # Automatically applies DD_PATCH_MODULES configuration
Comprehensive Django integration covering ORM, middleware, templates, and admin interface.
from ddtrace import patch
patch(django=True)
# Automatically instruments:
# - HTTP request/response handling
# - Database queries via Django ORM
# - Template rendering
# - Cache operations
# - Authentication and sessions
# - Admin interface interactions
# - Middleware processing
# - Static file serving
# Example Django view with automatic instrumentation
def user_profile(request, user_id):
# HTTP request is automatically traced
user = User.objects.get(id=user_id) # Database query traced
return render(request, 'profile.html', {'user': user}) # Template rendering traced
Flask application instrumentation including routes, templates, and extensions.
from ddtrace import patch
from flask import Flask
patch(flask=True)
app = Flask(__name__)
# Automatically instruments:
# - Route handling and request processing
# - Template rendering (Jinja2)
# - Error handling
# - Before/after request hooks
# - Blueprint operations
# - Session management
@app.route('/api/users/<int:user_id>')
def get_user(user_id):
# Route automatically traced with URL pattern
user_data = fetch_user_from_db(user_id) # If DB is patched, this is traced too
return jsonify(user_data)
Modern async API framework instrumentation with automatic OpenAPI integration.
from ddtrace import patch
from fastapi import FastAPI
patch(fastapi=True)
app = FastAPI()
# Automatically instruments:
# - Async request handling
# - Path parameters and query parameters
# - Request/response serialization
# - Dependency injection
# - Background tasks
# - WebSocket connections
# - OpenAPI documentation generation
@app.post("/items/")
async def create_item(item: ItemModel):
# Async endpoint automatically traced
result = await save_item_to_db(item) # Async DB operation traced
return result
from ddtrace import patch
patch(psycopg=True)
# Automatically instruments:
# - SQL query execution
# - Connection pooling
# - Transaction management
# - Cursor operations
# - Prepared statements
import psycopg2
conn = psycopg2.connect("dbname=mydb user=myuser")
cursor = conn.cursor()
# This query is automatically traced with SQL statement and parameters
cursor.execute("SELECT * FROM users WHERE age > %s", (25,))
results = cursor.fetchall()
from ddtrace import patch
patch(pymongo=True, mongoengine=True)
# PyMongo instrumentation
from pymongo import MongoClient
client = MongoClient('mongodb://localhost:27017/')
db = client.mydb
# Collection operations automatically traced
users = db.users.find({"age": {"$gt": 25}})
# MongoEngine instrumentation
from mongoengine import Document, StringField, IntField
class User(Document):
name = StringField(required=True)
age = IntField()
# ORM operations automatically traced
user = User.objects(age__gt=25).first()
from ddtrace import patch
patch(redis=True, aioredis=True)
# Redis instrumentation
import redis
r = redis.Redis(host='localhost', port=6379, db=0)
# Cache operations automatically traced
r.set('user:123', 'john_doe')
username = r.get('user:123')
# Async Redis instrumentation
import aioredis
async def cache_operation():
redis_client = aioredis.from_url("redis://localhost")
await redis_client.set('async_key', 'value') # Traced
return await redis_client.get('async_key') # Traced
from ddtrace import patch
patch(requests=True)
import requests
# HTTP requests automatically traced with URL, method, status code
response = requests.get('https://api.example.com/users')
data = response.json()
# POST requests with request/response details
response = requests.post(
'https://api.example.com/users',
json={'name': 'John', 'email': 'john@example.com'}
)
from ddtrace import patch
patch(httpx=True)
import httpx
# Sync client
with httpx.Client() as client:
response = client.get('https://api.example.com/data') # Traced
# Async client
async with httpx.AsyncClient() as client:
response = await client.get('https://api.example.com/data') # Traced
from ddtrace import patch
patch(openai=True)
from openai import OpenAI
client = OpenAI()
# Chat completions automatically traced with token usage, model info
response = client.chat.completions.create(
model="gpt-3.5-turbo",
messages=[{"role": "user", "content": "Hello, world!"}]
)
# Embeddings and other operations also traced
embeddings = client.embeddings.create(
model="text-embedding-ada-002",
input="Text to embed"
)
from ddtrace import patch
patch(langchain=True)
# LangChain operations automatically traced
from langchain.llms import OpenAI
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
llm = OpenAI()
prompt = PromptTemplate(template="Tell me about {topic}")
chain = LLMChain(llm=llm, prompt=prompt)
# Chain execution automatically traced with input/output
result = chain.run(topic="machine learning")from ddtrace import patch
patch(celery=True)
from celery import Celery
app = Celery('myapp', broker='redis://localhost:6379')
# Task definitions automatically instrumented
@app.task
def process_data(data_id):
# Task execution automatically traced
data = fetch_data(data_id)
return process_and_save(data)
# Task calls traced as both producer and consumer
result = process_data.delay('123')
from ddtrace import patch
patch(pytest=True)
# Test execution automatically traced
# - Test discovery and collection
# - Individual test execution time
# - Test outcomes (pass/fail/skip)
# - Test parameters and fixtures
# - Coverage information (if available)
def test_user_creation():
# Test execution automatically traced
user = create_user("test@example.com")
assert user.email == "test@example.com"
@pytest.mark.parametrize("email", ["test1@example.com", "test2@example.com"])
def test_email_validation(email):
# Parameterized tests traced individually
assert validate_email(email)
from ddtrace import config, patch
# Configure before patching
config.django.service_name = "web-frontend"
config.redis.service_name = "cache-service"
config.psycopg.service_name = "database"
# Apply patches with custom configuration
patch(django=True, redis=True, psycopg=True)
from ddtrace import Pin, patch
from ddtrace.contrib.redis import get_version
patch(redis=True)
import redis
# Customize Redis instrumentation
redis_client = redis.Redis()
pin = Pin.get_from(redis_client)
pin.clone(service="user-cache", tags={"cache.type": "user-data"}).onto(redis_client)
# Operations now use custom service name and tags
redis_client.set('user:123', 'data')
from ddtrace import config, patch
# Global configuration
config.service = "my-application"
config.env = "production"
config.version = "2.1.0"
# Service name mapping for integrations
config.service_mapping = {
'psycopg': 'postgres-primary',
'redis': 'redis-cache',
'requests': 'external-apis'
}
# Global tags applied to all spans
config.tags = {
'team': 'backend',
'region': 'us-east-1'
}
patch(psycopg=True, redis=True, requests=True)
from ddtrace import patch
from ddtrace.contrib.integration_registry import ModuleNotFoundException
try:
patch(
django=True,
nonexistent_lib=True, # This will fail
raise_errors=True
)
except ModuleNotFoundException as e:
print(f"Integration not available: {e}")
# Continue with available integrations
patch(django=True, raise_errors=False)
# Check which integrations are available
from ddtrace.contrib import trace_integrations
available_integrations = trace_integrations()
print(f"Available integrations: {list(available_integrations.keys())}")# Selective patching for performance-critical applications
patch(
# Essential components only
flask=True,
psycopg=True,
redis=True,
# Skip expensive instrumentations if not needed
jinja2=False,
graphql=False
)
# Configure sampling for high-throughput applications
from ddtrace import config
config.analytics_enabled = False # Disable analytics if not needed
config.priority_sampling = True # Use priority sampling
ddtrace provides automatic instrumentation for 80+ Python libraries organized by category:
Each integration captures relevant metadata, timing information, and error states specific to that library's operations.
Install with Tessl CLI
npx tessl i tessl/pypi-ddtrace