Datadog APM client library providing distributed tracing, continuous profiling, error tracking, test optimization, deployment tracking, code hotspots analysis, and dynamic instrumentation for Python applications.
—
Comprehensive configuration system for service identification, sampling, trace filtering, integration settings, and environment-specific customization through environment variables and programmatic APIs. This enables fine-tuned control over tracing behavior, performance optimization, and operational requirements.
The main configuration object provides centralized control over ddtrace behavior and settings.
class Config:
# Service identification
service: str # Service name
env: str # Environment (prod, staging, dev)
version: str # Application version
tags: Dict[str, str] # Global tags applied to all spans
# Tracing behavior
trace_enabled: bool # Enable/disable tracing
analytics_enabled: bool # Enable trace analytics
priority_sampling: bool # Use priority sampling
# Performance settings
trace_sample_rate: float # Global sampling rate (0.0-1.0)
trace_rate_limit: int # Traces per second rate limit
# Backend configuration
agent_hostname: str # Datadog Agent hostname
agent_port: int # Datadog Agent port
agent_url: str # Full Agent URL
# Integration settings
service_mapping: Dict[str, str] # Map integration names to service names
# Advanced options
report_hostname: bool # Include hostname in traces
health_metrics_enabled: bool # Enable health metrics
runtime_metrics_enabled: bool # Enable runtime metrics
config: Config # Global configuration object

Usage examples:
from ddtrace import config
# Basic service configuration
config.service = "my-python-app"
config.env = "production"
config.version = "2.1.0"
# Global tags applied to all spans
config.tags = {
"team": "backend",
"region": "us-east-1",
"datacenter": "aws-east"
}
# Sampling configuration
config.trace_sample_rate = 0.1 # Sample 10% of traces
config.priority_sampling = True # Use intelligent sampling
# Agent configuration
config.agent_hostname = "datadog-agent.internal"
config.agent_port = 8126
# Service name mapping for integrations
config.service_mapping = {
"psycopg": "postgres-primary",
"redis": "redis-cache",
"requests": "external-apis"
}

Configure ddtrace through environment variables for deployment flexibility and containerized environments.
import os
# Service identification
os.environ["DD_SERVICE"] = "my-python-app"
os.environ["DD_ENV"] = "production"
os.environ["DD_VERSION"] = "2.1.0"
# Global tags (comma-separated key:value pairs)
os.environ["DD_TAGS"] = "team:backend,region:us-east-1,cost-center:engineering"
# Tracing configuration
os.environ["DD_TRACE_ENABLED"] = "true"
os.environ["DD_TRACE_SAMPLE_RATE"] = "0.1" # 10% sampling
os.environ["DD_TRACE_RATE_LIMIT"] = "100" # Max 100 traces/second
# Agent configuration
os.environ["DD_AGENT_HOST"] = "datadog-agent"
os.environ["DD_TRACE_AGENT_PORT"] = "8126"
os.environ["DD_TRACE_AGENT_URL"] = "http://datadog-agent:8126"
# Integration configuration
os.environ["DD_SERVICE_MAPPING"] = "psycopg:postgres,redis:cache"
# Feature flags
os.environ["DD_TRACE_ANALYTICS_ENABLED"] = "true"
os.environ["DD_RUNTIME_METRICS_ENABLED"] = "true"
os.environ["DD_PROFILING_ENABLED"] = "true"
# Import ddtrace to apply environment configuration
import ddtrace

Fine-tune settings for specific integrations and libraries.
from ddtrace import config
# Django configuration
config.django.service_name = "web-frontend"
config.django.cache_service_name = "django-cache"
config.django.database_service_name = "django-db"
config.django.distributed_tracing_enabled = True
config.django.analytics_enabled = True
config.django.analytics_sample_rate = 1.0
# Flask configuration
config.flask.service_name = "api-backend"
config.flask.analytics_enabled = True
config.flask.template_name_as_resource = True
config.flask.trace_query_string = True
# Database configurations
config.psycopg.service_name = "postgres-primary"
config.psycopg.analytics_enabled = True
config.redis.service_name = "redis-cache"
config.redis.analytics_enabled = False # High volume, disable analytics
config.requests.service_name = "external-apis"
config.requests.analytics_enabled = True
config.requests.analytics_sample_rate = 0.5 # Sample 50% of HTTP requests
# AI/ML integrations
config.openai.service_name = "openai-integration"
config.openai.analytics_enabled = True
config.openai.log_prompt_completion_sample_rate = 0.1 # Log 10% of completions
config.langchain.service_name = "langchain-workflows"
config.langchain.analytics_enabled = True

Control which traces are collected and submitted to optimize performance and costs.
from ddtrace import config
# Global sampling rate (applies to all traces)
config.trace_sample_rate = 0.1 # Sample 10% of traces
# Priority sampling (intelligent sampling based on trace characteristics)
config.priority_sampling = True
# Rate limiting (maximum traces per second)
config.trace_rate_limit = 100
# Analytics sampling (for APM analytics features)
config.analytics_enabled = True
# Per-integration analytics sampling
config.django.analytics_sample_rate = 1.0 # 100% for web requests
config.psycopg.analytics_sample_rate = 0.1 # 10% for database queries
config.requests.analytics_sample_rate = 0.5 # 50% for external HTTP calls
# Environment-based sampling
if config.env == "production":
config.trace_sample_rate = 0.05 # Lower sampling in production
elif config.env == "staging":
config.trace_sample_rate = 0.25 # Medium sampling in staging
else:
config.trace_sample_rate = 1.0 # Full sampling in development

from ddtrace import config
# Agent writer configuration
config._trace_writer_buffer_size = 1000 # Buffer size for trace batching
config._trace_writer_interval_seconds = 1 # Flush interval
config._trace_writer_connection_timeout = 2.0 # Connection timeout
config._trace_writer_max_payload_size = 8 << 20 # 8MB max payload
# Alternative: API key-based submission (bypassing agent)
config._dd_api_key = "your-datadog-api-key"
config._trace_writer_hostname = "intake.datadoghq.com"
config._trace_writer_port = 443

from ddtrace import config
# Debug mode for development
config._debug_mode = True # Enable debug logging
config._startup_logs_enabled = True # Log startup information
# Health check endpoint
config._health_metrics_enabled = True
# Detailed error reporting
config._raise_on_error = True # Raise exceptions instead of logging
# Trace filtering for debugging
config._trace_remove_integration_service_names_enabled = False

from ddtrace import config
# Obfuscate sensitive data
config._obfuscation_query_string_pattern = r"password|token|secret"
config._trace_query_string_obfuscation_disabled = False
# HTTP header filtering
config._trace_header_tags = {
"user-agent": "http.user_agent",
"content-type": "http.content_type"
}
# Remove PII from traces
config._trace_remove_integration_service_names_enabled = True

Modify configuration during application runtime for dynamic behavior adjustment.
from ddtrace import config, tracer
def configure_for_high_traffic():
"""Reduce sampling during high traffic periods"""
config.trace_sample_rate = 0.01 # 1% sampling
config.trace_rate_limit = 50 # Lower rate limit
config.analytics_enabled = False # Disable analytics
def configure_for_debugging():
"""Increase observability for debugging"""
config.trace_sample_rate = 1.0 # 100% sampling
config.analytics_enabled = True # Enable analytics
config._debug_mode = True # Enable debug logging
# Dynamic configuration based on conditions
if is_high_traffic_period():
configure_for_high_traffic()
elif is_debugging_session():
configure_for_debugging()
# Runtime service mapping updates
config.service_mapping["new-integration"] = "new-service-name"

from ddtrace import config
from ddtrace.internal.diagnostics import health
# Validate configuration
def validate_config():
"""Validate current ddtrace configuration"""
issues = []
if not config.service:
issues.append("Service name not set")
if not config.env:
issues.append("Environment not set")
if config.trace_sample_rate < 0 or config.trace_sample_rate > 1:
issues.append("Invalid sample rate")
if config.trace_rate_limit <= 0:
issues.append("Invalid rate limit")
return issues
# Health check
def check_agent_connectivity():
"""Check if Datadog Agent is reachable"""
return health.agent_health_check()
# Diagnostic information
def get_diagnostic_info():
"""Get current configuration and health status"""
return {
"service": config.service,
"env": config.env,
"version": config.version,
"sampling_rate": config.trace_sample_rate,
"agent_reachable": check_agent_connectivity(),
"config_issues": validate_config()
}
# Usage
diagnostics = get_diagnostic_info()
print(f"Service: {diagnostics['service']}")
print(f"Environment: {diagnostics['env']}")
print(f"Agent reachable: {diagnostics['agent_reachable']}")

Optimize ddtrace configuration for containerized environments.
import os
# Kubernetes service discovery
os.environ["DD_AGENT_HOST"] = os.environ.get("DD_AGENT_SERVICE_HOST", "datadog-agent")
os.environ["DD_TRACE_AGENT_PORT"] = os.environ.get("DD_AGENT_SERVICE_PORT", "8126")
# Container-specific tags
os.environ["DD_TAGS"] = f"pod_name:{os.environ.get('HOSTNAME', 'unknown')}"
# Resource-aware configuration
cpu_limit = float(os.environ.get("CPU_LIMIT", "1.0"))
memory_limit = int(os.environ.get("MEMORY_LIMIT", "512")) * 1024 * 1024 # Convert MB to bytes
# Adjust sampling based on resources
if cpu_limit < 0.5:
os.environ["DD_TRACE_SAMPLE_RATE"] = "0.05" # Low sampling for resource-constrained containers
elif cpu_limit >= 2.0:
os.environ["DD_TRACE_SAMPLE_RATE"] = "0.2" # Higher sampling for powerful containers

from ddtrace import config
# Production-optimized settings
config.service = "production-api"
config.env = "production"
config.version = get_application_version()
# Performance optimization
config.trace_sample_rate = 0.05 # 5% sampling for high-volume production
config.trace_rate_limit = 200 # Reasonable rate limit
config.priority_sampling = True # Use intelligent sampling
config.analytics_enabled = False # Disable if not needed for performance
# Reliability settings
config._trace_writer_buffer_size = 1000
config._trace_writer_interval_seconds = 2
config.health_metrics_enabled = True
# Security
config._obfuscation_query_string_pattern = r"password|token|secret|key"

from ddtrace import config
# Development-friendly settings
config.service = "dev-api"
config.env = "development"
config.version = "dev"
# Full observability
config.trace_sample_rate = 1.0 # 100% sampling for debugging
config.analytics_enabled = True # Enable analytics
config._debug_mode = True # Enable debug logging
config._startup_logs_enabled = True
# Fast feedback
config._trace_writer_interval_seconds = 0.5 # Faster flush for immediate feedback

from ddtrace import config
# Staging environment (production-like with more observability)
config.service = "staging-api"
config.env = "staging"
config.version = get_staging_version()
# Balanced sampling
config.trace_sample_rate = 0.25 # 25% sampling
config.analytics_enabled = True # Enable analytics for testing
config.priority_sampling = True # Test priority sampling behavior
# Monitoring
config.health_metrics_enabled = True
config.runtime_metrics_enabled = True

Complete list of key ddtrace environment variables:
DD_SERVICE - Service name
DD_ENV - Environment name
DD_VERSION - Application version
DD_TAGS - Global tags (comma-separated key:value pairs)
DD_TRACE_ENABLED - Enable/disable tracing
DD_TRACE_SAMPLE_RATE - Global sampling rate (0.0-1.0)
DD_TRACE_RATE_LIMIT - Traces per second limit
DD_TRACE_ANALYTICS_ENABLED - Enable trace analytics
DD_AGENT_HOST - Agent hostname
DD_TRACE_AGENT_PORT - Agent port
DD_TRACE_AGENT_URL - Full agent URL
DD_PROFILING_ENABLED - Enable continuous profiling
DD_APPSEC_ENABLED - Enable application security
DD_RUNTIME_METRICS_ENABLED - Enable runtime metrics
DD_SERVICE_MAPPING - Service name mapping
DD_PATCH_MODULES - Module patching configuration

Install with Tessl CLI
npx tessl i tessl/pypi-ddtrace