# Integrations and Handlers

Integration components for connecting Logfire with standard logging frameworks, structured logging libraries, and external systems. These handlers and processors enable seamless integration with existing logging infrastructure and third-party tools.

## Capabilities
### Standard Library Logging Integration

Integration with Python's built-in logging module to capture existing log records and forward them to Logfire with proper formatting and context.

```python { .api }
class LogfireLoggingHandler:
    """
    Handler for integrating Python standard library logging with Logfire.
    Captures log records and forwards them to Logfire with appropriate formatting.
    """

    def __init__(self, logfire: Logfire | None = None) -> None:
        """
        Initialize the logging handler.

        Parameters:
        - logfire: Specific Logfire instance to use (None for default instance)
        """

    def emit(self, record: logging.LogRecord) -> None:
        """
        Process a log record and send it to Logfire.

        Parameters:
        - record: Standard library LogRecord to process
        """
```

**Usage Examples:**

```python
import logging
import logfire

# Configure Logfire
logfire.configure()

# Set up standard library logging with Logfire handler
logging.basicConfig(
    level=logging.INFO,
    handlers=[
        logfire.LogfireLoggingHandler(),
        logging.StreamHandler()  # Also log to console
    ]
)

# Use standard logging - automatically captured by Logfire
logger = logging.getLogger(__name__)
logger.info('Application started')
logger.error('Database connection failed', extra={'db_host': 'localhost'})

# Existing logging code works unchanged
def process_user_data(user_id):
    logger.debug('Processing user data', extra={'user_id': user_id})
    try:
        # Processing logic
        result = perform_processing()
        logger.info('User data processed successfully', extra={'user_id': user_id})
        return result
    except Exception:
        logger.exception('Failed to process user data', extra={'user_id': user_id})
        raise
```
### Structlog Integration

Integration with structlog for structured logging with consistent key-value formatting and rich context preservation.

```python { .api }
class StructlogProcessor:
    """
    Processor for integrating structlog with Logfire.
    Processes structlog events and forwards them to Logfire with structured data.

    Alias: LogfireProcessor (same class, different name for backwards compatibility)
    """

    def __init__(self, logfire: Logfire | None = None) -> None:
        """
        Initialize the structlog processor.

        Parameters:
        - logfire: Specific Logfire instance to use (None for default instance)
        """

    def __call__(self, logger, method_name: str, event_dict: dict) -> dict:
        """
        Process a structlog event dictionary.

        Parameters:
        - logger: Structlog logger instance
        - method_name: Log level method name
        - event_dict: Event dictionary with structured data

        Returns: Processed event dictionary
        """

# Alias for backwards compatibility
LogfireProcessor = StructlogProcessor
```

**Usage Examples:**

```python
import structlog
import logfire

# Configure Logfire
logfire.configure()

# Configure structlog with Logfire processor
structlog.configure(
    processors=[
        structlog.stdlib.filter_by_level,
        structlog.stdlib.add_logger_name,
        structlog.stdlib.add_log_level,
        structlog.stdlib.PositionalArgumentsFormatter(),
        structlog.processors.TimeStamper(fmt="iso"),
        structlog.processors.StackInfoRenderer(),
        structlog.processors.format_exc_info,
        logfire.StructlogProcessor(),  # Send to Logfire
        structlog.processors.JSONRenderer()
    ],
    context_class=dict,
    logger_factory=structlog.stdlib.LoggerFactory(),
    wrapper_class=structlog.stdlib.BoundLogger,
    cache_logger_on_first_use=True,
)

# Use structlog - automatically captured by Logfire
logger = structlog.get_logger()

logger.info("User login", user_id=123, ip_address="192.168.1.1")
logger.error("Payment failed", user_id=123, amount=99.99, error_code="CARD_DECLINED")

# Structured logging with context
logger = logger.bind(request_id="req-456", user_id=123)
logger.info("Starting request processing")
logger.info("Database query completed", query_time_ms=45)
logger.info("Request processing completed", total_time_ms=150)
```
### Loguru Integration

Integration with Loguru for enhanced logging capabilities with automatic formatting and context preservation.

```python { .api }
def loguru_handler() -> dict[str, Any]:
    """
    Create a Logfire handler configuration for Loguru integration.

    Returns: Dictionary containing handler configuration for logger.add()
    """
```

**Usage Examples:**

```python
import logfire

# Configure Logfire
logfire.configure()

# Configure Loguru with Logfire integration
from loguru import logger

# Add Logfire as a handler
handler_config = logfire.loguru_handler()
logger.add(**handler_config)

# Use Loguru - automatically captured by Logfire
logger.info("Application started")
logger.info("Processing user {user_id}", user_id=123)
logger.error("Failed to connect to database: {error}", error="connection timeout")

# Loguru's rich formatting works with Logfire
logger.info("User data: {data}", data={"name": "John", "age": 30})

# Exception logging with tracebacks
try:
    result = 1 / 0
except ZeroDivisionError:
    logger.exception("Division error occurred")
```
### Context and Baggage Management

Utilities for managing OpenTelemetry context and baggage for cross-service correlation and metadata propagation.

```python { .api }
def get_baggage() -> dict[str, str]:
    """
    Get current OpenTelemetry baggage values.

    Returns: Dictionary of baggage key-value pairs currently in context
    """

def set_baggage(baggage: dict[str, str]) -> Token:
    """
    Set OpenTelemetry baggage for context propagation across service boundaries.

    Parameters:
    - baggage: Dictionary of key-value pairs to add to baggage

    Returns: Context token for restoration if needed
    """

def add_non_user_code_prefix(prefix: str) -> None:
    """
    Add a prefix to identify non-user code in stack traces.
    Helps filter out framework/library code from error reporting.

    Parameters:
    - prefix: Path prefix to mark as non-user code (e.g., '/usr/local/lib/python3.9/site-packages/')
    """
```

**Usage Examples:**

```python
import logfire

# Set baggage for request correlation
request_baggage = {
    'user_id': '12345',
    'session_id': 'sess_abcd1234',
    'request_id': 'req_xyz789',
    'feature_flags': 'new_ui_enabled,beta_features'
}

# Set baggage - automatically propagated in HTTP headers
token = logfire.set_baggage(request_baggage)

# Make service calls - baggage is automatically included
with logfire.span('External service call'):
    response = requests.post('https://service-b.example.com/process')

# In the receiving service, get baggage
current_baggage = logfire.get_baggage()
user_id = current_baggage.get('user_id')
session_id = current_baggage.get('session_id')

# Use baggage for correlation
with logfire.span('Process user request', user_id=user_id, session_id=session_id):
    # Processing logic
    pass

# Configure non-user code filtering
logfire.add_non_user_code_prefix('/usr/local/lib/python3.9/site-packages/')
logfire.add_non_user_code_prefix('/opt/conda/lib/python3.9/site-packages/')
```
### Instrumentation Control

Utilities for controlling when and how instrumentation operates, allowing fine-grained control over observability data collection.

```python { .api }
def suppress_instrumentation() -> AbstractContextManager[None]:
    """
    Context manager to temporarily suppress all automatic instrumentation.
    Useful for avoiding recursive instrumentation or excluding specific operations.

    Returns: Context manager that disables instrumentation within its scope
    """
```

**Usage Examples:**

```python
import logfire
import requests

# Configure instrumentation
logfire.configure()
logfire.instrument_requests()

# Normal requests are instrumented
response = requests.get('https://api.example.com/data')  # Creates span

# Suppress instrumentation for specific operations
with logfire.suppress_instrumentation():
    # This request won't create spans or metrics
    health_check = requests.get('https://api.example.com/health')

    # Internal operations you don't want to trace
    internal_response = requests.post('https://internal-metrics.company.com/report')

# Instrumentation resumes after context
response2 = requests.get('https://api.example.com/users')  # Creates span

# Useful for avoiding noise in monitoring systems
def send_telemetry_data():
    with logfire.suppress_instrumentation():
        # Don't trace the telemetry sending itself
        requests.post('https://telemetry-collector.example.com/', json=metrics_data)
```
### CLI and Development Tools

Command-line utilities and development helpers for managing Logfire configuration and debugging observability setup.

```python { .api }
def logfire_info() -> None:
    """
    Display current Logfire configuration information.
    Useful for debugging and verifying setup in development.
    """
```

**Usage Examples:**

```python
import logfire

# Configure Logfire
logfire.configure(
    service_name='my-web-app',
    environment='development',
    send_to_logfire=True
)

# Display current configuration (useful for debugging)
logfire.logfire_info()
# Output shows:
# - Service name and version
# - Environment settings
# - Export destinations
# - Console configuration
# - Instrumentation status
```
### Custom Processors and Exporters

Support for extending Logfire with custom processing and export logic through OpenTelemetry processors.

```python { .api }
# Configuration support for custom processors
class AdvancedOptions:
    """Advanced configuration options for custom integrations."""

    additional_span_processors: Sequence[SpanProcessor] = ()
    """Additional OpenTelemetry span processors for custom processing."""

    log_record_processors: Sequence[LogRecordProcessor] = ()
    """Additional OpenTelemetry log record processors."""
```

**Usage Examples:**

```python
import logfire
from opentelemetry.sdk.trace.export import BatchSpanProcessor
from opentelemetry.exporter.jaeger.thrift import JaegerExporter

# Custom span processor for additional export destinations
jaeger_exporter = JaegerExporter(
    agent_host_name="jaeger-collector.example.com",
    agent_port=6831,
)
jaeger_processor = BatchSpanProcessor(jaeger_exporter)

# Configure Logfire with custom processors
logfire.configure(
    service_name='my-service',
    advanced=logfire.AdvancedOptions(
        additional_span_processors=[jaeger_processor]
    )
)

# Now spans are sent to both Logfire and Jaeger
with logfire.span('Custom processing'):
    # This span goes to both destinations
    logfire.info('Processing data')
```
### Third-Party Integrations

Helper functions and utilities for integrating with external monitoring and alerting systems.

**Usage Examples:**

```python
import logfire

# Integration with external monitoring systems
def setup_monitoring_integrations():
    # Configure Logfire
    logfire.configure(
        service_name='production-api',
        environment='production'
    )

    # Set up correlation with external systems
    logfire.set_baggage({
        'deployment_id': 'deploy-abc123',
        'datacenter': 'us-west-2',
        'cluster': 'production-cluster-1'
    })

    # Use tags for filtering and alerting
    production_logger = logfire.with_tags('production', 'api', 'critical')

    return production_logger

# Use throughout application
logger = setup_monitoring_integrations()

def handle_critical_operation():
    with logger.span('Critical operation', operation_type='payment_processing') as span:
        try:
            # Critical business logic
            result = process_payment()
            span.set_attribute('success', True)
            return result
        except Exception as e:
            # This will be tagged for alerting
            span.record_exception(e)
            logger.error('Critical operation failed',
                         operation_type='payment_processing',
                         error_type=type(e).__name__)
            raise
```
### Type Definitions

```python { .api }
# Standard library imports for integrations
import logging
from contextlib import AbstractContextManager
from contextvars import Token
from typing import Any, Callable, Sequence

# OpenTelemetry processor types
from opentelemetry.sdk.trace import SpanProcessor
from opentelemetry.sdk._logs import LogRecordProcessor

# Logfire types
from logfire import Logfire
```
### Best Practices for Integrations

**Logging Integration:**
- Use consistent log levels across different logging systems
- Preserve structured data when converting between formats
- Maintain correlation IDs and request context

**Performance Considerations:**
- Use async handlers for high-volume logging
- Configure appropriate batching and export intervals
- Monitor handler performance to avoid logging bottlenecks

**Security:**
- Be careful with baggage content (avoid sensitive data)
- Use scrubbing options to redact sensitive information
- Consider the security implications of cross-service context propagation

**Example Production Setup:**

```python
import logging
import structlog
import logfire

def setup_production_logging():
    # Configure Logfire
    logfire.configure(
        service_name='production-api',
        environment='production',
        send_to_logfire=True,
        console=False,  # Disable console in production
        scrubbing=logfire.ScrubbingOptions(
            extra_patterns=[r'password', r'api_key', r'token']
        )
    )

    # Configure standard library logging
    logging.basicConfig(
        level=logging.INFO,
        handlers=[logfire.LogfireLoggingHandler()],
        format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    )

    # Configure structlog
    structlog.configure(
        processors=[
            structlog.stdlib.filter_by_level,
            structlog.stdlib.add_logger_name,
            structlog.stdlib.add_log_level,
            structlog.processors.TimeStamper(fmt="iso"),
            logfire.StructlogProcessor(),
            structlog.processors.JSONRenderer()
        ],
        logger_factory=structlog.stdlib.LoggerFactory(),
        wrapper_class=structlog.stdlib.BoundLogger,
        cache_logger_on_first_use=True,
    )

    # Set up correlation context
    logfire.set_baggage({
        'service': 'production-api',
        'version': '1.2.3',
        'environment': 'production'
    })

    return logfire.with_tags('production', 'api')

# Use in application
production_logger = setup_production_logging()
```