# Python Logging Integration

`FluentHandler` is a drop-in `logging.Handler` with structured formatting, custom formatters, and integration with existing Python logging workflows, so applications can send logs to Fluentd without changing their existing logging code.

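For example, attaching the handler to the root logger forwards records from existing `logging.getLogger(...)` calls to Fluentd without touching any call sites. A minimal sketch (the tag `app` and the localhost endpoint are placeholder values):

```python
import logging

from fluent import handler

# Attach a FluentHandler to the root logger; existing module loggers propagate
# to it, so no application code has to change. Tag and endpoint are placeholders.
logging.basicConfig(level=logging.INFO)
logging.getLogger().addHandler(handler.FluentHandler('app', host='localhost', port=24224))

# Pre-existing logging calls now also reach Fluentd.
logging.getLogger('payments').info('order processed')
```
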
## Capabilities
### FluentHandler Class
Python logging.Handler subclass that sends log records to Fluentd with structured formatting and full integration with the standard logging module.

```python { .api }
class FluentHandler(logging.Handler):
    def __init__(
        self,
        tag: str,
        host: str = "localhost",
        port: int = 24224,
        timeout: float = 3.0,
        verbose: bool = False,
        buffer_overflow_handler=None,
        msgpack_kwargs=None,
        nanosecond_precision: bool = False,
        **kwargs
    ):
        """
        Initialize FluentHandler.

        Parameters:
        - tag (str): Tag for log events
        - host (str): Fluentd host
        - port (int): Fluentd port
        - timeout (float): Connection timeout
        - verbose (bool): Verbose mode
        - buffer_overflow_handler (callable): Buffer overflow handler
        - msgpack_kwargs (dict): msgpack options
        - nanosecond_precision (bool): Use nanosecond timestamps
        - **kwargs: Additional FluentSender options
        """

    def emit(self, record) -> bool:
        """
        Emit a log record to Fluentd.

        Parameters:
        - record (LogRecord): Python logging.LogRecord instance

        Returns:
        bool: Success status from underlying sender
        """

    def close(self) -> None:
        """Close handler and underlying sender."""

    def getSenderClass(self):
        """
        Get the sender class to use.

        Returns:
        class: FluentSender class
        """

    def getSenderInstance(
        self,
        tag: str,
        host: str,
        port: int,
        timeout: float,
        verbose: bool,
        buffer_overflow_handler,
        msgpack_kwargs,
        nanosecond_precision: bool,
        **kwargs
    ):
        """
        Create sender instance.

        Returns:
        FluentSender: Configured sender instance
        """

    @property
    def sender(self):
        """
        Get sender instance (lazy initialization).

        Returns:
        FluentSender: The underlying sender instance
        """

    def __enter__(self):
        """Enter context manager."""

    def __exit__(self, exc_type, exc_val, exc_tb):
        """Exit context manager, closes handler."""
```

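`getSenderClass()` and `getSenderInstance()` act as extension points: a subclass can swap in a different sender implementation while keeping the handler logic unchanged. A minimal sketch, assuming the bundled `fluent.asyncsender` module (the library's own `fluent.asynchandler.FluentHandler` takes the same approach):

```python
import logging

from fluent import asyncsender, handler


class AsyncFluentHandler(handler.FluentHandler):
    """FluentHandler variant that emits through the background-thread sender."""

    def getSenderClass(self):
        # Swap the default FluentSender for the queue-based asynchronous sender
        return asyncsender.FluentSender


logger = logging.getLogger('async_demo')
logger.setLevel(logging.INFO)

fluent_handler = AsyncFluentHandler('app.async', host='localhost', port=24224)
logger.addHandler(fluent_handler)
logger.info('queued for delivery by the background sender')
fluent_handler.close()  # closes the handler and its sender
```
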
### FluentRecordFormatter Class
Structured formatter for converting Python log records into Fluentd-compatible structured data with flexible formatting options.

```python { .api }
class FluentRecordFormatter(logging.Formatter):
    def __init__(
        self,
        fmt=None,
        datefmt=None,
        style: str = "%",
        fill_missing_fmt_key: bool = False,
        format_json: bool = True,
        exclude_attrs=None
    ):
        """
        Initialize FluentRecordFormatter.

        Parameters:
        - fmt (dict/callable): Format specification dict or callable
        - datefmt (str): Date format string
        - style (str): Format style ('%', '{', or '$')
        - fill_missing_fmt_key (bool): Fill missing format keys with None
        - format_json (bool): Parse message as JSON if possible
        - exclude_attrs (iterable): Attributes to exclude from record
        """

    def format(self, record) -> dict:
        """
        Format log record as structured data.

        Parameters:
        - record (LogRecord): Python logging.LogRecord

        Returns:
        dict: Structured log data for Fluentd
        """

    def usesTime(self) -> bool:
        """
        Check if formatter uses time.

        Returns:
        bool: True if formatter uses asctime
        """
```

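Because `format()` returns a plain dict, a format specification can be checked locally before wiring it to Fluentd. A minimal sketch (the field names in the fmt dict are arbitrary examples):

```python
import logging

from fluent import handler

formatter = handler.FluentRecordFormatter({
    'level': '%(levelname)s',
    'where': '%(module)s.%(funcName)s',
})

# Build a LogRecord by hand instead of going through a logger, just to show
# the structured dict the formatter returns.
record = logging.LogRecord(
    name='demo', level=logging.INFO, pathname='demo.py', lineno=1,
    msg='hello %s', args=('world',), exc_info=None, func='main',
)

print(formatter.format(record))
# Roughly: {'level': 'INFO', 'where': 'demo.main', 'message': 'hello world'}
```
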
## Usage Examples
### Basic Logging Handler

```python
import logging
from fluent import handler

# Configure logging with FluentHandler
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger('myapp')

# Add Fluent handler
fluent_handler = handler.FluentHandler('app.logs')
logger.addHandler(fluent_handler)

# Use standard logging - automatically sent to Fluentd
logger.info('Application started')
logger.warning('High memory usage detected')
logger.error('Database connection failed')

# Cleanup
fluent_handler.close()
```

### Structured Logging with Custom Formatter

```python
import logging
from fluent import handler

# Custom format for structured logs
custom_format = {
    'host': '%(hostname)s',
    'where': '%(module)s.%(funcName)s',
    'level': '%(levelname)s',
    'stack_trace': '%(exc_text)s'
}

# Setup logger with custom formatter
logger = logging.getLogger('structured_app')
logger.setLevel(logging.DEBUG)

fluent_handler = handler.FluentHandler('app.structured', host='logs.company.com')
formatter = handler.FluentRecordFormatter(custom_format)
fluent_handler.setFormatter(formatter)
logger.addHandler(fluent_handler)

# Structured logging: dict messages are merged into the formatted record,
# so these fields appear alongside the keys defined in custom_format
logger.info({
    'message': 'User action performed',
    'user_id': 123,
    'action': 'login',
    'ip_address': '192.168.1.1'
})

logger.error({
    'message': 'Payment processing failed',
    'order_id': 'ord-456',
    'error_code': 'CARD_DECLINED',
    'amount': 99.99
})

fluent_handler.close()
```

### JSON Message Parsing

```python
import logging
import json
from fluent import handler

logger = logging.getLogger('json_app')
logger.setLevel(logging.INFO)  # default effective level (WARNING) would drop the INFO records below
fluent_handler = handler.FluentHandler('app.json')

# Formatter will automatically parse JSON strings
formatter = handler.FluentRecordFormatter(format_json=True)
fluent_handler.setFormatter(formatter)
logger.addHandler(fluent_handler)

# Send JSON string - automatically parsed into structured data
json_data = json.dumps({
    'event': 'user_signup',
    'user_id': 789,
    'email': 'user@example.com',
    'source': 'web'
})

logger.info(json_data)

# Send dictionary directly
logger.info({
    'event': 'purchase',
    'user_id': 789,
    'product_id': 'prod-123',
    'amount': 29.99
})

# Send plain string - becomes 'message' field
logger.info('This will be a message field')

fluent_handler.close()
```

### Configuration via logging.config

```python
import logging.config

# YAML/dict configuration
config = {
    'version': 1,
    'formatters': {
        'fluent_fmt': {
            '()': 'fluent.handler.FluentRecordFormatter',
            'format': {
                'level': '%(levelname)s',
                'hostname': '%(hostname)s',
                'where': '%(module)s.%(funcName)s',
                'stack_trace': '%(exc_text)s'
            }
        },
        # Plain-text formatter for the console handler; FluentRecordFormatter
        # returns a dict, which StreamHandler cannot write directly
        'console_fmt': {
            'format': '%(asctime)s %(levelname)s %(name)s %(message)s'
        }
    },
    'handlers': {
        'fluent': {
            'class': 'fluent.handler.FluentHandler',
            'tag': 'myapp.logs',
            'host': 'localhost',
            'port': 24224,
            'formatter': 'fluent_fmt',
            'level': 'INFO'
        },
        'console': {
            'class': 'logging.StreamHandler',
            'level': 'DEBUG',
            'formatter': 'console_fmt'
        }
    },
    'loggers': {
        'myapp': {
            'handlers': ['fluent', 'console'],
            'level': 'DEBUG',
            'propagate': False
        }
    }
}

logging.config.dictConfig(config)

# Use configured logger
logger = logging.getLogger('myapp')
logger.info('Application configured via dictConfig')
logger.error('Error with stack trace', exc_info=True)
```

### Buffer Overflow Handling

```python
import logging
import msgpack
from io import BytesIO
from fluent import handler

def log_overflow_handler(pendings):
    """Handle log buffer overflow"""
    print(f"Log buffer overflow: {len(pendings)} bytes")

    # Save to a local file as backup
    with open('/tmp/fluent_overflow.log', 'ab') as f:
        f.write(pendings)

    # Parse and print lost log entries
    unpacker = msgpack.Unpacker(BytesIO(pendings))
    for log_entry in unpacker:
        print(f"Lost log: {log_entry}")

# Setup handler with overflow protection
logger = logging.getLogger('overflow_test')
logger.setLevel(logging.INFO)  # ensure INFO records are not filtered out before reaching the handler
fluent_handler = handler.FluentHandler(
    'app.logs',
    host='unreliable-server.example.com',
    buffer_overflow_handler=log_overflow_handler,
    bufmax=1024  # Small buffer for testing
)

logger.addHandler(fluent_handler)

# Generate lots of logs to test overflow
for i in range(1000):
    logger.info(f'Log message {i}', extra={'data': 'x' * 100})

fluent_handler.close()
```

### Context Manager Usage

```python
import logging
from fluent import handler

logger = logging.getLogger('context_app')
logger.setLevel(logging.INFO)

# Automatic cleanup with context manager
with handler.FluentHandler('app.session') as fluent_handler:
    logger.addHandler(fluent_handler)

    logger.info('Session started')
    logger.info('Processing user request')
    logger.info('Session ended')

# Handler automatically closed on exit
```

### Exception Logging

```python
import logging
from fluent import handler

logger = logging.getLogger('error_app')
fluent_handler = handler.FluentHandler('app.errors')

# Custom formatter to include stack traces
formatter = handler.FluentRecordFormatter({
    'level': '%(levelname)s',
    'message': '%(message)s',
    'module': '%(module)s',
    'function': '%(funcName)s',
    'exception': '%(exc_text)s'
})

fluent_handler.setFormatter(formatter)
logger.addHandler(fluent_handler)

try:
    # Code that might raise an exception
    result = 10 / 0
except ZeroDivisionError:
    # Log with full exception details; the dict message is merged into the
    # structured record, and exc_info=True populates the 'exception' field
    logger.error({
        'message': 'Division by zero error',
        'operation': 'division',
        'numerator': 10,
        'denominator': 0
    }, exc_info=True)

fluent_handler.close()
```

### Multi-Handler Setup

```python
import logging
from fluent import handler

# Setup multiple handlers for different log levels
logger = logging.getLogger('multi_handler_app')
logger.setLevel(logging.DEBUG)

# Handler for all logs
all_logs_handler = handler.FluentHandler('app.all')
all_logs_handler.setLevel(logging.DEBUG)

# Handler for errors only
error_handler = handler.FluentHandler('app.errors', host='alerts.company.com')
error_handler.setLevel(logging.ERROR)

# Handler for performance metrics
perf_handler = handler.FluentHandler('app.performance')
perf_handler.setLevel(logging.INFO)
perf_filter = lambda record: 'performance' in record.getMessage().lower()
perf_handler.addFilter(perf_filter)

logger.addHandler(all_logs_handler)
logger.addHandler(error_handler)
logger.addHandler(perf_handler)

# Different log levels go to the appropriate handlers
logger.debug('Debug information')  # Only to all_logs_handler
logger.info('Performance metric: response time 150ms')  # To all_logs and perf handlers
logger.error('Critical system error')  # To all_logs and error handlers (filtered out of perf)

# Cleanup
all_logs_handler.close()
error_handler.close()
perf_handler.close()
```