# Server Operations

Redis server management commands for database operations, configuration, and information retrieval. These operations provide administrative capabilities for monitoring, configuring, and maintaining Redis instances, including multiple-database support, client management, and system information.

## Capabilities

### Server Information

Commands for retrieving server status, statistics, and configuration details.

```python { .api }
def info(self, section: Optional[str] = None) -> Dict[str, Any]: ...

def config_get(self, pattern: str = "*") -> Dict[bytes, bytes]: ...

def config_set(self, name: str, value: EncodableT) -> bool: ...

def config_rewrite(self) -> bool: ...

def config_resetstat(self) -> bool: ...

def time(self) -> Tuple[str, str]: ...

def lastsave(self) -> int: ...
```

### Database Management

Operations for managing Redis databases and their contents.

```python { .api }
def dbsize(self) -> int: ...

def flushdb(self, asynchronous: bool = False) -> bool: ...

def flushall(self, asynchronous: bool = False) -> bool: ...

def swapdb(self, first: int, second: int) -> bool: ...

def select(self, index: int) -> bool: ...
```

### Client Management

Functions for monitoring and managing client connections.

```python { .api }
def client_list(self, _type: Optional[str] = None, client_id: Optional[int] = None) -> str: ...

def client_info(self) -> Dict[str, Union[str, int]]: ...

def client_setname(self, name: str) -> bool: ...

def client_getname(self) -> Optional[bytes]: ...

def client_id(self) -> int: ...

def client_kill(self, address: str) -> bool: ...

def client_pause(self, timeout: int) -> bool: ...
```

### Memory Operations

Commands for memory management and analysis.

```python { .api }
def memory_usage(self, key: KeyT, samples: Optional[int] = None) -> Optional[int]: ...

def memory_stats(self) -> Dict[str, Any]: ...

def memory_purge(self) -> bool: ...
```

### Persistence Operations

Functions for managing Redis data persistence.

```python { .api }
def save(self) -> bool: ...

def bgsave(self, schedule: bool = True) -> bool: ...

def bgrewriteaof(self) -> bool: ...
```

### Access Control Lists (ACL)

Redis Access Control List operations for user management and authentication.

```python { .api }
def acl_cat(self, *category: bytes) -> List[bytes]: ...

def acl_genpass(self, *args: bytes) -> bytes: ...

def acl_setuser(self, username: bytes, *args: bytes) -> bytes: ...

def acl_list(self) -> List[bytes]: ...

def acl_deluser(self, username: bytes) -> bytes: ...

def acl_getuser(self, username: bytes) -> List[bytes]: ...

def acl_users(self) -> List[bytes]: ...

def acl_whoami(self) -> bytes: ...

def acl_save(self) -> SimpleString: ...

def acl_load(self) -> SimpleString: ...

def acl_log(self, *args: bytes) -> Union[SimpleString, List[Dict[str, str]]]: ...
```

### Command Information

Operations for inspecting available Redis commands and their properties.

```python { .api }
def command(self) -> List[List[Any]]: ...

def command_count(self) -> int: ...

def command_getkeys(self, *args) -> List[bytes]: ...

def command_info(self, *command_names: str) -> List[Optional[List[Any]]]: ...
```

### Debugging and Monitoring

Advanced operations for debugging and system monitoring.

```python { .api }
def debug_object(self, key: KeyT) -> str: ...

def monitor(self) -> None: ...

def slowlog_get(self, num: Optional[int] = None) -> List[Dict[str, Any]]: ...

def slowlog_len(self) -> int: ...

def slowlog_reset(self) -> bool: ...
```

## Usage Examples

### Server Information and Statistics

```python
import fakeredis

client = fakeredis.FakeRedis()

# Add some test data to generate statistics
client.mset({
    'user:1': 'alice',
    'user:2': 'bob',
    'counter': '42',
    'session:abc': 'active'
})
client.lpush('queue', 'task1', 'task2', 'task3')
client.sadd('tags', 'redis', 'python', 'testing')

# Get comprehensive server information.
# The client returns INFO as a single flat dictionary of fields; the section
# headers ("# Server", "# Memory", ...) are not preserved as keys.
print("=== Server Information ===")
info = client.info()
for key in sorted(info):
    print(f"  {key}: {info[key]}")

# Passing a section name limits the reply to that section's fields
print("\n=== Memory Information ===")
memory_info = client.info('memory')
for key, value in memory_info.items():
    if 'memory' in key.lower():
        print(f"  {key}: {value}")

print("\n=== Statistics ===")
stats_info = client.info('stats')
for key, value in stats_info.items():
    if 'commands' in key or 'connections' in key:
        print(f"  {key}: {value}")
```

### Configuration Management

```python
import fakeredis

client = fakeredis.FakeRedis()

def as_str(value):
    """CONFIG GET results may be str or bytes depending on client settings."""
    return value.decode() if isinstance(value, bytes) else value

# Get current configuration
print("=== Current Configuration ===")
config = client.config_get()
important_configs = ['maxmemory', 'timeout', 'tcp-keepalive', 'databases']

for key, value in config.items():
    key_str = as_str(key)
    if any(cfg in key_str for cfg in important_configs):
        print(f"  {key_str}: {as_str(value)}")

# Get specific configuration values
print("\n=== Specific Config Values ===")
memory_config = client.config_get('*memory*')
for key, value in memory_config.items():
    print(f"  {as_str(key)}: {as_str(value)}")

# Set configuration values
print("\n=== Setting Configuration ===")
try:
    # Set maximum memory (example)
    result = client.config_set('maxmemory', '100mb')
    print(f"Set maxmemory: {result}")

    # Verify the change
    new_maxmem = client.config_get('maxmemory')
    for key, value in new_maxmem.items():
        print(f"New {as_str(key)}: {as_str(value)}")
except Exception as e:
    print(f"Config set error: {e}")

# Reset statistics
print("\n=== Reset Statistics ===")
reset_result = client.config_resetstat()
print(f"Statistics reset: {reset_result}")
```
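
`config_rewrite()` asks the server to write the current configuration back to its `redis.conf` file. A minimal sketch; a fake backend has no configuration file, so the call is wrapped defensively and a failure is treated as expected rather than as an error:

```python
import fakeredis

client = fakeredis.FakeRedis()

# CONFIG REWRITE only makes sense when the server was started from a config
# file; fakeredis may reject or ignore it, so a failure is merely reported.
try:
    print(f"Config rewritten: {client.config_rewrite()}")
except Exception as e:
    print(f"CONFIG REWRITE not available: {e}")
```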
### Database Operations

```python
import fakeredis

client = fakeredis.FakeRedis()

# Add data to current database (0)
client.mset({
    'db0:key1': 'value1',
    'db0:key2': 'value2',
    'db0:counter': '100'
})

print(f"Database 0 size: {client.dbsize()}")

# Switch to database 1
client.select(1)
client.mset({
    'db1:key1': 'different_value1',
    'db1:key2': 'different_value2'
})

print(f"Database 1 size: {client.dbsize()}")

# Switch back to database 0
client.select(0)
print(f"Back to database 0, size: {client.dbsize()}")

# Demonstrate database swapping
print("\n=== Database Swapping ===")
print("Before swap:")
print(f"  DB 0 key: {client.get('db0:key1')}")

client.select(1)
print(f"  DB 1 key: {client.get('db1:key1')}")

# Swap databases 0 and 1
client.select(0)  # Go back to 0 for swap
swap_result = client.swapdb(0, 1)
print(f"\nDatabase swap result: {swap_result}")

print("After swap:")
print(f"  DB 0 key (was DB 1): {client.get('db1:key1')}")

client.select(1)
print(f"  DB 1 key (was DB 0): {client.get('db0:key1')}")

# Flush operations
print("\n=== Flush Operations ===")
client.select(0)
print(f"DB 0 size before flush: {client.dbsize()}")

# Flush current database
client.flushdb()
print(f"DB 0 size after flushdb: {client.dbsize()}")

client.select(1)
print(f"DB 1 size (untouched): {client.dbsize()}")

# Add some data back and test flushall
client.select(0)
client.set('test_key', 'test_value')
client.select(1)
client.set('another_key', 'another_value')

print(f"\nBefore flushall - DB 1: {client.dbsize()}")
client.select(0)
print(f"Before flushall - DB 0: {client.dbsize()}")

# Flush all databases
client.flushall()
print(f"After flushall - DB 0: {client.dbsize()}")
client.select(1)
print(f"After flushall - DB 1: {client.dbsize()}")
```

### Client Management

```python
import threading
import time

import fakeredis

# Share one FakeServer so every client connects to the same fake instance
server = fakeredis.FakeServer()

# Create multiple client connections
clients = []
for i in range(3):
    client = fakeredis.FakeRedis(server=server)
    client.client_setname(f'client_{i}')
    clients.append(client)

# Main client for monitoring
monitor_client = fakeredis.FakeRedis(server=server)
monitor_client.client_setname('monitor_client')

print("=== Client Information ===")

# Get client list
client_list = monitor_client.client_list()
print("Connected clients:")
print(client_list)

# Get current client info
client_info = monitor_client.client_info()
print("\nMonitor client info:")
for key, value in client_info.items():
    print(f"  {key}: {value}")

# Get client ID
client_id = monitor_client.client_id()
print(f"\nMonitor client ID: {client_id}")

# Get client name
client_name = monitor_client.client_getname()
if client_name:
    print(f"Monitor client name: {client_name.decode()}")

# Simulate some client activity
def client_activity(client, client_name, duration):
    """Simulate client activity"""
    start_time = time.time()
    counter = 0

    while time.time() - start_time < duration:
        # Perform various operations
        client.set(f'{client_name}:counter', str(counter))
        client.get(f'{client_name}:counter')
        client.incr(f'{client_name}:activity')

        counter += 1
        time.sleep(0.1)

print("\n=== Starting Client Activity ===")

# Start client activity threads
threads = []
for i, client in enumerate(clients):
    thread = threading.Thread(
        target=client_activity,
        args=(client, f'client_{i}', 2)  # 2 seconds of activity
    )
    threads.append(thread)
    thread.start()

# Monitor clients during activity
time.sleep(1)
print("\nClients during activity:")
active_client_list = monitor_client.client_list()
print(active_client_list)

# Wait for activity to complete
for thread in threads:
    thread.join()

print("\nClients after activity:")
final_client_list = monitor_client.client_list()
print(final_client_list)
```
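
Connections can also be paused briefly, for example while taking a consistent snapshot. A minimal sketch, assuming `client_pause()` is available in the installed fakeredis version; `client_kill()` works similarly but needs an `addr:port` string taken from `client_list()`:

```python
import fakeredis

client = fakeredis.FakeRedis()

# Suspend command processing for 100 ms; clients resume automatically.
# Wrapped defensively because a fake backend may not implement CLIENT PAUSE.
try:
    print(f"Clients paused: {client.client_pause(100)}")
except Exception as e:
    print(f"CLIENT PAUSE not available: {e}")
```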
### Memory Analysis

```python
import fakeredis

client = fakeredis.FakeRedis()

# Create data with different memory characteristics
print("=== Creating Test Data ===")

# Small strings
for i in range(100):
    client.set(f'small:{i}', f'value_{i}')

# Large strings
large_data = 'x' * 10000
for i in range(10):
    client.set(f'large:{i}', large_data)

# Complex data structures
for i in range(20):
    # Hashes
    client.hset(f'user:{i}', mapping={
        'name': f'User {i}',
        'email': f'user{i}@example.com',
        'age': str(20 + i),
        'bio': 'A user with a longer biography ' * 10
    })

    # Lists
    client.lpush(f'list:{i}', *[f'item_{j}' for j in range(50)])

    # Sets
    client.sadd(f'set:{i}', *[f'member_{j}' for j in range(30)])

# Analyze memory usage
print("\n=== Memory Analysis ===")

# Get overall memory stats
try:
    memory_stats = client.memory_stats()
    print("Memory statistics:")
    for key, value in memory_stats.items():
        if isinstance(value, (int, float)):
            if 'bytes' in key:
                print(f"  {key}: {value:,} bytes")
            else:
                print(f"  {key}: {value}")
except Exception:
    print("Memory stats not available in this Redis version")

# Analyze specific key memory usage
print("\n=== Key Memory Usage ===")
test_keys = [
    'small:0',
    'large:0',
    'user:0',
    'list:0',
    'set:0'
]

for key in test_keys:
    try:
        usage = client.memory_usage(key)
        if usage is not None:
            print(f"  {key}: {usage} bytes")
        else:
            print(f"  {key}: Key not found")
    except Exception:
        print(f"  {key}: Memory usage not available")

# Get database size
total_keys = client.dbsize()
print(f"\nTotal keys in database: {total_keys}")
```
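
`memory_purge()` asks the allocator to hand dirty pages back to the operating system. A fake backend has nothing to purge, so this sketch treats a failure as informational only:

```python
import fakeredis

client = fakeredis.FakeRedis()

# MEMORY PURGE is only meaningful on a real server using jemalloc;
# fakeredis may reject it, so the call is wrapped defensively.
try:
    print(f"Memory purged: {client.memory_purge()}")
except Exception as e:
    print(f"MEMORY PURGE not available: {e}")
```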
### Server Time and Persistence

```python
import datetime
import time

import fakeredis

client = fakeredis.FakeRedis()

# Get server time
print("=== Server Time ===")
server_time = client.time()
server_timestamp = int(server_time[0])
server_microseconds = int(server_time[1])

print(f"Server timestamp: {server_timestamp}")
print(f"Server microseconds: {server_microseconds}")

# Convert to readable format
readable_time = datetime.datetime.fromtimestamp(server_timestamp)
print(f"Readable time: {readable_time}")

# Add some data for persistence testing
print("\n=== Adding Data for Persistence Test ===")
test_data = {
    'persistent:config': 'important_setting',
    'persistent:counter': '12345',
    'persistent:timestamp': str(int(time.time()))
}

for key, value in test_data.items():
    client.set(key, value)
    print(f"Set {key}: {value}")

# Test save operations
print("\n=== Persistence Operations ===")

# Synchronous save
try:
    save_result = client.save()
    print(f"Synchronous save: {save_result}")
except Exception as e:
    print(f"Save error: {e}")

# Background save
try:
    bgsave_result = client.bgsave()
    print(f"Background save: {bgsave_result}")
except Exception as e:
    print(f"Background save error: {e}")

# Get last save time
try:
    last_save = client.lastsave()
    # Depending on the client version, LASTSAVE comes back as a Unix
    # timestamp or already converted to a datetime object.
    if isinstance(last_save, datetime.datetime):
        last_save_time = last_save
    else:
        last_save_time = datetime.datetime.fromtimestamp(last_save)
    print(f"Last save time: {last_save_time}")
except Exception as e:
    print(f"Last save error: {e}")
```
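
An append-only-file rewrite can be triggered the same way as the save operations above. A minimal sketch, assuming the backend accepts `BGREWRITEAOF` (fakeredis may treat it as a no-op or reject it):

```python
import fakeredis

client = fakeredis.FakeRedis()

# Ask the server to rewrite its append-only file in the background.
# Wrapped defensively because a fake backend has no AOF to rewrite.
try:
    print(f"AOF rewrite started: {client.bgrewriteaof()}")
except Exception as e:
    print(f"BGREWRITEAOF not available: {e}")
```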
### Command Information and Debugging

```python
import fakeredis

client = fakeredis.FakeRedis()

print("=== Command Information ===")

# Get command count
try:
    cmd_count = client.command_count()
    print(f"Total commands available: {cmd_count}")
except Exception:
    print("Command count not available")

# Get information about specific commands
test_commands = ['SET', 'GET', 'HSET', 'LPUSH', 'ZADD']
try:
    cmd_info = client.command_info(*test_commands)
    print("\nCommand information:")
    for i, cmd_name in enumerate(test_commands):
        if i < len(cmd_info) and cmd_info[i]:
            info = cmd_info[i]
            print(f"  {cmd_name}:")
            if len(info) >= 2:
                print(f"    Arity: {info[1]}")  # Number of arguments
            if len(info) >= 3:
                print(f"    Flags: {info[2]}")  # Command flags
        else:
            print(f"  {cmd_name}: No information available")
except Exception as e:
    print(f"Command info error: {e}")

# Test debug object (if available)
print("\n=== Debug Information ===")
client.set('debug_key', 'debug_value')
client.lpush('debug_list', 'item1', 'item2', 'item3')

debug_keys = ['debug_key', 'debug_list']
for key in debug_keys:
    try:
        debug_info = client.debug_object(key)
        print(f"Debug info for {key}: {debug_info}")
    except Exception as e:
        print(f"Debug info for {key}: Not available ({e})")

# Slowlog operations (if supported)
print("\n=== Slowlog Operations ===")
try:
    # Get slowlog length
    slowlog_len = client.slowlog_len()
    print(f"Slowlog entries: {slowlog_len}")

    # Get slowlog entries
    if slowlog_len > 0:
        slowlog_entries = client.slowlog_get(5)  # Get last 5 entries
        print("Recent slow queries:")
        for entry in slowlog_entries:
            print(f"  ID: {entry.get('id', 'N/A')}")
            print(f"  Duration: {entry.get('duration', 'N/A')} microseconds")
            print(f"  Command: {entry.get('command', 'N/A')}")
            print(f"  Time: {entry.get('start_time', 'N/A')}")
            print()

    # Reset slowlog
    reset_result = client.slowlog_reset()
    print(f"Slowlog reset: {reset_result}")

except Exception as e:
    print(f"Slowlog operations not available: {e}")
```
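
### Access Control Lists (ACL)

The ACL commands listed under Capabilities can be exercised the same way. A minimal sketch, assuming the installed fakeredis version implements these commands; the calls are wrapped in a `try`/`except` in case it does not:

```python
import fakeredis

client = fakeredis.FakeRedis()

try:
    # The connection authenticates as the "default" user unless configured otherwise
    print(f"Current user: {client.acl_whoami()}")
    print(f"Known users: {client.acl_users()}")
    print(f"User rules: {client.acl_list()}")

    # Inspect the default user and the available command categories
    print(f"default user: {client.acl_getuser('default')}")
    print(f"Categories: {client.acl_cat()[:5]} ...")

    # Generate a strong random password for provisioning a new user
    password = client.acl_genpass()
    print(f"Generated password: {password}")
except Exception as e:
    print(f"ACL commands not available: {e}")
```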
### Pattern: Health Check System

```python
import json
import time
from dataclasses import dataclass
from typing import Any, Dict, List

import fakeredis

@dataclass
class HealthCheck:
    name: str
    status: str
    response_time: float
    message: str
    timestamp: int

class RedisHealthMonitor:
    def __init__(self, client: fakeredis.FakeRedis):
        self.client = client

    def perform_health_check(self) -> HealthCheck:
        """Perform comprehensive Redis health check"""
        start_time = time.time()

        try:
            # Test basic connectivity
            self.client.ping()

            # Test read/write operations
            test_key = f"health_check:{int(time.time())}"
            self.client.set(test_key, "health_test", ex=60)
            value = self.client.get(test_key)

            if value != b"health_test":
                raise Exception("Read/write test failed")

            # Clean up test key
            self.client.delete(test_key)

            response_time = (time.time() - start_time) * 1000  # ms

            return HealthCheck(
                name="redis_health_check",
                status="healthy",
                response_time=response_time,
                message="All checks passed",
                timestamp=int(time.time())
            )

        except Exception as e:
            response_time = (time.time() - start_time) * 1000
            return HealthCheck(
                name="redis_health_check",
                status="unhealthy",
                response_time=response_time,
                message=f"Health check failed: {str(e)}",
                timestamp=int(time.time())
            )

    def get_system_metrics(self) -> Dict[str, Any]:
        """Gather comprehensive system metrics"""
        metrics = {}

        try:
            # INFO is returned as a flat dict of fields, so values are read
            # directly rather than through per-section sub-dictionaries
            info = self.client.info()

            metrics['server'] = {
                'redis_version': info.get('redis_version', 'unknown'),
                'uptime_seconds': info.get('uptime_in_seconds', 0),
                'connected_clients': info.get('connected_clients', 0)
            }

            metrics['memory'] = {
                'used_memory': info.get('used_memory', 0),
                'used_memory_human': info.get('used_memory_human', '0B'),
                'maxmemory': info.get('maxmemory', 0)
            }

            metrics['stats'] = {
                'total_commands_processed': info.get('total_commands_processed', 0),
                'total_connections_received': info.get('total_connections_received', 0),
                'expired_keys': info.get('expired_keys', 0),
                'evicted_keys': info.get('evicted_keys', 0)
            }

            # Per-database keyspace entries appear as flat keys such as 'db0'
            metrics['keyspace'] = {}
            for key, value in info.items():
                if key.startswith('db') and isinstance(value, dict):
                    metrics['keyspace'][key] = {
                        'keys': value.get('keys', 0),
                        'expires': value.get('expires', 0)
                    }

            # Database size
            metrics['database'] = {
                'total_keys': self.client.dbsize()
            }

            # Client information
            client_info = self.client.client_info()
            metrics['client'] = {
                'client_id': client_info.get('id', 0),
                'client_name': client_info.get('name', ''),
                'database': client_info.get('db', 0)
            }

        except Exception as e:
            metrics['error'] = f"Failed to gather metrics: {str(e)}"

        return metrics

    def store_health_history(self, health_check: HealthCheck, max_history: int = 100):
        """Store health check results for trend analysis"""
        history_key = "health_check_history"

        # Store as JSON in a list
        health_data = {
            'name': health_check.name,
            'status': health_check.status,
            'response_time': health_check.response_time,
            'message': health_check.message,
            'timestamp': health_check.timestamp
        }

        # Add to list (most recent first)
        self.client.lpush(history_key, json.dumps(health_data))

        # Trim to max history
        self.client.ltrim(history_key, 0, max_history - 1)

    def get_health_history(self, limit: int = 10) -> List[HealthCheck]:
        """Retrieve recent health check history"""
        history_key = "health_check_history"

        try:
            history_data = self.client.lrange(history_key, 0, limit - 1)

            health_checks = []
            for data in history_data:
                health_dict = json.loads(data.decode())
                health_check = HealthCheck(
                    name=health_dict['name'],
                    status=health_dict['status'],
                    response_time=health_dict['response_time'],
                    message=health_dict['message'],
                    timestamp=health_dict['timestamp']
                )
                health_checks.append(health_check)

            return health_checks

        except Exception as e:
            print(f"Error retrieving health history: {e}")
            return []

    def get_health_summary(self) -> Dict[str, Any]:
        """Generate health summary with recent trends"""
        history = self.get_health_history(20)  # Last 20 checks

        if not history:
            return {"status": "unknown", "message": "No health history available"}

        # Calculate statistics
        healthy_count = sum(1 for check in history if check.status == "healthy")
        total_checks = len(history)
        success_rate = (healthy_count / total_checks) * 100

        recent_checks = history[:5]  # Last 5 checks
        avg_response_time = sum(check.response_time for check in recent_checks) / len(recent_checks)

        # Determine overall status
        if success_rate >= 95:
            overall_status = "healthy"
        elif success_rate >= 80:
            overall_status = "degraded"
        else:
            overall_status = "unhealthy"

        return {
            "overall_status": overall_status,
            "success_rate": round(success_rate, 2),
            "avg_response_time": round(avg_response_time, 2),
            "total_checks": total_checks,
            "healthy_checks": healthy_count,
            "last_check": {
                "status": history[0].status,
                "timestamp": history[0].timestamp,
                "message": history[0].message
            }
        }

# Usage example
client = fakeredis.FakeRedis()
health_monitor = RedisHealthMonitor(client)

print("=== Redis Health Monitoring ===")

# Perform several health checks
for i in range(5):
    print(f"\nPerforming health check {i+1}...")

    health_check = health_monitor.perform_health_check()
    print(f"Status: {health_check.status}")
    print(f"Response time: {health_check.response_time:.2f}ms")
    print(f"Message: {health_check.message}")

    # Store in history
    health_monitor.store_health_history(health_check)

    # Add some test data to make metrics more interesting
    if i == 2:
        client.mset({f'test_key_{j}': f'test_value_{j}' for j in range(50)})

    time.sleep(0.1)  # Brief pause between checks

# Get system metrics
print("\n=== System Metrics ===")
metrics = health_monitor.get_system_metrics()
print(json.dumps(metrics, indent=2, default=str))

# Get health summary
print("\n=== Health Summary ===")
summary = health_monitor.get_health_summary()
print(json.dumps(summary, indent=2))

# Show recent health history
print("\n=== Recent Health History ===")
recent_history = health_monitor.get_health_history(5)
for i, check in enumerate(recent_history):
    timestamp_str = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(check.timestamp))
    print(f"{i+1}. {timestamp_str}: {check.status} ({check.response_time:.1f}ms) - {check.message}")
```