MMEngine: the engine of OpenMMLab projects for training deep learning models with PyTorch, providing large-scale training frameworks, configuration management, and monitoring capabilities.
—
Advanced logging system with support for multiple visualization backends including TensorBoard, Weights & Biases, MLflow, ClearML, Neptune, and others with structured message passing and history tracking. The system provides comprehensive monitoring capabilities for training processes.
MMLogger — an advanced logger with enhanced features for machine learning workflows.
class MMLogger:
@staticmethod
def get_instance(name: str, logger_name: str = None) -> 'MMLogger':
"""
Get logger instance (singleton pattern).
Parameters:
- name: Logger instance name
- logger_name: Actual logger name
Returns:
MMLogger instance
"""
@staticmethod
def get_current_instance() -> 'MMLogger':
"""
Get current logger instance.
Returns:
Current MMLogger instance
"""
def info(self, message: str):
"""
Log info message.
Parameters:
- message: Message to log
"""
def warning(self, message: str):
"""
Log warning message.
Parameters:
- message: Warning message to log
"""
def error(self, message: str):
"""
Log error message.
Parameters:
- message: Error message to log
"""
def debug(self, message: str):
"""
Log debug message.
Parameters:
- message: Debug message to log
"""
def critical(self, message: str):
"""
Log critical message.
Parameters:
- message: Critical message to log
"""
def log(self, level: int, message: str):
"""
Log message at specified level.
Parameters:
- level: Logging level
- message: Message to log
"""
@property
def name(self) -> str:
"""Logger name."""
@property
def logger(self):
"""Underlying logger instance."""Central hub for message passing and metric tracking between components.
class MessageHub:
@classmethod
def get_instance(cls, name: str = 'mmengine') -> 'MessageHub':
"""
Get MessageHub instance (singleton pattern).
Parameters:
- name: Instance name
Returns:
MessageHub instance
"""
@classmethod
def get_current_instance(cls) -> 'MessageHub':
"""
Get current MessageHub instance.
Returns:
Current MessageHub instance
"""
def update_scalar(self, key: str, value: float, count: int = 1):
"""
Update scalar value.
Parameters:
- key: Scalar key name
- value: Scalar value
- count: Count for averaging
"""
def update_scalars(self, scalar_dict: dict, count: int = 1):
"""
Update multiple scalar values.
Parameters:
- scalar_dict: Dictionary of scalar values
- count: Count for averaging
"""
def get_scalar(self, key: str):
"""
Get scalar value.
Parameters:
- key: Scalar key name
Returns:
Scalar value and statistics
"""
def get_scalars(self, keys: list) -> dict:
"""
Get multiple scalar values.
Parameters:
- keys: List of scalar keys
Returns:
Dictionary of scalar values
"""
def update_info(self, key: str, value):
"""
Update info value.
Parameters:
- key: Info key name
- value: Info value
"""
def get_info(self, key: str):
"""
Get info value.
Parameters:
- key: Info key name
Returns:
Info value
"""
def log_scalars(self, scalar_dict: dict, step: int):
"""
Log scalars to all backends.
Parameters:
- scalar_dict: Dictionary of scalars to log
- step: Step number
"""
def clear(self):
"""Clear all stored data."""
@property
def log_scalars_keys(self) -> set:
"""Get all scalar keys."""Buffer for storing and managing training history with statistical calculations.
class HistoryBuffer:
def __init__(self, max_length: int = 1000000):
"""
Initialize history buffer.
Parameters:
- max_length: Maximum buffer length
"""
def update(self, value: float, count: int = 1):
"""
Update buffer with new value.
Parameters:
- value: Value to add
- count: Count for weighted update
"""
def mean(self, window_size: int = None) -> float:
"""
Calculate mean value.
Parameters:
- window_size: Window size for calculation
Returns:
Mean value
"""
def current(self) -> float:
"""
Get current (latest) value.
Returns:
Current value
"""
def max(self, window_size: int = None) -> float:
"""
Get maximum value.
Parameters:
- window_size: Window size for calculation
Returns:
Maximum value
"""
def min(self, window_size: int = None) -> float:
"""
Get minimum value.
Parameters:
- window_size: Window size for calculation
Returns:
Minimum value
"""
def data(self) -> list:
"""
Get all data.
Returns:
List of all values
"""
def __len__(self) -> int:
"""Get buffer length."""Convenient function for logging with automatic logger detection.
def print_log(msg: str, logger: str = None, level: int = 20):
"""
Print log message with logger.
Parameters:
- msg: Message to log
- logger: Logger name or instance
- level: Logging level (INFO=20, WARNING=30, ERROR=40)
"""System for processing and formatting training logs.
class LogProcessor:
    """Processes and formats training logs for output."""

    def __init__(self, window_size: int = 10, by_epoch: bool = True, custom_cfg: list = None, num_digits: int = 4, log_with_hierarchy: bool = False):
        """Initialize the log processor.

        Args:
            window_size: Window size for smoothing. Defaults to 10.
            by_epoch: Whether to log by epoch. Defaults to True.
            custom_cfg: Custom logging configuration entries. Defaults to None.
            num_digits: Number of digits used when formatting values.
                Defaults to 4.
            log_with_hierarchy: Whether to log with hierarchical keys.
                Defaults to False.
        """

    def get_log_after_iter(self, runner, batch_idx: int, mode: str) -> dict:
        """Collect log information after an iteration.

        Args:
            runner: Runner instance.
            batch_idx: Batch index.
            mode: Training mode ('train', 'val', 'test').

        Returns:
            dict: Log information.
        """

    def get_log_after_epoch(self, runner, batch_idx: int, mode: str) -> dict:
        """Collect log information after an epoch.

        Args:
            runner: Runner instance.
            batch_idx: Batch index.
            mode: Training mode ('train', 'val', 'test').

        Returns:
            dict: Log information.
        """

from mmengine import MMLogger, print_log
# Get a logger instance
logger = MMLogger.get_instance('my_logger')

# Log at different levels
logger.info('Training started')
logger.warning('Learning rate is very high')
logger.error('Training failed')

# Use the convenience print_log function
print_log('This is an info message', level=20)
print_log('This is a warning', level=30)

from mmengine import MessageHub
# Get a message hub instance
message_hub = MessageHub.get_instance('training')

# Update scalar metrics one at a time
message_hub.update_scalar('loss', 0.5)
message_hub.update_scalar('accuracy', 0.95)

# Update multiple scalars at once
metrics = {'loss': 0.4, 'accuracy': 0.96, 'lr': 0.001}
message_hub.update_scalars(metrics)

# Retrieve scalar values
loss_info = message_hub.get_scalar('loss')
all_metrics = message_hub.get_scalars(['loss', 'accuracy'])

from mmengine.logging import HistoryBuffer
# Create a history buffer
loss_buffer = HistoryBuffer(max_length=10000)

# Update with training losses
# (batch_losses: the iterable of per-batch losses, defined elsewhere)
for epoch in range(100):
    for batch_loss in batch_losses:
        loss_buffer.update(batch_loss)

    # Get per-epoch statistics
    avg_loss = loss_buffer.mean(window_size=100)  # Last 100 values
    current_loss = loss_buffer.current()
    min_loss = loss_buffer.min()
    max_loss = loss_buffer.max()
    print(f'Epoch {epoch}: avg={avg_loss:.4f}, current={current_loss:.4f}')

from mmengine import Runner, MMLogger, MessageHub
# Set up logging for training
logger = MMLogger.get_instance('training')
message_hub = MessageHub.get_instance('training')

# In the training loop
def train_step(model, data, optimizer):
    loss = model(data)
    # Record the metric in the message hub
    message_hub.update_scalar('train_loss', loss.item())
    # Log important events
    logger.info(f'Step loss: {loss.item():.4f}')
    return loss

# Create a runner with a log processor
runner = Runner(
    model=model,
    log_processor=dict(
        window_size=20,
        by_epoch=True,
        custom_cfg=[
            dict(data_src='loss', window_size=10, method_name='mean'),
            dict(data_src='accuracy', window_size=10, method_name='mean')
        ]
    )
)

import logging
from mmengine import MMLogger

# Create a logger with a custom configuration
logger = MMLogger.get_instance(
    'custom_logger',
    logger_name='my_training'
)

# Attach a custom file handler
handler = logging.FileHandler('training.log')
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
logger.logger.addHandler(handler)

# Use the logger
logger.info('Custom logging setup complete')

from mmengine.runner import LogProcessor
# Create custom log processor
log_processor = LogProcessor(
window_size=50,
by_epoch=True,
custom_cfg=[
dict(data_src='loss', window_size=20, method_name='mean'),
dict(data_src='loss', window_size=1, method_name='current'),
dict(data_src='accuracy', window_size=20, method_name='mean'),
dict(data_src='lr', method_name='current')
],
num_digits=6,
log_with_hierarchy=True
)
# Use in runner
runner = Runner(
model=model,
log_processor=log_processor
)Install with Tessl CLI
npx tessl i tessl/pypi-mmengine