A lightweight library to help with training neural networks in PyTorch.
Helper utilities for tensor operations, type conversions, logging setup, and reproducibility management. PyTorch Ignite provides essential utilities that support training workflows and make common operations more convenient.

Utilities for tensor manipulation and device management.
def convert_tensor(input_, device=None, non_blocking=False):
    """
    Convert input to a tensor and move it to the specified device.

    Parameters:
    - input_: input data (tensor, numpy array, list, etc.)
    - device: target device (torch.device, string, or None to leave the
      tensor on its current device)
    - non_blocking: whether to use a non-blocking transfer
      (NOTE: asynchronous copies generally require pinned host memory —
      confirm against the ignite documentation)

    Returns:
    Converted tensor on the target device
    """
def to_onehot(indices, num_classes):
    """
    Convert class indices to a one-hot encoding.

    Parameters:
    - indices: tensor of class indices
    - num_classes: total number of classes

    Returns:
    One-hot encoded tensor (adds a trailing dimension of size
    num_classes; see the usage examples below)
    """
def apply_to_tensor(input_, func):
    """
    Apply a function to all tensors in a nested structure.

    Parameters:
    - input_: nested structure (e.g. dict/list/tuple) containing tensors
    - func: function to apply to each tensor

    Returns:
    Structure of the same shape with func applied to all tensors
    """
def apply_to_type(input_, input_type, func):
    """
    Apply a function to all objects of a specific type in a nested structure.

    Parameters:
    - input_: nested structure
    - input_type: type to match (e.g. torch.Tensor or a user-defined class)
    - func: function to apply to matched objects

    Returns:
    Structure with func applied to matched objects
"""Utilities for configuring logging in training workflows.
def setup_logger(name=None, level=logging.INFO, stream=None, format="%(asctime)s %(name)s %(levelname)s %(message)s", filepath=None, distributed_rank=None):
    """
    Set up and return a logger with the specified configuration.

    Parameters:
    - name: logger name (default: None for the root logger)
    - level: logging level (default: logging.INFO)
    - stream: output stream (default: None for sys.stderr)
    - format: log message format string
    - filepath: path to a log file (optional)
    - distributed_rank: rank for distributed training (optional; see the
      distributed usage example below)

    Returns:
    Configured logger instance
"""Utilities for ensuring reproducible training runs.
def manual_seed(seed):
    """
    Set the manual seed for reproducibility across all relevant libraries.

    Parameters:
    - seed: random seed value

    Sets seeds for:
    - Python random module
    - NumPy random state
    - PyTorch random state
    - PyTorch CUDA random state (if available)
"""from ignite.utils import convert_tensor
import torch

# A plain Python list is accepted and converted to a tensor.
data = [1, 2, 3, 4, 5]
tensor_data = convert_tensor(data)

# Pick the best available device, then place the converted tensor on it.
if torch.cuda.is_available():
    device = torch.device('cuda')
else:
    device = torch.device('cpu')
tensor_on_device = convert_tensor(data, device=device)

# non_blocking=True requests an asynchronous host-to-device copy.
tensor_nb = convert_tensor(data, device=device, non_blocking=True)

# NumPy arrays are supported as well.
import numpy as np
np_array = np.array([1.0, 2.0, 3.0])
tensor_from_np = convert_tensor(np_array, device=device)

from ignite.utils import to_onehot
import torch

# One-hot encode a 1-D vector of class labels.
num_classes = 3
indices = torch.tensor([0, 1, 2, 1, 0])
onehot = to_onehot(indices, num_classes)
print(onehot)
# Expected:
# tensor([[1, 0, 0],
#         [0, 1, 0],
#         [0, 0, 1],
#         [0, 1, 0],
#         [1, 0, 0]])

# Extra leading (batch) dimensions are preserved in the encoded result.
batch_indices = torch.tensor([[0, 1], [2, 1]])
batch_onehot = to_onehot(batch_indices, num_classes)
print(batch_onehot.shape) # torch.Size([2, 2, 3])

from ignite.utils import apply_to_tensor, apply_to_type
import torch

# A nested container mixing tensors with plain Python values.
data = {
    'input': torch.randn(10, 5),
    'target': torch.randint(0, 2, (10,)),
    'metadata': {
        'weights': torch.ones(10),
        'info': 'some string'
    }
}

# apply_to_tensor visits only the tensors; the 'info' string is untouched.
def _maybe_cuda(x):
    if torch.cuda.is_available():
        return x.cuda()
    return x

gpu_data = apply_to_tensor(data, _maybe_cuda)

# apply_to_type restricts the visit to objects of a given type.
def double_tensors(x):
    return x * 2

doubled_data = apply_to_type(data, torch.Tensor, double_tensors)

# The matched type can be any user-defined class, too.
class CustomClass:
    def __init__(self, value):
        self.value = value

data_with_custom = {
    'tensor': torch.randn(5),
    'custom': CustomClass(42),
    'nested': {
        'another_custom': CustomClass(24)
    }
}

# NOTE: this mutates each matched object in place and returns it.
def modify_custom(obj):
    obj.value *= 2
    return obj
modified_data = apply_to_type(data_with_custom, CustomClass, modify_custom)

from ignite.utils import setup_logger
import logging

# Defaults: INFO level, writing to stderr.
logger = setup_logger()
logger.info("This is a basic log message")

# A named logger at DEBUG level.
debug_logger = setup_logger(name="debug_logger", level=logging.DEBUG)
debug_logger.debug("Debug message")

# Mirror log records into a file using a custom format string.
file_logger = setup_logger(
    name="file_logger",
    level=logging.INFO,
    filepath="training.log",
    format="%(asctime)s [%(levelname)s] %(name)s: %(message)s"
)
file_logger.info("This will be written to training.log")

# In distributed runs, pass the process rank so only rank 0 logs.
import ignite.distributed as idist

dist_logger = setup_logger(
    name="distributed_logger",
    distributed_rank=idist.rank()
)
dist_logger.info("This only appears on rank 0")

from ignite.utils import manual_seed
# Seed every relevant RNG (Python, NumPy, PyTorch) in a single call.
manual_seed(42)

import torch
import numpy as np
import random

# With the seed fixed, each of these draws is identical across runs.
torch_random = torch.randn(3, 3)
np_random = np.random.randn(3, 3)
py_random = random.random()

# Full determinism on GPU additionally requires pinning cuDNN behavior:
manual_seed(42)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
# Your training code here - results will be reproducible

from ignite.engine import Engine, Events
from ignite.utils import convert_tensor, manual_seed, setup_logger

# Logger used by the epoch-completed handler below.
logger = setup_logger(name="trainer")

def train_step(engine, batch):
    """One optimization step: forward pass, loss, backward pass, update.

    NOTE(review): model, optimizer, criterion, device and train_loader are
    assumed to be defined earlier in the surrounding script.
    """
    model.train()
    optimizer.zero_grad()
    # convert_tensor moves each piece of the batch onto the target device.
    x, y = batch
    x = convert_tensor(x, device=device, non_blocking=True)
    y = convert_tensor(y, device=device, non_blocking=True)
    y_pred = model(x)
    loss = criterion(y_pred, y)
    loss.backward()
    optimizer.step()
    return loss.item()

trainer = Engine(train_step)

@trainer.on(Events.EPOCH_COMPLETED)
def log_results(engine):
    logger.info(f"Epoch {engine.state.epoch} completed")

# Set a reproducible seed before training.
# Fix: manual_seed was called here but never imported; it is now included
# in the ignite.utils import at the top of this example.
manual_seed(42)
trainer.run(train_loader, max_epochs=10)

from ignite.utils import apply_to_tensor
import torch

def prepare_batch(batch, device=None, non_blocking=False):
    """Move every tensor in *batch* onto *device*.

    When device is None each tensor is returned unchanged.
    """
    def _transfer(tensor):
        # Guard: no device requested -> pass the tensor through untouched.
        if device is None:
            return tensor
        return tensor.to(device, non_blocking=non_blocking)
    return apply_to_tensor(batch, _transfer)

# Example usage inside a training step:
def train_step(engine, batch):
    batch = prepare_batch(batch, device=device, non_blocking=True)
# ... rest of training step

Install with Tessl CLI
npx tessl i tessl/pypi-pytorch-ignite