A Python toolbox for performing gradient-free optimization with unified interfaces for optimization algorithms and parameter handling.
—
Comprehensive callback system for optimization monitoring, logging, progress tracking, early stopping, and state persistence during optimization runs. Callbacks enable real-time observation and control of the optimization process.
Visual feedback and progress tracking during optimization runs with configurable update intervals and display formats.
class OptimizationPrinter:
    """
    Prints optimization progress at regular intervals.

    Progress is reported when either interval threshold is reached.

    Parameters:
    - print_interval_tells: Print every N tell operations (int, default=1)
    - print_interval_seconds: Print every N seconds (float, default=60.0)
    """

    def __init__(self, print_interval_tells: int = 1, print_interval_seconds: float = 60.0) -> None:
        """Initialize printer with update intervals."""
        # NOTE(review): documentation skeleton -- the real implementation
        # records these intervals; the body is intentionally elided here.
class ProgressBar:
    """
    Displays text-based progress bar during optimization.

    Shows current progress through budget with estimated time remaining
    and current best value.
    """
# Structured logging capabilities for optimization runs with configurable log levels and detailed parameter tracking.
class OptimizationLogger:
    """
    Logs optimization progress to logger.

    A log record is emitted when either interval threshold is reached.

    Parameters:
    - logger: Logger instance (default: global_logger)
    - log_level: Logging level (default: logging.INFO)
    - log_interval_tells: Log every N tell operations (int, default=1)
    - log_interval_seconds: Log every N seconds (float, default=60.0)
    """

    def __init__(
        self,
        *,
        logger=None,
        # Fix: default is None, so the annotation must be optional;
        # None means "fall back to logging.INFO" per the class docstring.
        log_level: "int | None" = None,
        log_interval_tells: int = 1,
        log_interval_seconds: float = 60.0,
    ):
        """Initialize logger with configuration (all arguments keyword-only)."""
        # NOTE(review): documentation skeleton -- configuration storage elided.
class ParametersLogger:
    """
    Logs detailed parameter information during optimization.

    Records parameter values, mutations, and evolution history
    for detailed analysis of optimization behavior.
    """
# Checkpoint and recovery capabilities for long-running optimization tasks with automatic state saving and restoration.
class OptimizerDump:
    """
    Periodically dumps optimizer state to file for recovery.

    Enables resuming optimization from checkpoints in case of
    interruption or system failure.

    Parameters:
    - filepath: Path for state dump file
    - dump_interval: Dump frequency in evaluations
    """
# Intelligent termination conditions based on convergence criteria, improvement thresholds, and custom stopping rules.
class EarlyStopping:
    """
    Implements early stopping based on various criteria.

    Supports multiple stopping conditions including loss improvement
    tolerance, duration limits, and custom stopping functions.

    Parameters:
    - improvement_tolerance: Minimum improvement threshold
    - patience: Number of iterations to wait for improvement
    - duration_limit: Maximum optimization duration
    - custom_criterion: Custom stopping function
    """

    def add_loss_improvement_tolerance_criterion(
        self,
        tolerance: float,
        patience: int,
    ) -> 'EarlyStopping':
        """
        Add loss improvement tolerance stopping criterion.

        Args:
            tolerance: Minimum relative improvement required
            patience: Number of iterations to wait for improvement
        Returns:
            Self for method chaining
        """
        # Fix: the docstring promises method chaining, so return self
        # (criterion registration itself is elided in this skeleton).
        return self

    def add_duration_criterion(self, max_duration: float) -> 'EarlyStopping':
        """
        Add duration-based stopping criterion.

        Args:
            max_duration: Maximum optimization duration in seconds
        Returns:
            Self for method chaining
        """
        return self

    # Fix: `Callable` is quoted because no `typing` import is visible in this
    # file; a string annotation avoids a NameError at class-creation time.
    def add_custom_criterion(self, criterion_func: 'Callable') -> 'EarlyStopping':
        """
        Add custom stopping criterion.

        Args:
            criterion_func: Function that returns True to stop optimization
        Returns:
            Self for method chaining
        """
        return self
# Implementation classes for specific stopping criteria used by the EarlyStopping callback.
class _DurationCriterion:
    """
    Duration-based stopping criterion implementation.

    Monitors optimization runtime and triggers stopping when
    maximum duration is exceeded.
    """
class _LossImprovementToleranceCriterion:
    """
    Loss improvement tolerance criterion implementation.

    Tracks improvement in best loss value and triggers stopping
    when improvement falls below threshold for specified patience.
    """
import nevergrad as ng
# Create optimizer with progress display
param = ng.p.Array(shape=(10,))
optimizer = ng.optimizers.CMA(parametrization=param, budget=100)

# Add progress bar
progress_callback = ng.callbacks.ProgressBar()
# Add to optimizer (implementation depends on optimizer interface)

# Manual integration in optimization loop:
def sphere(x):
    """Classic sphere benchmark: sum of squared coordinates."""
    return sum(x**2)

for i in range(optimizer.budget):
    x = optimizer.ask()
    loss = sphere(x.value)
    optimizer.tell(x, loss)
    # Manual progress update
    if i % 10 == 0:
        best = optimizer.provide_recommendation()
        print(f"Iteration {i}: Best loss = {sphere(best.value):.6f}")
import logging
import nevergrad as ng

# Set up logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("optimization")

# Create logger callback
logger_callback = ng.callbacks.OptimizationLogger(
    logger=logger,
    log_level=logging.INFO,
    log_interval_tells=5,       # Log every 5 evaluations
    log_interval_seconds=30.0,  # Or every 30 seconds
)

# Create detailed parameter logger
param_logger = ng.callbacks.ParametersLogger()

# Use in optimization
# NOTE(review): `param` is defined in the previous example -- confirm when running standalone.
optimizer = ng.optimizers.CMA(parametrization=param, budget=100)
# Integration depends on specific optimizer implementation
import nevergrad as ng
# Create early stopping with multiple criteria
early_stopping = ng.callbacks.EarlyStopping()

# Add improvement tolerance criterion
early_stopping.add_loss_improvement_tolerance_criterion(
    tolerance=1e-6,  # Must improve by at least 1e-6
    patience=20,     # Wait up to 20 iterations
)

# Add duration limit
early_stopping.add_duration_criterion(
    max_duration=3600.0  # Stop after 1 hour
)

# Add custom criterion
def custom_stop_criterion(optimizer):
    """Stop if loss is below target."""
    best = optimizer.provide_recommendation()
    return sphere(best.value) < 1e-3

early_stopping.add_custom_criterion(custom_stop_criterion)

# Use with optimizer
optimizer = ng.optimizers.CMA(parametrization=param, budget=1000)

# Manual integration with early stopping
for i in range(optimizer.budget):
    x = optimizer.ask()
    loss = sphere(x.value)
    optimizer.tell(x, loss)
    # Check stopping criteria
    if early_stopping.should_stop(optimizer):
        print(f"Early stopping triggered at iteration {i}")
        break
import nevergrad as ng
# Create optimizer dump for checkpointing
dump_callback = ng.callbacks.OptimizerDump(
    filepath="optimizer_checkpoint.pkl",
    dump_interval=50  # Save every 50 evaluations
)

# Combined callback usage
class OptimizationRunner:
    """Drives an ask/tell loop while dispatching registered callbacks."""

    def __init__(self, optimizer, callbacks=None):
        self.optimizer = optimizer
        self.callbacks = callbacks or []
        self.iteration = 0

    def run(self, function, budget):
        """Run `budget` iterations, invoking every callback after each tell."""
        for i in range(budget):
            x = self.optimizer.ask()
            loss = function(x.value)
            self.optimizer.tell(x, loss)
            # Execute callbacks
            for callback in self.callbacks:
                callback.on_iteration(self.optimizer, i, loss)
            self.iteration += 1

# Usage
callbacks = [
    ng.callbacks.ProgressBar(),
    ng.callbacks.OptimizationLogger(log_interval_tells=10),
    ng.callbacks.OptimizerDump(filepath="checkpoint.pkl", dump_interval=25),
]
runner = OptimizationRunner(optimizer, callbacks)
runner.run(sphere, 100)

class CustomCallback:
    """Example custom callback implementation."""

    def __init__(self, target_loss=1e-3):
        self.target_loss = target_loss
        self.best_losses = []

    def on_iteration(self, optimizer, iteration, current_loss):
        """Called after each optimization iteration."""
        best = optimizer.provide_recommendation()
        best_loss = current_loss  # Assuming current is best
        self.best_losses.append(best_loss)
        if best_loss < self.target_loss:
            print(f"Target loss {self.target_loss} achieved at iteration {iteration}")
        # Log every 25 iterations
        if iteration % 25 == 0:
            print(f"Iteration {iteration}: Best = {best_loss:.6f}")

    def on_completion(self, optimizer):
        """Called when optimization completes."""
        print(f"Optimization completed. Total evaluations: {len(self.best_losses)}")
        print(f"Final best loss: {min(self.best_losses):.6f}")

# Usage
custom_callback = CustomCallback(target_loss=1e-4)
# Integrate with optimization loop as shown above

class MultiObjectiveCallback:
    """Callback for multi-objective optimization monitoring."""

    def __init__(self, log_interval=10):
        self.log_interval = log_interval
        self.pareto_history = []

    def on_iteration(self, optimizer, iteration, losses):
        """Monitor Pareto front evolution (records front size every log_interval)."""
        if iteration % self.log_interval == 0:
            pareto_front = optimizer.pareto_front()
            self.pareto_history.append(len(pareto_front))
            print(f"Iteration {iteration}: Pareto front size = {len(pareto_front)}")

    def plot_pareto_evolution(self):
        """Plot evolution of Pareto front size."""
        import matplotlib.pyplot as plt
        plt.plot(range(0, len(self.pareto_history) * self.log_interval, self.log_interval),
                 self.pareto_history)
        plt.xlabel("Iterations")
        plt.ylabel("Pareto Front Size")
        plt.title("Evolution of Pareto Front")
        plt.show()

# Use with multi-objective optimization
mo_callback = MultiObjectiveCallback(log_interval=20)
# Install with Tessl CLI
npx tessl i tessl/pypi-nevergrad