CMA-ES, Covariance Matrix Adaptation Evolution Strategy for non-linear numerical optimization in Python
—
Quality: Pending — a best-practices review has not yet been completed.
Impact: Pending — no eval scenarios have been run.
Core optimization functions and classes for general-purpose optimization with CMA-ES. This includes the main interfaces for function minimization and the ask-and-tell optimization loop.
def fmin2(
    objective_function,
    x0,
    sigma0,
    options=None,
    args=(),
    gradf=None,
    restarts=0,
    restart_from_best=False,
    incpopsize=2,
    eval_initial_x=False,
    parallel_objective=None,
    noise_handler=None,
    noise_change_sigma_exponent=1,
    noise_kappa_exponent=0,
    bipop=False,
    callback=None,
    init_callback=None
):
    """
    Functional interface to CMA-ES for non-convex function minimization.

    This is the main recommended interface for CMA-ES optimization with
    optional restarts and noise handling capabilities.

    Parameters:
    -----------
    objective_function : callable
        Function to minimize, called as objective_function(x, *args).
        Should return a scalar value. Can return numpy.nan to reject
        solution x (triggers resampling without counting as evaluation).
    x0 : array-like or callable
        Initial solution estimate (phenotype coordinates).
        Can be a callable that returns initial guess for each restart.
        Can also be a CMAEvolutionStrategy instance.
    sigma0 : float or array-like
        Initial standard deviation (step-size). Should be about 1/4th
        of the search domain width. Use None if x0 is CMAEvolutionStrategy.
    options : dict, optional
        CMA-ES options dictionary. See CMAOptions() for available options.
        Common options:
        - 'ftarget': target function value (default 1e-11)
        - 'maxfevals': max function evaluations (default inf)
        - 'maxiter': max iterations (default 100+50*(N+3)**2//popsize**0.5)
        - 'popsize': population size (default 4+floor(3*log(N)))
        - 'bounds': box constraints [[lower_bounds], [upper_bounds]]
        - 'tolfun': function value tolerance (default 1e-11)
        - 'tolx': solution tolerance (default 1e-11)
    args : tuple, optional
        Additional arguments passed to objective_function.
    gradf : callable, optional
        Gradient function where len(gradf(x, *args)) == len(x).
        Called once per iteration if provided.
    restarts : int or dict, optional
        Number of IPOP restarts with increasing population size.
        If dict, keys 'maxrestarts' and 'maxfevals' are recognized.
    restart_from_best : bool, optional
        Whether to restart from best solution found (default False).
    incpopsize : float, optional
        Population size multiplier for restarts (default 2).
    eval_initial_x : bool, optional
        Whether to evaluate initial solution x0 (default False).
    parallel_objective : callable, optional
        Function for parallel evaluation: parallel_objective(list_of_x, *args)
        should return a list of function values.
    noise_handler : NoiseHandler, optional
        Handler for noisy objective functions.
    noise_change_sigma_exponent : float, optional
        Exponent for sigma adaptation with noise (default 1).
    noise_kappa_exponent : float, optional
        Exponent for noise level adaptation (default 0).
    bipop : bool, optional
        Use BIPOP restart strategy (default False).
    callback : callable, optional
        Function called after each iteration: callback(CMAEvolutionStrategy).
    init_callback : callable, optional
        Function called after initialization: init_callback(CMAEvolutionStrategy).

    Returns:
    --------
    tuple[numpy.ndarray, CMAEvolutionStrategy]
        (xbest, es) where:
        - xbest: best solution found
        - es: CMAEvolutionStrategy instance with complete results

    Examples:
    ---------
    >>> import cma
    >>>
    >>> # Simple optimization
    >>> def sphere(x):
    ...     return sum(x**2)
    >>>
    >>> x, es = cma.fmin2(sphere, [1, 2, 3], 0.5)
    >>> print(f"Best solution: {x}")
    >>> print(f"Function value: {es.result.fbest}")
    >>>
    >>> # With options
    >>> x, es = cma.fmin2(sphere, [1, 2, 3], 0.5,
    ...                   options={'maxfevals': 1000, 'popsize': 20})
    >>>
    >>> # With restarts
    >>> x, es = cma.fmin2(sphere, [1, 2, 3], 0.5, restarts=2)
    >>>
    >>> # Box constraints
    >>> bounds = [[-5, -5, -5], [5, 5, 5]]
    >>> x, es = cma.fmin2(sphere, [1, 2, 3], 0.5,
    ...                   options={'bounds': bounds})
    >>>
    >>> # Parallel evaluation
    >>> def parallel_sphere(X_list, *args):
    ...     return [sum(x**2) for x in X_list]
    >>>
    >>> x, es = cma.fmin2(None, [1, 2, 3], 0.5,
    ...                   parallel_objective=parallel_sphere)
    """
    pass


def fmin(objective_function, x0, sigma0, *posargs, **kwargs):
    """
    DEPRECATED: Use fmin2 instead.

    This function remains fully functional and maintained for backward
    compatibility, but fmin2 is recommended for new code.

    Parameters:
    -----------
    Same as fmin2.

    Returns:
    --------
    list
        Extended result list: [xbest, fbest, evals_best, evaluations,
        iterations, xfavorite, stds, stop_dict, es, logger]

    Notes:
    ------
    The relationship between fmin and fmin2:

    >>> res = fmin(objective, x0, sigma0)
    >>> x, es = fmin2(objective, x0, sigma0)  # equivalent to:
    >>> x, es = res[0], res[-2]
    """
    pass


# The main CMA-ES optimizer class provides fine-grained control through the
# ask-and-tell interface.
class CMAEvolutionStrategy:
    """
    CMA-ES stochastic optimizer class with ask-and-tell interface.

    This class provides the most flexible interface to CMA-ES, allowing
    users to control the optimization loop iteration by iteration.
    """

    def __init__(self, x0, sigma0, inopts=None):
        """
        Initialize CMA-ES optimizer.

        Parameters:
        -----------
        x0 : array-like
            Initial solution estimate, determines problem dimension N.
            Given as "phenotype" coordinates (after transformation if applied).
        sigma0 : float or array-like
            Initial standard deviation(s). Should be about 1/4th of search
            domain width. Problem variables should be scaled so that a single
            standard deviation is meaningful across all variables.
        inopts : dict, optional
            Options dictionary. See CMAOptions() for available options.
            Key options include:
            - 'bounds': [[lower], [upper]] for box constraints
            - 'maxiter': maximum iterations
            - 'popsize': population size
            - 'seed': random seed for reproducibility
            - 'verb_disp': display verbosity level

        Examples:
        ---------
        >>> import cma
        >>>
        >>> # Basic initialization
        >>> es = cma.CMAEvolutionStrategy([0, 0, 0], 0.5)
        >>>
        >>> # With options
        >>> opts = {'popsize': 20, 'maxiter': 1000, 'seed': 123}
        >>> es = cma.CMAEvolutionStrategy([0, 0, 0], 0.5, opts)
        >>>
        >>> # Different sigma per coordinate
        >>> es = cma.CMAEvolutionStrategy([0, 0, 0], [0.5, 1.0, 0.2])
        """
        pass

    def ask(self, number=None, xmean=None, gradf=None, args=()):
        """
        Sample new candidate solutions from the current distribution.

        Parameters:
        -----------
        number : int, optional
            Number of solutions to return. Default is population size.
        xmean : array-like, optional
            Distribution mean override. If None, uses current mean.
        gradf : callable, optional
            Gradient function for mean shift. Called as gradf(xmean, *args).
        args : tuple, optional
            Arguments passed to gradf.

        Returns:
        --------
        list[numpy.ndarray]
            List of candidate solutions (phenotype coordinates).

        Examples:
        ---------
        >>> es = cma.CMAEvolutionStrategy([0, 0, 0], 0.5)
        >>> solutions = es.ask()
        >>> len(solutions) == es.popsize
        True
        >>>
        >>> # Ask for a specific number of solutions
        >>> solutions = es.ask(number=10)
        >>> len(solutions)
        10
        """
        pass

    def tell(self, arx, fitnesses, check_points=True, copy=False):
        """
        Update distribution parameters based on candidate evaluations.

        Parameters:
        -----------
        arx : list[array-like]
            List of candidate solutions (same as returned by ask()).
        fitnesses : array-like
            Corresponding fitness values. Lower values indicate better
            solutions. Can contain numpy.inf for infeasible solutions.
        check_points : bool, optional
            Whether to check input validity (default True).
        copy : bool, optional
            Whether to copy input arrays (default False).

        Examples:
        ---------
        >>> import cma
        >>>
        >>> def objective(x):
        ...     return sum(x**2)
        >>>
        >>> es = cma.CMAEvolutionStrategy([1, 2, 3], 0.5)
        >>> solutions = es.ask()
        >>> fitnesses = [objective(x) for x in solutions]
        >>> es.tell(solutions, fitnesses)
        >>>
        >>> # Handle infeasible solutions
        >>> fitnesses_with_inf = [objective(x) if feasible(x) else np.inf
        ...                       for x in solutions]
        >>> es.tell(solutions, fitnesses_with_inf)
        """
        pass

    def stop(self):
        """
        Check termination criteria.

        Returns:
        --------
        dict or False
            Dictionary of active termination conditions if optimization
            should stop, False otherwise. Common termination reasons:
            - 'ftarget': target function value reached
            - 'maxfevals': maximum function evaluations exceeded
            - 'maxiter': maximum iterations exceeded
            - 'tolx': solution tolerance reached
            - 'tolfun': function value tolerance reached
            - 'stagnation': no improvement for many iterations

        Examples:
        ---------
        >>> es = cma.CMAEvolutionStrategy([0, 0, 0], 0.5)
        >>> es.stop()  # Initially False
        False
        >>>
        >>> # After optimization
        >>> while not es.stop():
        ...     X = es.ask()
        ...     es.tell(X, [sum(x**2) for x in X])
        >>>
        >>> termination = es.stop()
        >>> print(f"Stopped because: {list(termination.keys())}")
        """
        pass

    def optimize(self, objective_function, iterations=None, args=(), **kwargs):
        """
        Convenience method to run the complete optimization loop.

        Equivalent to running the ask-tell loop until termination criteria
        are met.

        Parameters:
        -----------
        objective_function : callable
            Function to minimize, called as objective_function(x, *args).
        iterations : int, optional
            Maximum number of iterations to run.
        args : tuple, optional
            Additional arguments for objective_function.
        **kwargs : dict
            Additional keyword arguments (callback, etc.).

        Returns:
        --------
        CMAEvolutionStrategy
            Self, for method chaining.

        Examples:
        ---------
        >>> import cma
        >>>
        >>> def rosenbrock(x):
        ...     return sum(100*(x[1:] - x[:-1]**2)**2 + (1 - x[:-1])**2)
        >>>
        >>> es = cma.CMAEvolutionStrategy(4*[0.1], 0.5)
        >>> es = es.optimize(rosenbrock)
        >>> print(f"Best solution: {es.result.xbest}")
        >>> print(f"Function value: {es.result.fbest}")
        >>>
        >>> # Method chaining
        >>> result = cma.CMAEvolutionStrategy(4*[0.1], 0.5).optimize(rosenbrock).result
        """
        pass

    @property
    def result(self):
        """
        Current optimization result.

        Returns:
        --------
        CMAEvolutionStrategyResult
            Named tuple with fields:
            - xbest: best solution found so far
            - fbest: best function value found
            - evals_best: evaluations when best was found
            - evaluations: total function evaluations
            - iterations: total iterations completed
            - xfavorite: current mean of distribution (often better with noise)
            - stds: current standard deviations per coordinate
            - stop: termination conditions dictionary (if stopped)

        Examples:
        ---------
        >>> es = cma.CMAEvolutionStrategy([0, 0, 0], 0.5)
        >>> es.optimize(lambda x: sum(x**2))
        >>>
        >>> result = es.result
        >>> print(f"Best solution: {result.xbest}")
        >>> print(f"Best fitness: {result.fbest}")
        >>> print(f"Evaluations used: {result.evaluations}")
        >>> print(f"Final mean: {result.xfavorite}")
        >>> print(f"Final stds: {result.stds}")
        """
        pass

    @property
    def popsize(self):
        """Population size (number of offspring per iteration)."""
        pass

    @property
    def sigma(self):
        """Current overall step size (scalar)."""
        pass

    @property
    def mean(self):
        """Current distribution mean (in genotype coordinates)."""
        pass

    def result_pretty(self, number_of_runs=0, time_str=None, fbestever=None):
        """
        Pretty print optimization results.

        Parameters:
        -----------
        number_of_runs : int, optional
            Number of restarts performed.
        time_str : str, optional
            Elapsed time string.
        fbestever : float, optional
            Best function value over all runs.

        Returns:
        --------
        str
            Formatted result summary.
        """
        pass

    def disp(self, iteration=None):
        """
        Display current optimization status.

        Parameters:
        -----------
        iteration : int, optional
            Current iteration number for display.
        """
        pass

    def pickle_dumps(self):
        """
        Return pickle.dumps(self) with special handling for lambda functions.

        Returns:
        --------
        bytes
            Pickled representation of the optimizer state.
        """
        pass

    @staticmethod
    def pickle_loads(s):
        """
        Inverse of pickle_dumps.

        Parameters:
        -----------
        s : bytes
            Pickled optimizer state.

        Returns:
        --------
        CMAEvolutionStrategy
            Restored optimizer instance.
        """
        pass

    def copy(self):
        """
        Create a (deep) copy of the optimizer.

        Returns:
        --------
        CMAEvolutionStrategy
            Independent copy of the optimizer.
        """
        pass
import cma
import numpy as np


def sphere(x):
    return sum(x**2)


# Initialize optimizer
es = cma.CMAEvolutionStrategy(5 * [0.1], 0.3)

# Optimization loop
while not es.stop():
    # Get candidate solutions
    solutions = es.ask()
    # Evaluate objective function
    fitness_values = [sphere(x) for x in solutions]
    # Update optimizer
    es.tell(solutions, fitness_values)
    # Optional: display progress
    es.disp()

# Results
print(f"Best solution: {es.result.xbest}")
print(f"Best fitness: {es.result.fbest}")
print(es.result_pretty())
import cma
import numpy as np
from multiprocessing import Pool


def objective(x):
    # Expensive computation
    return sum(x**2) + 0.1 * sum(np.sin(30 * x))


def evaluate_batch(solutions):
    """Evaluate solutions in parallel."""
    with Pool() as pool:
        return pool.map(objective, solutions)


# Optimization with parallel evaluation
es = cma.CMAEvolutionStrategy(10 * [0.1], 0.5)
while not es.stop():
    solutions = es.ask()
    fitness_values = evaluate_batch(solutions)
    es.tell(solutions, fitness_values)
    if es.countiter % 10 == 0:
        es.disp()

print(f"Optimization completed in {es.result.iterations} iterations")
import cma


def custom_callback(es):
    """Custom callback function called each iteration."""
    if es.countiter % 50 == 0:
        print(f"Iteration {es.countiter}: best = {es.result.fbest:.6f}")
    # Custom termination condition
    if es.result.fbest < 1e-8:
        es.opts['ftarget'] = es.result.fbest  # Trigger termination


def objective(x):
    return sum((x - 1)**2)


# Optimization with callback
es = cma.CMAEvolutionStrategy([0, 0, 0], 0.5)
while not es.stop():
    solutions = es.ask()
    fitness_values = [objective(x) for x in solutions]
    es.tell(solutions, fitness_values)
    custom_callback(es)

print("Optimization terminated")
print(f"Reason: {list(es.stop().keys())}")
import cma
import pickle

# Initialize and run optimization
es = cma.CMAEvolutionStrategy([0, 0, 0], 0.5)

# Save state after 100 iterations
for _ in range(100):
    if es.stop():
        break
    solutions = es.ask()
    fitness_values = [sum(x**2) for x in solutions]
    es.tell(solutions, fitness_values)

# Save optimizer state
state = es.pickle_dumps()
# Or with regular pickle: state = pickle.dumps(es)

# Later: restore and continue
es_restored = cma.CMAEvolutionStrategy.pickle_loads(state)
# Or: es_restored = pickle.loads(state)

# Continue optimization
while not es_restored.stop():
    solutions = es_restored.ask()
    fitness_values = [sum(x**2) for x in solutions]
    es_restored.tell(solutions, fitness_values)

print(f"Total evaluations: {es_restored.result.evaluations}")
# CMA is a shortcut alias for CMAEvolutionStrategy
import cma

# These are equivalent:
es1 = cma.CMAEvolutionStrategy([0, 0, 0], 0.5)
es2 = cma.CMA([0, 0, 0], 0.5)

# Useful for shorter typing without IDE completion
CMA = cma.CMAEvolutionStrategy
es = CMA([0, 0, 0], 0.5)

# Install with Tessl CLI:
npx tessl i tessl/pypi-cma