CtrlK
BlogDocsLog inGet started
Tessl Logo

tessl/pypi-nevergrad

A Python toolbox for performing gradient-free optimization with unified interfaces for optimization algorithms and parameter handling.

Pending
Overview
Eval results
Files

benchmark.mddocs/

Functions and Benchmarking

Comprehensive benchmarking framework with 37+ artificial test functions and experimental infrastructure for optimizer evaluation and comparison. Provides standardized test functions, configurable difficulty levels, and systematic experiment management for optimization research.

Capabilities

Artificial Test Functions

Configurable benchmark functions with various difficulty features including rotation, translation, noise, and multi-block structures for comprehensive optimizer testing.

class ArtificialFunction:
    """
    Configurable artificial test function for optimization benchmarking.
    
    Parameters:
    - name: Core function name (str, e.g., "sphere", "rastrigin")
    - block_dimension: Dimension of each block (int)
    - num_blocks: Number of blocks (int, default=1)
    - useless_variables: Additional irrelevant variables (int, default=0)
    - noise_level: Noise level, 0=deterministic (float, default=0)
    - noise_dissymmetry: Asymmetric noise (bool, default=False)
    - rotation: Apply rotation transforms (bool, default=False)
    - translation_factor: Translation strength (float, default=1.0)
    - hashing: Apply hashing transforms (bool, default=False)
    - aggregator: Block combination method (str, "max" or "mean")
    - split: Split into separate objectives (bool, default=False)
    - bounded: Add bounds constraint (bool, default=False)
    - expo: Exponent for transforms (float, default=1.0)
    - zero_pen: Zero penalty mode (bool, default=False)
    """
    
    def __init__(
        self,
        name: str,
        block_dimension: int,
        num_blocks: int = 1,
        useless_variables: int = 0,
        noise_level: float = 0,
        noise_dissymmetry: bool = False,
        rotation: bool = False,
        translation_factor: float = 1.0,
        hashing: bool = False,
        aggregator: str = "max",
        split: bool = False,
        bounded: bool = False,
        expo: float = 1.0,
        zero_pen: bool = False
    ):
        """Initialize artificial function with specified configuration."""
    
    def __call__(self, x) -> float:
        """Evaluate function at point x."""

class ExperimentFunction:
    """
    Base class for experiment functions that combines callable function 
    with its parametrization for systematic benchmarking.
    """
    
    def __init__(self, function: Callable, parametrization: Parameter):
        """Initialize with function and parametrization."""
    
    def __call__(self, *args, **kwargs) -> float:
        """Evaluate the function."""
    
    def copy(self) -> 'ExperimentFunction':
        """Create thread-safe copy for parallel evaluation."""
    
    def evaluation_function(self, *args, **kwargs) -> float:
        """Core evaluation function."""
    
    def compute_pseudotime(self, input_parameter: Parameter, loss: float) -> float:
        """Compute pseudo-time for this evaluation."""

class FarOptimumFunction(ArtificialFunction):
    """Function with optimum located at a distant point."""

Benchmark Experiment Framework

Systematic experiment management for optimizer evaluation with parallel execution, result tracking, and statistical analysis capabilities.

class Experiment:
    """
    Main class for running optimization experiments with systematic 
    evaluation and result tracking.
    
    Parameters:
    - function: Function to optimize (ExperimentFunction)
    - optimizer: Optimizer name or instance (str or ConfiguredOptimizer)
    - budget: Number of function evaluations (int)
    - num_workers: Parallel workers (int, default=1)
    - batch_mode: Batch evaluation mode (bool, default=True)
    - seed: Random seed for reproducibility (int, optional)
    - constraint_violation: Constraint violation handling (optional)
    - penalize_violation_at_test: Penalize violations (bool, default=True)
    - suggestions: Initial suggestions (optional)
    """
    
    def __init__(
        self,
        function: ExperimentFunction,
        optimizer: Union[str, ConfiguredOptimizer],
        budget: int,
        num_workers: int = 1,
        batch_mode: bool = True,
        seed: Optional[int] = None,
        constraint_violation: Optional[ArrayLike] = None,
        penalize_violation_at_test: bool = True,
        suggestions: Optional[ArrayLike] = None
    ):
        """Initialize experiment with function and optimizer."""
    
    def run(self) -> Parameter:
        """
        Execute the optimization experiment.
        
        Returns:
            Best parameter found during optimization
        """
    
    def is_incoherent(self) -> bool:
        """Return True if the experiment configuration is incoherent (known to be invalid and unrunnable)."""

# Experiment registry for predefined benchmark suites
registry: Registry
"""Registry containing predefined experiment configurations."""

Available Core Functions

Complete library of 37+ classical and modern test functions for comprehensive optimizer evaluation across different difficulty characteristics.

# Classical continuous functions
AVAILABLE_FUNCTIONS = [
    # Unimodal functions
    "sphere", "sphere1", "sphere2", "sphere4",  # Sphere variants
    "ellipsoid", "altellipsoid",                # Ill-conditioned ellipsoids  
    "cigar", "altcigar", "bentcigar",          # Cigar functions
    "discus",                                   # Discus function
    "rosenbrock",                              # Rosenbrock valley
    
    # Multimodal functions
    "ackley",                                  # Ackley function
    "rastrigin", "bucherastrigin",            # Rastrigin variants
    "griewank",                               # Griewank function
    "schwefel_1_2",                           # Schwefel function
    "lunacek",                                # Bi-modal function
    "multipeak",                              # Multi-peak function
    "hm",                                     # Modern multimodal
    
    # Deceptive functions
    "deceptiveillcond", "deceptivepath", "deceptivemultimodal",
    
    # Step functions (zero gradients)
    "stepellipsoid", "stepdoublelinearslope",
    
    # Linear functions
    "slope", "doublelinearslope", "linear",
    
    # Noisy functions
    "st0", "st1", "st10", "st100",            # Styblinski-Tang variants
    
    # Integration functions
    "genzcornerpeak", "minusgenzcornerpeak",
    "genzgaussianpeakintegral", "minusgenzgaussianpeakintegral"
]

Specialized Function Domains

Domain-specific benchmark functions for real-world optimization scenarios including machine learning, reinforcement learning, and control systems.

# Available through nevergrad.functions submodules (not in main exports)
# Access via: from nevergrad.functions import ml, rl, games, control, etc.

# Machine learning hyperparameter tuning
ml_functions = [
    "keras_tuning",      # Neural network hyperparameters
    "sklearn_tuning",    # Scikit-learn model tuning
]

# Reinforcement learning environments  
rl_functions = [
    "gym_environments",  # OpenAI Gym integration
    "control_problems",  # Classic control tasks
]

# Game-based optimization
game_functions = [
    "game_scenarios",    # Strategic game optimization
]

# Control system optimization
control_functions = [
    "pid_tuning",        # PID controller optimization
    "system_control",    # General control system tuning
]

Usage Examples

Creating Basic Test Functions

from nevergrad.functions import ArtificialFunction

# Simple 10D sphere function
func = ArtificialFunction("sphere", block_dimension=10)

# Challenging setup with rotation and noise
func = ArtificialFunction(
    name="rastrigin",
    block_dimension=5,
    noise_level=0.1,
    rotation=True,
    translation_factor=1.5
)

# Multi-block function with useless variables
func = ArtificialFunction(
    name="ackley", 
    block_dimension=10,
    num_blocks=3,
    useless_variables=5,
    aggregator="mean"
)

Running Benchmark Experiments

from nevergrad.benchmark import Experiment
from nevergrad.functions import ArtificialFunction
import nevergrad as ng

# Create test function
func = ArtificialFunction("sphere", block_dimension=20)

# Run single experiment
experiment = Experiment(func, "CMA", budget=1000)
result = experiment.run()
print(f"Best value: {func(result.value)}")
print(f"Best point: {result.value}")

# Compare multiple optimizers
optimizers = ["CMA", "DE", "PSO", "OnePlusOne"]
results = {}

for optimizer_name in optimizers:
    experiment = Experiment(func, optimizer_name, budget=500, seed=42)
    recommendation = experiment.run()
    results[optimizer_name] = func(recommendation.value)
    print(f"{optimizer_name}: {results[optimizer_name]:.6f}")

Advanced Experiment Configuration

# Parallel evaluation with multiple workers
experiment = Experiment(
    function=func,
    optimizer="CMA",
    budget=2000,
    num_workers=4,          # Parallel evaluation
    batch_mode=True,        # Batch evaluation
    seed=12345             # Reproducible results
)

# With initial suggestions
import numpy as np
initial_points = [ng.p.Array(init=np.random.randn(20)) for _ in range(5)]

experiment = Experiment(
    function=func,
    optimizer="CMA", 
    budget=1000,
    suggestions=initial_points
)
result = experiment.run()

Using Different Function Difficulties

# Easy: Basic sphere function
easy_func = ArtificialFunction("sphere", block_dimension=10)

# Medium: Multimodal with some noise
medium_func = ArtificialFunction(
    "rastrigin", 
    block_dimension=10,
    noise_level=0.05
)

# Hard: Rotated, translated, multi-block with noise
hard_func = ArtificialFunction(
    "ackley",
    block_dimension=5,
    num_blocks=4,
    rotation=True,
    translation_factor=2.0,
    noise_level=0.1,
    useless_variables=10
)

# Compare optimizer performance across difficulties
difficulties = [
    ("Easy", easy_func),
    ("Medium", medium_func), 
    ("Hard", hard_func)
]

for name, func in difficulties:
    experiment = Experiment(func, "CMA", budget=500)
    result = experiment.run()
    print(f"{name}: {func(result.value):.6f}")

Accessing Predefined Experiments

from nevergrad.benchmark import registry

# List available experiment suites
print("Available experiments:", list(registry.keys()))

# Run predefined experiments (if available)
# Note: registry contents depend on optional dependencies
for experiment_name in registry.keys():
    experiments = registry[experiment_name](seed=42)
    for experiment in experiments[:3]:  # Run first 3 experiments
        result = experiment.run()
        print(f"{experiment_name}: {result}")

Install with Tessl CLI

npx tessl i tessl/pypi-nevergrad

docs

benchmark.md

callbacks.md

index.md

ops.md

optimizer-families.md

optimizers.md

parametrization.md

types-and-errors.md

tile.json