# Functions and Benchmarking

Comprehensive benchmarking framework with 37+ artificial test functions and experimental infrastructure for optimizer evaluation and comparison. Provides standardized test functions, configurable difficulty levels, and systematic experiment management for optimization research.

## Capabilities

### Artificial Test Functions

Configurable benchmark functions with various difficulty features including rotation, translation, noise, and multi-block structures for comprehensive optimizer testing.

```python { .api }
class ArtificialFunction:
    """
    Configurable artificial test function for optimization benchmarking.

    Parameters:
    - name: Core function name (str, e.g., "sphere", "rastrigin")
    - block_dimension: Dimension of each block (int)
    - num_blocks: Number of blocks (int, default=1)
    - useless_variables: Additional irrelevant variables (int, default=0)
    - noise_level: Noise level, 0=deterministic (float, default=0)
    - noise_dissymmetry: Asymmetric noise (bool, default=False)
    - rotation: Apply rotation transforms (bool, default=False)
    - translation_factor: Translation strength (float, default=1.0)
    - hashing: Apply hashing transforms (bool, default=False)
    - aggregator: Block combination method (str, "max" or "mean")
    - split: Split into separate objectives (bool, default=False)
    - bounded: Add bounds constraint (bool, default=False)
    - expo: Exponent for transforms (float, default=1.0)
    - zero_pen: Zero penalty mode (bool, default=False)
    """

    def __init__(
        self,
        name: str,
        block_dimension: int,
        num_blocks: int = 1,
        useless_variables: int = 0,
        noise_level: float = 0,
        noise_dissymmetry: bool = False,
        rotation: bool = False,
        translation_factor: float = 1.0,
        hashing: bool = False,
        aggregator: str = "max",
        split: bool = False,
        bounded: bool = False,
        expo: float = 1.0,
        zero_pen: bool = False
    ):
        """Initialize artificial function with specified configuration."""

    def __call__(self, x) -> float:
        """Evaluate function at point x."""

class ExperimentFunction:
    """
    Base class for experiment functions that combines callable function
    with its parametrization for systematic benchmarking.
    """

    def __init__(self, function: Callable, parametrization: Parameter):
        """Initialize with function and parametrization."""

    def __call__(self, *args, **kwargs) -> float:
        """Evaluate the function."""

    def copy(self) -> 'ExperimentFunction':
        """Create thread-safe copy for parallel evaluation."""

    def evaluation_function(self, *args, **kwargs) -> float:
        """Core evaluation function."""

    def compute_pseudotime(self, input_parameter: Parameter, loss: float) -> float:
        """Compute pseudo-time for this evaluation."""

class FarOptimumFunction(ArtificialFunction):
    """Function with optimum located at a distant point."""
```
78
79
### Benchmark Experiment Framework
80
81
Systematic experiment management for optimizer evaluation with parallel execution, result tracking, and statistical analysis capabilities.
82
83
```python { .api }
class Experiment:
    """
    Main class for running optimization experiments with systematic
    evaluation and result tracking.

    Parameters:
    - function: Function to optimize (ExperimentFunction)
    - optimizer: Optimizer name or instance (str or ConfiguredOptimizer)
    - budget: Number of function evaluations (int)
    - num_workers: Parallel workers (int, default=1)
    - batch_mode: Batch evaluation mode (bool, default=True)
    - seed: Random seed for reproducibility (int, optional)
    - constraint_violation: Constraint violation handling (optional)
    - penalize_violation_at_test: Penalize violations (bool, default=True)
    - suggestions: Initial suggestions (optional)
    """

    def __init__(
        self,
        function: ExperimentFunction,
        optimizer: Union[str, ConfiguredOptimizer],
        budget: int,
        num_workers: int = 1,
        batch_mode: bool = True,
        seed: Optional[int] = None,
        constraint_violation: Optional[ArrayLike] = None,
        penalize_violation_at_test: bool = True,
        suggestions: Optional[ArrayLike] = None
    ):
        """Initialize experiment with function and optimizer."""

    def run(self) -> Parameter:
        """
        Execute the optimization experiment.

        Returns:
        Best parameter found during optimization
        """

    def is_incoherent(self) -> bool:
        """Check if experiment configuration is incoherent (incompatible settings)."""

# Experiment registry for predefined benchmark suites
registry: Registry
"""Registry containing predefined experiment configurations."""
```
### Available Core Functions

Complete library of 37+ classical and modern test functions for comprehensive optimizer evaluation across different difficulty characteristics.

```python { .api }
# Classical continuous functions
AVAILABLE_FUNCTIONS = [
    # Unimodal functions
    "sphere", "sphere1", "sphere2", "sphere4",  # Sphere variants
    "ellipsoid", "altellipsoid",                # Ill-conditioned ellipsoids
    "cigar", "altcigar", "bentcigar",           # Cigar functions
    "discus",                                   # Discus function
    "rosenbrock",                               # Rosenbrock valley

    # Multimodal functions
    "ackley",                                   # Ackley function
    "rastrigin", "bucherastrigin",              # Rastrigin variants
    "griewank",                                 # Griewank function
    "schwefel_1_2",                             # Schwefel function
    "lunacek",                                  # Bi-modal function
    "multipeak",                                # Multi-peak function
    "hm",                                       # Modern multimodal

    # Deceptive functions
    "deceptiveillcond", "deceptivepath", "deceptivemultimodal",

    # Step functions (zero gradients)
    "stepellipsoid", "stepdoublelinearslope",

    # Linear functions
    "slope", "doublelinearslope", "linear",

    # Noisy functions
    "st0", "st1", "st10", "st100",              # Styblinski-Tang variants

    # Integration functions
    "genzcornerpeak", "minusgenzcornerpeak",
    "genzgaussianpeakintegral", "minusgenzgaussianpeakintegral"
]
```
171
172
### Specialized Function Domains
173
174
Domain-specific benchmark functions for real-world optimization scenarios including machine learning, reinforcement learning, and control systems.
175
176
```python { .api }
# Available through nevergrad.functions submodules (not in main exports)
# Access via: from nevergrad.functions import ml, rl, games, control, etc.

# Machine learning hyperparameter tuning
ml_functions = [
    "keras_tuning",    # Neural network hyperparameters
    "sklearn_tuning",  # Scikit-learn model tuning
]

# Reinforcement learning environments
rl_functions = [
    "gym_environments",  # OpenAI Gym integration
    "control_problems",  # Classic control tasks
]

# Game-based optimization
game_functions = [
    "game_scenarios",  # Strategic game optimization
]

# Control system optimization
control_functions = [
    "pid_tuning",      # PID controller optimization
    "system_control",  # General control system tuning
]
```
203
204
## Usage Examples
205
206
### Creating Basic Test Functions
207
208
```python
from nevergrad.functions import ArtificialFunction

# Simple 10D sphere function
func = ArtificialFunction("sphere", block_dimension=10)

# Challenging setup with rotation and noise
func = ArtificialFunction(
    name="rastrigin",
    block_dimension=5,
    noise_level=0.1,
    rotation=True,
    translation_factor=1.5
)

# Multi-block function with useless variables
func = ArtificialFunction(
    name="ackley",
    block_dimension=10,
    num_blocks=3,
    useless_variables=5,
    aggregator="mean"
)
```
### Running Benchmark Experiments

```python
from nevergrad.benchmark import Experiment
from nevergrad.functions import ArtificialFunction
import nevergrad as ng

# Create test function
func = ArtificialFunction("sphere", block_dimension=20)

# Run single experiment
experiment = Experiment(func, "CMA", budget=1000)
result = experiment.run()
print(f"Best value: {func(result)}")
print(f"Best point: {result.value}")

# Compare multiple optimizers
optimizers = ["CMA", "DE", "PSO", "OnePlusOne"]
results = {}

for optimizer_name in optimizers:
    experiment = Experiment(func, optimizer_name, budget=500, seed=42)
    recommendation = experiment.run()
    results[optimizer_name] = func(recommendation)
    print(f"{optimizer_name}: {results[optimizer_name]:.6f}")
```
259
260
### Advanced Experiment Configuration
261
262
```python
# Parallel evaluation with multiple workers
experiment = Experiment(
    function=func,
    optimizer="CMA",
    budget=2000,
    num_workers=4,    # Parallel evaluation
    batch_mode=True,  # Batch evaluation
    seed=12345        # Reproducible results
)

# With initial suggestions
import numpy as np
initial_points = [ng.p.Array(init=np.random.randn(20)) for _ in range(5)]

experiment = Experiment(
    function=func,
    optimizer="CMA",
    budget=1000,
    suggestions=initial_points
)
result = experiment.run()
```
285
286
### Using Different Function Difficulties
287
288
```python
# Easy: Basic sphere function
easy_func = ArtificialFunction("sphere", block_dimension=10)

# Medium: Multimodal with some noise
medium_func = ArtificialFunction(
    "rastrigin",
    block_dimension=10,
    noise_level=0.05
)

# Hard: Rotated, translated, multi-block with noise
hard_func = ArtificialFunction(
    "ackley",
    block_dimension=5,
    num_blocks=4,
    rotation=True,
    translation_factor=2.0,
    noise_level=0.1,
    useless_variables=10
)

# Compare optimizer performance across difficulties
difficulties = [
    ("Easy", easy_func),
    ("Medium", medium_func),
    ("Hard", hard_func)
]

for name, func in difficulties:
    experiment = Experiment(func, "CMA", budget=500)
    result = experiment.run()
    print(f"{name}: {func(result):.6f}")
```
### Accessing Predefined Experiments

```python
from nevergrad.benchmark import registry

# List available experiment suites
print("Available experiments:", list(registry.keys()))

# Run predefined experiments (if available)
# Note: registry contents depend on optional dependencies
for experiment_name in registry.keys():
    experiments = registry[experiment_name](seed=42)
    for experiment in experiments[:3]:  # Run first 3 experiments
        result = experiment.run()
        print(f"{experiment_name}: {result}")
```