# Advanced Optimization

Advanced optimization techniques including surrogate models, a pure Python implementation, and noise handling for specialized use cases.

## Surrogate Model Optimization

CMA-ES with linear-quadratic surrogate models (lq-CMA-ES) reduces the number of true evaluations required when the objective function is expensive to compute.

### fmin_lq_surr2 - Surrogate Model Interface (Recommended)

```python { .api }
def fmin_lq_surr2(
    objective_function,
    x0,
    sigma0,
    options=None,
    inject=True,
    restarts=0,
    incpopsize=2,
    keep_model=False,
    not_evaluated=np.isnan,
    callback=None
):
    """
    Minimize an objective function using lq-CMA-ES with surrogate models.

    Linear-quadratic CMA-ES builds global surrogate models (linear or quadratic)
    to reduce the number of expensive function evaluations by predicting
    function values for candidate solutions.

    Parameters:
    -----------
    objective_function : callable
        Expensive function to minimize. Called as objective_function(x).

    x0 : array-like or callable
        Initial solution estimate. Can be a callable returning a different
        starting point for each restart.

    sigma0 : float or array-like
        Initial standard deviation (step size).

    options : dict, optional
        CMA-ES options. See CMAOptions() for available options.
        Important for surrogate models:
        - 'maxfevals': Maximum true function evaluations (crucial for expensive functions)
        - 'ftarget': Target function value for termination

    inject : bool, optional
        Whether to inject the best surrogate solution in each iteration (default True).

    restarts : int, optional
        Number of IPOP restarts to perform (default 0).

    incpopsize : float, optional
        Population size multiplier for restarts (default 2).

    keep_model : bool, optional
        Whether to keep the surrogate model between restarts (default False).
        If False, a new model is built for each restart.

    not_evaluated : callable, optional
        Function to test whether a value indicates a missing evaluation
        (default np.isnan).

    callback : callable, optional
        Callback function called each iteration.

    Returns:
    --------
    tuple[numpy.ndarray, CMAEvolutionStrategy]
        (xbest, es) where xbest is the best solution found and es contains
        the optimization details.
        Note: es.result.xfavorite is often the best estimate of the optimum.

    Examples:
    ---------
    >>> import cma
    >>> import numpy as np
    >>>
    >>> # Expensive function simulation
    >>> def expensive_function(x):
    ...     # Simulate expensive computation
    ...     return sum((x - 1)**2) + 0.1 * np.random.randn()
    >>>
    >>> # Optimize with limited evaluations
    >>> x, es = cma.fmin_lq_surr2(
    ...     expensive_function,
    ...     [0, 0, 0],
    ...     0.5,
    ...     options={'maxfevals': 100, 'verb_disp': 0}
    ... )
    >>> print(f"Best solution: {x}")
    >>> print(f"Evaluations used: {es.countevals}")
    >>>
    >>> # With restarts for robustness
    >>> x, es = cma.fmin_lq_surr2(
    ...     expensive_function,
    ...     lambda: np.random.randn(3),  # Random start for each restart
    ...     0.5,
    ...     restarts=2,
    ...     options={'maxfevals': 200}
    ... )
    >>>
    >>> # For very expensive functions
    >>> def very_expensive(x):
    ...     # Simulate a 10-second computation
    ...     import time
    ...     time.sleep(0.1)  # Reduced for the example
    ...     return sum(x**2)
    >>>
    >>> x, es = cma.fmin_lq_surr2(
    ...     very_expensive,
    ...     [1, 1],
    ...     0.3,
    ...     options={'maxfevals': 50, 'ftarget': 1e-6}
    ... )
    """
    pass
```
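To get a feel for the savings, plain CMA-ES and lq-CMA-ES can be run side by side on the same evaluation budget. A minimal sketch, not part of the API above; the ellipsoid test function, the dimension of 8, and the 300-evaluation budget are arbitrary illustrative choices:

```python
import cma
import numpy as np

def ellipsoid(x):
    """Convex quadratic test function with conditioning 1e6."""
    n = len(x)
    weights = 10 ** (6 * np.arange(n) / (n - 1))
    return float(sum(weights * np.asarray(x) ** 2))

budget = {'maxfevals': 300, 'verb_disp': 0}

# Plain CMA-ES: every candidate costs one true evaluation
x_plain, es_plain = cma.fmin2(ellipsoid, 8 * [1], 0.3, options=budget)

# lq-CMA-ES: the surrogate model ranks many candidates without true evaluations
x_surr, es_surr = cma.fmin_lq_surr2(ellipsoid, 8 * [1], 0.3, options=budget)

print(f"plain CMA-ES: f = {ellipsoid(x_plain):.3e} after {es_plain.countevals} evals")
print(f"lq-CMA-ES   : f = {ellipsoid(x_surr):.3e} after {es_surr.countevals} evals")
```

On smooth problems like this, the surrogate variant typically reaches a noticeably lower function value within the same budget.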

### fmin_lq_surr - Legacy Surrogate Interface

```python { .api }
def fmin_lq_surr(objective_function, x0, sigma0, options=None, **kwargs):
    """
    Minimize an objective function using lq-CMA-ES with surrogate models.

    Legacy interface to surrogate model optimization. Prefer fmin_lq_surr2
    for new code, as it provides better control and cleaner results.

    Parameters:
    -----------
    Same as fmin_lq_surr2 but with different defaults and behavior.

    Returns:
    --------
    tuple[numpy.ndarray, CMAEvolutionStrategy]
        (xbest, es) where results may be based on surrogate values.

    Notes:
    ------
    - xbest takes into account only recent history, not all evaluations
    - es.result may be confusing because it is partly based on surrogate values
    - es.best contains the solution with the best surrogate value (not the best true value)
    - The model is kept the same across restarts (use fmin_lq_surr2 to change this)
    """
    pass
```
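Because es.best may reflect surrogate rather than true values, one way to keep the bookkeeping unambiguous is to wrap the objective and record the best truly evaluated point yourself. A minimal sketch; the BestRecorder wrapper is a hypothetical helper, not part of cma:

```python
import cma
import numpy as np

class BestRecorder:
    """Hypothetical wrapper that records the best *truly* evaluated point."""
    def __init__(self, func):
        self.func = func
        self.xbest, self.fbest = None, float('inf')

    def __call__(self, x):
        f = self.func(x)
        if f < self.fbest:  # only genuine evaluations reach this point
            self.xbest, self.fbest = np.array(x), f
        return f

recorded = BestRecorder(lambda x: sum((np.asarray(x) - 1) ** 2))
x, es = cma.fmin_lq_surr(recorded, [0, 0, 0], 0.5,
                         options={'maxfevals': 100, 'verb_disp': 0})
print(f"best truly evaluated: f = {recorded.fbest:.3e} at {recorded.xbest}")
```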

### Surrogate Model Usage Patterns

```python { .api }
import cma
import numpy as np
import functools

# Pattern 1: Simple expensive function optimization
def expensive_objective(x):
    """Simulate an expensive function with a short delay."""
    import time
    time.sleep(0.01)  # Simulate computation time
    return sum((x - np.array([1, 2, 3]))**2) + 0.01 * np.random.randn()

# Optimize with surrogate model
x_best, es = cma.fmin_lq_surr2(
    expensive_objective,
    [0, 0, 0],
    0.5,
    options={
        'maxfevals': 80,   # Limit true evaluations
        'ftarget': 1e-4,   # Stop when good enough
        'verb_disp': 10    # Show progress every 10 iterations
    }
)

print(f"Found solution: {x_best}")
print(f"True evaluations: {es.countevals}")
print(f"Total iterations: {es.countiter}")

# Pattern 2: Using functools.partial for additional arguments
def parametric_function(x, a, b):
    """Function with additional parameters."""
    return sum((x - a)**2) + b * sum(np.sin(x))

# Create a partial function with fixed parameters
objective = functools.partial(parametric_function, a=np.array([1, 2]), b=0.1)

x_best, es = cma.fmin_lq_surr2(
    objective,
    [0, 0],
    0.3,
    options={'maxfevals': 60}
)

# Pattern 3: Multiple restarts with different starting points
def random_start():
    """Generate a random starting point for each restart."""
    return np.random.uniform(-2, 2, 5)

x_best, es = cma.fmin_lq_surr2(
    lambda x: sum(x**4 - 16*x**2 + 5*x),  # Multi-modal function
    random_start,    # Callable yields a different start for each restart
    0.5,
    restarts=3,
    incpopsize=1.5,  # Smaller population increase
    options={'maxfevals': 200}
)

# Pattern 4: Custom evaluation detection
def custom_not_evaluated(value):
    """Custom test for values that indicate a missing evaluation."""
    return np.isnan(value) or np.isinf(value) or value < -1e10

x_best, es = cma.fmin_lq_surr2(
    expensive_objective,
    [0, 0, 0],
    0.5,
    not_evaluated=custom_not_evaluated,
    options={'maxfevals': 100}
)
```

## Pure Python Implementation

A pure Python CMA-ES implementation with no numpy dependency, suitable for environments where numpy is unavailable.
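When the same code must run with and without numpy, one option is to try the numpy-backed interface first and fall back to the pure Python one. A minimal sketch, assuming purecma.py has been copied next to the script as a standalone module (it has no third-party dependencies); the minimize wrapper is hypothetical:

```python
# Prefer the numpy-backed optimizer; fall back to pure Python otherwise.
def minimize(f, x0, sigma0, maxfevals=10000):
    try:
        import cma
        x, es = cma.fmin2(f, x0, sigma0,
                          options={'maxfevals': maxfevals, 'verb_disp': 0})
        return list(x), es.result.fbest
    except ImportError:
        import purecma  # assumed vendored single-file module
        xbest, info = purecma.fmin(f, x0, sigma0,
                                   maxfevals=maxfevals, verb_disp=0)
        return xbest, info['fbest']

# Usage: works in either environment (sum over a generator accepts
# both lists and numpy arrays)
xbest, fbest = minimize(lambda x: sum(xi**2 for xi in x), [1.0, 2.0], 0.5)
print(f"f = {fbest:.3e} at {xbest}")
```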

### purecma.fmin - Pure Python Function Interface

```python { .api }
def purecma.fmin(
    objective_fct,
    xstart,
    sigma,
    args=(),
    maxfevals='1e3 * N**2',
    ftarget=None,
    verb_disp=100,
    verb_log=1,
    verb_save=1000
):
    """
    Pure Python CMA-ES minimization without a numpy dependency.

    Minimalistic implementation for reading/understanding the CMA-ES algorithm
    or when numpy is not available. For serious applications with numpy
    available, use cma.fmin2 or cma.CMAEvolutionStrategy instead.

    Parameters:
    -----------
    objective_fct : callable
        Function to minimize. Takes a list of floats, returns a single float.
        The function is minimized (lower values are better).

    xstart : list or sequence
        Initial solution vector. Its length defines the search space dimension.

    sigma : float
        Initial step-size (standard deviation in each coordinate).

    args : tuple, optional
        Additional arguments passed to objective_fct as objective_fct(x, *args).

    maxfevals : int or str, optional
        Maximum function evaluations. A string is evaluated with N as the
        dimension (default '1e3 * N**2').

    ftarget : float, optional
        Target function value for termination.

    verb_disp : int, optional
        Display progress every verb_disp iterations (default 100, 0 for never).

    verb_log : int, optional
        Log data every verb_log iterations (default 1, 0 for never).

    verb_save : int, optional
        Save plot every verb_save iterations (default 1000, 0 for never).

    Returns:
    --------
    tuple[list, dict]
        (xbest, dict_info) where:
        - xbest: best solution found (list of floats)
        - dict_info: dictionary with optimization information including
          'fbest', 'evals', 'iterations', 'stop'

    Examples:
    ---------
    >>> import cma.purecma as purecma
    >>>
    >>> # Simple optimization without numpy
    >>> def sphere(x):
    ...     return sum(xi**2 for xi in x)
    >>>
    >>> xbest, info = purecma.fmin(sphere, [1, 2, 3], 0.5)
    >>> print(f"Best solution: {xbest}")
    >>> print(f"Best fitness: {info['fbest']}")
    >>> print(f"Evaluations: {info['evals']}")
    >>>
    >>> # With additional arguments
    >>> def shifted_sphere(x, shift):
    ...     return sum((xi - si)**2 for xi, si in zip(x, shift))
    >>>
    >>> xbest, info = purecma.fmin(
    ...     shifted_sphere,
    ...     [0, 0, 0],
    ...     0.5,
    ...     args=([1, 2, 3],)  # Shift vector
    ... )
    >>>
    >>> # With termination criteria
    >>> xbest, info = purecma.fmin(
    ...     sphere,
    ...     [1, 2, 3, 4, 5],
    ...     0.3,
    ...     maxfevals=1000,
    ...     ftarget=1e-8,
    ...     verb_disp=50
    ... )
    """
    pass
```
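The string form of maxfevals is evaluated with N bound to the search space dimension, which keeps budgets dimension-aware without extra code. A short illustration; the sphere function and the '100 * N' budget are arbitrary choices:

```python
import cma.purecma as purecma

def sphere(x):
    return sum(xi**2 for xi in x)

# '100 * N' is evaluated with N = 10 here, i.e. a budget of 1000 evaluations
xbest, info = purecma.fmin(sphere, [1.0] * 10, 0.5,
                           maxfevals='100 * N', verb_disp=0)
print(f"stopped after {info['evals']} evaluations: f = {info['fbest']:.3e}")
```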

### purecma.CMAES - Pure Python Class Interface

```python { .api }
class purecma.CMAES:
    """
    Pure Python CMA-ES class providing an ask-and-tell interface.

    Minimalistic implementation without a numpy dependency. Suitable for
    understanding the CMA-ES algorithm or when numpy is unavailable.
    """

    def __init__(self, xmean, sigma, opts=None):
        """
        Initialize the pure Python CMA-ES optimizer.

        Parameters:
        -----------
        xmean : list
            Initial mean (starting point); determines the dimension.

        sigma : float
            Initial step size (standard deviation).

        opts : dict, optional
            Options dictionary. Available options:
            - 'maxiter': maximum iterations
            - 'maxfevals': maximum function evaluations
            - 'ftarget': target function value
            - 'popsize': population size (default 4 + floor(3*log(N)))
            - 'seed': random seed
            - 'verb_disp': display verbosity

        Examples:
        ---------
        >>> import cma.purecma as purecma
        >>>
        >>> # Basic initialization
        >>> es = purecma.CMAES([0, 0, 0], 0.5)
        >>>
        >>> # With options
        >>> opts = {'maxfevals': 1000, 'popsize': 10, 'seed': 42}
        >>> es = purecma.CMAES([1, 2, 3], 0.3, opts)
        """
        pass

    def ask(self):
        """
        Sample candidate solutions from the current distribution.

        Returns:
        --------
        list[list]
            List of candidate solutions (each solution is a list of floats).

        Examples:
        ---------
        >>> es = purecma.CMAES([0, 0], 0.5)
        >>> solutions = es.ask()
        >>> len(solutions) == es.popsize
        True
        >>> isinstance(solutions[0], list)
        True
        """
        pass

    def tell(self, arx, arf):
        """
        Update the distribution based on function evaluations.

        This is where the main CMA-ES algorithm implementation occurs.

        Parameters:
        -----------
        arx : list[list]
            List of candidate solutions (as returned by ask()).

        arf : list[float]
            Corresponding fitness values (lower is better).

        Examples:
        ---------
        >>> import cma.purecma as purecma
        >>>
        >>> def objective(x):
        ...     return sum(xi**2 for xi in x)
        >>>
        >>> es = purecma.CMAES([1, 2], 0.5)
        >>> while not es.stop():
        ...     solutions = es.ask()
        ...     fitness = [objective(x) for x in solutions]
        ...     es.tell(solutions, fitness)
        >>>
        >>> print(f"Best solution: {es.result()[0]}")
        """
        pass

    def stop(self):
        """
        Check termination criteria.

        Returns:
        --------
        dict or False
            Termination conditions if stopped, False otherwise.
        """
        pass

    def result(self):
        """
        Return the current best result.

        Returns:
        --------
        tuple[list, float, int, int]
            (xbest, fbest, evals, iterations)
        """
        pass

    def disp(self, verb_modulo=1):
        """
        Display the current optimization status.

        Parameters:
        -----------
        verb_modulo : int, optional
            Display every verb_modulo iterations.
        """
        pass
```

### Pure Python Usage Patterns

```python { .api }
import cma.purecma as purecma

# Pattern 1: Simple function minimization
def rosenbrock(x):
    """Pure Python Rosenbrock function."""
    return sum(100 * (x[i+1] - x[i]**2)**2 + (1 - x[i])**2
               for i in range(len(x) - 1))

# Using the functional interface
xbest, info = purecma.fmin(rosenbrock, [0, 0], 0.5, verb_disp=50)
print(f"Functional result: {xbest}, fitness: {info['fbest']}")

# Pattern 2: Ask-and-tell interface with a custom loop
es = purecma.CMAES([0, 0], 0.5, {'maxfevals': 2000, 'ftarget': 1e-8})

iteration = 0
while not es.stop():
    solutions = es.ask()
    fitness_values = [rosenbrock(x) for x in solutions]
    es.tell(solutions, fitness_values)

    iteration += 1
    if iteration % 50 == 0:
        es.disp()

result = es.result()
print(f"Ask-tell result: {result[0]}, fitness: {result[1]}")

# Pattern 3: Multi-dimensional problem without numpy
def high_dim_sphere(x):
    """High-dimensional sphere function."""
    return sum(xi**2 for xi in x)

# 20-dimensional problem
dimension = 20
initial_point = [0.5] * dimension
xbest, info = purecma.fmin(
    high_dim_sphere,
    initial_point,
    0.3,
    maxfevals=dimension * 1000,
    verb_disp=100
)

print(f"20D result: fitness = {info['fbest']}, evals = {info['evals']}")

# Pattern 4: Custom termination and monitoring
class OptimizationLogger:
    def __init__(self):
        self.history = []

    def log(self, iteration, fbest, sigma):
        self.history.append((iteration, fbest, sigma))

def custom_optimization():
    """Custom optimization with logging and early termination."""

    def objective(x):
        # Noisy sphere function
        import random
        return sum(xi**2 for xi in x) + 0.01 * random.gauss(0, 1)

    logger = OptimizationLogger()
    es = purecma.CMAES([1, 1, 1], 0.5, {'maxfevals': 5000})

    best_ever = float('inf')
    no_improvement_count = 0

    while not es.stop():
        solutions = es.ask()
        fitness_values = [objective(x) for x in solutions]
        es.tell(solutions, fitness_values)

        current_best = min(fitness_values)
        logger.log(es.countiter, current_best, es.sigma)

        # Custom termination: no improvement for 100 iterations
        if current_best < best_ever:
            best_ever = current_best
            no_improvement_count = 0
        else:
            no_improvement_count += 1

        if no_improvement_count > 100:
            break

        if es.countiter % 50 == 0:
            print(f"Iter {es.countiter}: best = {current_best:.6f}, "
                  f"sigma = {es.sigma:.6f}")

    return es.result(), logger.history

result, history = custom_optimization()
print(f"Custom optimization completed: {result[1]:.6f}")
```

## Noise Handling

Tools for handling noisy objective functions where function values are subject to uncertainty.
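The basic idea behind the tools below is that re-evaluating a point several times and aggregating shrinks the noise, at the cost of extra evaluations. A minimal sketch of the effect; the noise level of 0.5 and the sample count of 30 are illustrative:

```python
import numpy as np

def noisy_sphere(x):
    """Sphere function with additive Gaussian noise."""
    return sum(np.asarray(x)**2) + 0.5 * np.random.randn()

x = [0.1, 0.2]
single = noisy_sphere(x)                         # one noisy measurement
repeated = [noisy_sphere(x) for _ in range(30)]
aggregated = np.median(repeated)                 # error shrinks roughly like 1/sqrt(30)

print(f"true value  : {sum(xi**2 for xi in x):.4f}")
print(f"single eval : {single:.4f}")
print(f"median of 30: {aggregated:.4f}")
```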

### NoiseHandler Class

```python { .api }
class NoiseHandler:
    """
    Noise handling for uncertain/noisy objective functions.

    Implements adaptive noise handling according to Hansen et al. 2009,
    "A Method for Handling Uncertainty in Evolutionary Optimization".

    Controls noise via step-size increase and the number of re-evaluations.
    """

    def __init__(self, N, maxevals=[1, 1, 30], aggregate=np.median):
        """
        Initialize the noise handler.

        Parameters:
        -----------
        N : int
            Search space dimension.

        maxevals : int or list, optional
            Maximum evaluations per solution. If a list:
            - One value: max evaluations
            - Two values: [min_evals, max_evals]
            - Three values: [min_evals, start_evals, max_evals]
            Default [1, 1, 30].

        aggregate : callable, optional
            Function to aggregate multiple evaluations (default np.median).
            Common choices: np.mean, np.median, min, max.

        Examples:
        ---------
        >>> import cma
        >>> import numpy as np
        >>>
        >>> # Create a noise handler for a 5D problem
        >>> noise_handler = cma.NoiseHandler(5, maxevals=[1, 2, 10])
        >>>
        >>> # Noisy objective function
        >>> def noisy_sphere(x, noise_level=0.1):
        ...     true_value = sum(x**2)
        ...     noise = noise_level * np.random.randn()
        ...     return true_value + noise
        >>>
        >>> # Use with fmin2
        >>> x, es = cma.fmin2(
        ...     noisy_sphere,
        ...     5 * [0.1],
        ...     0.5,
        ...     noise_handler=noise_handler,
        ...     options={'maxfevals': 2000}
        ... )
        """
        pass

    def __call__(self, func, x, *args, **kwargs):
        """
        Evaluate a function with noise handling.

        Parameters:
        -----------
        func : callable
            Noisy function to evaluate.

        x : array-like
            Solution to evaluate.

        *args, **kwargs :
            Additional arguments for func.

        Returns:
        --------
        float
            Aggregated function value after multiple evaluations.
        """
        pass

    @property
    def evaluations(self):
        """Current number of evaluations per solution."""
        pass

    def reeval_if_needed(self, es):
        """
        Re-evaluate solutions if the noise level suggests it is beneficial.

        Parameters:
        -----------
        es : CMAEvolutionStrategy
            Evolution strategy instance to potentially re-evaluate.
        """
        pass
```

### Noise Handling Usage Patterns

```python { .api }
import cma
import numpy as np

# Pattern 1: Simple noisy optimization
def noisy_rosenbrock(x, noise_std=0.1):
    """Rosenbrock function with additive Gaussian noise."""
    true_value = sum(100*(x[1:] - x[:-1]**2)**2 + (1 - x[:-1])**2)
    noise = noise_std * np.random.randn()
    return true_value + noise

# Use a noise handler with fmin2
noise_handler = cma.NoiseHandler(4, maxevals=[1, 2, 20])

x_best, es = cma.fmin2(
    noisy_rosenbrock,
    4 * [0.1],
    0.5,
    noise_handler=noise_handler,
    options={'maxfevals': 3000, 'verb_disp': 100}
)

print(f"Noisy optimization result: {x_best}")
print(f"Final evaluations per point: {noise_handler.evaluations}")

# Pattern 2: Custom noise aggregation
def robust_aggregation(values):
    """Custom aggregation using a trimmed mean."""
    values = np.array(values)
    if len(values) <= 2:
        return np.mean(values)
    # Trim the extreme 10% of values from each end
    trim = max(1, int(0.1 * len(values)))
    sorted_vals = np.sort(values)
    return np.mean(sorted_vals[trim:-trim])

noise_handler_robust = cma.NoiseHandler(
    3,
    maxevals=[2, 5, 15],
    aggregate=robust_aggregation
)

# Pattern 3: Manual ask-and-tell with a noise handler
def manual_noisy_optimization():
    """Manual optimization loop with noise handling."""

    def very_noisy_function(x):
        # High noise level
        return sum(x**2) + 0.5 * np.random.randn()

    es = cma.CMAEvolutionStrategy([1, 1, 1], 0.5, {'maxfevals': 2000})
    noise_handler = cma.NoiseHandler(3, maxevals=[1, 3, 10])

    while not es.stop():
        solutions = es.ask()

        # Evaluate with noise handling
        fitness_values = []
        for x in solutions:
            # Use the noise handler directly
            f_val = noise_handler(very_noisy_function, x)
            fitness_values.append(f_val)

        es.tell(solutions, fitness_values)

        # Let the noise handler decide about re-evaluations
        noise_handler.reeval_if_needed(es)

        if es.countiter % 50 == 0:
            print(f"Iter {es.countiter}: "
                  f"evals/point = {noise_handler.evaluations}, "
                  f"best = {es.result.fbest:.6f}")

    return es.result

result = manual_noisy_optimization()
print(f"Manual noisy result: {result.fbest}")

# Pattern 4: Adaptive noise handling parameters
def adaptive_noise_optimization():
    """Optimization with a noise level that changes over time."""

    iteration_counter = [0]  # Use a list for a mutable closure

    def time_varying_noisy_function(x):
        iteration_counter[0] += 1
        true_val = sum((x - np.array([1, 2, 3]))**2)

        # Noise decreases over time (learning/adaptation scenario)
        noise_level = 1.0 / (1 + iteration_counter[0] / 1000)
        noise = noise_level * np.random.randn()
        return true_val + noise

    # Start with conservative noise handling
    noise_handler = cma.NoiseHandler(3, maxevals=[2, 4, 20])

    x_best, es = cma.fmin2(
        time_varying_noisy_function,
        [0, 0, 0],
        0.5,
        noise_handler=noise_handler,
        options={'maxfevals': 2500}
    )

    return x_best, es

x_best, es = adaptive_noise_optimization()
print(f"Adaptive noise result: {x_best}")
```

## Integration Examples

### Combining Surrogate Models with Noise Handling

```python { .api }
import cma
import numpy as np

def expensive_noisy_function(x):
    """Simulate a function that is both noisy and computationally costly."""
    import time
    time.sleep(0.01)  # Simulate computation time

    true_value = sum((x - np.array([1, 2, 3]))**2)
    noise = 0.1 * np.random.randn()
    return true_value + noise

# lq-surr cannot be combined with NoiseHandler directly,
# but noise-aware evaluation achieves a similar effect
def noise_aware_expensive_function(x, num_evals=3):
    """Evaluate the expensive function multiple times to reduce noise."""
    evaluations = [expensive_noisy_function(x) for _ in range(num_evals)]
    return np.median(evaluations)  # Robust aggregation

# Use the surrogate model with noise-reduced evaluations
x_best, es = cma.fmin_lq_surr2(
    lambda x: noise_aware_expensive_function(x, 2),  # 2 evaluations per point
    [0, 0, 0],
    0.5,
    options={'maxfevals': 60}  # Very limited budget
)

print(f"Expensive noisy optimization: {x_best}")
print(f"Total expensive evaluations: ~{es.countevals * 2}")
```

799

800

### Pure Python with Custom Features

```python { .api }
import cma.purecma as purecma

def pure_python_with_constraints():
    """Pure Python optimization with simple box constraints."""

    def constrained_objective(x):
        # Penalty for constraint violations (simple box constraints)
        penalty = 0
        for xi in x:
            if xi < -2:
                penalty += (xi + 2)**2 * 1000
            elif xi > 2:
                penalty += (xi - 2)**2 * 1000

        # Original objective
        obj = sum((xi - (i+1)*0.5)**2 for i, xi in enumerate(x))
        return obj + penalty

    # Optimize with pure Python CMA-ES
    es = purecma.CMAES([0, 0, 0, 0], 0.3, {'maxfevals': 2000})

    best_feasible = None
    best_feasible_f = float('inf')

    while not es.stop():
        solutions = es.ask()
        fitness_values = [constrained_objective(x) for x in solutions]
        es.tell(solutions, fitness_values)

        # Track the best feasible solution
        for x, f in zip(solutions, fitness_values):
            if all(-2 <= xi <= 2 for xi in x):  # Feasible
                if f < best_feasible_f:
                    best_feasible = x[:]
                    best_feasible_f = f

        if es.countiter % 100 == 0:
            es.disp()

    result = es.result()
    print("Pure Python with constraints:")
    print(f"Best CMA-ES solution: {result[0]}")
    print(f"Best feasible solution: {best_feasible}")

    return best_feasible, best_feasible_f

pure_python_with_constraints()
```