# Batch Running

Mesa's batch running system enables systematic exploration of model behavior through parameter sweeps and repeated simulation runs. The `batch_run` function runs a model across a parameter space, collects model- and agent-level data, and supports parallel execution.

## Imports

```python { .api }
from mesa import batch_run, Model
from typing import Any, Iterable, Mapping
import pandas as pd
import numpy as np
```

## batch_run Function

The core batch running functionality is provided by `batch_run`, which orchestrates parameter sweeps and data collection across multiple model runs.

```python { .api }
def batch_run(
    model_cls: type[Model],
    parameters: Mapping[str, Any | Iterable[Any]],
    number_processes: int | None = 1,
    iterations: int = 1,
    data_collection_period: int = -1,
    max_steps: int = 1000,
    display_progress: bool = True,
) -> list[dict[str, Any]]:
    """Batch run a Mesa model with parameter sweeps and data collection.

    Systematically executes model runs across parameter combinations,
    collecting data and supporting parallel execution for efficient
    exploration of model behavior.

    Parameters:
        model_cls: The Mesa model class to run
        parameters: Dictionary mapping parameter names to values or ranges.
            Single values are held constant; iterables define parameter sweeps.
        number_processes: Number of parallel processes to use
            (None uses all available cores)
        iterations: Number of independent runs per parameter combination
        data_collection_period: How often to collect data during runs:
            -1 = only at the end of each run
             1 = every step
             n = every n steps
        max_steps: Maximum number of simulation steps per run
        display_progress: Whether to show a progress bar during execution

    Returns:
        List of dictionaries containing collected data from all runs.
        Each dictionary represents one data collection point with:
        - Run metadata (RunId, iteration, Step)
        - Parameter values for that run
        - Collected model and agent data
    """
    ...
```

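Each element of the returned list is one flat record combining run metadata, the parameter values for that run, and reporter values. A minimal sketch of what a single record might look like, with illustrative values for a hypothetical model that sweeps `n_agents` and reports `Population`:

```python { .api }
# One record per data collection point (illustrative values for a
# hypothetical model; actual keys depend on your parameters and reporters)
record = {
    "RunId": 0,         # Unique id for this run
    "iteration": 0,     # Which repeat of this parameter combination
    "Step": 200,        # Step at which data was collected
    "n_agents": 50,     # Swept parameter value for this run
    "Population": 483,  # Model reporter value
}
```
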
## Parameter Specification

Parameters can be specified as single values (constants) or iterables (ranges) to define parameter sweeps.

```python { .api }
import random

# Parameter specification examples
parameters = {
    # Single values (constants across all runs)
    "width": 20,
    "height": 20,
    "torus": True,

    # Ranges for parameter sweeps
    "n_agents": range(10, 101, 10),               # 10, 20, 30, ..., 100
    "infection_rate": [0.1, 0.2, 0.3, 0.4, 0.5],  # Explicit values
    "recovery_time": np.arange(5, 20, 2.5),       # NumPy array: 5.0, 7.5, 10.0, ...

    # Boolean parameters
    "vaccination": [True, False],

    # Complex parameter types (a bare string counts as one constant value,
    # not a sweep over its characters)
    "network_type": ["small_world", "scale_free", "random"],

    # A callable is passed through as a single constant value;
    # the model must call it itself to draw a random number
    "initial_infected": lambda: random.randint(1, 5),
}

# Parameter combinations
# Sweeps: 10 n_agents × 5 infection_rates × 6 recovery_times
#         × 2 vaccination × 3 network_types
# = 1,800 parameter combinations
# With iterations=5, total runs = 9,000
```

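Before launching a large sweep, it can help to sanity-check its size. The sketch below counts combinations the same way `batch_run` expands them (strings and non-iterables are constants, everything else is a sweep dimension); `count_combinations` is a hypothetical helper, not part of Mesa:

```python { .api }
def count_combinations(parameters):
    """Count sweep combinations, mirroring how batch_run treats
    strings and non-iterable values as single constants."""
    total = 1
    for value in parameters.values():
        if isinstance(value, str):
            continue  # A bare string is one constant value
        try:
            total *= len(list(value))  # Sweep dimension
        except TypeError:
            continue  # Non-iterable: constant
    return total

print(count_combinations(parameters))  # 1800 for the sweep above
```
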
## Usage Examples

### Basic Parameter Sweep

```python { .api }
from mesa import Agent, Model, DataCollector, batch_run

class SimpleAgent(Agent):
    def __init__(self, model, cooperation_rate=0.5):
        super().__init__(model)
        self.cooperation_rate = cooperation_rate
        self.cooperated_this_step = False

    def step(self):
        # Decide whether to cooperate
        self.cooperated_this_step = self.random.random() < self.cooperation_rate

        # Interact with neighbors when placed in a spatial model
        if hasattr(self.model, 'space') and getattr(self, 'pos', None) is not None:
            neighbors = self.model.space.get_neighbors(self.pos, radius=1)
            for neighbor in neighbors:
                if neighbor.cooperated_this_step and self.cooperated_this_step:
                    # Mutual cooperation bonus
                    self.cooperation_rate = min(1.0, self.cooperation_rate + 0.01)

class CooperationModel(Model):
    def __init__(self, n_agents=100, initial_cooperation=0.5, network_density=0.1):
        super().__init__()

        self.n_agents = n_agents
        self.network_density = network_density

        # Data collection
        self.datacollector = DataCollector(
            model_reporters={
                "Cooperation Rate": lambda m: len(
                    [a for a in m.agents if a.cooperated_this_step]
                ) / len(m.agents),
                "Average Cooperation Tendency": lambda m: sum(
                    a.cooperation_rate for a in m.agents
                ) / len(m.agents),
                "Highly Cooperative Agents": lambda m: len(
                    [a for a in m.agents if a.cooperation_rate > 0.8]
                ),
            },
            agent_reporters={
                # Distinct names avoid clashing with the model reporters
                # when batch_run merges model and agent data into one record
                "Agent Cooperation Rate": "cooperation_rate",
                "Cooperated": "cooperated_this_step",
            },
        )

        # Create agents (agents register themselves with the model)
        for _ in range(n_agents):
            SimpleAgent(self, cooperation_rate=initial_cooperation)

        self.running = True

    def step(self):
        self.datacollector.collect(self)
        self.agents.shuffle_do("step")

# Define parameter sweep
parameters = {
    "n_agents": [50, 100, 200],
    "initial_cooperation": [0.1, 0.3, 0.5, 0.7, 0.9],
    "network_density": [0.05, 0.1, 0.2, 0.3],
}

# Run batch experiment
results = batch_run(
    CooperationModel,
    parameters=parameters,
    iterations=10,              # 10 runs per parameter combination
    max_steps=200,
    number_processes=4,         # Use 4 CPU cores
    data_collection_period=10,  # Collect data every 10 steps
    display_progress=True,
)

# Convert to DataFrame for analysis
df = pd.DataFrame(results)
print(f"Total runs: {len(df)}")
print(f"Parameter combinations: {len(df.groupby(['n_agents', 'initial_cooperation', 'network_density']))}")
print(f"Columns: {list(df.columns)}")
```

### Advanced Parameter Sweep with Custom Data Collection

```python { .api }
from mesa import Agent, Model, DataCollector, batch_run
from mesa.space import MultiGrid
import numpy as np

class EconomicAgent(Agent):
    def __init__(self, model, wealth=100, risk_tolerance=0.5):
        super().__init__(model)
        self.wealth = wealth
        self.initial_wealth = wealth
        self.risk_tolerance = risk_tolerance
        self.investments = []
        self.transactions_count = 0

    def step(self):
        # Investment decision
        if self.wealth > 50 and self.random.random() < self.risk_tolerance:
            investment = min(self.wealth * 0.1, 20)  # Invest up to 10% of wealth
            self.wealth -= investment
            self.investments.append(investment)

        # Trade with neighbors
        if getattr(self, 'pos', None) is not None:
            neighbors = [agent for agent in self.model.grid.get_neighbors(self.pos, moore=True)
                         if isinstance(agent, EconomicAgent)]

            if neighbors and self.wealth > 0:
                partner = self.random.choice(neighbors)
                if partner.wealth > 0:
                    # Simple trade
                    trade_amount = min(self.wealth, partner.wealth) * 0.05
                    if self.random.random() < 0.5:
                        self.wealth += trade_amount * self.random.uniform(0.9, 1.1)
                        partner.wealth -= trade_amount
                    else:
                        self.wealth -= trade_amount
                        partner.wealth += trade_amount * self.random.uniform(0.9, 1.1)

                    self.transactions_count += 1
                    partner.transactions_count += 1

        # Investment returns (risky)
        if self.investments:
            for i, investment in enumerate(self.investments):
                if self.random.random() < 0.1:  # 10% chance per investment per step
                    return_rate = self.random.lognormvariate(0, 0.5)  # Log-normal returns
                    self.wealth += investment * return_rate
                    self.investments.pop(i)
                    break

class EconomicModel(Model):
    def __init__(self, n_agents=100, width=20, height=20,
                 mean_initial_wealth=100, wealth_std=20,
                 market_volatility=0.1, taxation_rate=0.0):
        super().__init__()

        self.n_agents = n_agents
        self.market_volatility = market_volatility
        self.taxation_rate = taxation_rate
        self.total_taxes_collected = 0

        # Create spatial environment
        self.grid = MultiGrid(width, height, torus=True)

        # Advanced data collection
        self.datacollector = DataCollector(
            model_reporters={
                "Total Wealth": lambda m: sum(a.wealth for a in m.agents),
                "Mean Wealth": lambda m: np.mean([a.wealth for a in m.agents]),
                "Wealth Std": lambda m: np.std([a.wealth for a in m.agents]),
                "Gini Coefficient": lambda m: m.calculate_gini(),
                "Wealth Inequality Ratio": lambda m: (
                    np.percentile([a.wealth for a in m.agents], 90) /
                    max(1, np.percentile([a.wealth for a in m.agents], 10))
                ),
                "Active Investors": lambda m: len([a for a in m.agents if len(a.investments) > 0]),
                "Total Transactions": lambda m: sum(a.transactions_count for a in m.agents),
                "Market Activity": lambda m: sum(a.transactions_count for a in m.agents) / len(m.agents),
                "Taxes Collected": "total_taxes_collected",
            },
            agent_reporters={
                "Wealth": "wealth",
                "Initial Wealth": "initial_wealth",
                "Risk Tolerance": "risk_tolerance",
                "Active Investments": lambda a: len(a.investments),
                "Transaction Count": "transactions_count",
                "Wealth Change": lambda a: a.wealth - a.initial_wealth,
                "ROI": lambda a: (a.wealth - a.initial_wealth) / a.initial_wealth if a.initial_wealth > 0 else 0,
            },
        )

        # Create agents with varied initial conditions
        for _ in range(n_agents):
            # Wealth follows a log-normal distribution
            wealth = max(10, self.random.lognormvariate(
                np.log(mean_initial_wealth), wealth_std / mean_initial_wealth
            ))

            # Risk tolerance varies
            risk_tolerance = self.random.betavariate(2, 2)  # Beta distribution

            agent = EconomicAgent(self, wealth=wealth, risk_tolerance=risk_tolerance)

            # Place randomly on grid
            x = self.random.randrange(width)
            y = self.random.randrange(height)
            self.grid.place_agent(agent, (x, y))

        self.running = True

    def calculate_gini(self):
        """Calculate the Gini coefficient of the wealth distribution."""
        wealth_values = sorted(a.wealth for a in self.agents)
        n = len(wealth_values)

        if n == 0 or sum(wealth_values) == 0:
            return 0

        cumsum = sum((i + 1) * wealth for i, wealth in enumerate(wealth_values))
        return (2 * cumsum) / (n * sum(wealth_values)) - (n + 1) / n

    def step(self):
        self.datacollector.collect(self)

        # Market shock (random events)
        if self.random.random() < self.market_volatility:
            shock_magnitude = self.random.normalvariate(0, 0.1)
            for agent in self.agents:
                agent.wealth *= (1 + shock_magnitude)
                agent.wealth = max(0, agent.wealth)  # Prevent negative wealth

        # Taxation
        if self.taxation_rate > 0:
            for agent in self.agents:
                if agent.wealth > 100:  # Tax threshold
                    tax = agent.wealth * self.taxation_rate
                    agent.wealth -= tax
                    self.total_taxes_collected += tax

        # Agent actions
        self.agents.shuffle_do("step")

# Complex parameter sweep
parameters = {
    # Population parameters
    "n_agents": [50, 100, 200],
    "mean_initial_wealth": [100, 200, 500],
    "wealth_std": [20, 50, 100],

    # Policy parameters
    "taxation_rate": [0.0, 0.01, 0.02, 0.05],
    "market_volatility": [0.05, 0.1, 0.2],

    # Spatial parameters
    "width": 20,   # Fixed
    "height": 20,  # Fixed
}

# Run comprehensive batch experiment
results = batch_run(
    EconomicModel,
    parameters=parameters,
    iterations=15,              # 15 runs per parameter combination
    max_steps=500,
    number_processes=None,      # Use all available cores
    data_collection_period=25,  # Collect data every 25 steps
    display_progress=True,
)

# Analysis
df = pd.DataFrame(results)

print("Experiment completed:")
print(f"- Total data points: {len(df):,}")
print(f"- Parameter combinations: {len(df.groupby(['n_agents', 'mean_initial_wealth', 'wealth_std', 'taxation_rate', 'market_volatility'])):,}")
print(f"- Average final Gini coefficient: {df[df['Step'] == df['Step'].max()]['Gini Coefficient'].mean():.3f}")

# Group by policy parameters for analysis
policy_effects = df[df['Step'] == df['Step'].max()].groupby(['taxation_rate', 'market_volatility']).agg({
    'Gini Coefficient': ['mean', 'std'],
    'Total Wealth': ['mean', 'std'],
    'Market Activity': ['mean', 'std'],
}).round(3)

print("\nPolicy effects on final outcomes:")
print(policy_effects)
```

### Time Series Data Collection

```python { .api }
from mesa import Agent, Model, DataCollector, batch_run

class PopulationAgent(Agent):
    def __init__(self, model, age=0, max_age=100):
        super().__init__(model)
        self.age = age
        self.max_age = max_age
        self.reproduced_this_step = False

    def step(self):
        self.age += 1
        self.reproduced_this_step = False

        # Death by old age
        if self.age >= self.max_age:
            self.remove()
            return

        # Death by random events
        death_probability = 0.001 + (self.age / self.max_age) * 0.02
        if self.random.random() < death_probability:
            self.remove()
            return

        # Reproduction
        if 18 <= self.age <= 50:  # Reproductive age range
            reproduction_rate = self.model.base_reproduction_rate * (
                1 - len(self.model.agents) / self.model.carrying_capacity
            )  # Logistic growth

            if self.random.random() < reproduction_rate:
                # Create offspring (registers itself with the model)
                PopulationAgent(self.model, age=0, max_age=self.max_age)
                self.reproduced_this_step = True

class PopulationModel(Model):
    def __init__(self, initial_population=100, carrying_capacity=1000,
                 base_reproduction_rate=0.05, max_lifespan=100):
        super().__init__()

        self.carrying_capacity = carrying_capacity
        self.base_reproduction_rate = base_reproduction_rate

        # Detailed time series data collection
        self.datacollector = DataCollector(
            model_reporters={
                "Population": lambda m: len(m.agents),
                "Births": lambda m: len([a for a in m.agents if a.reproduced_this_step]),
                "Deaths": lambda m: m.deaths_this_step,
                "Average Age": lambda m: sum(a.age for a in m.agents) / len(m.agents) if m.agents else 0,
                "Age Distribution 0-18": lambda m: len([a for a in m.agents if 0 <= a.age < 18]),
                "Age Distribution 18-65": lambda m: len([a for a in m.agents if 18 <= a.age < 65]),
                "Age Distribution 65+": lambda m: len([a for a in m.agents if a.age >= 65]),
                "Growth Rate": lambda m: (len(m.agents) - m.previous_population) / max(1, m.previous_population),
                "Carrying Capacity Utilization": lambda m: len(m.agents) / m.carrying_capacity,
            }
        )

        # Create initial population with age distribution
        for _ in range(initial_population):
            age = self.random.randint(0, max_lifespan // 2)  # Younger initial population
            PopulationAgent(self, age=age, max_age=max_lifespan)

        self.previous_population = initial_population
        self.deaths_this_step = 0
        self.running = True

    def step(self):
        self.datacollector.collect(self)

        initial_count = len(self.agents)
        self.agents.shuffle_do("step")
        final_count = len(self.agents)

        # deaths = initial - final + births
        births = len([a for a in self.agents if a.reproduced_this_step])
        self.deaths_this_step = initial_count - final_count + births
        self.previous_population = initial_count

        # Stop if the population goes extinct or the run gets very long
        if len(self.agents) == 0 or self.steps > 1000:
            self.running = False

# Time series parameter sweep
parameters = {
    "initial_population": [50, 100, 200],
    "carrying_capacity": [500, 1000, 2000],
    "base_reproduction_rate": [0.02, 0.05, 0.08, 0.1],
    "max_lifespan": [80, 100, 120],
}

# Run with frequent data collection for time series
results = batch_run(
    PopulationModel,
    parameters=parameters,
    iterations=20,
    max_steps=500,
    number_processes=6,
    data_collection_period=1,  # Collect data every step for time series
    display_progress=True,
)

# Time series analysis
df = pd.DataFrame(results)

# Calculate population dynamics metrics
def analyze_population_dynamics(group):
    """Analyze population dynamics for a parameter combination."""
    group = group.sort_values('Step')  # Ensure chronological order
    population = group['Population'].values
    steps = group['Step'].values

    # Find equilibrium (if reached)
    if len(population) > 100:
        recent_pop = population[-50:]  # Last 50 steps
        equilibrium = recent_pop.mean()
        equilibrium_stability = recent_pop.std() / equilibrium if equilibrium > 0 else float('inf')
    else:
        equilibrium = population[-1] if len(population) > 0 else 0
        equilibrium_stability = float('inf')

    # Growth phase analysis
    max_pop = population.max()
    max_pop_step = steps[population.argmax()]

    # Extinction check
    went_extinct = population[-1] == 0 if len(population) > 0 else True

    return pd.Series({
        'max_population': max_pop,
        'max_population_step': max_pop_step,
        'equilibrium_population': equilibrium,
        'equilibrium_stability': equilibrium_stability,
        'went_extinct': went_extinct,
        'final_population': population[-1] if len(population) > 0 else 0,
    })

# Group by parameter combinations and analyze
param_cols = ['initial_population', 'carrying_capacity', 'base_reproduction_rate', 'max_lifespan', 'iteration']
dynamics_analysis = df.groupby(param_cols).apply(analyze_population_dynamics).reset_index()

# Summary statistics
print("Population Dynamics Summary:")
print(dynamics_analysis.groupby(['carrying_capacity', 'base_reproduction_rate']).agg({
    'max_population': 'mean',
    'equilibrium_population': 'mean',
    'went_extinct': 'sum',
    'equilibrium_stability': 'mean',
}).round(2))

# Find optimal parameters (high equilibrium, low extinction rate)
optimal_params = dynamics_analysis.groupby(['carrying_capacity', 'base_reproduction_rate']).agg({
    'equilibrium_population': 'mean',
    'went_extinct': 'mean',
    'equilibrium_stability': 'mean',
}).reset_index()

print("\nOptimal parameter combinations (low extinction, high stability):")
stable_params = optimal_params[
    (optimal_params['went_extinct'] < 0.1) &
    (optimal_params['equilibrium_stability'] < 50)
].sort_values('equilibrium_population', ascending=False)
print(stable_params.head())
```

## Performance Optimization

### Parallel Processing

```python { .api }
import multiprocessing

# Use all available cores
results = batch_run(
    MyModel,
    parameters=params,
    number_processes=None,  # None uses all available cores
)

# Use a specific number of cores (recommended on shared systems)
results = batch_run(
    MyModel,
    parameters=params,
    number_processes=multiprocessing.cpu_count() - 1,  # Leave one core free
)

# For large parameter sweeps, watch memory usage
results = batch_run(
    MyModel,
    parameters=params,
    number_processes=4,         # Conservative for memory-intensive models
    data_collection_period=50,  # Reduce data collection frequency
)
```

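When `number_processes` is not 1, `batch_run` uses Python's `multiprocessing`. On platforms that spawn worker processes (Windows, and macOS by default), the call must therefore live under an `if __name__ == "__main__":` guard; a minimal sketch:

```python { .api }
# Without this guard, spawned worker processes re-execute the module's
# top-level code and multiprocessing raises a RuntimeError
if __name__ == "__main__":
    results = batch_run(
        MyModel,
        parameters=params,
        number_processes=None,
    )
```
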
### Memory Management

```python { .api }
from itertools import product

# For very large experiments, process in batches
def large_batch_experiment(model_cls, all_parameters, batch_size=100):
    """Run large experiments in batches to manage memory."""
    all_results = []

    # Expand the parameter grid into single-value combinations,
    # treating strings and non-iterables as constants
    names = list(all_parameters)
    value_lists = []
    for value in all_parameters.values():
        if isinstance(value, str):
            value_lists.append([value])
        else:
            try:
                value_lists.append(list(value))
            except TypeError:
                value_lists.append([value])
    param_combinations = [dict(zip(names, combo)) for combo in product(*value_lists)]

    n_batches = (len(param_combinations) + batch_size - 1) // batch_size
    for i in range(0, len(param_combinations), batch_size):
        batch = param_combinations[i:i + batch_size]

        print(f"Running batch {i // batch_size + 1}/{n_batches}")

        batch_results = []
        for combo in batch:
            # Each combination is a dict of single values, so batch_run
            # treats every parameter as a constant for these runs
            batch_results.extend(batch_run(
                model_cls,
                parameters=combo,
                iterations=10,
                max_steps=200,
                number_processes=4,
            ))

        all_results.extend(batch_results)

        # Save intermediate results
        pd.DataFrame(batch_results).to_parquet(f"batch_{i // batch_size}.parquet")

    return all_results

# Efficient data collection for long runs: these are batch_run arguments,
# not model parameters
results = batch_run(
    MyModel,
    parameters=params,
    data_collection_period=20,  # Collect every 20 steps instead of every step
    max_steps=1000,
)
```

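The intermediate Parquet files written above can later be recombined without rerunning the experiment; a small sketch (assumes `pyarrow` or another Parquet engine is installed):

```python { .api }
import glob

# Load and concatenate the per-batch result files written above
batch_files = sorted(glob.glob("batch_*.parquet"))
df = pd.concat((pd.read_parquet(path) for path in batch_files), ignore_index=True)
print(f"Combined {len(batch_files)} batches into {len(df):,} rows")
```
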
## Data Analysis Patterns

### Parameter Sensitivity Analysis

```python { .api }
# Convert results to DataFrame
df = pd.DataFrame(results)

# Parameter sensitivity analysis
def parameter_sensitivity_analysis(df, output_var='Total Wealth', step_filter=None):
    """Analyze how sensitive an output is to parameter changes."""

    if step_filter:
        analysis_df = df[df['Step'] == step_filter].copy()
    else:
        analysis_df = df[df['Step'] == df['Step'].max()].copy()  # Final step only

    # Get parameter columns by excluding metadata and data columns; in
    # practice it is more robust to list your parameter names explicitly
    param_cols = [col for col in analysis_df.columns
                  if col not in ['RunId', 'Step', 'iteration', output_var] and
                  not col.startswith('Agent')]

    sensitivity_results = {}

    for param in param_cols:
        # Correlation between parameter and output (numeric parameters only)
        correlation = analysis_df[param].corr(analysis_df[output_var])

        # Variance explained: between-group vs within-group variance
        param_groups = analysis_df.groupby(param)[output_var].agg(['mean', 'std', 'count'])
        between_var = param_groups['mean'].var()
        within_var = (param_groups['std'] ** 2 * param_groups['count']).sum() / param_groups['count'].sum()
        variance_explained = between_var / (between_var + within_var)

        sensitivity_results[param] = {
            'correlation': correlation,
            'variance_explained': variance_explained,
            'range_effect': param_groups['mean'].max() - param_groups['mean'].min(),
        }

    return pd.DataFrame(sensitivity_results).T

# Run sensitivity analysis
sensitivity = parameter_sensitivity_analysis(df, 'Gini Coefficient')
print("Parameter sensitivity for Gini Coefficient:")
print(sensitivity.sort_values('variance_explained', ascending=False))
```

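Sensitivity tables are often easier to interpret alongside a quick plot. A minimal sketch using matplotlib (assumed available), reusing the `taxation_rate` parameter and `Gini Coefficient` reporter from the economic example above:

```python { .api }
import matplotlib.pyplot as plt

# Mean final Gini coefficient at each taxation rate
final = df[df['Step'] == df['Step'].max()]
effect = final.groupby('taxation_rate')['Gini Coefficient'].mean()

effect.plot(marker='o')
plt.xlabel('Taxation rate')
plt.ylabel('Mean final Gini coefficient')
plt.title('Parameter effect on inequality')
plt.show()
```
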
### Statistical Analysis

```python { .api }
import scipy.stats as stats

# Statistical testing of parameter effects
def compare_parameter_effects(df, param_name, output_var, param_values=None):
    """Compare output distributions across parameter values."""

    final_data = df[df['Step'] == df['Step'].max()].copy()

    if param_values is None:
        param_values = sorted(final_data[param_name].unique())

    # Extract data for each parameter value
    groups = [final_data[final_data[param_name] == val][output_var].values
              for val in param_values]

    # ANOVA test
    f_stat, p_value = stats.f_oneway(*groups)

    # Pairwise t-tests
    pairwise_results = {}
    for i, val1 in enumerate(param_values):
        for j, val2 in enumerate(param_values[i+1:], i+1):
            t_stat, p_val = stats.ttest_ind(groups[i], groups[j])
            pairwise_results[f"{val1}_vs_{val2}"] = {'t_stat': t_stat, 'p_value': p_val}

    return {
        'anova': {'f_stat': f_stat, 'p_value': p_value},
        'pairwise': pairwise_results,
        'group_stats': {val: {'mean': groups[i].mean(), 'std': groups[i].std()}
                        for i, val in enumerate(param_values)},
    }

# Statistical comparison
tax_effects = compare_parameter_effects(df, 'taxation_rate', 'Gini Coefficient')
print("Statistical analysis of taxation effects:")
print(f"ANOVA p-value: {tax_effects['anova']['p_value']:.4f}")
for comparison, result in tax_effects['pairwise'].items():
    print(f"{comparison}: p={result['p_value']:.4f}")
```

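Running many pairwise t-tests inflates the false positive rate, so some small p-values are expected by chance alone. A simple, conservative follow-up is a Bonferroni correction, sketched here against the `tax_effects` result above:

```python { .api }
# Bonferroni correction: multiply each p-value by the number of tests,
# capped at 1.0
n_tests = len(tax_effects['pairwise'])
print(f"Bonferroni-adjusted p-values ({n_tests} pairwise tests):")
for comparison, result in tax_effects['pairwise'].items():
    adjusted_p = min(1.0, result['p_value'] * n_tests)
    verdict = "significant" if adjusted_p < 0.05 else "not significant"
    print(f"{comparison}: adjusted p={adjusted_p:.4f} ({verdict})")
```
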
## Best Practices

### Model Design for Batch Running

```python { .api }
from mesa import Model, DataCollector, batch_run

class BatchFriendlyModel(Model):
    """Example model designed for efficient batch running."""

    def __init__(self, **kwargs):
        super().__init__()

        # Store all parameters as model attributes
        for key, value in kwargs.items():
            setattr(self, key, value)

        # Data collection setup
        self.datacollector = DataCollector(
            model_reporters={
                # Keep reporters lightweight; they run at every collection point
                "Population": lambda m: len(m.agents),
                "Parameter_Summary": lambda m: {
                    param: getattr(m, param) for param in
                    ['param1', 'param2', 'param3'] if hasattr(m, param)
                },
            }
        )

        # Model initialization based on parameters
        self._initialize_model()

    def _initialize_model(self):
        """Initialize model components based on parameters."""
        # Create agents, space, etc. based on stored parameters
        pass

    def step(self):
        """Efficient step implementation."""
        # Collect data first (for consistency)
        self.datacollector.collect(self)

        # Agent actions
        self.agents.shuffle_do("step")

        # Termination conditions
        if len(self.agents) == 0 or self.steps >= getattr(self, 'max_internal_steps', 1000):
            self.running = False

# Use with batch_run
results = batch_run(
    BatchFriendlyModel,
    parameters={
        'param1': [1, 2, 3],
        'param2': [0.1, 0.2],
        'max_internal_steps': 200,
    },
    iterations=50,
    max_steps=200,
    data_collection_period=-1,  # Only collect at the end for efficiency
)
```