SMAC3 is a versatile Bayesian Optimization package for hyperparameter optimization and algorithm configuration.
---
Data structures for representing optimization trials, their configurations, results, and execution context. These classes provide structured interfaces for the ask-and-tell optimization pattern.
Container for configuration and context information that defines what should be evaluated.
class TrialInfo:
    """Configuration and context information defining what should be evaluated.

    Attributes
    ----------
    config : Configuration
        Configuration to evaluate.
    instance : str | None
        Problem instance identifier.
    seed : int | None
        Random seed for the evaluation.
    budget : float | None
        Multi-fidelity budget allocation.
    """

    config: Configuration  # Configuration to evaluate
    instance: str | None  # Problem instance
    seed: int | None  # Random seed
    budget: float | None  # Multi-fidelity budget

    def __init__(
        self,
        config: Configuration,
        instance: str | None = None,
        seed: int | None = None,
        budget: float | None = None,
    ):
        """Configuration and context information for a trial.

        Parameters
        ----------
        config : Configuration
            Configuration to evaluate (required).
        instance : str | None
            Problem instance identifier.
        seed : int | None
            Random seed for evaluation.
        budget : float | None
            Multi-fidelity budget allocation.
        """

    def get_instance_seed_key(self) -> InstanceSeedKey:
        """Get instance-seed key for this trial."""

    def get_instance_seed_budget_key(self) -> InstanceSeedBudgetKey:
        """Get instance-seed-budget key for this trial."""

# Container for results and metadata from trial execution.
class TrialValue:
    """Results and metadata from trial execution.

    Attributes
    ----------
    cost : float | list[float]
        Objective value(s): a single float for single-objective problems,
        a list for multi-objective problems.
    time : float
        Wall-clock time in seconds.
    cpu_time : float
        CPU time in seconds.
    status : StatusType
        Trial execution status (StatusType enum).
    starttime : float
        Start timestamp (Unix time).
    endtime : float
        End timestamp (Unix time).
    additional_info : dict[str, Any]
        Extra trial information dictionary.
    """

    cost: float | list[float]  # Objective value(s)
    time: float  # Wall-clock time (seconds)
    cpu_time: float  # CPU time (seconds)
    status: StatusType  # Execution status
    starttime: float  # Start timestamp
    endtime: float  # End timestamp
    additional_info: dict[str, Any]  # Extra information

    def __init__(
        self,
        cost: float | list[float],
        time: float,
        cpu_time: float,
        status: StatusType,
        starttime: float,
        endtime: float,
        additional_info: dict[str, Any] | None = None,
    ):
        """Create a trial result record; see the class docstring for field meanings."""

# Unique identifier for trials combining configuration, instance, seed, and budget.
class TrialKey:
    """Unique identifier for optimization trials.

    Attributes
    ----------
    config_id : int
        Configuration ID from RunHistory.
    instance : str | None
        Problem instance identifier.
    seed : int | None
        Random seed.
    budget : float | None
        Multi-fidelity budget.
    """

    config_id: int  # Configuration ID
    instance: str | None  # Problem instance
    seed: int | None  # Random seed
    budget: float | None  # Multi-fidelity budget

    def __init__(
        self,
        config_id: int,
        instance: str | None = None,
        seed: int | None = None,
        budget: float | None = None,
    ):
        """Create a trial key; see the class docstring for field meanings."""

# Utility keys for organizing trials by instance and seed combinations.
class InstanceSeedKey:
    """Key for instance-seed combinations."""

    instance: str | None  # Problem instance
    seed: int | None  # Random seed

    def __init__(self, instance: str | None, seed: int | None):
        """Create a key identifying an instance-seed combination."""
class InstanceSeedBudgetKey:
    """Key for instance-seed-budget combinations."""

    instance: str | None  # Problem instance
    seed: int | None  # Random seed
    budget: float | None  # Multi-fidelity budget

    def __init__(self, instance: str | None, seed: int | None, budget: float | None):
        """Create a key identifying an instance-seed-budget combination."""

# Container for tracking optimization progress over time.
class TrajectoryItem:
    """Optimization progress tracking.

    Attributes
    ----------
    config_ids : list[int]
        Current incumbent configuration IDs.
    costs : list[float | list[float]]
        Incumbent costs (parallel to ``config_ids``).
    trial : int
        Number of trials executed so far.
    walltime : float
        Elapsed wall-clock time in seconds.
    """

    config_ids: list[int]  # Incumbent configuration IDs
    costs: list[float | list[float]]  # Incumbent costs
    trial: int  # Number of trials
    walltime: float  # Elapsed time

    def __init__(
        self,
        config_ids: list[int],
        costs: list[float | list[float]],
        trial: int,
        walltime: float,
    ):
        """Create a trajectory entry; see the class docstring for field meanings."""

from smac import HyperparameterOptimizationFacade, Scenario
from smac.runhistory import TrialInfo, TrialValue, StatusType
import time

# Set up optimization
facade = HyperparameterOptimizationFacade(scenario, objective)

# Manual ask-and-tell loop: ask for a config, evaluate it ourselves, report back.
for i in range(10):
    # Ask the facade for the next configuration to evaluate.
    trial_info = facade.ask()
    print(f"Trial {i+1}:")
    print(f" Config: {trial_info.config}")
    print(f" Instance: {trial_info.instance}")
    print(f" Seed: {trial_info.seed}")
    print(f" Budget: {trial_info.budget}")

    # Evaluate the configuration, recording wall-clock time and outcome.
    start_time = time.time()
    try:
        cost = objective(trial_info.config, seed=trial_info.seed)
        status = StatusType.SUCCESS
    except Exception as e:
        # A crashed trial is reported with infinite cost.
        cost = float('inf')
        status = StatusType.CRASHED
        print(f" Trial failed: {e}")
    end_time = time.time()
    elapsed = end_time - start_time

    # Package the result for SMAC.
    trial_value = TrialValue(
        cost=cost,
        time=elapsed,
        cpu_time=elapsed,  # Simplified: reuse wall-clock time as CPU time
        status=status,
        starttime=start_time,
        endtime=end_time,
        additional_info={"iteration": i + 1},
    )

    # Report results back to the optimizer.
    facade.tell(trial_info, trial_value)
    print(f" Cost: {cost}")
    print(f" Time: {trial_value.time:.3f}s")
    print(f" Status: {status}")

# Get best configuration found so far.
best_config = facade.runhistory.get_configs(sort_by="cost")[0]
best_cost = facade.runhistory.get_cost(best_config)
print(f"Best configuration: {best_config}")
print(f"Best cost: {best_cost}")

from smac import MultiFidelityFacade, Scenario
def multi_fidelity_objective(config, seed=0, budget=1.0):
    """Toy objective that simulates different fidelities.

    The base cost is the squared norm of (x, y); a deterministic,
    seed-dependent perturbation is added whose magnitude shrinks as the
    budget grows (less noise with higher budget).
    """
    base = config["x"] ** 2 + config["y"] ** 2
    scale = 1.0 / budget  # less noise with higher budget
    return base + scale * (seed % 10 - 5) / 10
scenario = Scenario(
    configspace=config_space,
    n_trials=30,
    min_budget=0.1,
    max_budget=1.0,
)
facade = MultiFidelityFacade(scenario, multi_fidelity_objective)

# Run optimization with automatic budget allocation.
for i in range(10):
    trial_info = facade.ask()
    # Note the budget allocation chosen by the intensifier.
    print(f"Trial {i+1}: Budget = {trial_info.budget}")

    # The objective function receives the budget parameter.
    cost = multi_fidelity_objective(
        trial_info.config,
        seed=trial_info.seed,
        budget=trial_info.budget,
    )

    # Use a single clock reading so that endtime - starttime matches
    # the reported time of 0.1s (previously two time.time() calls made
    # the timestamps inconsistent with the reported duration).
    start = time.time()
    trial_value = TrialValue(
        cost=cost,
        time=0.1,  # Simplified
        cpu_time=0.1,
        status=StatusType.SUCCESS,
        starttime=start,
        endtime=start + 0.1,
    )
    facade.tell(trial_info, trial_value)

# Access trial history after optimization
runhistory = facade.runhistory
print(f"Total trials: {runhistory.finished}")
print(f"Running trials: {runhistory.running}")

# All configurations, best first.
configs = runhistory.get_configs(sort_by="cost")
print(f"Best 3 configurations:")
for i, config in enumerate(configs[:3]):
    cost = runhistory.get_cost(config)
    trials = runhistory.get_trials(config)
    print(f" {i+1}. Cost: {cost:.4f}, Trials: {len(trials)}")
    print(f" Config: {config}")

# Drill into the best configuration's aggregate statistics.
best_config = configs[0]
avg_cost = runhistory.average_cost(best_config)
min_cost = runhistory.min_cost(best_config)
print(f"Best config - Average: {avg_cost:.4f}, Minimum: {min_cost:.4f}")

# Install with Tessl CLI
npx tessl i tessl/pypi-smac