CtrlK
Blog · Docs · Log in · Get started
Tessl Logo

tessl/pypi-smac

SMAC3 is a versatile Bayesian Optimization package for hyperparameter optimization and algorithm configuration.

Pending
Overview
Eval results
Files

docs/core.md

Core Framework

Essential classes for configuring optimization environments, managing trial history, and handling optimization events through callbacks.

Capabilities

Environment Configuration

The Scenario class defines all optimization settings, constraints, and environment configuration.

class Scenario:
    def __init__(
        self,
        configspace: ConfigurationSpace,
        name: str | None = None,
        output_directory: Path = Path("smac3_output"),
        deterministic: bool = False,
        objectives: str | list[str] | None = "cost",
        crash_cost: float | list[float] = np.inf,
        termination_cost_threshold: float | list[float] = np.inf,
        walltime_limit: float = np.inf,
        cputime_limit: float = np.inf,
        trial_walltime_limit: float | None = None,
        trial_memory_limit: int | None = None,
        n_trials: int = 100,
        use_default_config: bool = False,
        instances: list[str] | None = None,
        instance_features: dict[str, list[float]] | None = None,
        min_budget: float | int | None = None,
        max_budget: float | int | None = None,
        seed: int = 0,
        n_workers: int = 1
    ):
        """
        Environment configuration for an optimization run.

        Defines all optimization settings, resource constraints, and
        environment configuration consumed by the facades.

        Parameters:
        - configspace: Search space definition (required)
        - name: Run identifier used for output organization; None lets the
          library choose one
        - output_directory: Directory for logs and results
          (default: "smac3_output")
        - deterministic: Whether to use a single seed per evaluation instead
          of several
        - objectives: Objective name(s); pass a list for multi-objective
          optimization (default: "cost")
        - crash_cost: Cost assigned to failed trials; a list gives one value
          per objective (default: np.inf)
        - termination_cost_threshold: Early-stopping cost threshold
          (default: np.inf, i.e. never triggered)
        - walltime_limit: Maximum wall-clock time for the whole run, in
          seconds (default: unlimited)
        - cputime_limit: Maximum CPU time for the whole run, in seconds
          (default: unlimited)
        - trial_walltime_limit: Per-trial wall-clock limit in seconds;
          None means unlimited
        - trial_memory_limit: Per-trial memory limit in MB; None means
          unlimited
        - n_trials: Maximum number of trials (default: 100)
        - use_default_config: Include the default configuration in the
          initial design
        - instances: Problem instances for algorithm configuration
        - instance_features: Feature vector per instance, keyed by the
          instance name
        - min_budget: Minimum budget for multi-fidelity optimization
        - max_budget: Maximum budget for multi-fidelity optimization
        - seed: Random seed for reproducibility (default: 0)
        - n_workers: Number of parallel workers (default: 1)
        """
    
    def count_objectives(self) -> int:
        """Return the number of configured objectives."""
    
    def count_instance_features(self) -> int:
        """Return the number of instance features per instance."""
    
    def save(self) -> None:
        """Save this scenario to file."""
    
    @staticmethod
    def load(path: Path) -> Scenario:
        """Load a previously saved scenario from *path*."""
    
    @staticmethod
    def make_serializable(scenario: Scenario) -> dict[str, Any]:
        """Convert *scenario* to a JSON-serializable dictionary."""
    
    @property
    def meta(self) -> dict[str, Any]:
        """Metadata dictionary describing this scenario."""

Example usage:

from smac import Scenario
from ConfigSpace import ConfigurationSpace, Float
from pathlib import Path

# Basic scenario: a single log-scaled hyperparameter, a fixed trial budget,
# and a wall-clock cap for the whole run.
config_space = ConfigurationSpace()
config_space.add_hyperparameter(Float("learning_rate", bounds=(1e-5, 1e-1), log=True))

scenario = Scenario(
    configspace=config_space,
    name="ml_optimization",
    n_trials=100,
    walltime_limit=3600,  # 1 hour
    seed=42
)

# Multi-fidelity scenario: min_budget/max_budget enable budget-based
# (successive-halving style) evaluation of configurations.
multi_fidelity_scenario = Scenario(
    configspace=config_space,
    name="multi_fidelity_opt",
    n_trials=50,
    min_budget=0.1,
    max_budget=1.0,
    seed=42
)

# Algorithm configuration scenario: optimize across several problem
# instances, each described by a numeric feature vector.
instances = ["instance_1", "instance_2", "instance_3"]
instance_features = {
    "instance_1": [1.0, 2.0, 3.0],
    "instance_2": [2.0, 3.0, 4.0],
    "instance_3": [3.0, 4.0, 5.0]
}

algo_config_scenario = Scenario(
    configspace=config_space,
    instances=instances,
    instance_features=instance_features,
    n_trials=200,
    trial_walltime_limit=60,  # 1 minute per trial
    seed=42
)

Trial History Management

The RunHistory class stores and manages optimization trial results with support for multi-objective and multi-fidelity scenarios.

class RunHistory:
    def __init__(
        self,
        multi_objective_algorithm: AbstractMultiObjectiveAlgorithm | None = None,
        overwrite_existing_trials: bool = False
    ):
        """
        Container for storing and managing optimization trial results,
        with support for multi-objective and multi-fidelity scenarios.
        
        Parameters:
        - multi_objective_algorithm: Strategy for aggregating multiple
          objectives into a scalar cost; None for single-objective use
        - overwrite_existing_trials: Whether adding a trial with an existing
          key overwrites the stored result
        """
    
    def add(
        self,
        config: Configuration,
        cost: int | float | list[int | float],
        time: float = 0.0,
        cpu_time: float = 0.0,
        status: StatusType = StatusType.SUCCESS,
        instance: str | None = None,
        seed: int | None = None,
        budget: float | None = None,
        starttime: float = 0.0,
        endtime: float = 0.0,
        additional_info: dict[str, Any] | None = None,
        force_update: bool = False
    ) -> None:
        """
        Add a trial result with individual parameters.

        `cost` may be a single value or, for multi-objective runs, a list
        with one entry per objective. `instance`, `seed`, and `budget`
        identify the evaluation context of the trial.
        """
    
    def add_trial(self, info: TrialInfo, value: TrialValue) -> None:
        """Add a trial using structured (info, value) data objects."""
    
    def add_running_trial(self, trial: TrialInfo) -> None:
        """Mark a trial as currently running."""
    
    def get_cost(
        self,
        config: Configuration,
        *,
        instance: str | None = None,
        seed: int | None = None,
        budget: float | None = None
    ) -> float:
        """Get the empirical cost for a specific configuration context."""
    
    def get_min_cost(
        self,
        config: Configuration,
        *,
        instance: str | None = None,
        seed: int | None = None,
        budget: float | None = None
    ) -> float:
        """Get the minimum observed cost for a configuration context."""
    
    def average_cost(
        self,
        config: Configuration,
        *,
        instances: list[str] | None = None,
        seeds: list[int] | None = None,
        budgets: list[float] | None = None,
        normalize: bool = True
    ) -> float:
        """Compute the average cost across the specified contexts."""
    
    def sum_cost(
        self,
        config: Configuration,
        *,
        instances: list[str] | None = None,
        seeds: list[int] | None = None,
        budgets: list[float] | None = None,
        normalize: bool = True
    ) -> float:
        """Compute the sum of costs across the specified contexts."""
    
    def min_cost(
        self,
        config: Configuration,
        *,
        instances: list[str] | None = None,
        seeds: list[int] | None = None,
        budgets: list[float] | None = None,
        normalize: bool = True
    ) -> float:
        """Compute the minimum cost across the specified contexts."""
    
    def get_configs(self, sort_by: str | None = None) -> list[Configuration]:
        """Get all evaluated configurations, optionally sorted."""
    
    def get_trials(
        self,
        config: Configuration,
        highest_observed_budget_only: bool = True
    ) -> list[TrialKey]:
        """
        Get trials for a specific configuration; by default only trials at
        the highest budget observed for that configuration are returned.
        """
    
    def get_running_trials(self, config: Configuration) -> list[TrialKey]:
        """Get currently running trials for a configuration."""
    
    def update_cost(self, config: Configuration) -> None:
        """Recompute the aggregated costs for a configuration."""
    
    def save(self, filename: str | Path) -> None:
        """Save the run history to a JSON file."""
    
    @staticmethod
    def load(filename: str | Path, configspace: ConfigurationSpace) -> RunHistory:
        """Load a run history from a JSON file, bound to *configspace*."""
    
    def update(self, runhistory: RunHistory) -> None:
        """Merge another run history into this one."""
    
    def get_config(self, config_id: int) -> Configuration:
        """Get a configuration by its ID."""
    
    def get_config_id(self, config: Configuration) -> int:
        """Get the ID for a configuration."""
    
    def has_config(self, config: Configuration) -> bool:
        """Check whether a configuration exists in the history."""
    
    def empty(self) -> bool:
        """Check whether the run history is empty."""
    
    def reset(self) -> None:
        """Reset the run history to an empty state."""
    
    @property
    def submitted(self) -> int:
        """Number of submitted trials."""
    
    @property
    def finished(self) -> int:
        """Number of finished trials."""
    
    @property
    def running(self) -> int:
        """Number of currently running trials."""
    
    @property
    def ids_config(self) -> dict[int, Configuration]:
        """Mapping from configuration ID to Configuration object."""
    
    @property
    def config_ids(self) -> dict[Configuration, int]:
        """Mapping from Configuration object to ID."""
    
    @property
    def objective_bounds(self) -> list[tuple[float, float]]:
        """Min/max bounds observed for each objective."""

Trial Execution Status

Enumeration of possible trial execution outcomes.

class StatusType(IntEnum):
    """Execution status of a single trial.

    Being an IntEnum, members compare equal to their integer values, so
    serialized histories can store the raw ints.
    """
    RUNNING = 0     # Submitted but not yet finished
    SUCCESS = 1     # Completed and returned a result
    CRASHED = 2     # Raised an error or otherwise failed
    TIMEOUT = 3     # Hit the per-trial time limit
    MEMORYOUT = 4   # Hit the per-trial memory limit

Event Handling

Base callback class for handling optimization events and implementing custom logging, visualization, or early stopping logic.

class Callback:
    """
    Abstract base class for optimization event hooks.

    Subclass and override any subset of the methods to implement custom
    logging, visualization, or early-stopping logic; each hook receives the
    running optimizer (SMBO) or config selector as context.
    """
    
    def on_start(self, smbo: SMBO) -> None:
        """Called once before the optimization starts."""
    
    def on_end(self, smbo: SMBO) -> None:
        """Called once after the optimization finishes."""
    
    def on_iteration_start(self, smbo: SMBO) -> None:
        """Called before each optimization iteration."""
    
    def on_iteration_end(self, smbo: SMBO) -> None:
        """Called after each optimization iteration."""
    
    def on_next_configurations_start(self, config_selector: ConfigSelector) -> None:
        """Called before model training and configuration selection."""
    
    def on_next_configurations_end(
        self,
        config_selector: ConfigSelector,
        config: list[Configuration]
    ) -> None:
        """Called after configuration selection with the chosen configs."""
    
    def on_ask_start(self, smbo: SMBO) -> None:
        """Called before the intensifier is asked for the next trial."""
    
    def on_ask_end(self, smbo: SMBO, info: TrialInfo) -> None:
        """Called after the intensifier provides the next trial's info."""
    
    def on_tell_start(self, smbo: SMBO, info: TrialInfo, value: TrialValue) -> None:
        """Called before a trial result is processed."""
    
    def on_tell_end(self, smbo: SMBO, info: TrialInfo, value: TrialValue) -> None:
        """Called after a trial result has been processed."""

Metadata Callback

Concrete callback for saving optimization run metadata.

class MetadataCallback(Callback):
    def __init__(self, **kwargs: str | int | float | dict | list) -> None:
        """
        Concrete callback that saves optimization run metadata.
        
        Parameters:
        - **kwargs: Arbitrary JSON-serializable metadata key-value pairs to
          save with the run
        """
    
    def on_start(self, smbo: SMBO) -> None:
        """Called before the optimization starts; saves the metadata."""

Example callback usage:

from smac import HyperparameterOptimizationFacade, Scenario, Callback, MetadataCallback

class EarlyStoppingCallback(Callback):
    """Track cost stagnation to support custom early stopping.

    Counts consecutive trials that fail to beat the best observed cost by
    at least ``min_improvement``; once the count reaches ``patience`` an
    early-stopping message is printed.
    """

    def __init__(self, patience=10, min_improvement=0.01):
        self.patience = patience
        self.min_improvement = min_improvement
        self.best_cost = float('inf')
        self.no_improvement_count = 0

    def on_tell_end(self, smbo, info, value):
        # A trial counts as an improvement only if it beats the best
        # cost by more than the configured margin.
        improved = value.cost < self.best_cost - self.min_improvement
        if improved:
            self.best_cost = value.cost
            self.no_improvement_count = 0
            return
        self.no_improvement_count += 1
        if self.no_improvement_count >= self.patience:
            print(f"Early stopping after {self.patience} iterations without improvement")
            # Custom early stopping logic here

# Use callbacks: attach arbitrary run metadata plus the custom
# early-stopping hook defined above.
metadata_callback = MetadataCallback(
    experiment_name="hyperparameter_tuning",
    dataset="mnist",
    model_type="neural_network"
)

early_stop_callback = EarlyStoppingCallback(patience=15)

# `scenario` and `objective` are assumed to be defined as in the earlier
# Scenario examples.
facade = HyperparameterOptimizationFacade(
    scenario,
    objective,
    callbacks=[metadata_callback, early_stop_callback]
)

Install with Tessl CLI

npx tessl i tessl/pypi-smac

docs

advanced.md

core.md

facades.md

index.md

trials.md

tile.json