A fast library for automated machine learning and tuning
Quality: Pending — Does it follow best practices?
Impact: Pending — No eval scenarios have been run.
Advanced hyperparameter optimization framework with multiple search algorithms, flexible search space definitions, and seamless integration with popular ML frameworks. FLAML's tuning module provides efficient optimization strategies that balance exploration and exploitation.
Main function for running hyperparameter optimization experiments.
def run(trainable, search_space=None, searcher=None, scheduler=None,
        time_budget_s=None, num_samples=None, config=None, **kwargs):
    """
    Run a hyperparameter tuning experiment.

    Args:
        trainable (callable): Function to optimize; takes a config dict and
            returns a dict of metrics.
        search_space (dict): Search space specification.
        searcher: Search algorithm instance (BlendSearch, CFO, FLOW2, etc.).
        scheduler: Scheduler for early stopping and resource allocation.
        time_budget_s (float): Time budget in seconds.
        num_samples (int): Maximum number of trials to run.
        config (dict): Fixed configuration parameters.
        **kwargs: Additional arguments.

    Returns:
        Analysis object with optimization results.
    """


def report(**metrics):
    """
    Report metrics from within a trainable function.

    Args:
        **metrics: Metric name-value pairs to report.
    """


# Advanced search algorithm that blends local and global search strategies
# for efficient hyperparameter optimization.
class BlendSearch:
    """Search algorithm blending local and global search strategies."""

    def __init__(self, metric, mode="min", space=None, low_cost_partial_config=None,
                 cat_hp_cost=None, points_to_evaluate=None, time_budget_s=None,
                 resource_attr=None, global_search_alg=None, config_constraints=None,
                 metric_constraints=None, seed=None):
        """
        Initialize the BlendSearch algorithm.

        Args:
            metric (str): Metric name to optimize.
            mode (str): Optimization mode - 'min' or 'max'.
            space (dict): Search space configuration.
            low_cost_partial_config (dict): Initial low-cost configuration.
            cat_hp_cost (dict): Cost specification for categorical hyperparameters.
            points_to_evaluate (list): Initial points to evaluate.
            time_budget_s (float): Time budget in seconds.
            resource_attr (str): Resource dimension name for multi-fidelity
                optimization.
            global_search_alg: Global search algorithm class to use.
            config_constraints (list): Configuration constraint functions.
            metric_constraints (list): Metric constraint specifications.
            seed (int): Random seed for reproducibility.
        """

    def suggest(self, trial_id):
        """
        Suggest the next configuration to evaluate.

        Args:
            trial_id (str): Unique trial identifier.

        Returns:
            dict: Configuration to evaluate.
        """

    def on_trial_result(self, trial_id, result):
        """
        Process an intermediate trial result.

        Args:
            trial_id (str): Trial identifier.
            result (dict): Trial results including metrics.
        """

    def on_trial_complete(self, trial_id, result=None, error=False):
        """
        Handle trial completion.

        Args:
            trial_id (str): Trial identifier.
            result (dict): Final trial results.
            error (bool): Whether the trial ended in error.
        """

    def save(self, checkpoint_path):
        """Save searcher state to a checkpoint."""

    def restore(self, checkpoint_path):
        """Restore searcher state from a checkpoint."""


class CFO:
    """Cost-Frugal Optimization searcher (alias for BlendSearch with specific defaults)."""
class FLOW2:
    """Fast local search algorithm with adaptive step sizes."""

    def __init__(self, metric, mode="min", **kwargs):
        """Initialize the FLOW2 searcher."""


class RandomSearch:
    """Random sampling baseline for hyperparameter optimization."""

    def __init__(self, metric, mode="min", **kwargs):
        """Initialize random search."""


class ChampionFrontierSearcher:
    """Online searcher for champion frontier optimization."""


# Functions and classes for defining hyperparameter search spaces.
def uniform(low, high):
    """
    Uniform distribution sampling.

    Args:
        low (float): Lower bound.
        high (float): Upper bound.

    Returns:
        dict: Uniform distribution specification.
    """

def loguniform(low, high):
    """
    Log-uniform distribution sampling.

    Args:
        low (float): Lower bound (log scale).
        high (float): Upper bound (log scale).

    Returns:
        dict: Log-uniform distribution specification.
    """

def randint(low, high):
    """
    Random integer sampling.

    Args:
        low (int): Lower bound (inclusive).
        high (int): Upper bound (exclusive).

    Returns:
        dict: Random integer specification.
    """

def lograndint(low, high):
    """
    Log-scale random integer sampling.

    Args:
        low (int): Lower bound.
        high (int): Upper bound.

    Returns:
        dict: Log-scale integer specification.
    """

def quniform(low, high, q):
    """
    Quantized uniform distribution.

    Args:
        low (float): Lower bound.
        high (float): Upper bound.
        q (float): Quantization step.

    Returns:
        dict: Quantized uniform specification.
    """

def qloguniform(low, high, q):
    """Quantized log-uniform distribution."""

def qrandint(low, high, q):
    """Quantized random integer."""

def qlograndint(low, high, q):
    """Quantized log-scale random integer."""

def randn(mean, sd):
    """
    Normal distribution sampling.

    Args:
        mean (float): Mean value.
        sd (float): Standard deviation.

    Returns:
        dict: Normal distribution specification.
    """

def qrandn(mean, sd, q):
    """Quantized normal distribution."""

def choice(categories):
    """
    Categorical choice sampling.

    Args:
        categories (list): List of categorical values.

    Returns:
        dict: Categorical choice specification.
    """

class Categorical:
    """Categorical parameter space for discrete choices."""

    def __init__(self, categories, ordered=None):
        """
        Initialize a categorical parameter.

        Args:
            categories (list): Available categorical values.
            ordered (bool): Whether categories have a natural ordering.
        """
class Float:
    """Continuous float parameter space."""

    def __init__(self, lower, upper, log=False):
        """
        Initialize a float parameter.

        Args:
            lower (float): Lower bound.
            upper (float): Upper bound.
            log (bool): Whether to use a log scale.
        """


class PolynomialExpansionSet:
    """Polynomial expansion parameter set for feature interactions."""


def polynomial_expansion_set(feature_names, interaction_terms=2):
    """
    Create a polynomial expansion set.

    Args:
        feature_names (list): Base feature names.
        interaction_terms (int): Maximum interaction order.

    Returns:
        PolynomialExpansionSet: Polynomial expansion specification.
    """


class Trial:
    """Individual trial/experiment representation."""

    @property
    def trial_id(self):
        """Unique trial identifier."""

    @property
    def config(self):
        """Trial configuration parameters."""

    @property
    def last_result(self):
        """Last reported result from the trial."""


from flaml.tune import run
from flaml.tune.searcher import BlendSearch
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import cross_val_score
def train_rf(config):
    """
    Training function for a random forest.

    Args:
        config (dict): Hyperparameters with keys "n_estimators",
            "max_depth", and "min_samples_split".

    Returns:
        dict: {"accuracy": mean 3-fold cross-validation accuracy}.

    NOTE(review): assumes X_train and y_train are defined in the enclosing
    scope — confirm against the surrounding example.
    """
    model = RandomForestClassifier(
        n_estimators=config["n_estimators"],
        max_depth=config["max_depth"],
        min_samples_split=config["min_samples_split"],
        random_state=42
    )
    # Cross-validation score; fixed seed above so trials differ only by config
    scores = cross_val_score(model, X_train, y_train, cv=3)
    return {"accuracy": scores.mean()}
# Define the search space for the random forest hyperparameters.
search_space = {
    "n_estimators": {"_type": "randint", "_value": [10, 100]},
    "max_depth": {"_type": "randint", "_value": [3, 20]},
    "min_samples_split": {"_type": "randint", "_value": [2, 20]}
}

# Run optimization: stop after 300 seconds or 50 trials, whichever is first.
analysis = run(
    train_rf,
    search_space,
    searcher=BlendSearch(metric="accuracy", mode="max"),
    time_budget_s=300,
    num_samples=50
)

print(f"Best config: {analysis.best_config}")
print(f"Best accuracy: {analysis.best_result['accuracy']}")

from flaml.tune import run, uniform, loguniform, choice
from flaml.tune.searcher import BlendSearch
def objective(config):
    """
    Objective function reporting multiple metrics.

    Args:
        config (dict): Trial hyperparameters.

    Returns:
        dict: "accuracy", "training_time", and "model_size" metrics.

    NOTE(review): train_and_evaluate, get_training_time, and get_model_size
    are placeholders for user-supplied code — confirm they exist in scope.
    """
    # Your model training code here
    accuracy = train_and_evaluate(config)
    training_time = get_training_time()
    model_size = get_model_size(config)
    return {
        "accuracy": accuracy,
        "training_time": training_time,
        "model_size": model_size
    }
# Search space built with the convenience sampling functions.
search_space = {
    "learning_rate": loguniform(0.001, 0.1),
    "batch_size": choice([16, 32, 64, 128]),
    "hidden_dim": uniform(64, 512),
    "dropout": uniform(0.0, 0.5)
}
# Constraints: accuracy >= 0.8 and training_time <= 300 (see metric_constraints below).
def config_constraint(config):
    """Validate a configuration: large hidden layers require small batches."""
    # Only configs with hidden_dim above 256 are restricted.
    if config["hidden_dim"] > 256:
        return config["batch_size"] <= 64
    return True
# Metric constraints: each tuple is (metric_name, operator, threshold).
metric_constraints = [
    ("accuracy", ">=", 0.8),
    ("training_time", "<=", 300)
]

# Run with both configuration and metric constraints.
analysis = run(
    objective,
    search_space,
    searcher=BlendSearch(
        metric="accuracy",
        mode="max",
        config_constraints=[config_constraint],
        metric_constraints=metric_constraints
    ),
    time_budget_s=1800
)

from flaml.tune import run
from flaml.tune.searcher import BlendSearch
def train_with_budget(config):
    """
    Training function with an epoch resource budget.

    Args:
        config (dict): Hyperparameters; "epochs" is the resource dimension
            (defaults to 100 if absent).

    Returns:
        dict: {"loss": final validation loss}.

    NOTE(review): create_model, report, and the model's train_one_epoch /
    evaluate methods are placeholders — confirm they exist in scope.
    """
    epochs = config.get("epochs", 100)  # Resource dimension
    model = create_model(config)
    for epoch in range(epochs):
        model.train_one_epoch()
        # Report intermediate results every 10 epochs
        if epoch % 10 == 0:
            val_loss = model.evaluate()
            report(loss=val_loss, epoch=epoch)
    final_loss = model.evaluate()
    return {"loss": final_loss}
# Multi-fidelity search space; "epochs" is the resource attribute.
search_space = {
    "lr": loguniform(1e-4, 1e-1),
    "batch_size": choice([16, 32, 64]),
    "epochs": randint(10, 200)  # Resource attribute
}

analysis = run(
    train_with_budget,
    search_space,
    searcher=BlendSearch(
        metric="loss",
        mode="min",
        resource_attr="epochs",
        time_budget_s=3600
    )
)

from flaml.tune.searcher import BlendSearchTuner
import ray
from ray import tune

# Initialize Ray
ray.init()

# Use the FLAML searcher inside Ray Tune.
# NOTE(review): trainable_fn and search_space must be defined elsewhere —
# confirm against the surrounding examples.
tune.run(
    trainable_fn,
    config=search_space,
    search_alg=BlendSearchTuner(
        metric="accuracy",
        mode="max"
    ),
    num_samples=100,
    time_budget_s=1800
)

# Advanced search space components for complex parameter definitions.
class PolynomialExpansionSet:
    """Polynomial expansion set for hierarchical search spaces."""

    def __init__(self, init_monomials=None, **kwargs): ...

    def add_monomial(self, monomial): ...

    def expand(self, degree): ...


def polynomial_expansion_set(init_monomials=None, **kwargs):
    """
    Create a polynomial expansion set for a search space.

    Args:
        init_monomials (list): Initial monomials for the expansion.
        **kwargs: Additional parameters.

    Returns:
        PolynomialExpansionSet: Polynomial expansion set instance.
    """


# Classes for managing individual trials in hyperparameter optimization.
class Trial:
    """Individual trial in a hyperparameter optimization experiment."""

    def __init__(self, config, trial_id=None):
        """
        Initialize a trial.

        Args:
            config (dict): Trial configuration parameters.
            trial_id (str): Unique trial identifier.
        """

    @property
    def config(self):
        """dict: Trial configuration parameters"""

    @property
    def trial_id(self):
        """str: Unique trial identifier"""

    @property
    def status(self):
        """str: Current trial status"""

    def set_status(self, status):
        """Set the trial status."""


# Special result indicator for the best (incumbent) configuration.
INCUMBENT_RESULT = "INCUMBENT_RESULT"
def choice(*categories):
    """
    Create a categorical choice parameter.

    Args:
        *categories: Available categories to choose from.

    Returns:
        Choice parameter specification.
    """

# Install with Tessl CLI
npx tessl i tessl/pypi-flaml