or run

npx @tessl/cli init
Log in

Version

Tile

Overview

Evals

Files

Files

docs

advanced.md · core.md · facades.md · index.md · trials.md

docs/trials.md

# Trial Management

Data structures for representing optimization trials, their configurations, results, and execution context. These classes provide structured interfaces for the ask-and-tell optimization pattern.

## Capabilities

### Trial Information

Container for configuration and context information that defines what should be evaluated.

```python { .api }
class TrialInfo:
    def __init__(
        self,
        config: Configuration,
        instance: str | None = None,
        seed: int | None = None,
        budget: float | None = None
    ):
        """
        Configuration and context information for a trial.

        Parameters:
        - config: Configuration to evaluate (required)
        - instance: Problem instance identifier
        - seed: Random seed for evaluation
        - budget: Multi-fidelity budget allocation
        """

    config: Configuration  # Configuration to evaluate
    instance: str | None  # Problem instance
    seed: int | None  # Random seed
    budget: float | None  # Multi-fidelity budget

    def get_instance_seed_key(self) -> InstanceSeedKey:
        """Get instance-seed key for this trial."""

    def get_instance_seed_budget_key(self) -> InstanceSeedBudgetKey:
        """Get instance-seed-budget key for this trial."""
```

### Trial Results

Container for results and metadata from trial execution.

```python { .api }
class TrialValue:
    def __init__(
        self,
        cost: float | list[float],
        time: float,
        cpu_time: float,
        status: StatusType,
        starttime: float,
        endtime: float,
        additional_info: dict[str, Any] | None = None
    ):
        """
        Results and metadata from trial execution.

        Parameters:
        - cost: Objective value(s) - single float for single-objective, list for multi-objective
        - time: Wall-clock time in seconds
        - cpu_time: CPU time in seconds
        - status: Trial execution status (StatusType enum)
        - starttime: Start timestamp (Unix time)
        - endtime: End timestamp (Unix time)
        - additional_info: Extra trial information dictionary
        """

    cost: float | list[float]  # Objective value(s)
    time: float  # Wall-clock time (seconds)
    cpu_time: float  # CPU time (seconds)
    status: StatusType  # Execution status
    starttime: float  # Start timestamp
    endtime: float  # End timestamp
    additional_info: dict[str, Any]  # Extra information
```

### Trial Identification

Unique identifier for trials combining configuration, instance, seed, and budget.

```python { .api }
class TrialKey:
    def __init__(
        self,
        config_id: int,
        instance: str | None = None,
        seed: int | None = None,
        budget: float | None = None
    ):
        """
        Unique identifier for optimization trials.

        Parameters:
        - config_id: Configuration ID from RunHistory
        - instance: Problem instance identifier
        - seed: Random seed
        - budget: Multi-fidelity budget
        """

    config_id: int  # Configuration ID
    instance: str | None  # Problem instance
    seed: int | None  # Random seed
    budget: float | None  # Multi-fidelity budget
```

### Instance-Seed Keys

Utility keys for organizing trials by instance and seed combinations.

```python { .api }
class InstanceSeedKey:
    def __init__(self, instance: str | None, seed: int | None):
        """Key for instance-seed combinations."""

    instance: str | None  # Problem instance
    seed: int | None  # Random seed


class InstanceSeedBudgetKey:
    def __init__(self, instance: str | None, seed: int | None, budget: float | None):
        """Key for instance-seed-budget combinations."""

    instance: str | None  # Problem instance
    seed: int | None  # Random seed
    budget: float | None  # Multi-fidelity budget
```

### Optimization Progress Tracking

Container for tracking optimization progress over time.

```python { .api }
class TrajectoryItem:
    def __init__(
        self,
        config_ids: list[int],
        costs: list[float | list[float]],
        trial: int,
        walltime: float
    ):
        """
        Optimization progress tracking.

        Parameters:
        - config_ids: Current incumbent configuration IDs
        - costs: Incumbent costs (parallel to config_ids)
        - trial: Number of trials executed
        - walltime: Elapsed wall-clock time in seconds
        """

    config_ids: list[int]  # Incumbent configuration IDs
    costs: list[float | list[float]]  # Incumbent costs
    trial: int  # Number of trials
    walltime: float  # Elapsed time
```

## Usage Examples

### Ask-and-Tell Pattern

```python
from smac import HyperparameterOptimizationFacade, Scenario
from smac.runhistory import TrialInfo, TrialValue, StatusType
import time

# Set up optimization
facade = HyperparameterOptimizationFacade(scenario, objective)

# Manual ask-and-tell loop
for i in range(10):
    # Ask for next configuration
    trial_info = facade.ask()

    print(f"Trial {i+1}:")
    print(f"  Config: {trial_info.config}")
    print(f"  Instance: {trial_info.instance}")
    print(f"  Seed: {trial_info.seed}")
    print(f"  Budget: {trial_info.budget}")

    # Evaluate configuration
    start_time = time.time()
    try:
        cost = objective(trial_info.config, seed=trial_info.seed)
        status = StatusType.SUCCESS
    except Exception as e:
        cost = float('inf')
        status = StatusType.CRASHED
        print(f"  Trial failed: {e}")

    end_time = time.time()

    # Create trial result
    trial_value = TrialValue(
        cost=cost,
        time=end_time - start_time,
        cpu_time=end_time - start_time,  # Simplified
        status=status,
        starttime=start_time,
        endtime=end_time,
        additional_info={"iteration": i+1}
    )

    # Report results
    facade.tell(trial_info, trial_value)

    print(f"  Cost: {cost}")
    print(f"  Time: {trial_value.time:.3f}s")
    print(f"  Status: {status}")

# Get best configuration
best_config = facade.runhistory.get_configs(sort_by="cost")[0]
best_cost = facade.runhistory.get_cost(best_config)
print(f"Best configuration: {best_config}")
print(f"Best cost: {best_cost}")
```

### Multi-Fidelity Trial Management

```python
from smac import MultiFidelityFacade, Scenario

def multi_fidelity_objective(config, seed=0, budget=1.0):
    # Simulate different fidelities
    base_cost = config["x"]**2 + config["y"]**2
    noise_level = 1.0 / budget  # Less noise with higher budget
    noise = noise_level * (seed % 10 - 5) / 10
    return base_cost + noise

scenario = Scenario(
    configspace=config_space,
    n_trials=30,
    min_budget=0.1,
    max_budget=1.0
)

facade = MultiFidelityFacade(scenario, multi_fidelity_objective)

# Run optimization with automatic budget allocation
for i in range(10):
    trial_info = facade.ask()

    # Note the budget allocation
    print(f"Trial {i+1}: Budget = {trial_info.budget}")

    # Objective function receives budget parameter
    cost = multi_fidelity_objective(
        trial_info.config,
        seed=trial_info.seed,
        budget=trial_info.budget
    )

    trial_value = TrialValue(
        cost=cost,
        time=0.1,  # Simplified
        cpu_time=0.1,
        status=StatusType.SUCCESS,
        starttime=time.time(),
        endtime=time.time() + 0.1
    )

    facade.tell(trial_info, trial_value)
```

### Trial History Analysis

```python
# Access trial history after optimization
runhistory = facade.runhistory

print(f"Total trials: {runhistory.finished}")
print(f"Running trials: {runhistory.running}")

# Get all configurations sorted by performance
configs = runhistory.get_configs(sort_by="cost")
print(f"Best 3 configurations:")
for i, config in enumerate(configs[:3]):
    cost = runhistory.get_cost(config)
    trials = runhistory.get_trials(config)
    print(f"  {i+1}. Cost: {cost:.4f}, Trials: {len(trials)}")
    print(f"     Config: {config}")

# Analyze specific configuration
best_config = configs[0]
avg_cost = runhistory.average_cost(best_config)
min_cost = runhistory.min_cost(best_config)
print(f"Best config - Average: {avg_cost:.4f}, Minimum: {min_cost:.4f}")
```