or run

npx @tessl/cli init
Log in

Version

Tile

Overview

Evals

Files

Files

docs

core-agents.md · guardrails.md · handoffs.md · index.md · items-streaming.md · lifecycle.md · mcp.md · memory-sessions.md · model-providers.md · realtime.md · results-exceptions.md · tools.md · tracing.md · voice-pipeline.md

lifecycle.mddocs/

# Lifecycle Hooks

Lifecycle hooks provide callbacks at key points during agent execution, enabling observability, logging, custom metrics, and workflow control. The SDK offers two types of hooks: run-level hooks for workflow-wide events and agent-level hooks for individual agent events.

## Capabilities

### Run Hooks

Lifecycle hooks for entire agent runs, tracking all agents in a workflow.

```python { .api }
class RunHooks[TContext]:
    """
    Lifecycle hooks for agent runs.

    Type Parameters:
    - TContext: Type of context object

    All methods are async and optional to override.
    """

    async def on_llm_start(
        context: TContext | None,
        agent: Agent,
        system_prompt: str | None,
        input_items: list[TResponseInputItem]
    ) -> None:
        """
        Called before LLM call.

        Parameters:
        - context: Context object
        - agent: Current agent
        - system_prompt: System prompt being used
        - input_items: Input items for LLM
        """

    async def on_llm_end(
        context: TContext | None,
        agent: Agent,
        response: ModelResponse
    ) -> None:
        """
        Called after LLM call.

        Parameters:
        - context: Context object
        - agent: Current agent
        - response: LLM response
        """

    async def on_agent_start(
        context: TContext | None,
        agent: Agent
    ) -> None:
        """
        Called before agent invoked.

        Parameters:
        - context: Context object
        - agent: Agent starting
        """

    async def on_agent_end(
        context: TContext | None,
        agent: Agent,
        output: Any
    ) -> None:
        """
        Called when agent produces output.

        Parameters:
        - context: Context object
        - agent: Agent ending
        - output: Agent output
        """

    async def on_handoff(
        context: TContext | None,
        from_agent: Agent,
        to_agent: Agent
    ) -> None:
        """
        Called when handoff occurs.

        Parameters:
        - context: Context object
        - from_agent: Source agent
        - to_agent: Target agent
        """

    async def on_tool_start(
        context: TContext | None,
        agent: Agent,
        tool: Tool
    ) -> None:
        """
        Called before tool invoked.

        Parameters:
        - context: Context object
        - agent: Agent using tool
        - tool: Tool being invoked
        """

    async def on_tool_end(
        context: TContext | None,
        agent: Agent,
        tool: Tool,
        result: Any
    ) -> None:
        """
        Called after tool invoked.

        Parameters:
        - context: Context object
        - agent: Agent that used tool
        - tool: Tool that was invoked
        - result: Tool result
        """
```

Usage example:

```python
from agents import RunHooks, Runner
import logging

class LoggingHooks(RunHooks):
    """Log all agent operations."""

    async def on_agent_start(self, context, agent):
        logging.info(f"Agent started: {agent.name}")

    async def on_agent_end(self, context, agent, output):
        logging.info(f"Agent ended: {agent.name}, output: {output}")

    async def on_tool_start(self, context, agent, tool):
        logging.info(f"Tool called: {tool.name}")

    async def on_tool_end(self, context, agent, tool, result):
        logging.info(f"Tool result: {result}")

    async def on_handoff(self, context, from_agent, to_agent):
        logging.info(f"Handoff: {from_agent.name} -> {to_agent.name}")

    async def on_llm_start(self, context, agent, system_prompt, input_items):
        logging.info(f"LLM call for {agent.name}")

    async def on_llm_end(self, context, agent, response):
        logging.info(f"LLM response: {response.usage.total_tokens} tokens")

hooks = LoggingHooks()
result = await Runner.run(agent, "Hello", hooks=hooks)
```

### Agent Hooks

Lifecycle hooks for specific agents.

```python { .api }
class AgentHooks[TContext]:
    """
    Lifecycle hooks for specific agent.

    Type Parameters:
    - TContext: Type of context object

    All methods are async and optional to override.
    """

    async def on_start(
        context: TContext | None,
        agent: Agent
    ) -> None:
        """
        Called when agent starts.

        Parameters:
        - context: Context object
        - agent: This agent
        """

    async def on_end(
        context: TContext | None,
        agent: Agent,
        output: Any
    ) -> None:
        """
        Called when agent produces output.

        Parameters:
        - context: Context object
        - agent: This agent
        - output: Agent output
        """

    async def on_handoff(
        context: TContext | None,
        agent: Agent,
        source: Agent
    ) -> None:
        """
        Called when handed off to this agent.

        Parameters:
        - context: Context object
        - agent: This agent (target)
        - source: Source agent
        """

    async def on_tool_start(
        context: TContext | None,
        agent: Agent,
        tool: Tool
    ) -> None:
        """
        Called before tool invoked by this agent.

        Parameters:
        - context: Context object
        - agent: This agent
        - tool: Tool being invoked
        """

    async def on_tool_end(
        context: TContext | None,
        agent: Agent,
        tool: Tool,
        result: Any
    ) -> None:
        """
        Called after tool invoked by this agent.

        Parameters:
        - context: Context object
        - agent: This agent
        - tool: Tool that was invoked
        - result: Tool result
        """

    async def on_llm_start(
        context: TContext | None,
        agent: Agent,
        system_prompt: str | None,
        input_items: list[TResponseInputItem]
    ) -> None:
        """
        Called before LLM call for this agent.

        Parameters:
        - context: Context object
        - agent: This agent
        - system_prompt: System prompt
        - input_items: Input items
        """

    async def on_llm_end(
        context: TContext | None,
        agent: Agent,
        response: ModelResponse
    ) -> None:
        """
        Called after LLM call for this agent.

        Parameters:
        - context: Context object
        - agent: This agent
        - response: LLM response
        """
```

Usage example:

```python
from agents import Agent, AgentHooks

class SpecialistHooks(AgentHooks):
    """Hooks for specialist agent."""

    async def on_start(self, context, agent):
        print(f"{agent.name} is starting work")
        # Initialize resources
        context.start_time = time.time()

    async def on_end(self, context, agent, output):
        duration = time.time() - context.start_time
        print(f"{agent.name} completed in {duration:.2f}s")

    async def on_tool_start(self, context, agent, tool):
        print(f"Using tool: {tool.name}")

specialist = Agent(
    name="Specialist",
    instructions="Provide expert analysis",
    hooks=SpecialistHooks()
)
```

## Hook Patterns

### Metrics Collection

```python
from agents import RunHooks
import time

class MetricsHooks(RunHooks):
    """Collect metrics during agent execution."""

    def __init__(self):
        self.metrics = {
            "llm_calls": 0,
            "tool_calls": 0,
            "handoffs": 0,
            "total_tokens": 0,
            "duration": 0
        }
        self.start_time = None

    async def on_agent_start(self, context, agent):
        self.start_time = time.time()

    async def on_agent_end(self, context, agent, output):
        if self.start_time:
            self.metrics["duration"] = time.time() - self.start_time

    async def on_llm_end(self, context, agent, response):
        self.metrics["llm_calls"] += 1
        self.metrics["total_tokens"] += response.usage.total_tokens

    async def on_tool_end(self, context, agent, tool, result):
        self.metrics["tool_calls"] += 1

    async def on_handoff(self, context, from_agent, to_agent):
        self.metrics["handoffs"] += 1

# Use hooks
hooks = MetricsHooks()
result = await Runner.run(agent, "Task", hooks=hooks)
print(f"Metrics: {hooks.metrics}")
```

### Cost Tracking

```python
class CostTrackingHooks(RunHooks):
    """Track LLM costs during execution."""

    COST_PER_1K_TOKENS = {
        "gpt-4o": {"input": 0.0025, "output": 0.01},
        "gpt-4o-mini": {"input": 0.00015, "output": 0.0006}
    }

    def __init__(self):
        self.total_cost = 0.0

    async def on_llm_end(self, context, agent, response):
        model = agent.model or "gpt-4o"
        costs = self.COST_PER_1K_TOKENS.get(model, {"input": 0, "output": 0})

        input_cost = (response.usage.input_tokens / 1000) * costs["input"]
        output_cost = (response.usage.output_tokens / 1000) * costs["output"]
        self.total_cost += input_cost + output_cost

        print(f"Call cost: ${input_cost + output_cost:.4f}")

hooks = CostTrackingHooks()
result = await Runner.run(agent, "Task", hooks=hooks)
print(f"Total cost: ${hooks.total_cost:.4f}")
```

### Audit Logging

```python
class AuditHooks(RunHooks):
    """Audit log for compliance."""

    def __init__(self, user_id: str):
        self.user_id = user_id
        self.audit_log = []

    async def on_agent_start(self, context, agent):
        self.audit_log.append({
            "timestamp": time.time(),
            "event": "agent_start",
            "agent": agent.name,
            "user": self.user_id
        })

    async def on_tool_start(self, context, agent, tool):
        self.audit_log.append({
            "timestamp": time.time(),
            "event": "tool_call",
            "agent": agent.name,
            "tool": tool.name,
            "user": self.user_id
        })

    async def on_llm_start(self, context, agent, system_prompt, input_items):
        self.audit_log.append({
            "timestamp": time.time(),
            "event": "llm_call",
            "agent": agent.name,
            "user": self.user_id
        })

    def save_audit_log(self, filename):
        import json
        with open(filename, 'w') as f:
            json.dump(self.audit_log, f, indent=2)

hooks = AuditHooks(user_id="user_123")
result = await Runner.run(agent, "Task", hooks=hooks)
hooks.save_audit_log("audit.json")
```

### Error Handling

```python
class ErrorHandlingHooks(RunHooks):
    """Capture and handle errors during execution."""

    def __init__(self):
        self.errors = []

    async def on_tool_end(self, context, agent, tool, result):
        # Check for tool errors
        if isinstance(result, Exception):
            self.errors.append({
                "tool": tool.name,
                "error": str(result),
                "agent": agent.name
            })

    async def on_llm_end(self, context, agent, response):
        # Check for refusals
        for item in response.output:
            if hasattr(item, 'refusal') and item.refusal:
                self.errors.append({
                    "type": "refusal",
                    "message": item.refusal,
                    "agent": agent.name
                })

hooks = ErrorHandlingHooks()
try:
    result = await Runner.run(agent, "Task", hooks=hooks)
finally:
    if hooks.errors:
        print(f"Errors encountered: {hooks.errors}")
```

### Rate Limiting

```python
import asyncio
from collections import deque

class RateLimitingHooks(RunHooks):
    """Implement rate limiting for LLM calls."""

    def __init__(self, max_calls_per_minute=10):
        self.max_calls = max_calls_per_minute
        self.calls = deque()

    async def on_llm_start(self, context, agent, system_prompt, input_items):
        now = time.time()

        # Remove calls older than 1 minute
        while self.calls and now - self.calls[0] > 60:
            self.calls.popleft()

        # Wait if at rate limit
        if len(self.calls) >= self.max_calls:
            wait_time = 60 - (now - self.calls[0])
            print(f"Rate limit reached, waiting {wait_time:.1f}s")
            await asyncio.sleep(wait_time)

        self.calls.append(now)

hooks = RateLimitingHooks(max_calls_per_minute=5)
result = await Runner.run(agent, "Task", hooks=hooks)
```

### Progress Reporting

```python
class ProgressHooks(RunHooks):
    """Report progress during execution."""

    def __init__(self):
        self.steps = []

    async def on_agent_start(self, context, agent):
        self.steps.append(f"Starting {agent.name}")
        print(f"[{len(self.steps)}] Starting {agent.name}")

    async def on_tool_start(self, context, agent, tool):
        self.steps.append(f"Calling {tool.name}")
        print(f"[{len(self.steps)}] Calling {tool.name}")

    async def on_handoff(self, context, from_agent, to_agent):
        self.steps.append(f"Handoff: {from_agent.name} -> {to_agent.name}")
        print(f"[{len(self.steps)}] Handoff to {to_agent.name}")

    async def on_agent_end(self, context, agent, output):
        self.steps.append(f"Completed {agent.name}")
        print(f"[{len(self.steps)}] Completed {agent.name}")

hooks = ProgressHooks()
result = await Runner.run(agent, "Task", hooks=hooks)
print(f"Total steps: {len(hooks.steps)}")
```

## Combining Hooks

Use multiple hooks together:

```python
class CombinedHooks(RunHooks):
    """Combine multiple hook behaviors."""

    def __init__(self):
        self.metrics_hooks = MetricsHooks()
        self.cost_hooks = CostTrackingHooks()
        self.audit_hooks = AuditHooks("user_123")

    async def on_llm_end(self, context, agent, response):
        # Call all sub-hooks
        await self.metrics_hooks.on_llm_end(context, agent, response)
        await self.cost_hooks.on_llm_end(context, agent, response)
        await self.audit_hooks.on_llm_end(context, agent, response)

    # Implement other methods similarly...

hooks = CombinedHooks()
result = await Runner.run(agent, "Task", hooks=hooks)
```

## Best Practices

1. **Keep Hooks Lightweight**: Avoid heavy operations in hooks to prevent slowing down execution
2. **Error Handling**: Handle exceptions in hooks to prevent breaking agent execution
3. **Async Operations**: Use async properly for I/O operations in hooks
4. **Selective Overrides**: Only override methods you need
5. **State Management**: Store state in hook instance variables
6. **Composability**: Design hooks to be composable with others
7. **Testing**: Test hooks independently from agents
8. **Documentation**: Document what each hook does for team members
9. **Performance**: Monitor hook performance impact
10. **Logging**: Use appropriate log levels in hooks