or run

npx @tessl/cli init
Log in

Version

Tile

Overview

Evals

Files

Files

docs

agents-tools.md · callbacks-monitoring.md · chains-workflows.md · document-processing.md · index.md · memory-context.md

docs/callbacks-monitoring.md

# Callbacks and Monitoring

Event handling system for monitoring, logging, debugging, and observing LangChain operations. Callbacks provide hooks into the execution lifecycle of chains, agents, and other components for comprehensive observability.

## Capabilities

### Core Callback Handlers

Base callback handlers for common monitoring and logging scenarios.

```python { .api }
from langchain_core.callbacks import (
    BaseCallbackHandler,
    AsyncCallbackHandler,
    FileCallbackHandler,
    StdOutCallbackHandler,
    StreamingStdOutCallbackHandler
)

class BaseCallbackHandler:
    """Base callback handler for LangChain events."""

    def on_llm_start(
        self,
        serialized: dict,
        prompts: List[str],
        **kwargs: Any
    ) -> None:
        """Called when LLM starts running."""

    def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
        """Called when LLM finishes running."""

    def on_llm_error(self, error: BaseException, **kwargs: Any) -> None:
        """Called when LLM encounters an error."""

    def on_chain_start(
        self,
        serialized: dict,
        inputs: dict,
        **kwargs: Any
    ) -> None:
        """Called when chain starts running."""

    def on_chain_end(self, outputs: dict, **kwargs: Any) -> None:
        """Called when chain finishes running."""

    def on_chain_error(self, error: BaseException, **kwargs: Any) -> None:
        """Called when chain encounters an error."""

    def on_tool_start(
        self,
        serialized: dict,
        input_str: str,
        **kwargs: Any
    ) -> None:
        """Called when tool starts running."""

    def on_tool_end(self, output: str, **kwargs: Any) -> None:
        """Called when tool finishes running."""

    def on_tool_error(self, error: BaseException, **kwargs: Any) -> None:
        """Called when tool encounters an error."""

    def on_agent_action(self, action: AgentAction, **kwargs: Any) -> None:
        """Called when agent takes an action."""

    def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> None:
        """Called when agent finishes execution."""

class AsyncCallbackHandler(BaseCallbackHandler):
    """Async version of base callback handler."""

    async def on_llm_start(
        self,
        serialized: dict,
        prompts: List[str],
        **kwargs: Any
    ) -> None: ...

    async def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None: ...

    # Additional async versions of all callback methods...

class StdOutCallbackHandler(BaseCallbackHandler):
    """Callback handler that prints to stdout."""
    pass

class StreamingStdOutCallbackHandler(BaseCallbackHandler):
    """Callback handler that streams LLM outputs to stdout."""

    def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
        """Called when LLM generates a new token."""

class FileCallbackHandler(BaseCallbackHandler):
    """Callback handler that logs to file."""

    def __init__(self, filename: str): ...
```

### LangChain-Specific Callback Handlers

Custom callback handlers for LangChain workflows and debugging.

```python { .api }
from langchain.callbacks import (
    AsyncIteratorCallbackHandler,
    FinalStreamingStdOutCallbackHandler
)

class AsyncIteratorCallbackHandler(AsyncCallbackHandler):
    """Callback handler for async iteration over LLM outputs."""

    def __init__(self): ...

    async def on_llm_new_token(self, token: str, **kwargs: Any) -> None: ...

    async def aiter(self) -> AsyncIterator[str]:
        """Async iterator over generated tokens."""

class FinalStreamingStdOutCallbackHandler(BaseCallbackHandler):
    """Stream only the final output to stdout, not intermediate tokens."""

    def __init__(self, answer_prefix_tokens: Optional[List[str]] = None): ...
```

### Tracing and Observability

LangSmith integration and tracing capabilities for monitoring LangChain applications.

```python { .api }
from langchain_core.tracers import LangChainTracer
from langchain_core.tracers.context import (
    collect_runs,
    tracing_enabled,
    tracing_v2_enabled
)

class LangChainTracer(BaseCallbackHandler):
    """Tracer for LangSmith observability platform."""

    def __init__(
        self,
        project_name: Optional[str] = None,
        client: Optional[Any] = None,
        **kwargs: Any
    ): ...

def tracing_enabled() -> bool:
    """Check if LangSmith tracing is enabled."""

def tracing_v2_enabled() -> bool:
    """Check if LangSmith v2 tracing is enabled."""

def collect_runs():
    """Context manager to collect run information."""
```

### Callback Management

Systems for managing and coordinating multiple callback handlers.

```python { .api }
from langchain_core.callbacks import (
    BaseCallbackManager,
    CallbackManager,
    AsyncCallbackManager,
    Callbacks
)

class BaseCallbackManager:
    """Base callback manager for handling multiple callbacks."""

    def add_handler(self, handler: BaseCallbackHandler) -> None:
        """Add callback handler."""

    def remove_handler(self, handler: BaseCallbackHandler) -> None:
        """Remove callback handler."""

    def copy(self) -> "BaseCallbackManager":
        """Create copy of callback manager."""

class CallbackManager(BaseCallbackManager):
    """Synchronous callback manager."""

    def __init__(self, handlers: Optional[List[BaseCallbackHandler]] = None): ...

    def on_llm_start(
        self,
        serialized: dict,
        prompts: List[str],
        run_id: Optional[str] = None,
        **kwargs: Any
    ) -> List[CallbackManagerForLLMRun]: ...

class AsyncCallbackManager(BaseCallbackManager):
    """Asynchronous callback manager."""

    def __init__(self, handlers: Optional[List[BaseCallbackHandler]] = None): ...

    async def on_llm_start(
        self,
        serialized: dict,
        prompts: List[str],
        run_id: Optional[str] = None,
        **kwargs: Any
    ) -> List[AsyncCallbackManagerForLLMRun]: ...

# Type alias for callback specification
Callbacks = Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]]
```

### Run Managers

Specialized callback managers for different component types during execution.

```python { .api }
from langchain_core.callbacks import (
    CallbackManagerForLLMRun,
    AsyncCallbackManagerForLLMRun,
    CallbackManagerForChainRun,
    AsyncCallbackManagerForChainRun,
    CallbackManagerForToolRun,
    AsyncCallbackManagerForToolRun
)

class CallbackManagerForLLMRun:
    """Callback manager for LLM execution runs."""

    def on_llm_new_token(self, token: str, **kwargs: Any) -> None: ...

    def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None: ...

    def on_llm_error(self, error: BaseException, **kwargs: Any) -> None: ...

class CallbackManagerForChainRun:
    """Callback manager for chain execution runs."""

    def get_child(self, tag: Optional[str] = None) -> CallbackManager: ...

    def on_chain_end(self, outputs: dict, **kwargs: Any) -> None: ...

    def on_chain_error(self, error: BaseException, **kwargs: Any) -> None: ...

class CallbackManagerForToolRun:
    """Callback manager for tool execution runs."""

    def on_tool_end(self, output: str, **kwargs: Any) -> None: ...

    def on_tool_error(self, error: BaseException, **kwargs: Any) -> None: ...
```

### Third-Party Integrations

**Note**: Third-party callback handlers have been moved to langchain_community with deprecation warnings:

```python { .api }
# Available in langchain_community (deprecated imports from main package)
from langchain_community.callbacks import (
    OpenAICallbackHandler,         # OpenAI usage tracking
    WandbCallbackHandler,          # Weights & Biases logging
    CometCallbackHandler,          # Comet ML logging
    MlflowCallbackHandler,         # MLflow logging
    StreamlitCallbackHandler,      # Streamlit integration
    HumanApprovalCallbackHandler,  # Human-in-the-loop
    get_openai_callback,           # OpenAI usage context manager
    wandb_tracing_enabled          # W&B tracing status
)
```

### Global Callback Configuration

Global settings and utilities for callback configuration across LangChain.

```python { .api }
from langchain.globals import (
    set_verbose,
    get_verbose,
    set_debug,
    get_debug
)

def set_verbose(value: bool) -> None:
    """Set global verbose mode for callbacks."""

def get_verbose() -> bool:
    """Get current global verbose setting."""

def set_debug(value: bool) -> None:
    """Set global debug mode for enhanced logging."""

def get_debug() -> bool:
    """Get current global debug setting."""
```

## Usage Examples

### Basic Callback Usage

```python
from langchain.callbacks import StdOutCallbackHandler
from langchain.chains import LLMChain
from langchain_core.prompts import PromptTemplate
from langchain_openai import OpenAI

# Create callback handler
callback = StdOutCallbackHandler()

# Use with chain
prompt = PromptTemplate.from_template("Tell me about {topic}")
chain = LLMChain(
    llm=OpenAI(temperature=0),
    prompt=prompt,
    callbacks=[callback],
    verbose=True
)

# Callbacks will log execution details
result = chain.run(topic="machine learning")
```

### Streaming Callbacks

```python
from langchain.callbacks import StreamingStdOutCallbackHandler
from langchain_openai import OpenAI

# Stream LLM output as it's generated
streaming_handler = StreamingStdOutCallbackHandler()

llm = OpenAI(
    temperature=0.7,
    callbacks=[streaming_handler],
    streaming=True
)

# Output will stream to console
response = llm.predict("Write a short story about AI")
```

339

340

### Custom Callback Handler

341

342

```python

343

from langchain_core.callbacks import BaseCallbackHandler

344

345

class CustomCallbackHandler(BaseCallbackHandler):

346

"""Custom callback to log execution metrics."""

347

348

def __init__(self):

349

self.llm_calls = 0

350

self.chain_calls = 0

351

self.total_tokens = 0

352

353

def on_llm_start(self, serialized, prompts, **kwargs):

354

self.llm_calls += 1

355

print(f"LLM call #{self.llm_calls} started")

356

357

def on_llm_end(self, response, **kwargs):

358

if hasattr(response, 'llm_output') and response.llm_output:

359

token_usage = response.llm_output.get('token_usage', {})

360

tokens = token_usage.get('total_tokens', 0)

361

self.total_tokens += tokens

362

print(f"Used {tokens} tokens (total: {self.total_tokens})")

363

364

def on_chain_start(self, serialized, inputs, **kwargs):

365

self.chain_calls += 1

366

print(f"Chain call #{self.chain_calls}: {serialized.get('name', 'Unknown')}")

367

368

# Use custom callback

369

custom_handler = CustomCallbackHandler()

370

chain = LLMChain(

371

llm=OpenAI(temperature=0),

372

prompt=prompt,

373

callbacks=[custom_handler]

374

)

375

```

### Async Callback with Iterator

```python
from langchain.callbacks import AsyncIteratorCallbackHandler
from langchain_openai import ChatOpenAI
import asyncio

async def stream_chat():
    # Create async iterator callback
    callback = AsyncIteratorCallbackHandler()

    # Create chat model with streaming
    chat = ChatOpenAI(
        temperature=0.7,
        streaming=True,
        callbacks=[callback]
    )

    # Start generation task
    task = asyncio.create_task(
        chat.agenerate([["Tell me a story about robots"]])
    )

    # Stream tokens as they arrive
    async for token in callback.aiter():
        print(token, end="", flush=True)

    await task

# Run async streaming
asyncio.run(stream_chat())
```

### LangSmith Tracing

```python
import os
from langchain_core.tracers.context import tracing_v2_enabled

# Set up LangSmith (requires API key)
os.environ["LANGCHAIN_TRACING_V2"] = "true"
os.environ["LANGCHAIN_API_KEY"] = "your-api-key"
os.environ["LANGCHAIN_PROJECT"] = "my-project"

# Check if tracing is enabled
if tracing_v2_enabled():
    print("LangSmith tracing is active")

# All chains/agents will now automatically send traces to LangSmith
chain = LLMChain(llm=OpenAI(temperature=0), prompt=prompt)
result = chain.run(topic="AI applications")
```

### Multiple Callback Handlers

```python
from langchain.callbacks import (
    StdOutCallbackHandler,
    FileCallbackHandler,
    StreamingStdOutCallbackHandler
)

# Create multiple handlers
stdout_handler = StdOutCallbackHandler()
file_handler = FileCallbackHandler("langchain.log")
streaming_handler = StreamingStdOutCallbackHandler()

# Use multiple callbacks
chain = LLMChain(
    llm=OpenAI(temperature=0, streaming=True),
    prompt=prompt,
    callbacks=[stdout_handler, file_handler, streaming_handler]
)

# All handlers will receive events
result = chain.run(topic="renewable energy")
```

### Callback Context Manager

```python
from langchain_community.callbacks import get_openai_callback

# Track OpenAI API usage
with get_openai_callback() as cb:
    # Multiple operations within context
    result1 = chain.run(topic="solar power")
    result2 = chain.run(topic="wind energy")

    # Access usage statistics
    print(f"Total tokens: {cb.total_tokens}")
    print(f"Total cost: ${cb.total_cost}")
    print(f"Successful requests: {cb.successful_requests}")
```

### Global Verbose Mode

```python
from langchain.globals import set_verbose, set_debug

# Enable global verbose mode for all components
set_verbose(True)
set_debug(True)

# All chains and agents will now log detailed information
chain = LLMChain(llm=OpenAI(temperature=0), prompt=prompt)
result = chain.run(topic="climate change")

# Disable verbose mode
set_verbose(False)
```