# Integration Support

Pre-built integrations for popular AI frameworks provide automatic instrumentation with minimal configuration, enabling seamless observability for existing AI applications.

## Capabilities

### OpenAI Integration

Drop-in replacement for the OpenAI SDK with automatic Langfuse tracing for all API calls.

```python { .api }
# Import replacements for automatic tracing
from langfuse.openai import OpenAI, AsyncOpenAI, AzureOpenAI, AsyncAzureOpenAI
from langfuse.openai import openai  # Patched openai module

# All standard OpenAI classes with automatic tracing
class OpenAI:
    """Drop-in replacement for openai.OpenAI with automatic Langfuse tracing."""

class AsyncOpenAI:
    """Drop-in replacement for openai.AsyncOpenAI with automatic tracing."""

class AzureOpenAI:
    """Drop-in replacement for openai.AzureOpenAI with automatic tracing."""

class AsyncAzureOpenAI:
    """Drop-in replacement for openai.AsyncAzureOpenAI with automatic tracing."""
```
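
The second import style patches the `openai` module itself, which suits codebases that reference the module rather than a client class. A minimal sketch of that style, assuming the patched module mirrors the standard `openai` package:

```python
# Use the patched module where existing code references `openai` directly
from langfuse.openai import openai

client = openai.OpenAI(api_key="your-openai-key")
response = client.chat.completions.create(
    model="gpt-4",
    messages=[{"role": "user", "content": "Ping"}]
)
# Calls made through the patched module are traced the same way
```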

**Features:**

- Automatic generation-type spans for completions and chat completions
- Automatic capture of model parameters, token usage, and costs
- Support for streaming responses with proper span lifecycle management
- Error handling and status tracking (see the error-handling sketch below)
- Compatible with all OpenAI SDK features and methods
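
Errors need no extra handling code: when a call raises, the wrapper is expected to record the error status on the generation span and re-raise. A minimal sketch, assuming the traced client propagates the standard OpenAI exception types unchanged:

```python
import openai  # standard package, for the exception types
from langfuse.openai import OpenAI

client = OpenAI(api_key="your-openai-key")

try:
    response = client.chat.completions.create(
        model="gpt-4",
        messages=[{"role": "user", "content": "Hello"}]
    )
except openai.APIError as exc:
    # The failed call is still traced; the generation span carries
    # the error status and message
    print(f"Request failed (and was traced): {exc}")
```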

### LangChain Integration

Callback handler for comprehensive LangChain application tracing.

```python { .api }
class CallbackHandler:
    def __init__(self, *, public_key: str = None, secret_key: str = None,
                 host: str = None, tracing_enabled: bool = True,
                 environment: str = None, **kwargs):
        """Initialize Langfuse callback handler for LangChain.

        Args:
            public_key: Langfuse public key (optional if set via env)
            secret_key: Langfuse secret key (optional if set via env)
            host: Langfuse host URL
            tracing_enabled: Enable/disable tracing
            environment: Environment tag for traces
            **kwargs: Additional configuration options
        """
```

**Features:**

- Automatic tracing of LangChain chains, agents, and tools
- Proper observation type mapping (chain → chain, tool → tool, etc.; see the sketch below)
- Token usage and cost tracking for LLM calls
- Support for complex nested chains and agent workflows
- Integration with LangChain's callback system
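
The observation type mapping shows up directly in nested workflows: each chain run becomes a chain-type span and each LLM call a generation-type span. A minimal sketch using a two-step `SimpleSequentialChain` (an illustrative composition, not taken from this document):

```python
from langchain.chains import LLMChain, SimpleSequentialChain
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
from langfuse.langchain import CallbackHandler

llm = OpenAI(temperature=0.7)

outline_chain = LLMChain(llm=llm, prompt=PromptTemplate(
    input_variables=["topic"],
    template="Outline a short article about {topic}"
))
draft_chain = LLMChain(llm=llm, prompt=PromptTemplate(
    input_variables=["outline"],
    template="Write a draft based on this outline:\n{outline}"
))

# Two chained steps under one parent chain
pipeline = SimpleSequentialChain(chains=[outline_chain, draft_chain])

langfuse_handler = CallbackHandler()
result = pipeline.run("vector databases", callbacks=[langfuse_handler])

# Expected trace shape:
# - chain span (SimpleSequentialChain)
#   - chain span (outline step) -> generation span (LLM call)
#   - chain span (draft step) -> generation span (LLM call)
```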

## Usage Examples

### OpenAI Integration

```python
# Before (standard OpenAI)
# import openai
# client = openai.OpenAI(api_key="your-key")

# After (with Langfuse tracing)
from langfuse.openai import OpenAI

client = OpenAI(
    api_key="your-openai-key"
    # Langfuse configuration via environment variables:
    # LANGFUSE_PUBLIC_KEY, LANGFUSE_SECRET_KEY, LANGFUSE_HOST
)

# All API calls automatically traced
response = client.chat.completions.create(
    model="gpt-4",
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "What is the capital of France?"}
    ],
    temperature=0.7,
    max_tokens=150
)

print(response.choices[0].message.content)
# Automatically creates:
# - Generation-type span with model="gpt-4"
# - Usage details (prompt_tokens, completion_tokens, total_tokens)
# - Model parameters (temperature=0.7, max_tokens=150)
# - Input (messages) and output (response)
```

### Async OpenAI Integration

```python
import asyncio
from langfuse.openai import AsyncOpenAI

async def main():
    client = AsyncOpenAI(api_key="your-openai-key")

    # Async calls automatically traced
    response = await client.chat.completions.create(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "Hello!"}]
    )

    print(response.choices[0].message.content)

asyncio.run(main())
```
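
Since each request is traced independently, async calls can be fanned out concurrently and still yield one generation span apiece. A minimal sketch using `asyncio.gather` under that assumption:

```python
import asyncio
from langfuse.openai import AsyncOpenAI

async def ask(client, question):
    response = await client.chat.completions.create(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": question}]
    )
    return response.choices[0].message.content

async def main():
    client = AsyncOpenAI(api_key="your-openai-key")
    questions = ["What is a trace?", "What is a span?", "What is a generation?"]
    # Each concurrent call gets its own traced generation span
    answers = await asyncio.gather(*(ask(client, q) for q in questions))
    for answer in answers:
        print(answer)

asyncio.run(main())
```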

### OpenAI Streaming with Tracing

```python
from langfuse.openai import OpenAI

client = OpenAI(api_key="your-openai-key")

# Streaming responses automatically traced
stream = client.chat.completions.create(
    model="gpt-4",
    messages=[{"role": "user", "content": "Tell me a story"}],
    stream=True
)

full_response = ""
for chunk in stream:
    if chunk.choices[0].delta.content is not None:
        content = chunk.choices[0].delta.content
        print(content, end="")
        full_response += content

# Complete response automatically captured in trace
```

### Azure OpenAI Integration

```python
from langfuse.openai import AzureOpenAI

client = AzureOpenAI(
    api_key="your-azure-key",
    api_version="2023-12-01-preview",
    azure_endpoint="https://your-resource.openai.azure.com/"
)

# Works exactly like OpenAI with automatic tracing
response = client.chat.completions.create(
    model="gpt-4",  # Your deployment name
    messages=[{"role": "user", "content": "Hello Azure!"}]
)
```

### LangChain Integration

```python
from langchain.chains import LLMChain
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
from langfuse.langchain import CallbackHandler

# Initialize Langfuse callback handler
langfuse_handler = CallbackHandler(
    public_key="your-public-key",
    secret_key="your-secret-key"
)

# Create LangChain components
llm = OpenAI(temperature=0.7)
prompt = PromptTemplate(
    input_variables=["topic"],
    template="Write a short poem about {topic}"
)
chain = LLMChain(llm=llm, prompt=prompt)

# Run with Langfuse tracing
result = chain.run(
    topic="artificial intelligence",
    callbacks=[langfuse_handler]
)

print(result)
# Automatically creates:
# - Chain-type span for the LLMChain
# - Generation-type span for the OpenAI LLM call
# - Proper parent-child relationships
# - Input/output capture at each level
```

### LangChain Agent Tracing

```python
from langchain.agents import initialize_agent, Tool
from langchain.agents import AgentType
from langchain.llms import OpenAI
from langfuse.langchain import CallbackHandler

# Define tools
def search_tool(query):
    # Your search implementation
    return f"Search results for: {query}"

def calculator_tool(expression):
    # Your calculator implementation
    # (note: eval is unsafe on untrusted input; shown here for brevity)
    return str(eval(expression))

tools = [
    Tool(
        name="Search",
        func=search_tool,
        description="Useful for searching information"
    ),
    Tool(
        name="Calculator",
        func=calculator_tool,
        description="Useful for mathematical calculations"
    )
]

# Initialize agent with Langfuse callback
llm = OpenAI(temperature=0)
agent = initialize_agent(
    tools,
    llm,
    agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
    verbose=True
)

langfuse_handler = CallbackHandler()

# Run agent with automatic tracing
response = agent.run(
    "What is the square root of 144?",
    callbacks=[langfuse_handler]
)

# Automatically creates:
# - Agent-type span for the overall agent execution
# - Tool-type spans for each tool call
# - Generation-type spans for LLM reasoning steps
# - Proper hierarchical trace structure
```

### Combining Multiple Integrations

```python
from langfuse import Langfuse, observe
from langfuse.openai import OpenAI
from langfuse.langchain import CallbackHandler

# Initialize Langfuse client for manual tracing
langfuse = Langfuse()

# OpenAI client with automatic tracing
openai_client = OpenAI(api_key="your-key")

# LangChain callback handler
langfuse_handler = CallbackHandler()

def post_process_response(text):
    # Placeholder for custom post-processing logic
    return text.strip()

@observe(as_type="chain")
def complex_workflow(user_query):
    """Workflow combining manual tracing, OpenAI, and LangChain."""

    # Step 1: Direct OpenAI call (automatically traced)
    initial_response = openai_client.chat.completions.create(
        model="gpt-4",
        messages=[{"role": "user", "content": f"Analyze this query: {user_query}"}]
    )

    analysis = initial_response.choices[0].message.content

    # Step 2: LangChain processing (automatically traced)
    from langchain.chains import LLMChain
    from langchain.prompts import PromptTemplate
    from langchain.llms import OpenAI as LangChainOpenAI

    llm = LangChainOpenAI()
    prompt = PromptTemplate(
        input_variables=["analysis", "query"],
        template="Based on this analysis: {analysis}\n\nProvide a detailed response to: {query}"
    )
    chain = LLMChain(llm=llm, prompt=prompt)

    detailed_response = chain.run(
        analysis=analysis,
        query=user_query,
        callbacks=[langfuse_handler]
    )

    # Step 3: Manual span for custom processing
    with langfuse.start_as_current_observation(name="post-processing", as_type="span") as span:
        final_result = post_process_response(detailed_response)
        span.update(output=final_result)

    return final_result

# Usage creates comprehensive trace with all integration types
result = complex_workflow("Explain quantum computing")
```
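
One practical note on script-style workflows like the one above: trace data is typically buffered, so short-lived processes should flush before exiting. This sketch assumes the client exposes a `flush()` method, as the Langfuse SDK does:

```python
# Continuing from the example above: flush buffered events so the
# trace is delivered before a short-lived process exits
langfuse.flush()
```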

### Custom Integration Configuration

```python
import os
from langfuse.openai import OpenAI
from langfuse.langchain import CallbackHandler

# Configure via environment variables
os.environ["LANGFUSE_PUBLIC_KEY"] = "your-public-key"
os.environ["LANGFUSE_SECRET_KEY"] = "your-secret-key"
os.environ["LANGFUSE_HOST"] = "https://cloud.langfuse.com"

# OpenAI client automatically picks up Langfuse config
openai_client = OpenAI(api_key="your-openai-key")

# LangChain handler also picks up config
langfuse_handler = CallbackHandler()

# Or configure explicitly
langfuse_handler = CallbackHandler(
    public_key="explicit-key",
    secret_key="explicit-secret",
    host="https://your-langfuse-instance.com",
    environment="production",
    tracing_enabled=True
)
```
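
Because the handler accepts a `tracing_enabled` flag, the toggle can itself be driven by the environment. A minimal sketch, where `LANGFUSE_TRACING` is a hypothetical variable name chosen for illustration:

```python
import os
from langfuse.langchain import CallbackHandler

# Drive the documented tracing_enabled flag from the environment
# (LANGFUSE_TRACING is a hypothetical variable name for this sketch)
langfuse_handler = CallbackHandler(
    tracing_enabled=os.getenv("LANGFUSE_TRACING", "true").lower() == "true",
    environment=os.getenv("ENVIRONMENT", "development")
)
```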

### Integration with Existing Observability

```python
import time

from langfuse import Langfuse, observe
from langfuse.openai import OpenAI

def assess_quality(text):
    # Placeholder for an automated quality heuristic
    return 1.0 if text else 0.0

# Custom wrapper for additional observability
class ObservabilityWrapper:
    def __init__(self):
        self.langfuse = Langfuse()
        self.openai_client = OpenAI()

    @observe(as_type="chain")
    def traced_workflow(self, input_data):
        """Workflow with comprehensive tracing."""

        # Custom metrics
        start_time = time.time()

        try:
            # OpenAI call (automatically traced as generation)
            response = self.openai_client.chat.completions.create(
                model="gpt-4",
                messages=[{"role": "user", "content": input_data}]
            )

            result = response.choices[0].message.content

            # Add custom scoring
            current_span = self.langfuse.get_current_observation()
            if current_span:
                execution_time = time.time() - start_time
                current_span.score(
                    name="execution_time",
                    value=execution_time,
                    comment=f"Execution took {execution_time:.2f} seconds"
                )

                # Quality assessment
                quality_score = assess_quality(result)
                current_span.score(
                    name="quality",
                    value=quality_score,
                    comment="Automated quality assessment"
                )

            return result

        except Exception:
            # Error automatically captured by @observe decorator
            raise

# Usage
wrapper = ObservabilityWrapper()
result = wrapper.traced_workflow("What is machine learning?")
```

### Migration from Existing Instrumentations

```python
# Before: Using standard libraries without tracing
"""
import openai
from langchain.chains import LLMChain

client = openai.OpenAI()
response = client.chat.completions.create(...)
"""

# After: Drop-in replacement with automatic tracing
from langfuse.openai import OpenAI  # Just change the import
from langchain.chains import LLMChain
from langfuse.langchain import CallbackHandler

client = OpenAI()  # Everything else stays the same
response = client.chat.completions.create(...)

# Add LangChain tracing with callback
langfuse_handler = CallbackHandler()
chain = LLMChain(...)
result = chain.run(..., callbacks=[langfuse_handler])  # Just add callbacks parameter
```

### Integration Best Practices

```python
# 1. Environment-based configuration
import os

from langfuse.langchain import CallbackHandler

class LangfuseConfig:
    """Centralized configuration management."""

    @classmethod
    def setup_environment(cls):
        required_vars = [
            "LANGFUSE_PUBLIC_KEY",
            "LANGFUSE_SECRET_KEY"
        ]

        for var in required_vars:
            if not os.getenv(var):
                raise ValueError(f"Missing required environment variable: {var}")

    @classmethod
    def get_callback_handler(cls):
        cls.setup_environment()
        return CallbackHandler(
            environment=os.getenv("ENVIRONMENT", "development")
        )

# 2. Conditional tracing for different environments
def get_openai_client():
    if os.getenv("ENVIRONMENT") == "production":
        from langfuse.openai import OpenAI
    else:
        import openai
        OpenAI = openai.OpenAI

    return OpenAI(api_key=os.getenv("OPENAI_API_KEY"))

# 3. Integration testing
def test_integrations():
    """Test that integrations work correctly."""
    from langfuse.openai import OpenAI

    # Test OpenAI integration
    client = OpenAI(api_key="test-key")
    assert hasattr(client, 'chat')

    # Test LangChain integration
    handler = CallbackHandler()
    assert handler is not None

    print("All integrations working correctly")

test_integrations()
```