or run

npx @tessl/cli init
Log in

Version

Tile

Overview

Evals

Files

Files

docs

client.md · datasets.md · experiments.md · index.md · prompts.md · sdk-integration.md · spans.md

docs/sdk-integration.md

0

# SDK Format Conversion

1

2

Format conversion utilities for seamless integration with popular LLM provider SDKs including OpenAI, Anthropic, and Vercel AI SDK. Enables easy conversion of Phoenix prompts to SDK-specific formats.

3

4

## Capabilities

5

6

### Universal SDK Conversion

7

8

Convert Phoenix prompts to various SDK formats with variable substitution support.

9

10

```typescript { .api }

11

/**

12

* Convert a Phoenix prompt to a specific SDK format

13

* @param params - SDK conversion parameters including prompt, variables, and target SDK

14

* @returns SDK-specific format ready for use

15

*/

16

function toSDK<T extends SupportedSDK, V extends Variables = Variables>(

17

params: ToSDKParams<T, V> & SDKParams<T>

18

): ReturnType<SDKConverter<T>>;

19

20

type ToSDKParams<T extends SupportedSDK, V extends Variables = Variables> = {

21

sdk: T;

22

variables?: V;

23

};

24

25

type SDKParams<T extends SupportedSDK> = {

26

prompt: PromptVersion;

27

};

28

29

type SupportedSDK = "openai" | "anthropic" | "ai";

30

31

type Variables = Record<string, string | { toString: () => string }>;

32

```

33

34

**Usage Example:**

35

36

```typescript

37

import { toSDK } from "@arizeai/phoenix-client/prompts";

38

import { getPrompt } from "@arizeai/phoenix-client/prompts";

39

40

// Get a prompt from Phoenix

41

const prompt = await getPrompt({

42

prompt: { name: "customer-support", tag: "production" }

43

});

44

45

if (prompt) {

46

// Convert to OpenAI format

47

const openaiFormat = toSDK({

48

prompt,

49

variables: {

50

user_name: "Alice",

51

issue_type: "billing"

52

},

53

sdk: "openai"

54

});

55

56

// Use with OpenAI SDK

57

import OpenAI from "openai";

58

const openai = new OpenAI();

59

60

const response = await openai.chat.completions.create({

61

model: prompt.model_name,

62

...openaiFormat,

63

...prompt.invocation_parameters.openai

64

});

65

}

66

```

67

68

### OpenAI SDK Integration

69

70

Convert Phoenix prompts to OpenAI SDK format with full parameter support.

71

72

```typescript { .api }

73

/**

74

* Convert Phoenix prompt to OpenAI SDK format

75

* @param params - OpenAI conversion parameters

76

* @returns OpenAI-compatible chat completion format

77

*/

78

function toOpenAI(params: {

79

prompt: PromptVersion;

80

variables?: Variables;

81

}): OpenAIFormat;

82

83

interface OpenAIFormat {

84

messages: OpenAIMessage[];

85

}

86

87

interface OpenAIMessage {

88

role: "system" | "user" | "assistant" | "tool";

89

content: string;

90

name?: string;

91

tool_call_id?: string;

92

tool_calls?: any[];

93

}

94

```

95

96

**Usage Examples:**

97

98

```typescript

99

import { toOpenAI } from "@arizeai/phoenix-client/prompts";

100

import OpenAI from "openai";

101

102

const prompt = await getPrompt({ prompt: { name: "qa-assistant" } });

103

const openai = new OpenAI();

104

105

// Convert and use

106

const { messages } = toOpenAI({

107

prompt,

108

variables: { question: "What is machine learning?" }

109

});

110

111

const response = await openai.chat.completions.create({

112

model: prompt.model_name,

113

messages,

114

temperature: prompt.invocation_parameters.openai?.temperature ?? 0.7

115

});

116

117

console.log(response.choices[0].message.content);

118

```

119

120

### Anthropic SDK Integration

121

122

Convert Phoenix prompts to Anthropic SDK format with Claude-specific formatting.

123

124

```typescript { .api }

125

/**

126

* Convert Phoenix prompt to Anthropic SDK format

127

* @param params - Anthropic conversion parameters

128

* @returns Anthropic-compatible message format

129

*/

130

function toAnthropic(params: {

131

prompt: PromptVersion;

132

variables?: Variables;

133

}): AnthropicFormat;

134

135

interface AnthropicFormat {

136

messages: AnthropicMessage[];

137

system?: string;

138

}

139

140

interface AnthropicMessage {

141

role: "user" | "assistant";

142

content: string | AnthropicContentBlock[];

143

}

144

145

interface AnthropicContentBlock {

146

type: "text" | "image";

147

text?: string;

148

source?: {

149

type: "base64";

150

media_type: string;

151

data: string;

152

};

153

}

154

```

155

156

**Usage Examples:**

157

158

```typescript

159

import { toAnthropic } from "@arizeai/phoenix-client/prompts";

160

import Anthropic from "@anthropic-ai/sdk";

161

162

const prompt = await getPrompt({ prompt: { name: "writing-assistant" } });

163

const anthropic = new Anthropic();

164

165

// Convert and use

166

const { messages, system } = toAnthropic({

167

prompt,

168

variables: {

169

topic: "sustainable energy",

170

tone: "professional"

171

}

172

});

173

174

const response = await anthropic.messages.create({

175

model: prompt.model_name,

176

max_tokens: prompt.invocation_parameters.anthropic.max_tokens,

177

system,

178

messages,

179

temperature: prompt.invocation_parameters.anthropic.temperature ?? 0.7

180

});

181

182

console.log(response.content[0].text);

183

```

184

185

### Vercel AI SDK Integration

186

187

Convert Phoenix prompts to Vercel AI SDK format for streamlined AI application development.

188

189

```typescript { .api }

190

/**

191

* Convert Phoenix prompt to Vercel AI SDK format

192

* @param params - Vercel AI conversion parameters

193

* @returns Vercel AI-compatible message format

194

*/

195

function toAI(params: {

196

prompt: PromptVersion;

197

variables?: Variables;

198

}): VercelAIFormat;

199

200

interface VercelAIFormat {

201

messages: VercelAIMessage[];

202

}

203

204

interface VercelAIMessage {

205

role: "system" | "user" | "assistant" | "tool";

206

content: string;

207

name?: string;

208

toolInvocations?: any[];

209

}

210

```

211

212

**Usage Examples:**

213

214

```typescript

215

import { toAI } from "@arizeai/phoenix-client/prompts";

216

import { generateText } from "ai";

217

import { openai } from "@ai-sdk/openai";

218

219

const prompt = await getPrompt({ prompt: { name: "code-assistant" } });

220

221

// Convert and use with Vercel AI SDK

222

const { messages } = toAI({

223

prompt,

224

variables: {

225

programming_language: "TypeScript",

226

task_description: "Create a REST API endpoint"

227

}

228

});

229

230

const { text } = await generateText({

231

model: openai(prompt.model_name),

232

messages,

233

temperature: prompt.invocation_parameters.openai?.temperature ?? 0.7

234

});

235

236

console.log(text);

237

```

238

239

### Variable Substitution

240

241

All SDK conversion functions support Mustache-style variable substitution in prompt templates.

242

243

**Variable Syntax:**

244

245

- `{{variable_name}}` - Simple variable substitution

246

- `{{#if variable}}...{{/if}}` - Conditional blocks (where supported)

247

- `{{#each items}}...{{/each}}` - Iteration blocks (where supported)

248

249

**Variable Types:**

250

251

```typescript

252

interface Variables {

253

[key: string]: string | number | boolean | string[] | Record<string, any>;

254

}

255

```

256

257

**Usage Examples:**

258

259

```typescript

260

// Simple variable substitution

261

const result = toSDK({

262

prompt,

263

variables: {

264

user_name: "Alice",

265

temperature_value: 0.7,

266

is_premium_user: true

267

},

268

sdk: "openai"

269

});

270

271

// Complex variable substitution

272

const result = toSDK({

273

prompt,

274

variables: {

275

user_profile: {

276

name: "Alice",

277

preferences: ["tech", "science"],

278

subscription: "premium"

279

},

280

conversation_context: [

281

"Previous question about AI",

282

"User interested in machine learning"

283

]

284

},

285

sdk: "anthropic"

286

});

287

```

288

289

### SDK-Specific Parameters

290

291

Each SDK conversion maintains compatibility with provider-specific parameters and features.

292

293

**OpenAI Parameters:**

294

295

```typescript

296

// Phoenix prompt with OpenAI parameters

297

const prompt = await createPrompt({

298

name: "openai-optimized",

299

version: promptVersion({

300

modelProvider: "OPENAI",

301

modelName: "gpt-4o",

302

template: [...],

303

invocationParameters: {

304

temperature: 0.7,

305

max_tokens: 2000,

306

top_p: 0.9,

307

frequency_penalty: 0.1,

308

presence_penalty: 0.1,

309

response_format: { type: "json_object" },

310

tools: [...] // Function calling tools

311

}

312

})

313

});

314

315

const openaiFormat = toOpenAI({ prompt });

316

// Use all parameters when calling OpenAI

317

```

318

319

**Anthropic Parameters:**

320

321

```typescript

322

// Phoenix prompt with Anthropic parameters

323

const prompt = await createPrompt({

324

name: "anthropic-optimized",

325

version: promptVersion({

326

modelProvider: "ANTHROPIC",

327

modelName: "claude-3-5-sonnet-20241022",

328

template: [...],

329

invocationParameters: {

330

max_tokens: 4000, // Required

331

temperature: 0.5,

332

top_p: 0.9,

333

top_k: 40,

334

stop_sequences: ["</thinking>"]

335

}

336

})

337

});

338

339

const anthropicFormat = toAnthropic({ prompt });

340

// Parameters preserved for Anthropic API calls

341

```

342

343

### Integration Patterns

344

345

Common patterns for integrating Phoenix prompts with different SDKs.

346

347

**Dynamic Model Selection:**

348

349

```typescript

350

const prompt = await getPrompt({ prompt: { name: "multi-provider-prompt" } });

351

352

// Route to appropriate SDK based on model provider

353

switch (prompt.model_provider) {

354

case "OPENAI":

355

const openaiResult = toOpenAI({ prompt, variables });

356

return await callOpenAI(openaiResult);

357

358

case "ANTHROPIC":

359

const anthropicResult = toAnthropic({ prompt, variables });

360

return await callAnthropic(anthropicResult);

361

362

case "GOOGLE":

363

// Handle Google models

364

break;

365

}

366

```

367

368

**A/B Testing with Prompts:**

369

370

```typescript

371

const promptA = await getPrompt({ prompt: { name: "variant-a" } });

372

const promptB = await getPrompt({ prompt: { name: "variant-b" } });

373

374

const variant = Math.random() < 0.5 ? promptA : promptB;

375

const format = toSDK({ prompt: variant, variables, sdk: "openai" });

376

377

// Track which variant was used for analysis

378

await trackExperiment({

379

variant: variant.id,

380

format: format

381

});

382

```

383

384

**Prompt Caching:**

385

386

```typescript

387

// Cache converted formats for performance

388

const cache = new Map<string, any>();

389

390

async function getCachedPrompt(promptId: string, variables: Variables, sdk: SupportedSDK) {

391

const key = `${promptId}-${JSON.stringify(variables)}-${sdk}`;

392

393

if (!cache.has(key)) {

394

const prompt = await getPrompt({ prompt: { promptId } });

395

const format = toSDK({ prompt, variables, sdk });

396

cache.set(key, format);

397

}

398

399

return cache.get(key);

400

}

401

```

402

403

### Best Practices

404

405

- **Variable Validation**: Validate that all required variables are provided

406

- **Format Testing**: Test converted formats with target SDKs in development

407

- **Parameter Compatibility**: Ensure model parameters are compatible with target providers

408

- **Error Handling**: Handle conversion errors gracefully

409

- **Performance**: Cache converted prompts when appropriate

410

- **Version Control**: Track prompt versions used in production

411

- **Provider Fallbacks**: Implement fallback logic for different providers