or run

npx @tessl/cli init
Log in

Version

Tile

Overview

Evals

Files

Files

docs

client.md · datasets.md · experiments.md · index.md · prompts.md · sdk-integration.md · spans.md

docs/spans.md

# Span Tracing and Annotation

Tracing data access and annotation system for observability, debugging, and evaluation of AI applications with comprehensive span management and annotation capabilities.

## Capabilities

### Span Retrieval

Query and retrieve spans with flexible filtering options including time bounds and pagination.

```typescript { .api }
/**
 * Get spans from a project with filtering criteria
 * @param params - Span retrieval parameters
 * @returns Promise resolving to spans and pagination info
 */
function getSpans(params: {
  client?: PhoenixClient;
  project: ProjectSelector;
  startTime?: Date | string | null;
  endTime?: Date | string | null;
  cursor?: string | null;
  limit?: number;
}): Promise<GetSpansResult>;

interface ProjectSelector {
  projectId?: string;
  projectName?: string;
}

interface GetSpansResult {
  spans: Span[];
  nextCursor: string | null;
}

interface Span {
  context: SpanContext;
  name: string;
  span_kind: SpanKind;
  parent_id?: string | null;
  start_time: string;
  end_time?: string | null;
  status_code: SpanStatusCode;
  status_message?: string | null;
  attributes: Record<string, any>;
  events: SpanEvent[];
  cumulative_error_count: number;
  cumulative_llm_token_count_prompt: number;
  cumulative_llm_token_count_completion: number;
}

interface SpanContext {
  trace_id: string;
  span_id: string;
}

type SpanKind = "INTERNAL" | "SERVER" | "CLIENT" | "PRODUCER" | "CONSUMER" | "CHAIN" | "TOOL" | "LLM" | "RETRIEVER" | "EMBEDDING";

type SpanStatusCode = "UNSET" | "OK" | "ERROR";

interface SpanEvent {
  name: string;
  timestamp: string;
  attributes: Record<string, any>;
}
```

**Usage Examples:**

```typescript
import { getSpans } from "@arizeai/phoenix-client/spans";

// Get recent spans
const result = await getSpans({
  project: { projectName: "my-ai-app" },
  limit: 100
});

// Get spans with time filtering
const filtered = await getSpans({
  project: { projectId: "proj_123" },
  startTime: new Date("2024-01-01T00:00:00Z"),
  endTime: new Date("2024-01-31T23:59:59Z"),
  limit: 50
});

// Paginate through spans
let cursor = null;
do {
  const page = await getSpans({
    project: { projectName: "my-ai-app" },
    cursor,
    limit: 100
  });

  for (const span of page.spans) {
    console.log(`Span: ${span.name}, Status: ${span.status_code}`);
  }

  cursor = page.nextCursor;
} while (cursor);
```

### Span Annotation

Add individual annotations to spans for evaluation, debugging, or quality assessment.

```typescript { .api }
/**
 * Add an annotation to a specific span
 * @param params - Span annotation parameters
 * @returns Promise resolving when annotation is added
 */
function addSpanAnnotation(params: {
  client?: PhoenixClient;
  spanId: string;
  name: string;
  label?: string | null;
  score?: number | null;
  explanation?: string | null;
  annotatorKind?: AnnotatorKind;
  metadata?: Record<string, unknown>;
}): Promise<void>;

type AnnotatorKind = "HUMAN" | "LLM" | "HEURISTIC";
```

**Usage Example:**

```typescript
import { addSpanAnnotation } from "@arizeai/phoenix-client/spans";

// Add a quality score annotation
await addSpanAnnotation({
  spanId: "span_123456",
  name: "response_quality",
  score: 4.2,
  label: "good",
  explanation: "Response was accurate and helpful but could be more concise",
  annotatorKind: "HUMAN",
  metadata: {
    reviewer: "alice@company.com",
    review_date: new Date().toISOString()
  }
});

// Add a thumbs up/down annotation
await addSpanAnnotation({
  spanId: "span_789012",
  name: "user_feedback",
  label: "thumbs_up",
  annotatorKind: "HUMAN"
});
```

### Batch Span Annotation

Log multiple span annotations efficiently in a single operation.

```typescript { .api }
/**
 * Log multiple span annotations in batch
 * @param params - Batch annotation parameters
 * @returns Promise resolving when all annotations are added
 */
function logSpanAnnotations(params: {
  client?: PhoenixClient;
  annotations: SpanAnnotation[];
}): Promise<void>;

interface SpanAnnotation {
  spanId: string;
  name: string;
  label?: string | null;
  score?: number | null;
  explanation?: string | null;
  annotatorKind?: AnnotatorKind;
  metadata?: Record<string, unknown>;
}
```

**Usage Example:**

```typescript
import { logSpanAnnotations } from "@arizeai/phoenix-client/spans";

const annotations: SpanAnnotation[] = [
  {
    spanId: "span_123",
    name: "accuracy",
    score: 0.85,
    annotatorKind: "LLM",
    metadata: { model: "gpt-4o" }
  },
  {
    spanId: "span_456",
    name: "helpfulness",
    label: "helpful",
    score: 4.0,
    annotatorKind: "HUMAN"
  },
  {
    spanId: "span_789",
    name: "toxicity",
    score: 0.05,
    label: "safe",
    annotatorKind: "HEURISTIC"
  }
];

await logSpanAnnotations({ annotations });
```

### Document Annotation

Add annotations to document spans for retrieval and document-level evaluation.

```typescript { .api }
/**
 * Add an annotation to a document span
 * @param params - Document annotation parameters
 * @returns Promise resolving when annotation is added
 */
function addDocumentAnnotation(params: {
  client?: PhoenixClient;
  documentAnnotation: DocumentAnnotation;
  sync?: boolean;
}): Promise<string | null>;

/**
 * Log multiple document annotations in batch
 * @param params - Batch document annotation parameters
 * @returns Promise resolving when all annotations are added
 */
function logDocumentAnnotations(params: {
  client?: PhoenixClient;
  annotations: DocumentAnnotation[];
}): Promise<void>;

interface DocumentAnnotation {
  spanId: string;
  documentPosition: number;
  name: string;
  label?: string | null;
  score?: number | null;
  explanation?: string | null;
  annotatorKind?: AnnotatorKind;
  metadata?: Record<string, unknown>;
}
```

**Usage Examples:**

```typescript
import { addDocumentAnnotation, logDocumentAnnotations } from "@arizeai/phoenix-client/spans";

// Annotate relevance of a retrieved document
await addDocumentAnnotation({
  documentAnnotation: {
    spanId: "retrieval_span_123",
    documentPosition: 0, // First document in results
    name: "relevance",
    score: 0.92,
    label: "relevant",
    explanation: "Document contains key information to answer the query",
    annotatorKind: "LLM"
  }
});

// Batch document annotations
const documentAnnotations: DocumentAnnotation[] = [
  {
    spanId: "retrieval_span_456",
    documentPosition: 0,
    name: "relevance",
    score: 0.88,
    annotatorKind: "LLM"
  },
  {
    spanId: "retrieval_span_456",
    documentPosition: 1,
    name: "relevance",
    score: 0.65,
    annotatorKind: "LLM"
  }
];

await logDocumentAnnotations({ annotations: documentAnnotations });
```

### Span Annotation Retrieval

Retrieve annotations associated with spans for analysis and evaluation.

```typescript { .api }
/**
 * Get annotations for specific spans
 * @param params - Span annotation retrieval parameters
 * @returns Promise resolving to span annotations
 */
function getSpanAnnotations(params: {
  client?: PhoenixClient;
  spanIds: string[];
  annotationNames?: string[];
}): Promise<SpanAnnotationResult[]>;

interface SpanAnnotationResult {
  spanId: string;
  annotations: AnnotationData[];
}

interface AnnotationData {
  name: string;
  label?: string | null;
  score?: number | null;
  explanation?: string | null;
  annotatorKind: AnnotatorKind;
  metadata?: Record<string, unknown>;
  createdAt: Date;
}
```

**Usage Example:**

```typescript
import { getSpanAnnotations } from "@arizeai/phoenix-client/spans";

const results = await getSpanAnnotations({
  spanIds: ["span_123", "span_456", "span_789"],
  annotationNames: ["quality", "relevance", "accuracy"]
});

for (const result of results) {
  console.log(`Span ${result.spanId}:`);
  for (const annotation of result.annotations) {
    console.log(`  ${annotation.name}: ${annotation.score} (${annotation.label})`);
  }
}
```

### Span Deletion

Delete spans from the system for data management and cleanup.

```typescript { .api }
/**
 * Delete a specific span
 * @param params - Span deletion parameters
 * @returns Promise resolving when span is deleted
 */
function deleteSpan(params: {
  client?: PhoenixClient;
  spanId: string;
}): Promise<void>;
```

**Usage Example:**

```typescript
import { deleteSpan } from "@arizeai/phoenix-client/spans";

// Delete a span (use carefully!)
await deleteSpan({
  spanId: "span_to_delete_123"
});
```

### Annotation Use Cases

Common patterns and use cases for span annotations.

**Quality Assessment:**

```typescript
// Human quality review
await addSpanAnnotation({
  spanId: "llm_response_span",
  name: "response_quality",
  score: 4.2,
  label: "good",
  explanation: "Accurate and well-structured response",
  annotatorKind: "HUMAN"
});
```

**Automated Evaluation:**

```typescript
// LLM-as-judge evaluation
await addSpanAnnotation({
  spanId: "generation_span",
  name: "faithfulness",
  score: 0.87,
  explanation: "Response is mostly faithful to the source material",
  annotatorKind: "LLM",
  metadata: { judge_model: "gpt-4o", prompt_version: "v2.1" }
});
```

**User Feedback:**

```typescript
// Direct user feedback
await addSpanAnnotation({
  spanId: "chat_response_span",
  name: "user_satisfaction",
  label: "satisfied",
  score: 5.0,
  annotatorKind: "HUMAN",
  metadata: { user_id: "user_123", feedback_type: "explicit" }
});
```

**Retrieval Evaluation:**

```typescript
// Document relevance for RAG systems
await addDocumentAnnotation({
  documentAnnotation: {
    spanId: "retrieval_span",
    documentPosition: 0,
    name: "relevance",
    score: 0.95,
    label: "highly_relevant",
    annotatorKind: "LLM"
  }
});
```

**Toxicity Detection:**

```typescript
// Automated safety evaluation
await addSpanAnnotation({
  spanId: "user_input_span",
  name: "toxicity",
  score: 0.02,
  label: "safe",
  annotatorKind: "HEURISTIC",
  metadata: { detector: "perspective_api", threshold: 0.7 }
});
```

### Best Practices

- **Consistent Naming**: Use consistent annotation names across your application
- **Meaningful Scores**: Use scales that make sense for your evaluation criteria
- **Detailed Explanations**: Provide explanations for human reviewers and debugging
- **Metadata**: Include relevant context information in metadata
- **Batch Operations**: Use batch annotation functions for efficiency
- **Annotation Strategy**: Plan your annotation strategy based on evaluation needs
- **Data Governance**: Consider data retention and privacy policies for annotations