A TypeScript client for the Phoenix API providing AI observability, prompt management, datasets, experiments, and tracing capabilities.
Tracing data access and annotation system for observability, debugging, and evaluation of AI applications with comprehensive span management and annotation capabilities.
Query and retrieve spans with flexible filtering options including time bounds and pagination.
/**
 * Get spans from a project with filtering criteria
 * @param params - Span retrieval parameters
 * @param params.client - Optional Phoenix client; presumably a default client is used when omitted — TODO confirm
 * @param params.project - Target project, selected by `projectId` or `projectName`
 * @param params.startTime - Lower time bound (Date or ISO string); null/omitted means unbounded
 * @param params.endTime - Upper time bound (Date or ISO string); null/omitted means unbounded
 * @param params.cursor - Opaque pagination cursor from a previous result's `nextCursor`
 * @param params.limit - Maximum number of spans returned per page
 * @returns Promise resolving to spans and pagination info (`nextCursor` is null on the last page)
 */
function getSpans(params: {
client?: PhoenixClient;
project: ProjectSelector;
startTime?: Date | string | null;
endTime?: Date | string | null;
cursor?: string | null;
limit?: number;
}): Promise<GetSpansResult>;
/**
 * Selects a project either by ID or by name.
 * NOTE(review): presumably exactly one of the two fields should be set — TODO confirm behavior when both are provided.
 */
interface ProjectSelector {
projectId?: string;
projectName?: string;
}
/** One page of span query results. */
interface GetSpansResult {
spans: Span[];
/** Cursor for fetching the next page; null when no further results exist. */
nextCursor: string | null;
}
/** A single trace span: identity, timing, status, attributes, events, and cumulative counters. */
interface Span {
/** Trace/span identifiers locating this span within its trace. */
context: SpanContext;
name: string;
span_kind: SpanKind;
/** Parent span ID; null or absent for root spans. */
parent_id?: string | null;
/** Start time — presumably an ISO 8601 string; TODO confirm exact format. */
start_time: string;
/** End time; null/absent presumably while the span is still in flight — TODO confirm. */
end_time?: string | null;
status_code: SpanStatusCode;
status_message?: string | null;
/** Arbitrary key/value span attributes. */
attributes: Record<string, any>;
events: SpanEvent[];
/** Counters aggregated over this span and its descendants (per field names — TODO confirm aggregation scope). */
cumulative_error_count: number;
cumulative_llm_token_count_prompt: number;
cumulative_llm_token_count_completion: number;
}
/** Identifiers locating a span within a trace. */
interface SpanContext {
trace_id: string;
span_id: string;
}
/** Span kind: standard OpenTelemetry kinds plus AI-specific kinds (CHAIN, TOOL, LLM, RETRIEVER, EMBEDDING). */
type SpanKind = "INTERNAL" | "SERVER" | "CLIENT" | "PRODUCER" | "CONSUMER" | "CHAIN" | "TOOL" | "LLM" | "RETRIEVER" | "EMBEDDING";
/** Span completion status, following the OpenTelemetry status model. */
type SpanStatusCode = "UNSET" | "OK" | "ERROR";
/** A timestamped event recorded on a span (e.g. an exception record). */
interface SpanEvent {
name: string;
/** Event time — presumably an ISO 8601 string; TODO confirm format. */
timestamp: string;
attributes: Record<string, any>;
}

Usage Examples:
import { getSpans } from "@arizeai/phoenix-client/spans";
// Get recent spans
const recentSpans = await getSpans({
project: { projectName: "my-ai-app" },
limit: 100
});
// Get spans with time filtering (Date objects or ISO strings are accepted)
const filteredSpans = await getSpans({
project: { projectId: "proj_123" },
startTime: new Date("2024-01-01T00:00:00Z"),
endTime: new Date("2024-01-31T23:59:59Z"),
limit: 50
});
// Paginate through spans until nextCursor comes back null.
// cursor must be typed string | null: a bare `let cursor = null` infers type `null`
// under strict mode and the reassignment below would not compile.
let cursor: string | null = null;
do {
const page = await getSpans({
project: { projectName: "my-ai-app" },
cursor,
limit: 100
});
for (const span of page.spans) {
console.log(`Span: ${span.name}, Status: ${span.status_code}`);
}
cursor = page.nextCursor;
} while (cursor);

Add individual annotations to spans for evaluation, debugging, or quality assessment.
/**
 * Add an annotation to a specific span
 * @param params - Span annotation parameters
 * @param params.client - Optional Phoenix client; presumably a default client is used when omitted — TODO confirm
 * @param params.spanId - ID of the span to annotate
 * @param params.name - Annotation name (e.g. "response_quality"); serves as the annotation's key
 * @param params.label - Optional categorical label (e.g. "good", "thumbs_up")
 * @param params.score - Optional numeric score
 * @param params.explanation - Optional free-text rationale for the label/score
 * @param params.annotatorKind - Source of the annotation: HUMAN, LLM, or HEURISTIC
 * @param params.metadata - Arbitrary extra metadata stored alongside the annotation
 * @returns Promise resolving when annotation is added
 */
function addSpanAnnotation(params: {
client?: PhoenixClient;
spanId: string;
name: string;
label?: string | null;
score?: number | null;
explanation?: string | null;
annotatorKind?: AnnotatorKind;
metadata?: Record<string, unknown>;
}): Promise<void>;
type AnnotatorKind = "HUMAN" | "LLM" | "HEURISTIC";

Usage Example:
import { addSpanAnnotation } from "@arizeai/phoenix-client/spans";
// Add a quality score annotation: human review combining a numeric score,
// a categorical label, a rationale, and reviewer metadata
await addSpanAnnotation({
spanId: "span_123456",
name: "response_quality",
score: 4.2,
label: "good",
explanation: "Response was accurate and helpful but could be more concise",
annotatorKind: "HUMAN",
metadata: {
reviewer: "alice@company.com",
review_date: new Date().toISOString()
}
});
// Add a thumbs up/down annotation — label-only feedback; score and explanation are optional
await addSpanAnnotation({
spanId: "span_789012",
name: "user_feedback",
label: "thumbs_up",
annotatorKind: "HUMAN"
});

Log multiple span annotations efficiently in a single operation.
/**
 * Log multiple span annotations in batch
 * Preferred over repeated addSpanAnnotation calls when annotating many spans at once.
 * @param params - Batch annotation parameters
 * @param params.client - Optional Phoenix client
 * @param params.annotations - Annotations to record; see SpanAnnotation for per-item fields
 * @returns Promise resolving when all annotations are added
 */
function logSpanAnnotations(params: {
client?: PhoenixClient;
annotations: SpanAnnotation[];
}): Promise<void>;
/** One span annotation for batch logging — the same fields addSpanAnnotation accepts, minus client. */
interface SpanAnnotation {
spanId: string;
name: string;
label?: string | null;
score?: number | null;
explanation?: string | null;
annotatorKind?: AnnotatorKind;
metadata?: Record<string, unknown>;
}

Usage Example:
import { logSpanAnnotations } from "@arizeai/phoenix-client/spans";
// One batch can mix annotator kinds: LLM judge, human review, and heuristic check
const annotations: SpanAnnotation[] = [
{
spanId: "span_123",
name: "accuracy",
score: 0.85,
annotatorKind: "LLM",
metadata: { model: "gpt-4o" }
},
{
spanId: "span_456",
name: "helpfulness",
label: "helpful",
score: 4.0,
annotatorKind: "HUMAN"
},
{
spanId: "span_789",
name: "toxicity",
score: 0.05,
label: "safe",
annotatorKind: "HEURISTIC"
}
];
await logSpanAnnotations({ annotations });

Add annotations to document spans for retrieval and document-level evaluation.
/**
 * Add an annotation to a document span
 * @param params - Document annotation parameters
 * @param params.client - Optional Phoenix client
 * @param params.documentAnnotation - The annotation payload, including the target spanId and documentPosition
 * @param params.sync - When true, presumably waits for the annotation to be persisted — TODO confirm semantics
 * @returns Promise resolving to the created annotation's ID, or null — presumably when the write is
 *          queued asynchronously (sync not set); TODO confirm when null is returned
 */
function addDocumentAnnotation(params: {
client?: PhoenixClient;
documentAnnotation: DocumentAnnotation;
sync?: boolean;
}): Promise<string | null>;
/**
 * Log multiple document annotations in batch
 * @param params - Batch document annotation parameters
 * @param params.client - Optional Phoenix client
 * @param params.annotations - Document annotations to record; see DocumentAnnotation for per-item fields
 * @returns Promise resolving when all annotations are added
 */
function logDocumentAnnotations(params: {
client?: PhoenixClient;
annotations: DocumentAnnotation[];
}): Promise<void>;
/** Annotation targeting one retrieved document within a retrieval span's result list. */
interface DocumentAnnotation {
spanId: string;
/** Zero-based index of the document in the span's retrieval results. */
documentPosition: number;
name: string;
label?: string | null;
score?: number | null;
explanation?: string | null;
annotatorKind?: AnnotatorKind;
metadata?: Record<string, unknown>;
}

Usage Examples:
import { addDocumentAnnotation, logDocumentAnnotations } from "@arizeai/phoenix-client/spans";
// Annotate relevance of a retrieved document.
// The payload is nested under `documentAnnotation` to match the declared
// addDocumentAnnotation signature; passing the fields flat would not type-check.
await addDocumentAnnotation({
documentAnnotation: {
spanId: "retrieval_span_123",
documentPosition: 0, // First document in results
name: "relevance",
score: 0.92,
label: "relevant",
explanation: "Document contains key information to answer the query",
annotatorKind: "LLM"
}
});
// Batch document annotations: score multiple document positions of the same retrieval span
const documentAnnotations: DocumentAnnotation[] = [
{
spanId: "retrieval_span_456",
documentPosition: 0,
name: "relevance",
score: 0.88,
annotatorKind: "LLM"
},
{
spanId: "retrieval_span_456",
documentPosition: 1,
name: "relevance",
score: 0.65,
annotatorKind: "LLM"
}
];
await logDocumentAnnotations({ annotations: documentAnnotations });

Retrieve annotations associated with spans for analysis and evaluation.
/**
 * Get annotations for specific spans
 * @param params - Span annotation retrieval parameters
 * @param params.client - Optional Phoenix client
 * @param params.spanIds - IDs of the spans whose annotations should be fetched
 * @param params.annotationNames - Optional filter; when set, presumably only annotations with
 *        these names are returned — TODO confirm
 * @returns Promise resolving to span annotations, grouped per span
 */
function getSpanAnnotations(params: {
client?: PhoenixClient;
spanIds: string[];
annotationNames?: string[];
}): Promise<SpanAnnotationResult[]>;
/** Annotations grouped by the span they belong to. */
interface SpanAnnotationResult {
spanId: string;
annotations: AnnotationData[];
}
/** A stored annotation as returned by the API; annotatorKind is always present here, unlike on write. */
interface AnnotationData {
name: string;
label?: string | null;
score?: number | null;
explanation?: string | null;
annotatorKind: AnnotatorKind;
metadata?: Record<string, unknown>;
/** When the annotation was created. */
createdAt: Date;
}

Usage Example:
import { getSpanAnnotations } from "@arizeai/phoenix-client/spans";
// Fetch selected annotation names for several spans in one call
const results = await getSpanAnnotations({
spanIds: ["span_123", "span_456", "span_789"],
annotationNames: ["quality", "relevance", "accuracy"]
});
// Results come back grouped per span
for (const result of results) {
console.log(`Span ${result.spanId}:`);
for (const annotation of result.annotations) {
console.log(` ${annotation.name}: ${annotation.score} (${annotation.label})`);
}
}

Delete spans from the system for data management and cleanup.
/**
 * Delete a specific span
 * NOTE(review): presumably irreversible — the usage example warns to use this carefully.
 * @param params - Span deletion parameters
 * @param params.client - Optional Phoenix client
 * @param params.spanId - ID of the span to delete
 * @returns Promise resolving when span is deleted
 */
function deleteSpan(params: {
client?: PhoenixClient;
spanId: string;
}): Promise<void>;

Usage Example:
import { deleteSpan } from "@arizeai/phoenix-client/spans";
// Delete a span (use carefully! — presumably irreversible)
await deleteSpan({
spanId: "span_to_delete_123"
});

Common patterns and use cases for span annotations.
Quality Assessment:
// Human quality review: numeric score plus categorical label and written rationale
await addSpanAnnotation({
spanId: "llm_response_span",
name: "response_quality",
score: 4.2,
label: "good",
explanation: "Accurate and well-structured response",
annotatorKind: "HUMAN"
});

Automated Evaluation:
// LLM-as-judge evaluation; metadata records the judge model and prompt version for reproducibility
await addSpanAnnotation({
spanId: "generation_span",
name: "faithfulness",
score: 0.87,
explanation: "Response is mostly faithful to the source material",
annotatorKind: "LLM",
metadata: { judge_model: "gpt-4o", prompt_version: "v2.1" }
});

User Feedback:
// Direct user feedback captured as an annotation; metadata ties it back to the user
await addSpanAnnotation({
spanId: "chat_response_span",
name: "user_satisfaction",
label: "satisfied",
score: 5.0,
annotatorKind: "HUMAN",
metadata: { user_id: "user_123", feedback_type: "explicit" }
});

Retrieval Evaluation:
// Document relevance for RAG systems.
// The payload is nested under `documentAnnotation` to match the declared
// addDocumentAnnotation signature; passing the fields flat would not type-check.
await addDocumentAnnotation({
documentAnnotation: {
spanId: "retrieval_span",
documentPosition: 0,
name: "relevance",
score: 0.95,
label: "highly_relevant",
annotatorKind: "LLM"
}
});

Toxicity Detection:
// Automated safety evaluation from a heuristic detector; detector and threshold recorded in metadata
await addSpanAnnotation({
spanId: "user_input_span",
name: "toxicity",
score: 0.02,
label: "safe",
annotatorKind: "HEURISTIC",
metadata: { detector: "perspective_api", threshold: 0.7 }
});

Install with Tessl CLI
npx tessl i tessl/npm-arizeai--phoenix-client