- Spec files
npm-anthropic-ai--sdk
Describes: pkg:npm/@anthropic-ai/sdk@0.61.x
- Description
- The official TypeScript library for the Anthropic API providing comprehensive client functionality for Claude AI models.
- Author
- tessl
- Last updated
docs/messages-api.md
# Messages API

The Messages API is the primary interface for conversational AI interactions with Claude models. It provides comprehensive support for multi-turn conversations, tool usage, and both streaming and non-streaming responses.

## Capabilities

### Create Message

Send a structured list of input messages with text and/or image content, and the model will generate the next message in the conversation.

```typescript { .api }
/**
 * Create a message with Claude models
 * @param params - Message creation parameters
 * @returns Promise resolving to Message response or Stream for streaming
 */
create(params: MessageCreateParamsNonStreaming): APIPromise<Message>;
create(params: MessageCreateParamsStreaming): APIPromise<Stream<RawMessageStreamEvent>>;

interface MessageCreateParams {
  /** The model to use for generation */
  model: Model;
  /** Array of input messages */
  messages: MessageParam[];
  /** Maximum number of tokens to generate */
  max_tokens: number;
  /** System prompt to set context and behavior */
  system?: string;
  /** Available tools for the model to use */
  tools?: Tool[];
  /** How the model should choose tools */
  tool_choice?: ToolChoice;
  /** Sampling temperature (0.0 to 1.0) */
  temperature?: number;
  /** Whether to stream the response */
  stream?: boolean;
  /** Message metadata */
  metadata?: Metadata;
  /** Stop sequences to halt generation */
  stop_sequences?: string[];
}

interface MessageCreateParamsNonStreaming extends MessageCreateParams {
  stream?: false;
}

interface MessageCreateParamsStreaming extends MessageCreateParams {
  stream: true;
}
```

**Usage Examples:**

```typescript
import Anthropic from "@anthropic-ai/sdk";

const client = new Anthropic();

// Basic message
const message = await client.messages.create({
  model: "claude-3-sonnet-20240229",
  max_tokens: 1024,
  messages: [
    { role: "user", content: "What is the capital of France?" }
  ],
});

// Multi-turn conversation
const conversation = await client.messages.create({
  model: "claude-3-sonnet-20240229",
  max_tokens: 1024,
  messages: [
    { role: "user", content: "Hello, I'm learning about astronomy." },
    { role: "assistant", content: "That's wonderful! I'd be happy to help you learn about astronomy. What specific topics interest you?" },
    { role: "user", content: "Tell me about black holes." }
  ],
});

// With system prompt
const guidedMessage = await client.messages.create({
  model: "claude-3-sonnet-20240229",
  max_tokens: 1024,
  system: "You are a helpful astronomy teacher. Explain concepts clearly and encourage questions.",
  messages: [
    { role: "user", content: "What causes the northern lights?" }
  ],
});
```

### Stream Message

Create a streaming message that returns events in real-time as the model generates the response.

```typescript { .api }
/**
 * Create a streaming message with real-time response events
 * @param params - Message stream parameters
 * @returns MessageStream instance for handling events
 */
stream(params: MessageStreamParams): MessageStream;

interface MessageStreamParams {
  model: Model;
  messages: MessageParam[];
  max_tokens: number;
  system?: string;
  tools?: Tool[];
  tool_choice?: ToolChoice;
  temperature?: number;
}

class MessageStream {
  /** Get the final message once streaming completes */
  finalMessage(): Promise<Message>;
  /** Iterate over streaming events */
  [Symbol.asyncIterator](): AsyncIterableIterator<MessageStreamEvent>;
  /** Handle specific event types */
  on(event: "text", handler: (text: string) => void): this;
  on(event: "message", handler: (message: Message) => void): this;
  on(event: "error", handler: (error: Error) => void): this;
}
```

**Usage Examples:**

```typescript
// Streaming with event handling
const stream = client.messages.stream({
  model: "claude-3-sonnet-20240229",
  max_tokens: 1024,
  messages: [{ role: "user", content: "Write a short story about a robot." }],
});

stream.on("text", (text) => {
  process.stdout.write(text);
});

stream.on("message", (message) => {
  console.log("Final message:", message);
});

// Async iteration over events
for await (const event of stream) {
  if (event.type === "content_block_delta") {
    process.stdout.write(event.delta.text);
  }
}
```

### Count Tokens

Count the number of tokens in a message without creating it, useful for managing token limits and costs.

```typescript { .api }
/**
 * Count tokens in a message without creating it
 * @param params - Token counting parameters
 * @returns Promise resolving to token count information
 */
countTokens(params: MessageCountTokensParams): APIPromise<MessageTokensCount>;

interface MessageCountTokensParams {
  model: Model;
  messages: MessageParam[];
  system?: string;
  tools?: MessageCountTokensTool[];
}

interface MessageTokensCount {
  input_tokens: number;
  cache_creation_input_tokens?: number;
  cache_read_input_tokens?: number;
}
```

**Usage Examples:**

```typescript
const tokenCount = await client.messages.countTokens({
  model: "claude-3-sonnet-20240229",
  messages: [
    { role: "user", content: "Explain quantum computing in simple terms." }
  ],
});

console.log(`This message would use ${tokenCount.input_tokens} input tokens`);
```

### Message Batches

Process multiple messages efficiently using the batching API.

```typescript { .api }
class Batches extends APIResource {
  create(params: BatchCreateParams): APIPromise<MessageBatch>;
  retrieve(batchId: string): APIPromise<MessageBatch>;
  list(params?: BatchListParams): PagePromise<MessageBatchesPage, MessageBatch>;
  cancel(batchId: string): APIPromise<MessageBatch>;
  delete(batchId: string): APIPromise<DeletedMessageBatch>;
}

interface BatchCreateParams {
  requests: MessageBatchRequest[];
}

interface MessageBatchRequest {
  custom_id: string;
  params: MessageCreateParams;
}
```

## Message Types

```typescript { .api }
interface Message {
  id: string;
  type: "message";
  role: "assistant";
  content: ContentBlock[];
  model: Model;
  stop_reason: StopReason | null;
  stop_sequence: string | null;
  usage: Usage;
}

interface MessageParam {
  role: "user" | "assistant";
  content: string | ContentBlockParam[];
}

type ContentBlock = TextBlock | ToolUseBlock;

interface TextBlock {
  type: "text";
  text: string;
}

interface ToolUseBlock {
  type: "tool_use";
  id: string;
  name: string;
  input: Record<string, any>;
}

type ContentBlockParam =
  | TextBlockParam
  | ImageBlockParam
  | ToolUseBlockParam
  | ToolResultBlockParam;

interface TextBlockParam {
  type: "text";
  text: string;
  cache_control?: CacheControlEphemeral;
}

interface ImageBlockParam {
  type: "image";
  source: Base64ImageSource | URLImageSource;
  cache_control?: CacheControlEphemeral;
}

interface ToolUseBlockParam {
  type: "tool_use";
  id: string;
  name: string;
  input: Record<string, any>;
  cache_control?: CacheControlEphemeral;
}

interface ToolResultBlockParam {
  type: "tool_result";
  tool_use_id: string;
  content?: string | ContentBlockParam[];
  is_error?: boolean;
  cache_control?: CacheControlEphemeral;
}
```

## Tool Usage

```typescript { .api }
interface Tool {
  /** Name of the tool, referenced by tool_use blocks */
  name: string;
  /** Description of what the tool does */
  description?: string;
  /** JSON Schema describing the tool's expected input */
  input_schema: {
    type: "object";
    properties?: Record<string, any>;
    required?: string[];
  };
}

type ToolChoice = ToolChoiceAuto | ToolChoiceAny | ToolChoiceNone | ToolChoiceTool;

interface ToolChoiceAuto {
  type: "auto";
}

interface ToolChoiceAny {
  type: "any";
}

interface ToolChoiceNone {
  type: "none";
}

interface ToolChoiceTool {
  type: "tool";
  name: string;
}
```

## Streaming Events

```typescript { .api }
type MessageStreamEvent =
  | MessageStartEvent
  | MessageDeltaEvent
  | MessageStopEvent
  | ContentBlockStartEvent
  | ContentBlockDeltaEvent
  | ContentBlockStopEvent;

interface MessageStartEvent {
  type: "message_start";
  message: Message;
}

interface MessageDeltaEvent {
  type: "message_delta";
  delta: {
    stop_reason?: StopReason;
    stop_sequence?: string;
  };
  usage: MessageDeltaUsage;
}

interface MessageStopEvent {
  type: "message_stop";
}

interface ContentBlockStartEvent {
  type: "content_block_start";
  index: number;
  content_block: ContentBlock;
}

interface ContentBlockDeltaEvent {
  type: "content_block_delta";
  index: number;
  delta: TextDelta | InputJSONDelta;
}

interface ContentBlockStopEvent {
  type: "content_block_stop";
  index: number;
}

interface TextDelta {
  type: "text_delta";
  text: string;
}

interface InputJSONDelta {
  type: "input_json_delta";
  partial_json: string;
}
```

## Image Support

```typescript { .api }
interface Base64ImageSource {
  type: "base64";
  media_type: "image/jpeg" | "image/png" | "image/gif" | "image/webp";
  data: string;
}

interface URLImageSource {
  type: "url";
  url: string;
}
```

**Usage Examples:**

```typescript
// Send image with message
const imageMessage = await client.messages.create({
  model: "claude-3-sonnet-20240229",
  max_tokens: 1024,
  messages: [
    {
      role: "user",
      content: [
        { type: "text", text: "What do you see in this image?" },
        {
          type: "image",
          source: {
            type: "base64",
            media_type: "image/jpeg",
            data: "base64-encoded-image-data...",
          },
        },
      ],
    },
  ],
});
```

## Usage Information

```typescript { .api }
interface Usage {
  input_tokens: number;
  output_tokens: number;
  cache_creation_input_tokens?: number;
  cache_read_input_tokens?: number;
}

interface MessageDeltaUsage {
  output_tokens: number;
}
```