Core interface for text generation models, covering streaming, tool calling, structured output generation, and rich content handling (text, reasoning, files, and tool interactions).
Main type definition for language model implementations, supporting both single-shot generation and streaming modes.
/**
* Core language model type providing text generation capabilities
*/
type LanguageModelV2 = {
  /** API specification version */
  specificationVersion: 'v2';
  /** Provider identifier (e.g., 'openai', 'anthropic') */
  provider: string;
  /** Model identifier (e.g., 'gpt-4', 'claude-3') */
  modelId: string;
  /** Mapping of URL types to regex patterns for supported URLs */
  supportedUrls: PromiseLike<Record<string, RegExp[]>> | Record<string, RegExp[]>;
  /** Generate a single response */
  doGenerate(options: LanguageModelV2CallOptions): PromiseLike<LanguageModelV2GenerateResult>;
  /** Generate a streaming response */
  doStream(options: LanguageModelV2CallOptions): PromiseLike<LanguageModelV2StreamResult>;
};
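A provider implements this type by supplying the identification fields and both call methods. The following is a minimal sketch only, assuming hypothetical callMyApi and callMyApiStream helpers that wrap some backend; it is not a real provider implementation.
// Minimal implementation sketch. `callMyApi` and `callMyApiStream` are
// hypothetical helpers, not part of the specification.
const myModel: LanguageModelV2 = {
  specificationVersion: 'v2',
  provider: 'my-provider',
  modelId: 'my-model-1',
  supportedUrls: {}, // no natively supported URLs in this sketch
  async doGenerate(options) {
    const res = await callMyApi(options);
    return {
      content: [{ type: 'text', text: res.text }],
      finishReason: 'stop',
      usage: {
        inputTokens: res.inputTokens,
        outputTokens: res.outputTokens,
        totalTokens: res.inputTokens + res.outputTokens,
      },
      warnings: [],
    };
  },
  async doStream(options) {
    // The helper is assumed to yield LanguageModelV2StreamPart chunks.
    return { stream: callMyApiStream(options) };
  },
};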
Comprehensive options for configuring language model calls.
/**
 * Configuration options for language model calls
 */
interface LanguageModelV2CallOptions {
  /** The conversation prompt as an array of messages */
  prompt: LanguageModelV2Prompt;
  /** Maximum number of tokens to generate */
  maxOutputTokens?: number;
  /** Sampling temperature (0.0 to 2.0) */
  temperature?: number;
  /** Sequences that will stop generation */
  stopSequences?: string[];
  /** Top-p nucleus sampling parameter */
  topP?: number;
  /** Top-k sampling parameter */
  topK?: number;
  /** Presence penalty (-2.0 to 2.0) */
  presencePenalty?: number;
  /** Frequency penalty (-2.0 to 2.0) */
  frequencyPenalty?: number;
  /** Response format specification */
  responseFormat?:
    | { type: 'text' }
    | { type: 'json'; schema?: JSONSchema7; name?: string; description?: string };
  /** Random seed for reproducible outputs */
  seed?: number;
  /** Available tools for function calling */
  tools?: Array<LanguageModelV2FunctionTool | LanguageModelV2ProviderDefinedTool>;
  /** Tool selection strategy */
  toolChoice?: LanguageModelV2ToolChoice;
  /** Include raw response chunks in stream */
  includeRawChunks?: boolean;
  /** Abort signal for cancellation */
  abortSignal?: AbortSignal;
  /** Custom HTTP headers */
  headers?: Record<string, string | undefined>;
  /** Provider-specific options */
  providerOptions?: SharedV2ProviderOptions;
}
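For example, structured JSON output can be requested by combining responseFormat with a JSON schema. The options object below is only a sketch; the schema and prompt text are made up for illustration.
// Sketch: requesting JSON output that conforms to a schema.
const jsonOptions: LanguageModelV2CallOptions = {
  prompt: [
    { role: 'user', content: [{ type: 'text', text: 'List three colors.' }] },
  ],
  responseFormat: {
    type: 'json',
    name: 'colors',
    schema: {
      type: 'object',
      properties: {
        colors: { type: 'array', items: { type: 'string' } },
      },
      required: ['colors'],
    },
  },
  maxOutputTokens: 200,
  temperature: 0,
};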
Messages that form the conversation context.
/**
 * Array of messages forming the conversation prompt
 */
type LanguageModelV2Prompt = Array<LanguageModelV2Message>;
/**
 * Union of all possible message types
 */
type LanguageModelV2Message =
  | LanguageModelV2SystemMessage
  | LanguageModelV2UserMessage
  | LanguageModelV2AssistantMessage
  | LanguageModelV2ToolMessage;
/**
 * System message providing instructions or context
 */
interface LanguageModelV2SystemMessage {
  role: 'system';
  content: string;
}
/**
 * User message with mixed content types
 */
interface LanguageModelV2UserMessage {
  role: 'user';
  content: Array<LanguageModelV2TextPart | LanguageModelV2FilePart>;
}
/**
 * Assistant message with rich content support
 */
interface LanguageModelV2AssistantMessage {
  role: 'assistant';
  content: Array<
    | LanguageModelV2TextPart
    | LanguageModelV2FilePart
    | LanguageModelV2ReasoningPart
    | LanguageModelV2ToolCallPart
    | LanguageModelV2ToolResultPart
  >;
}
/**
 * Tool message containing tool execution results
 */
interface LanguageModelV2ToolMessage {
  role: 'tool';
  content: Array<LanguageModelV2ToolResultPart>;
}
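A prompt is simply an ordered array of these messages. The sketch below builds a short multi-turn conversation; text parts are assumed to use the { type: 'text', text } shape shown in the usage examples at the end of this document.
// Sketch: a multi-turn conversation prompt.
const conversation: LanguageModelV2Prompt = [
  { role: 'system', content: 'You are a concise assistant.' },
  { role: 'user', content: [{ type: 'text', text: 'Name a programming language.' }] },
  { role: 'assistant', content: [{ type: 'text', text: 'TypeScript.' }] },
  { role: 'user', content: [{ type: 'text', text: 'Why that one?' }] },
];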
Rich content types supporting text, files, reasoning, tools, and sources.
/**
 * Union of all content types
 */
type LanguageModelV2Content =
  | LanguageModelV2Text
  | LanguageModelV2Reasoning
  | LanguageModelV2File
  | LanguageModelV2Source
  | LanguageModelV2ToolCall
  | LanguageModelV2ToolResult;
/**
 * Plain text content with optional metadata
 */
interface LanguageModelV2Text {
  type: 'text';
  text: string;
  providerMetadata?: SharedV2ProviderMetadata;
}
/**
 * Reasoning content (e.g., chain-of-thought)
 */
interface LanguageModelV2Reasoning {
  type: 'reasoning';
  text: string;
  providerMetadata?: SharedV2ProviderMetadata;
}
/**
 * File content with media type and data
 */
interface LanguageModelV2File {
  type: 'file';
  mediaType: string;
  data: string | Uint8Array;
}
/**
 * Source reference (URL or document)
 */
interface LanguageModelV2Source {
  type: 'source';
  source:
    | { type: 'url'; url: string }
    | { type: 'document'; id: string; description?: string };
}
/**
 * Tool call with execution details
 */
interface LanguageModelV2ToolCall {
  type: 'tool-call';
  toolCallId: string;
  toolName: string;
  args: Record<string, JSONValue>;
  providerMetadata?: SharedV2ProviderMetadata;
}
/**
 * Tool execution result
 */
interface LanguageModelV2ToolResult {
  type: 'tool-result';
  toolCallId: string;
  toolName: string;
  result: JSONValue;
  isError?: boolean;
  providerMetadata?: SharedV2ProviderMetadata;
}
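Because content is a discriminated union, results are typically processed by switching on the type field. A small sketch:
// Sketch: handling each kind of content returned by doGenerate.
function describeContent(parts: LanguageModelV2Content[]): void {
  for (const part of parts) {
    switch (part.type) {
      case 'text':
        console.log('text:', part.text);
        break;
      case 'reasoning':
        console.log('reasoning:', part.text);
        break;
      case 'file':
        console.log('file with media type', part.mediaType);
        break;
      case 'source':
        console.log('source:', part.source.type === 'url' ? part.source.url : part.source.id);
        break;
      case 'tool-call':
        console.log('tool call:', part.toolName, part.args);
        break;
      case 'tool-result':
        console.log('tool result for', part.toolCallId, part.isError ? '(error)' : '');
        break;
    }
  }
}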
Function calling and provider-defined tool support.
/**
 * Function tool definition with JSON schema
 */
interface LanguageModelV2FunctionTool {
  type: 'function';
  name: string;
  description?: string;
  inputSchema: JSONSchema7;
  providerOptions?: SharedV2ProviderOptions;
}
/**
 * Provider-specific tool definition
 */
interface LanguageModelV2ProviderDefinedTool {
  type: 'provider-defined';
  id: `${string}.${string}`;
  name: string;
  args: Record<string, unknown>;
}
/**
 * Tool selection strategy
 */
type LanguageModelV2ToolChoice =
  | { type: 'auto' }
  | { type: 'none' }
  | { type: 'required' }
  | { type: 'tool'; toolName: string };
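The toolChoice setting controls whether and which tools the model may call. The sketch below forces a specific tool; the get_weather tool is hypothetical and mirrors the function-calling usage example later in this document.
// Sketch: force the model to call one specific tool.
const forcedToolOptions: Partial<LanguageModelV2CallOptions> = {
  tools: [
    {
      type: 'function',
      name: 'get_weather',
      description: 'Get current weather for a location',
      inputSchema: {
        type: 'object',
        properties: { location: { type: 'string' } },
        required: ['location'],
      },
    },
  ],
  toolChoice: { type: 'tool', toolName: 'get_weather' },
};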
Response structures for generation and streaming modes.
/**
 * Result from doGenerate method
 */
interface LanguageModelV2GenerateResult {
  /** Generated content array */
  content: LanguageModelV2Content[];
  /** Reason generation finished */
  finishReason: LanguageModelV2FinishReason;
  /** Token usage information */
  usage: LanguageModelV2Usage;
  /** Provider-specific metadata */
  providerMetadata?: SharedV2ProviderMetadata;
  /** Request details */
  request?: { body?: unknown };
  /** Response details */
  response?: LanguageModelV2ResponseMetadata & { headers?: SharedV2Headers; body?: unknown };
  /** Warnings from the call */
  warnings: LanguageModelV2CallWarning[];
}
/**
 * Result from doStream method
 */
interface LanguageModelV2StreamResult {
  /** Stream of response parts */
  stream: ReadableStream<LanguageModelV2StreamPart>;
  /** Request details */
  request?: { body?: unknown };
  /** Response details */
  response?: { headers?: SharedV2Headers };
}
/**
 * Reason why generation finished
 */
type LanguageModelV2FinishReason =
  | 'stop'
  | 'length'
  | 'content-filter'
  | 'tool-calls'
  | 'error'
  | 'other'
  | 'unknown';
/**
 * Token usage statistics
 */
interface LanguageModelV2Usage {
  /** Input tokens consumed */
  inputTokens: number | undefined;
  /** Output tokens generated */
  outputTokens: number | undefined;
  /** Total tokens (input + output) */
  totalTokens: number | undefined;
  /** Reasoning tokens (if supported) */
  reasoningTokens?: number | undefined;
  /** Cached input tokens */
  cachedInputTokens?: number | undefined;
}
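Since providers may omit any of the token counts, consumers should treat every field as possibly undefined; a small defensive sketch:
// Sketch: logging usage defensively, since any count may be undefined.
function logUsage(usage: LanguageModelV2Usage): void {
  const total =
    usage.totalTokens ?? (usage.inputTokens ?? 0) + (usage.outputTokens ?? 0);
  console.log(`tokens used: ${total} (cached input: ${usage.cachedInputTokens ?? 0})`);
}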
Comprehensive streaming types for real-time response processing.
/**
 * Union of all streaming part types
 */
type LanguageModelV2StreamPart =
  // Metadata
  | { type: 'stream-start'; timestamp: number }
  | { type: 'response-metadata'; id?: string; model?: string; timestamp: number }
  | { type: 'finish'; finishReason: LanguageModelV2FinishReason; usage: LanguageModelV2Usage; providerMetadata?: SharedV2ProviderMetadata; timestamp: number }
  | { type: 'error'; error: unknown; timestamp: number }
  | { type: 'raw'; content: string }
  // Text streaming
  | { type: 'text-start'; timestamp: number }
  | { type: 'text-delta'; textDelta: string; timestamp: number }
  | { type: 'text-end'; timestamp: number }
  // Reasoning streaming
  | { type: 'reasoning-start'; timestamp: number }
  | { type: 'reasoning-delta'; reasoningDelta: string; timestamp: number }
  | { type: 'reasoning-end'; timestamp: number }
  // Tool streaming
  | { type: 'tool-input-start'; toolCallId: string; toolName: string; timestamp: number }
  | { type: 'tool-input-delta'; toolCallId: string; toolName: string; argsTextDelta: string; timestamp: number }
  | { type: 'tool-input-end'; toolCallId: string; toolName: string; args: Record<string, JSONValue>; timestamp: number };
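A typical consumer switches on the part type while reading the stream. The sketch below accumulates text and tool-call arguments as they arrive.
// Sketch: consuming the stream and assembling text and tool-call arguments.
async function consumeStream(stream: ReadableStream<LanguageModelV2StreamPart>) {
  const reader = stream.getReader();
  let text = '';
  const toolInputs: Record<string, string> = {};
  while (true) {
    const { done, value } = await reader.read();
    if (done) break;
    switch (value.type) {
      case 'text-delta':
        text += value.textDelta;
        break;
      case 'tool-input-delta':
        toolInputs[value.toolCallId] =
          (toolInputs[value.toolCallId] ?? '') + value.argsTextDelta;
        break;
      case 'finish':
        console.log('finished:', value.finishReason, value.usage.totalTokens);
        break;
      case 'error':
        throw value.error;
    }
  }
  return { text, toolInputs };
}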
Additional information about the model response.
/**
 * Response metadata interface
 */
interface LanguageModelV2ResponseMetadata {
  id?: string;
  model?: string;
  timestamp: number;
}
Warnings that can be returned from language model calls.
/**
 * Warning types for language model calls
 */
type LanguageModelV2CallWarning =
  | { type: 'unsupported-setting'; setting: keyof LanguageModelV2CallOptions; details?: string }
  | { type: 'other'; message: string };
Usage Examples:
import { LanguageModelV2, LanguageModelV2CallOptions } from '@ai-sdk/provider';
// `model` is assumed to be an implementation of LanguageModelV2 obtained from a provider.
// Basic text generation
const result = await model.doGenerate({
  prompt: [
    { role: 'system', content: 'You are a helpful assistant.' },
    { role: 'user', content: [{ type: 'text', text: 'Hello!' }] }
  ],
  maxOutputTokens: 100,
  temperature: 0.7
});
// content is a union, so narrow on the part type before reading text.
const firstPart = result.content[0];
if (firstPart?.type === 'text') {
  console.log(firstPart.text);
}
console.log('Tokens used:', result.usage.totalTokens);
// Streaming generation
const streamResult = await model.doStream({
  prompt: [
    { role: 'user', content: [{ type: 'text', text: 'Write a story' }] }
  ]
});
const reader = streamResult.stream.getReader();
while (true) {
  const { done, value } = await reader.read();
  if (done) break;
  if (value.type === 'text-delta') {
    process.stdout.write(value.textDelta);
  }
}
// Function calling
const toolResult = await model.doGenerate({
  prompt: [
    { role: 'user', content: [{ type: 'text', text: 'What is the weather like?' }] }
  ],
  tools: [
    {
      type: 'function',
      name: 'get_weather',
      description: 'Get current weather for a location',
      inputSchema: {
        type: 'object',
        properties: {
          location: { type: 'string', description: 'City name' }
        },
        required: ['location'],
        additionalProperties: false
      }
    }
  ],
  toolChoice: { type: 'auto' }
});
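When the model responds with a tool call, the caller executes the tool and continues the conversation by sending the result back in a 'tool' role message. The sketch below assumes a hypothetical getWeather application function, and it assumes the tool-call and tool-result message parts mirror the LanguageModelV2ToolCall and LanguageModelV2ToolResult content shapes defined above.
// Sketch: executing a returned tool call and continuing the conversation.
// `getWeather` is a hypothetical application function.
for (const part of toolResult.content) {
  if (part.type === 'tool-call' && part.toolName === 'get_weather') {
    const weather = await getWeather(String(part.args.location));
    const followUp = await model.doGenerate({
      prompt: [
        { role: 'user', content: [{ type: 'text', text: 'What is the weather like?' }] },
        { role: 'assistant', content: [part] },
        {
          role: 'tool',
          content: [
            {
              type: 'tool-result',
              toolCallId: part.toolCallId,
              toolName: part.toolName,
              result: weather
            }
          ]
        }
      ]
    });
    console.log(followUp.content);
  }
}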