Main provider interface for accessing all model types in a unified way, serving as the entry point for AI provider implementations. Every AI provider must implement this core interface to expose language, text embedding, and image models, and may optionally expose transcription and speech models.
/**
 * Main provider interface for accessing AI models
 */
interface ProviderV2 {
  /** Get a language model instance */
  languageModel(modelId: string): LanguageModelV2;
  /** Get a text embedding model instance */
  textEmbeddingModel(modelId: string): EmbeddingModelV2<string>;
  /** Get an image generation model instance */
  imageModel(modelId: string): ImageModelV2;
  /** Get a transcription model instance (optional) */
  transcriptionModel?(modelId: string): TranscriptionModelV2;
  /** Get a speech generation model instance (optional) */
  speechModel?(modelId: string): SpeechModelV2;
}

Usage Examples:
import { ProviderV2, LanguageModelV2, EmbeddingModelV2, ImageModelV2, TranscriptionModelV2, SpeechModelV2 } from "@ai-sdk/provider";
// Implement a custom provider
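// NOTE: OpenAILanguageModel, OpenAIEmbeddingModel, OpenAIImageModel,
// OpenAITranscriptionModel, and OpenAISpeechModel are assumed to be
// provider-specific classes that implement the corresponding *V2 model
// interfaces; they are not shown in this example.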
class OpenAIProvider implements ProviderV2 {
  private apiKey: string;
  private baseURL: string;

  constructor(apiKey: string, baseURL = 'https://api.openai.com/v1') {
    this.apiKey = apiKey;
    this.baseURL = baseURL;
  }

  languageModel(modelId: string): LanguageModelV2 {
    return new OpenAILanguageModel({
      modelId,
      apiKey: this.apiKey,
      baseURL: this.baseURL
    });
  }

  textEmbeddingModel(modelId: string): EmbeddingModelV2<string> {
    return new OpenAIEmbeddingModel({
      modelId,
      apiKey: this.apiKey,
      baseURL: this.baseURL
    });
  }

  imageModel(modelId: string): ImageModelV2 {
    return new OpenAIImageModel({
      modelId,
      apiKey: this.apiKey,
      baseURL: this.baseURL
    });
  }

  transcriptionModel(modelId: string): TranscriptionModelV2 {
    return new OpenAITranscriptionModel({
      modelId,
      apiKey: this.apiKey,
      baseURL: this.baseURL
    });
  }

  speechModel(modelId: string): SpeechModelV2 {
    return new OpenAISpeechModel({
      modelId,
      apiKey: this.apiKey,
      baseURL: this.baseURL
    });
  }
}
// Use the provider to access different models
const provider = new OpenAIProvider(process.env.OPENAI_API_KEY!);

// Get language model
const chatModel = provider.languageModel('gpt-4');
const chatResult = await chatModel.doGenerate({
  prompt: [
    { role: 'user', content: [{ type: 'text', text: 'Hello!' }] }
  ]
});

// Get embedding model
const embeddingModel = provider.textEmbeddingModel('text-embedding-ada-002');
const embeddings = await embeddingModel.doEmbed({
  values: ['Hello world', 'AI is amazing']
});

// Get image model
const imageModel = provider.imageModel('dall-e-3');
const images = await imageModel.doGenerate({
  prompt: 'A beautiful sunset over mountains',
  n: 1,
  providerOptions: {}
});

// Get transcription model (if supported)
if (provider.transcriptionModel) {
  const transcriptionModel = provider.transcriptionModel('whisper-1');
  const audioData = new Uint8Array(/* audio bytes */);
  const transcription = await transcriptionModel.doGenerate({
    audio: audioData,
    mediaType: 'audio/mpeg'
  });
}

// Get speech model (if supported)
if (provider.speechModel) {
  const speechModel = provider.speechModel('tts-1');
  const speech = await speechModel.doGenerate({
    text: 'Hello, this is a test of text-to-speech!'
  });
}
// Provider factory pattern
interface ProviderConfig {
  type: 'openai' | 'anthropic' | 'google';
  apiKey: string;
  baseURL?: string;
}
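// NOTE: AnthropicProvider and GoogleProvider are assumed to be analogous
// ProviderV2 implementations; they are not shown in this example.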
function createProvider(config: ProviderConfig): ProviderV2 {
  switch (config.type) {
    case 'openai':
      return new OpenAIProvider(config.apiKey, config.baseURL);
    case 'anthropic':
      return new AnthropicProvider(config.apiKey, config.baseURL);
    case 'google':
      return new GoogleProvider(config.apiKey, config.baseURL);
    default:
      throw new Error(`Unsupported provider type: ${config.type}`);
  }
}
// Multi-provider setup
const providers = {
  openai: createProvider({ type: 'openai', apiKey: process.env.OPENAI_API_KEY! }),
  anthropic: createProvider({ type: 'anthropic', apiKey: process.env.ANTHROPIC_API_KEY! }),
  google: createProvider({ type: 'google', apiKey: process.env.GOOGLE_API_KEY! })
};
// Use different providers for different tasks
const chatGPT = providers.openai.languageModel('gpt-4');
const claude = providers.anthropic.languageModel('claude-3-opus');
const openaiEmbeddings = providers.openai.textEmbeddingModel('text-embedding-ada-002');
// Provider capability checking
function hasTranscriptionSupport(provider: ProviderV2): boolean {
  return typeof provider.transcriptionModel === 'function';
}

function hasSpeechSupport(provider: ProviderV2): boolean {
  return typeof provider.speechModel === 'function';
}
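// A minimal sketch of how these helpers might be used to pick a provider that
// advertises speech support from the `providers` map above (illustrative only):
const speechCapableProvider = Object.values(providers).find(hasSpeechSupport);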
// Dynamic model selection
interface ModelSelection {
  provider: ProviderV2;
  modelId: string;
  type: 'language' | 'embedding' | 'image' | 'speech' | 'transcription';
}
function getModel(selection: ModelSelection) {
  switch (selection.type) {
    case 'language':
      return selection.provider.languageModel(selection.modelId);
    case 'embedding':
      return selection.provider.textEmbeddingModel(selection.modelId);
    case 'image':
      return selection.provider.imageModel(selection.modelId);
    case 'speech':
      if (!selection.provider.speechModel) {
        throw new Error('Speech models not supported by this provider');
      }
      return selection.provider.speechModel(selection.modelId);
    case 'transcription':
      if (!selection.provider.transcriptionModel) {
        throw new Error('Transcription models not supported by this provider');
      }
      return selection.provider.transcriptionModel(selection.modelId);
    default:
      throw new Error(`Unknown model type: ${selection.type}`);
  }
}
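// Illustrative call to getModel; the provider and model ID are example values.
const searchEmbedder = getModel({
  provider: providers.openai,
  modelId: 'text-embedding-ada-002',
  type: 'embedding'
});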
// Provider with fallback support
class FallbackProvider implements ProviderV2 {
  constructor(
    private primaryProvider: ProviderV2,
    private fallbackProvider: ProviderV2
  ) {}

  languageModel(modelId: string): LanguageModelV2 {
    try {
      return this.primaryProvider.languageModel(modelId);
    } catch {
      return this.fallbackProvider.languageModel(modelId);
    }
  }

  textEmbeddingModel(modelId: string): EmbeddingModelV2<string> {
    try {
      return this.primaryProvider.textEmbeddingModel(modelId);
    } catch {
      return this.fallbackProvider.textEmbeddingModel(modelId);
    }
  }

  imageModel(modelId: string): ImageModelV2 {
    try {
      return this.primaryProvider.imageModel(modelId);
    } catch {
      return this.fallbackProvider.imageModel(modelId);
    }
  }

  transcriptionModel?(modelId: string): TranscriptionModelV2 {
    if (this.primaryProvider.transcriptionModel) {
      try {
        return this.primaryProvider.transcriptionModel(modelId);
      } catch {
        // Fall through to fallback
      }
    }
    if (this.fallbackProvider.transcriptionModel) {
      return this.fallbackProvider.transcriptionModel(modelId);
    }
    throw new Error('No transcription model support available');
  }

  speechModel?(modelId: string): SpeechModelV2 {
    if (this.primaryProvider.speechModel) {
      try {
        return this.primaryProvider.speechModel(modelId);
      } catch {
        // Fall through to fallback
      }
    }
    if (this.fallbackProvider.speechModel) {
      return this.fallbackProvider.speechModel(modelId);
    }
    throw new Error('No speech model support available');
  }
}
const robustProvider = new FallbackProvider(
  providers.openai,
  providers.anthropic
);
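// Because FallbackProvider implements ProviderV2, it can be used anywhere a
// provider is expected (sketch only; the model ID is an example value).
const resilientChatModel = robustProvider.languageModel('gpt-4');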