Text generation and chat functionality using Google's Gemini models with support for streaming, structured outputs, safety settings, and advanced features like thinking mode.
Get language model instances for text generation and chat functionality.
/**
 * Get a language model instance for text generation.
 * @param modelId - Gemini model identifier (see GoogleGenerativeAIModelId);
 *   any string is accepted so newly released models can be used immediately
 * @returns LanguageModelV2 instance parameterized with the Google provider's
 *   option and metadata types
 */
languageModel(modelId: GoogleGenerativeAIModelId): LanguageModelV2<
GoogleGenerativeAIProviderOptions,
GoogleGenerativeAIProviderMetadata
>;
/**
 * Get a chat model instance. Alias for languageModel — both return the same
 * LanguageModelV2 implementation for the given Gemini model id.
 * @param modelId - Gemini model identifier
 * @returns LanguageModelV2 instance
 */
chat(modelId: GoogleGenerativeAIModelId): LanguageModelV2<
GoogleGenerativeAIProviderOptions,
GoogleGenerativeAIProviderMetadata
>;

Usage Examples:
import { google } from "@ai-sdk/google";
import { generateText, streamText } from "ai";

// One-shot text generation
const result = await generateText({
  model: google("gemini-1.5-flash"),
  prompt: "Explain the concept of machine learning",
});

// Incremental (streaming) text generation
const { textStream } = await streamText({
  model: google("gemini-1.5-pro"),
  prompt: "Write a story about a robot",
});
for await (const chunk of textStream) {
  process.stdout.write(chunk);
}

// Chat model (same as languageModel)
const chatModel = google.chat("gemini-2.0-flash");

Supported Google Generative AI language model identifiers.
/**
 * Supported Google Generative AI (Gemini / Gemma) model identifiers.
 * The union also accepts any other string, so newly released model ids can be
 * passed without a type error while the listed literals keep autocompletion.
 */
type GoogleGenerativeAIModelId =
// Stable models
| "gemini-1.5-flash"
| "gemini-1.5-flash-latest"
| "gemini-1.5-flash-001"
| "gemini-1.5-flash-002"
| "gemini-1.5-flash-8b"
| "gemini-1.5-flash-8b-latest"
| "gemini-1.5-flash-8b-001"
| "gemini-1.5-pro"
| "gemini-1.5-pro-latest"
| "gemini-1.5-pro-001"
| "gemini-1.5-pro-002"
| "gemini-2.0-flash"
| "gemini-2.0-flash-001"
| "gemini-2.0-flash-live-001"
| "gemini-2.0-flash-lite"
| "gemini-2.0-pro-exp-02-05"
| "gemini-2.0-flash-thinking-exp-01-21"
| "gemini-2.0-flash-exp"
| "gemini-2.5-pro"
| "gemini-2.5-flash"
| "gemini-2.5-flash-lite"
| "gemini-2.5-flash-image-preview"
// Experimental models
| "gemini-2.5-pro-exp-03-25"
| "gemini-2.5-flash-preview-04-17"
| "gemini-exp-1206"
| "gemma-3-12b-it"
| "gemma-3-27b-it"
| (string & {});

Model Recommendations:
Configuration options for language model behavior and safety settings.
/**
 * Provider-specific options for Google Generative AI language models,
 * passed via `providerOptions` on a call.
 */
interface GoogleGenerativeAIProviderOptions {
/** Response modalities (text, image, or both) */
responseModalities?: ("TEXT" | "IMAGE")[];
/**
 * Thinking configuration for models that support reasoning.
 * `thinkingBudget` is the reasoning budget (presumably in tokens — confirm
 * against Google's docs); `includeThoughts` surfaces the reasoning traces
 * in the response.
 */
thinkingConfig?: {
thinkingBudget?: number;
includeThoughts?: boolean;
};
/** Cached content reference for context reuse (e.g. "cachedContents/<id>") */
cachedContent?: string;
/** Enable/disable structured outputs (default: true) */
structuredOutputs?: boolean;
/** Safety settings for content filtering (per-category thresholds) */
safetySettings?: SafetySetting[];
/** Global safety threshold (alternative to per-category safetySettings) */
threshold?: SafetyThreshold;
/** Enable timestamp understanding for audio files */
audioTimestamp?: boolean;
/** Billing labels (Vertex AI only) */
labels?: Record<string, string>;
}
/** A per-category content-safety rule pairing a harm category with its blocking threshold. */
interface SafetySetting {
category: SafetyCategory;
threshold: SafetyThreshold;
}
/** Harm categories recognized by Google Generative AI content-safety filtering. */
type SafetyCategory =
| "HARM_CATEGORY_UNSPECIFIED"
| "HARM_CATEGORY_HATE_SPEECH"
| "HARM_CATEGORY_DANGEROUS_CONTENT"
| "HARM_CATEGORY_HARASSMENT"
| "HARM_CATEGORY_SEXUALLY_EXPLICIT"
| "HARM_CATEGORY_CIVIC_INTEGRITY";
/** Harm-blocking threshold levels for content-safety filtering. */
type SafetyThreshold =
| "HARM_BLOCK_THRESHOLD_UNSPECIFIED"
| "BLOCK_LOW_AND_ABOVE"
| "BLOCK_MEDIUM_AND_ABOVE"
| "BLOCK_ONLY_HIGH"
| "BLOCK_NONE"
| "OFF";

Usage Examples:
// NOTE(review): in the AI SDK, provider-specific options must be nested under
// the provider's key (`google`) inside `providerOptions` — flat options are
// silently ignored.
import { google } from "@ai-sdk/google";
import { generateObject, generateText } from "ai";
import { z } from "zod";

// Basic safety configuration
const result = await generateText({
  model: google("gemini-1.5-pro"),
  prompt: "Tell me about AI safety",
  providerOptions: {
    google: {
      safetySettings: [
        {
          category: "HARM_CATEGORY_HATE_SPEECH",
          threshold: "BLOCK_MEDIUM_AND_ABOVE",
        },
        {
          category: "HARM_CATEGORY_DANGEROUS_CONTENT",
          threshold: "BLOCK_MEDIUM_AND_ABOVE",
        },
      ],
    },
  },
});

// Thinking mode configuration
const thinkingResult = await generateText({
  model: google("gemini-2.0-flash-thinking-exp-01-21"),
  prompt: "Solve this complex math problem: 2x + 5 = 17",
  providerOptions: {
    google: {
      thinkingConfig: {
        thinkingBudget: 1000,
        includeThoughts: true,
      },
    },
  },
});

// Multimodal response configuration
const multimodalResult = await generateText({
  model: google("gemini-2.5-flash-image-preview"),
  prompt: "Create an image of a sunset and describe it",
  providerOptions: {
    google: {
      responseModalities: ["TEXT", "IMAGE"],
    },
  },
});

// Structured outputs — use generateObject with a schema
// (generateText has no `output`/`schema` options).
const structuredResult = await generateObject({
  model: google("gemini-1.5-pro"),
  prompt: "Extract person information",
  providerOptions: {
    google: {
      structuredOutputs: true,
    },
  },
  schema: z.object({
    name: z.string(),
    age: z.number(),
    city: z.string(),
  }),
});

Response metadata from Google Generative AI models including grounding information and safety ratings.
/**
 * Provider metadata returned by Google Generative AI models.
 * Each field is null when the response did not include that information.
 */
interface GoogleGenerativeAIProviderMetadata {
/** Grounding metadata from Google Search tool usage */
groundingMetadata: GoogleGenerativeAIGroundingMetadata | null;
/** URL context metadata from URL context tool usage */
urlContextMetadata: GoogleGenerativeAIUrlContextMetadata | null;
/** Safety ratings for the generated content */
safetyRatings: GoogleGenerativeAISafetyRating[] | null;
}
/** Grounding information produced when the Google Search tool is used. */
interface GoogleGenerativeAIGroundingMetadata {
/** Source chunks the response was grounded on */
chunks: GroundingChunk[];
/** Search queries issued while grounding the response */
searchQueries: string[];
}
/** Metadata about URLs consulted when the URL context tool is used. */
interface GoogleGenerativeAIUrlContextMetadata {
urls: UrlInfo[];
}
/** Safety rating reported for the generated content in one harm category. */
interface GoogleGenerativeAISafetyRating {
category: SafetyCategory;
/** Model-assessed likelihood of harmful content in this category */
probability: "NEGLIGIBLE" | "LOW" | "MEDIUM" | "HIGH";
/** Whether the content was blocked for this category */
blocked: boolean;
}

Usage Examples:
import { google } from "@ai-sdk/google";
import { generateText } from "ai";

const result = await generateText({
  model: google("gemini-1.5-pro"),
  prompt: "What's the latest news about AI?",
  tools: {
    googleSearch: google.tools.googleSearch(),
  },
});

// Access metadata — the AI SDK exposes provider-specific metadata under
// `providerMetadata` keyed by provider name (`google`), not `responseMetadata`.
const metadata = result.providerMetadata?.google;
if (metadata?.groundingMetadata) {
  console.log("Search queries used:", metadata.groundingMetadata.searchQueries);
  console.log("Sources found:", metadata.groundingMetadata.chunks.length);
}
if (metadata?.safetyRatings) {
  for (const rating of metadata.safetyRatings) {
    console.log(`${rating.category}: ${rating.probability} (blocked: ${rating.blocked})`);
  }
}

Reuse context across multiple requests for efficiency.
// Reuse a previously created cached context. Google-specific options must be
// nested under the `google` key of `providerOptions`.
const result = await generateText({
  model: google("gemini-1.5-pro"),
  prompt: "Continue the story",
  providerOptions: {
    google: {
      cachedContent: "cachedContents/your-cached-content-id",
    },
  },
});

Enable reasoning traces for complex problem-solving.
// Thinking mode — provider-specific options go under `providerOptions.google`.
const result = await generateText({
  model: google("gemini-2.0-flash-thinking-exp-01-21"),
  prompt: "Analyze this complex situation and provide recommendations",
  providerOptions: {
    google: {
      thinkingConfig: {
        thinkingBudget: 2000, // Thinking steps budget
        includeThoughts: true, // Include reasoning in response
      },
    },
  },
});
// Access thinking traces
console.log("Reasoning process:", result.experimental_thoughts);

Enable timestamp understanding for audio files.
// Audio timestamp understanding — `audioTimestamp` is a Google-specific
// option and must be nested under `providerOptions.google`.
const result = await generateText({
  model: google("gemini-1.5-pro"),
  messages: [
    {
      role: "user",
      content: [
        { type: "text", text: "Transcribe this audio with timestamps" },
        {
          type: "file",
          data: audioFileBuffer,
          mimeType: "audio/wav",
        },
      ],
    },
  ],
  providerOptions: {
    google: {
      audioTimestamp: true,
    },
  },
});

Add billing labels for cost tracking (Vertex AI only).
// Billing labels (Vertex AI only) — nested under `providerOptions.google`
// like all Google-specific options.
const result = await generateText({
  model: google("gemini-1.5-pro"),
  prompt: "Generate a report",
  providerOptions: {
    google: {
      labels: {
        "project": "my-ai-project",
        "environment": "production",
        "team": "data-science",
      },
    },
  },
});