Google Gen AI JavaScript SDK for building applications powered by Gemini with content generation, image/video generation, function calling, caching, and real-time live sessions
The Chats module provides multi-turn conversation management with history tracking, streaming support, and automatic context management.
Create a new chat session with optional configuration and initial history.
/**
* Create a new chat session
* @param params - Chat creation parameters
* @returns Chat instance for multi-turn conversation
*/
function create(params: CreateChatParameters): Chat;
interface CreateChatParameters {
/** Model name (e.g., 'gemini-2.0-flash') */
model: string;
/** Generation configuration */
config?: GenerateContentConfig;
/** Initial chat history */
history?: Content[];
}

Usage Examples:
import { GoogleGenAI } from '@google/genai';
const client = new GoogleGenAI({ apiKey: 'YOUR_API_KEY' });
// Create basic chat session
const chat = client.chats.create({
model: 'gemini-2.0-flash'
});
// Create chat with configuration
const configuredChat = client.chats.create({
model: 'gemini-2.0-flash',
config: {
temperature: 0.9,
maxOutputTokens: 2048,
systemInstruction: 'You are a helpful coding assistant'
}
});
// Create chat with initial history
const chatWithHistory = client.chats.create({
model: 'gemini-2.0-flash',
history: [
{ role: 'user', parts: [{ text: 'Hello!' }] },
{ role: 'model', parts: [{ text: 'Hi! How can I help you today?' }] }
]
});

Send a message and wait for the complete response.
/**
* Send message and get complete response
* @param params - Message parameters
* @returns Promise resolving to generation response
*/
function sendMessage(
params: SendMessageParameters
): Promise<GenerateContentResponse>;
interface SendMessageParameters {
/** Message content (string, Part, or array of Parts) */
message: ContentUnion;
}

Usage Examples:
// Simple text message
const response1 = await chat.sendMessage({
message: 'What is the capital of France?'
});
console.log(response1.text);
// Follow-up message (context is maintained)
const response2 = await chat.sendMessage({
message: 'What is its population?'
});
console.log(response2.text);
// Multimodal message
const response3 = await chat.sendMessage({
message: [
{ text: 'What is in this image?' },
{ fileData: {
fileUri: 'gs://bucket/image.jpg',
mimeType: 'image/jpeg'
}}
]
});

Send a message with streaming response for real-time output.
/**
* Send message with streaming response
* @param params - Message parameters
* @returns Promise resolving to async generator of response chunks
*/
function sendMessageStream(
params: SendMessageParameters
): Promise<AsyncGenerator<GenerateContentResponse>>;

Usage Examples:
// Stream response
const stream = await chat.sendMessageStream({
message: 'Write a story about a robot'
});
for await (const chunk of stream) {
process.stdout.write(chunk.text || '');
}
console.log('\n');
// Stream with progress tracking
const progressStream = await chat.sendMessageStream({
message: 'Explain quantum computing'
});
let tokens = 0;
for await (const chunk of progressStream) {
if (chunk.text) {
console.log(chunk.text);
}
if (chunk.usageMetadata) {
tokens = chunk.usageMetadata.totalTokenCount || 0;
}
}
console.log(`\nTotal tokens: ${tokens}`);

Retrieve the conversation history.
/**
* Get chat history
* @param curated - Whether to return curated history (default: false)
* @returns Array of Content messages
*/
function getHistory(curated?: boolean): Content[];

Usage Examples:
// Get full conversation history
const fullHistory = chat.getHistory();
console.log('Full history:', fullHistory);
// Get curated history (excludes function call internals)
const curatedHistory = chat.getHistory(true);
console.log('Curated history:', curatedHistory);
// Inspect history
fullHistory.forEach((content, index) => {
console.log(`Turn ${index}: ${content.role}`);
content.parts?.forEach(part => {
if (part.text) {
console.log(` Text: ${part.text}`);
}
if (part.functionCall) {
console.log(` Function call: ${part.functionCall.name}`);
}
});
});

Response from chat message.
interface GenerateContentResponse {
/** Response candidates */
candidates?: Candidate[];
/** Helper property for first candidate text */
text?: string;
/** Function calls to execute */
functionCalls?: FunctionCall[];
/** Token usage information */
usageMetadata?: UsageMetadata;
/** Prompt evaluation feedback */
promptFeedback?: PromptFeedback;
/** Model version used */
modelVersion?: string;
/** Automatic function calling history */
automaticFunctionCallingHistory?: Content[];
/** HTTP response metadata */
sdkHttpResponse?: HttpResponse;
}

Message content structure.
interface Content {
/** List of content parts */
parts?: Part[];
/** Role ('user' or 'model') */
role?: string;
}

Flexible input type for messages.
/** Content, Part array, or Part/string */
type ContentUnion = Content | PartUnion[] | PartUnion;
/** Part or string */
type PartUnion = Part | string;

Configuration options for chat generation.
interface GenerateContentConfig {
/** Randomness in generation (0.0-2.0) */
temperature?: number;
/** Nucleus sampling threshold (0.0-1.0) */
topP?: number;
/** Top-k sampling parameter */
topK?: number;
/** Number of response candidates */
candidateCount?: number;
/** Maximum output tokens */
maxOutputTokens?: number;
/** Stop sequences to end generation */
stopSequences?: string[];
/** Presence penalty (-2.0 to 2.0) */
presencePenalty?: number;
/** Frequency penalty (-2.0 to 2.0) */
frequencyPenalty?: number;
/** Output modalities (TEXT, AUDIO, IMAGE) */
responseModalities?: Modality[];
/** System instructions for the model */
systemInstruction?: Content | string;
/** Tools/functions available to model */
tools?: ToolListUnion;
/** Tool configuration */
toolConfig?: ToolConfig;
/** Safety filter settings */
safetySettings?: SafetySetting[];
/** Cached content reference */
cachedContent?: string;
/** Automatic function calling configuration */
automaticFunctionCalling?: AutomaticFunctionCallingConfig;
/** Thinking configuration for extended reasoning */
thinkingConfig?: ThinkingConfig;
/** Schema for structured output */
responseSchema?: Schema;
/** JSON schema for structured output */
responseJsonSchema?: unknown;
/** Response MIME type (e.g., 'application/json') */
responseMimeType?: string;
/** HTTP options */
httpOptions?: HttpOptions;
/** Abort signal for cancellation */
abortSignal?: AbortSignal;
}

import { GoogleGenAI } from '@google/genai';
const client = new GoogleGenAI({ apiKey: 'YOUR_API_KEY' });
// Create chat session
const chat = client.chats.create({
model: 'gemini-2.0-flash'
});
// Have a multi-turn conversation
const r1 = await chat.sendMessage({
message: 'Hello! Can you help me learn about astronomy?'
});
console.log('Assistant:', r1.text);
const r2 = await chat.sendMessage({
message: 'What is a black hole?'
});
console.log('Assistant:', r2.text);
const r3 = await chat.sendMessage({
message: 'How are they formed?'
});
console.log('Assistant:', r3.text);
// Review conversation history
const history = chat.getHistory();
console.log(`\nConversation had ${history.length} turns`);

// Create specialized assistant
const codeAssistant = client.chats.create({
model: 'gemini-2.0-flash',
config: {
systemInstruction: `You are an expert Python programmer.
Always provide code examples with detailed comments.
Follow PEP 8 style guidelines.`
}
});
const codeResponse = await codeAssistant.sendMessage({
message: 'How do I read a CSV file?'
});
console.log(codeResponse.text);

import { Tool, FunctionDeclaration, Type } from '@google/genai';
// Define weather tool
const weatherTool: Tool = {
functionDeclarations: [{
name: 'getWeather',
description: 'Get current weather for a location',
parameters: {
type: Type.OBJECT,
properties: {
location: {
type: Type.STRING,
description: 'City name'
},
unit: {
type: Type.STRING,
enum: ['celsius', 'fahrenheit'],
description: 'Temperature unit'
}
},
required: ['location']
}
}]
};
// Create chat with tools
const chatWithTools = client.chats.create({
model: 'gemini-2.0-flash',
config: {
tools: [weatherTool]
}
});
// Send message that triggers function call
const response = await chatWithTools.sendMessage({
message: 'What is the weather in Paris?'
});
// Handle function calls
if (response.functionCalls) {
console.log('Function calls:', response.functionCalls);
// Execute functions and send results back...
}

// Create chat for streaming
const streamChat = client.chats.create({
model: 'gemini-2.0-flash',
config: {
temperature: 0.8
}
});
// Interactive streaming conversation
console.log('User: Tell me a short story about space exploration');
console.log('Assistant: ');
const stream = await streamChat.sendMessageStream({
message: 'Tell me a short story about space exploration'
});
for await (const chunk of stream) {
process.stdout.write(chunk.text || '');
}
console.log('\n');
// Continue conversation
console.log('User: Make it more exciting');
console.log('Assistant: ');
const stream2 = await streamChat.sendMessageStream({
message: 'Make it more exciting'
});
for await (const chunk of stream2) {
process.stdout.write(chunk.text || '');
}
console.log('\n');

// Upload an image first
const imageFile = await client.files.upload({
file: './diagram.png',
mimeType: 'image/png'
});
// Create chat and send image
const multimodalChat = client.chats.create({
model: 'gemini-2.0-flash'
});
const response1 = await multimodalChat.sendMessage({
message: [
{ text: 'What does this diagram show?' },
{ fileData: {
fileUri: imageFile.uri,
mimeType: 'image/png'
}}
]
});
console.log(response1.text);
// Follow up with text only
const response2 = await multimodalChat.sendMessage({
message: 'Can you explain it in simpler terms?'
});
console.log(response2.text);

// Save chat history
const chat1 = client.chats.create({
model: 'gemini-2.0-flash'
});
await chat1.sendMessage({ message: 'Hi, my name is Alice' });
await chat1.sendMessage({ message: 'What is machine learning?' });
const savedHistory = chat1.getHistory();
// Later, restore chat from saved history
const chat2 = client.chats.create({
model: 'gemini-2.0-flash',
history: savedHistory
});
// Context is preserved
const response = await chat2.sendMessage({
message: 'What is my name?'
});
console.log(response.text); // Should remember "Alice"

import { HarmCategory, HarmBlockThreshold } from '@google/genai';
const safeChat = client.chats.create({
model: 'gemini-2.0-flash',
config: {
safetySettings: [
{
category: HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT,
threshold: HarmBlockThreshold.BLOCK_LOW_AND_ABOVE
},
{
category: HarmCategory.HARM_CATEGORY_HARASSMENT,
threshold: HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE
}
]
}
});
const safeResponse = await safeChat.sendMessage({
message: 'Tell me about safety in extreme sports'
});
// Check for safety blocks
if (safeResponse.promptFeedback?.blockReason) {
console.log('Message blocked:', safeResponse.promptFeedback.blockReason);
}

import { Type } from '@google/genai';
const structuredChat = client.chats.create({
model: 'gemini-2.0-flash',
config: {
responseMimeType: 'application/json',
responseSchema: {
type: Type.OBJECT,
properties: {
answer: { type: Type.STRING },
confidence: { type: Type.NUMBER },
sources: {
type: Type.ARRAY,
items: { type: Type.STRING }
}
}
}
}
});
const structuredResponse = await structuredChat.sendMessage({
message: 'What is quantum computing?'
});
const data = JSON.parse(structuredResponse.text || '{}');
console.log('Answer:', data.answer);
console.log('Confidence:', data.confidence);
console.log('Sources:', data.sources);

const chat = client.chats.create({
model: 'gemini-2.0-flash'
});
let totalTokens = 0;
const response1 = await chat.sendMessage({
message: 'Explain neural networks'
});
totalTokens += response1.usageMetadata?.totalTokenCount || 0;
const response2 = await chat.sendMessage({
message: 'Give me an example'
});
totalTokens += response2.usageMetadata?.totalTokenCount || 0;
console.log(`Total tokens used in conversation: ${totalTokens}`);
console.log(`Cached tokens: ${response2.usageMetadata?.cachedContentTokenCount || 0}`);

Install with Tessl CLI
npx tessl i tessl/npm-google--genai