# Chat Sessions

The Chats module provides multi-turn conversation management with history tracking, streaming support, and automatic context management.

## Capabilities

### create

Create a new chat session with optional configuration and initial history.

```typescript { .api }
/**
 * Create a new chat session
 * @param params - Chat creation parameters
 * @returns Chat instance for multi-turn conversation
 */
function create(params: CreateChatParameters): Chat;

interface CreateChatParameters {
  /** Model name (e.g., 'gemini-2.0-flash') */
  model: string;
  /** Generation configuration */
  config?: GenerateContentConfig;
  /** Initial chat history */
  history?: Content[];
}
```

**Usage Examples:**

```typescript
import { GoogleGenAI } from '@google/genai';

const client = new GoogleGenAI({ apiKey: 'YOUR_API_KEY' });

// Create basic chat session
const chat = client.chats.create({
  model: 'gemini-2.0-flash'
});

// Create chat with configuration
const configuredChat = client.chats.create({
  model: 'gemini-2.0-flash',
  config: {
    temperature: 0.9,
    maxOutputTokens: 2048,
    systemInstruction: 'You are a helpful coding assistant'
  }
});

// Create chat with initial history
const chatWithHistory = client.chats.create({
  model: 'gemini-2.0-flash',
  history: [
    { role: 'user', parts: [{ text: 'Hello!' }] },
    { role: 'model', parts: [{ text: 'Hi! How can I help you today?' }] }
  ]
});
```

## Chat Instance Methods

### sendMessage

Send a message and wait for the complete response.

```typescript { .api }
/**
 * Send message and get complete response
 * @param params - Message parameters
 * @returns Promise resolving to generation response
 */
function sendMessage(
  params: SendMessageParameters
): Promise<GenerateContentResponse>;

interface SendMessageParameters {
  /** Message content (string, Part, or array of Parts) */
  message: ContentUnion;
}
```

**Usage Examples:**

```typescript
// Simple text message
const response1 = await chat.sendMessage({
  message: 'What is the capital of France?'
});
console.log(response1.text);

// Follow-up message (context is maintained)
const response2 = await chat.sendMessage({
  message: 'What is its population?'
});
console.log(response2.text);

// Multimodal message
const response3 = await chat.sendMessage({
  message: [
    { text: 'What is in this image?' },
    { fileData: {
      fileUri: 'gs://bucket/image.jpg',
      mimeType: 'image/jpeg'
    }}
  ]
});
```

### sendMessageStream

Send a message with streaming response for real-time output.

```typescript { .api }
/**
 * Send message with streaming response
 * @param params - Message parameters
 * @returns Promise resolving to async generator of response chunks
 */
function sendMessageStream(
  params: SendMessageParameters
): Promise<AsyncGenerator<GenerateContentResponse>>;
```

**Usage Examples:**

```typescript
// Stream response
const stream = await chat.sendMessageStream({
  message: 'Write a story about a robot'
});

for await (const chunk of stream) {
  process.stdout.write(chunk.text || '');
}
console.log('\n');

// Stream with progress tracking
const progressStream = await chat.sendMessageStream({
  message: 'Explain quantum computing'
});

let tokens = 0;
for await (const chunk of progressStream) {
  if (chunk.text) {
    console.log(chunk.text);
  }
  if (chunk.usageMetadata) {
    tokens = chunk.usageMetadata.totalTokenCount || 0;
  }
}
console.log(`\nTotal tokens: ${tokens}`);
```

### getHistory

Retrieve the conversation history.

```typescript { .api }
/**
 * Get chat history
 * @param curated - Whether to return curated history (default: false)
 * @returns Array of Content messages
 */
function getHistory(curated?: boolean): Content[];
```

**Usage Examples:**

```typescript
// Get full conversation history
const fullHistory = chat.getHistory();
console.log('Full history:', fullHistory);

// Get curated history (excludes function call internals)
const curatedHistory = chat.getHistory(true);
console.log('Curated history:', curatedHistory);

// Inspect history
fullHistory.forEach((content, index) => {
  console.log(`Turn ${index}: ${content.role}`);
  content.parts?.forEach(part => {
    if (part.text) {
      console.log(` Text: ${part.text}`);
    }
    if (part.functionCall) {
      console.log(` Function call: ${part.functionCall.name}`);
    }
  });
});
```

## Types

### GenerateContentResponse

Response from chat message.

```typescript { .api }
interface GenerateContentResponse {
  /** Response candidates */
  candidates?: Candidate[];
  /** Helper property for first candidate text */
  text?: string;
  /** Function calls to execute */
  functionCalls?: FunctionCall[];
  /** Token usage information */
  usageMetadata?: UsageMetadata;
  /** Prompt evaluation feedback */
  promptFeedback?: PromptFeedback;
  /** Model version used */
  modelVersion?: string;
  /** Automatic function calling history */
  automaticFunctionCallingHistory?: Content[];
  /** HTTP response metadata */
  sdkHttpResponse?: HttpResponse;
}
```

### Content

Message content structure.

```typescript { .api }
interface Content {
  /** List of content parts */
  parts?: Part[];
  /** Role ('user' or 'model') */
  role?: string;
}
```

### ContentUnion

Flexible input type for messages.

```typescript { .api }
/** Content, Part array, or Part/string */
type ContentUnion = Content | PartUnion[] | PartUnion;

/** Part or string */
type PartUnion = Part | string;
```

### GenerateContentConfig

Configuration options for chat generation.

```typescript { .api }
interface GenerateContentConfig {
  /** Randomness in generation (0.0-2.0) */
  temperature?: number;
  /** Nucleus sampling threshold (0.0-1.0) */
  topP?: number;
  /** Top-k sampling parameter */
  topK?: number;
  /** Number of response candidates */
  candidateCount?: number;
  /** Maximum output tokens */
  maxOutputTokens?: number;
  /** Stop sequences to end generation */
  stopSequences?: string[];
  /** Presence penalty (-2.0 to 2.0) */
  presencePenalty?: number;
  /** Frequency penalty (-2.0 to 2.0) */
  frequencyPenalty?: number;
  /** Output modalities (TEXT, AUDIO, IMAGE) */
  responseModalities?: Modality[];
  /** System instructions for the model */
  systemInstruction?: Content | string;
  /** Tools/functions available to model */
  tools?: ToolListUnion;
  /** Tool configuration */
  toolConfig?: ToolConfig;
  /** Safety filter settings */
  safetySettings?: SafetySetting[];
  /** Cached content reference */
  cachedContent?: string;
  /** Automatic function calling configuration */
  automaticFunctionCalling?: AutomaticFunctionCallingConfig;
  /** Thinking configuration for extended reasoning */
  thinkingConfig?: ThinkingConfig;
  /** Schema for structured output */
  responseSchema?: Schema;
  /** JSON schema for structured output */
  responseJsonSchema?: unknown;
  /** Response MIME type (e.g., 'application/json') */
  responseMimeType?: string;
  /** HTTP options */
  httpOptions?: HttpOptions;
  /** Abort signal for cancellation */
  abortSignal?: AbortSignal;
}
```

## Complete Examples

### Basic Chat Conversation

```typescript
import { GoogleGenAI } from '@google/genai';

const client = new GoogleGenAI({ apiKey: 'YOUR_API_KEY' });

// Create chat session
const chat = client.chats.create({
  model: 'gemini-2.0-flash'
});

// Have a multi-turn conversation
const r1 = await chat.sendMessage({
  message: 'Hello! Can you help me learn about astronomy?'
});
console.log('Assistant:', r1.text);

const r2 = await chat.sendMessage({
  message: 'What is a black hole?'
});
console.log('Assistant:', r2.text);

const r3 = await chat.sendMessage({
  message: 'How are they formed?'
});
console.log('Assistant:', r3.text);

// Review conversation history
const history = chat.getHistory();
console.log(`\nConversation had ${history.length} turns`);
```

### Chat with System Instructions

```typescript
// Create specialized assistant
const codeAssistant = client.chats.create({
  model: 'gemini-2.0-flash',
  config: {
    systemInstruction: `You are an expert Python programmer.
Always provide code examples with detailed comments.
Follow PEP 8 style guidelines.`
  }
});

const codeResponse = await codeAssistant.sendMessage({
  message: 'How do I read a CSV file?'
});
console.log(codeResponse.text);
```

### Chat with Function Calling

```typescript
import { Tool, FunctionDeclaration, Type } from '@google/genai';

// Define weather tool
const weatherTool: Tool = {
  functionDeclarations: [{
    name: 'getWeather',
    description: 'Get current weather for a location',
    parameters: {
      type: Type.OBJECT,
      properties: {
        location: {
          type: Type.STRING,
          description: 'City name'
        },
        unit: {
          type: Type.STRING,
          enum: ['celsius', 'fahrenheit'],
          description: 'Temperature unit'
        }
      },
      required: ['location']
    }
  }]
};

// Create chat with tools
const chatWithTools = client.chats.create({
  model: 'gemini-2.0-flash',
  config: {
    tools: [weatherTool]
  }
});

// Send message that triggers function call
const response = await chatWithTools.sendMessage({
  message: 'What is the weather in Paris?'
});

// Handle function calls
if (response.functionCalls) {
  console.log('Function calls:', response.functionCalls);
  // Execute functions and send results back...
}
```

### Streaming Chat

```typescript
// Create chat for streaming
const streamChat = client.chats.create({
  model: 'gemini-2.0-flash',
  config: {
    temperature: 0.8
  }
});

// Interactive streaming conversation
console.log('User: Tell me a short story about space exploration');
console.log('Assistant: ');

const stream = await streamChat.sendMessageStream({
  message: 'Tell me a short story about space exploration'
});

for await (const chunk of stream) {
  process.stdout.write(chunk.text || '');
}
console.log('\n');

// Continue conversation
console.log('User: Make it more exciting');
console.log('Assistant: ');

const stream2 = await streamChat.sendMessageStream({
  message: 'Make it more exciting'
});

for await (const chunk of stream2) {
  process.stdout.write(chunk.text || '');
}
console.log('\n');
```

### Chat with Multimodal Input

```typescript
// Upload an image first
const imageFile = await client.files.upload({
  file: './diagram.png',
  mimeType: 'image/png'
});

// Create chat and send image
const multimodalChat = client.chats.create({
  model: 'gemini-2.0-flash'
});

const response1 = await multimodalChat.sendMessage({
  message: [
    { text: 'What does this diagram show?' },
    { fileData: {
      fileUri: imageFile.uri,
      mimeType: 'image/png'
    }}
  ]
});
console.log(response1.text);

// Follow up with text only
const response2 = await multimodalChat.sendMessage({
  message: 'Can you explain it in simpler terms?'
});
console.log(response2.text);
```

### Chat with History Restoration

```typescript
// Save chat history
const chat1 = client.chats.create({
  model: 'gemini-2.0-flash'
});

await chat1.sendMessage({ message: 'Hi, my name is Alice' });
await chat1.sendMessage({ message: 'What is machine learning?' });

const savedHistory = chat1.getHistory();

// Later, restore chat from saved history
const chat2 = client.chats.create({
  model: 'gemini-2.0-flash',
  history: savedHistory
});

// Context is preserved
const response = await chat2.sendMessage({
  message: 'What is my name?'
});
console.log(response.text); // Should remember "Alice"
```

### Chat with Safety Settings

```typescript
import { HarmCategory, HarmBlockThreshold } from '@google/genai';

const safeChat = client.chats.create({
  model: 'gemini-2.0-flash',
  config: {
    safetySettings: [
      {
        category: HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT,
        threshold: HarmBlockThreshold.BLOCK_LOW_AND_ABOVE
      },
      {
        category: HarmCategory.HARM_CATEGORY_HARASSMENT,
        threshold: HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE
      }
    ]
  }
});

const safeResponse = await safeChat.sendMessage({
  message: 'Tell me about safety in extreme sports'
});

// Check for safety blocks
if (safeResponse.promptFeedback?.blockReason) {
  console.log('Message blocked:', safeResponse.promptFeedback.blockReason);
}
```

### Chat with Structured Output

```typescript
import { Type } from '@google/genai';

const structuredChat = client.chats.create({
  model: 'gemini-2.0-flash',
  config: {
    responseMimeType: 'application/json',
    responseSchema: {
      type: Type.OBJECT,
      properties: {
        answer: { type: Type.STRING },
        confidence: { type: Type.NUMBER },
        sources: {
          type: Type.ARRAY,
          items: { type: Type.STRING }
        }
      }
    }
  }
});

const structuredResponse = await structuredChat.sendMessage({
  message: 'What is quantum computing?'
});

const data = JSON.parse(structuredResponse.text || '{}');
console.log('Answer:', data.answer);
console.log('Confidence:', data.confidence);
console.log('Sources:', data.sources);
```

### Token Usage Tracking

```typescript
const chat = client.chats.create({
  model: 'gemini-2.0-flash'
});

let totalTokens = 0;

const response1 = await chat.sendMessage({
  message: 'Explain neural networks'
});
totalTokens += response1.usageMetadata?.totalTokenCount || 0;

const response2 = await chat.sendMessage({
  message: 'Give me an example'
});
totalTokens += response2.usageMetadata?.totalTokenCount || 0;

console.log(`Total tokens used in conversation: ${totalTokens}`);
console.log(`Cached tokens: ${response2.usageMetadata?.cachedContentTokenCount || 0}`);
```
579