# Content Generation

The Models module provides core text and multimodal content generation capabilities with support for streaming, function calling, structured outputs, and various configuration options.

## Capabilities

### generateContent

Generate text or multimodal content with optional automatic function calling.

```typescript { .api }
/**
 * Generate content from a model
 * @param params - Generation parameters
 * @returns Promise resolving to generation response
 */
function generateContent(
  params: GenerateContentParameters
): Promise<GenerateContentResponse>;

interface GenerateContentParameters {
  /** Model name (e.g., 'gemini-2.0-flash', 'models/gemini-2.0-flash', 'tunedModels/{id}') */
  model: string;
  /** Input content(s) - flexible string, Part, or Content input */
  contents: ContentListUnion;
  /** Generation configuration */
  config?: GenerateContentConfig;
}

interface GenerateContentResponse {
  /** Response candidates */
  candidates?: Candidate[];
  /** Helper property for first candidate text */
  text?: string;
  /** Function calls to execute */
  functionCalls?: FunctionCall[];
  /** Token usage information */
  usageMetadata?: UsageMetadata;
  /** Prompt evaluation feedback */
  promptFeedback?: PromptFeedback;
  /** Model version used */
  modelVersion?: string;
  /** Automatic function calling history */
  automaticFunctionCallingHistory?: Content[];
  /** HTTP response metadata */
  sdkHttpResponse?: HttpResponse;
}
```

**Usage Examples:**

```typescript
import { GoogleGenAI } from '@google/genai';

const client = new GoogleGenAI({ apiKey: 'YOUR_API_KEY' });

// Simple text generation
const response = await client.models.generateContent({
  model: 'gemini-2.0-flash',
  contents: 'Explain quantum computing in simple terms'
});

console.log(response.text);

// With generation config
const configuredResponse = await client.models.generateContent({
  model: 'gemini-2.0-flash',
  contents: 'Write a creative story',
  config: {
    temperature: 1.0,
    maxOutputTokens: 2048,
    topP: 0.95
  }
});

// Multimodal input
const multimodalResponse = await client.models.generateContent({
  model: 'gemini-2.0-flash',
  contents: [
    { role: 'user', parts: [
      { text: 'What is in this image?' },
      { fileData: {
        fileUri: 'gs://bucket/image.jpg',
        mimeType: 'image/jpeg'
      }}
    ]}
  ]
});

// With system instruction
const instructedResponse = await client.models.generateContent({
  model: 'gemini-2.0-flash',
  contents: 'Tell me about dogs',
  config: {
    systemInstruction: 'You are a veterinarian. Always provide medical context.'
  }
});
```

### generateContentStream

Stream content generation for chunk-by-chunk processing.

```typescript { .api }
/**
 * Stream content generation
 * @param params - Generation parameters
 * @returns Promise resolving to async generator of response chunks
 */
function generateContentStream(
  params: GenerateContentParameters
): Promise<AsyncGenerator<GenerateContentResponse>>;
```

**Usage Examples:**

```typescript
// Stream text generation
const stream = await client.models.generateContentStream({
  model: 'gemini-2.0-flash',
  contents: 'Write a long article about artificial intelligence'
});

for await (const chunk of stream) {
  process.stdout.write(chunk.text || '');
}

// Stream with progress tracking
const streamWithProgress = await client.models.generateContentStream({
  model: 'gemini-2.0-flash',
  contents: 'Explain machine learning concepts',
  config: {
    maxOutputTokens: 4096
  }
});

let totalTokens = 0;
for await (const chunk of streamWithProgress) {
  if (chunk.text) {
    console.log(chunk.text);
  }
  if (chunk.usageMetadata) {
    totalTokens = chunk.usageMetadata.totalTokenCount || 0;
  }
}
console.log(`Total tokens used: ${totalTokens}`);
```

### countTokens

Count tokens in content before generation.

```typescript { .api }
/**
 * Count tokens in content
 * @param params - Token counting parameters
 * @returns Promise resolving to token count
 */
function countTokens(
  params: CountTokensParameters
): Promise<CountTokensResponse>;

interface CountTokensParameters {
  /** Model name */
  model: string;
  /** Content to count tokens for */
  contents: ContentListUnion;
}

interface CountTokensResponse {
  /** Total token count */
  totalTokens?: number;
}
```

**Usage Examples:**

```typescript
// Count tokens in a prompt
const tokenCount = await client.models.countTokens({
  model: 'gemini-2.0-flash',
  contents: 'What is the capital of France?'
});

console.log(`Tokens: ${tokenCount.totalTokens}`);

// Count tokens with multimodal content
const multimodalCount = await client.models.countTokens({
  model: 'gemini-2.0-flash',
  contents: [
    { role: 'user', parts: [
      { text: 'Describe this image' },
      { fileData: {
        fileUri: 'gs://bucket/image.jpg',
        mimeType: 'image/jpeg'
      }}
    ]}
  ]
});
```

### computeTokens

Get detailed token information (Vertex AI only).

```typescript { .api }
/**
 * Get detailed token information
 * @param params - Token computation parameters
 * @returns Promise resolving to detailed token information
 */
function computeTokens(
  params: ComputeTokensParameters
): Promise<ComputeTokensResponse>;

interface ComputeTokensParameters {
  /** Model name */
  model: string;
  /** Content to analyze */
  contents: ContentListUnion;
}

interface ComputeTokensResponse {
  /** Total billable characters */
  totalBillableCharacters?: number;
  /** Total token count */
  totalTokens?: number;
  /** Token metadata for each content part */
  tokensMetadataPerPart?: TokensMetadata[];
}

interface TokensMetadata {
  /** Token IDs */
  tokenIds?: number[];
  /** Role of the content */
  role?: string;
}
```

**Usage Examples:**

```typescript
// Get detailed token information (Vertex AI)
const vertexClient = new GoogleGenAI({
  vertexai: true,
  project: 'my-project',
  location: 'us-central1'
});

const tokenInfo = await vertexClient.models.computeTokens({
  model: 'gemini-2.0-flash',
  contents: 'What is machine learning?'
});

console.log(`Total tokens: ${tokenInfo.totalTokens}`);
console.log(`Billable characters: ${tokenInfo.totalBillableCharacters}`);
```

## Types

### GenerateContentConfig

Configuration options for content generation.

```typescript { .api }
interface GenerateContentConfig {
  /** Randomness in generation (0.0-2.0) */
  temperature?: number;
  /** Nucleus sampling threshold (0.0-1.0) */
  topP?: number;
  /** Top-k sampling parameter */
  topK?: number;
  /** Number of response candidates */
  candidateCount?: number;
  /** Maximum output tokens */
  maxOutputTokens?: number;
  /** Stop sequences to end generation */
  stopSequences?: string[];
  /** Presence penalty (-2.0 to 2.0) */
  presencePenalty?: number;
  /** Frequency penalty (-2.0 to 2.0) */
  frequencyPenalty?: number;
  /** Output modalities (TEXT, AUDIO, IMAGE) */
  responseModalities?: Modality[];
  /** System instructions for the model */
  systemInstruction?: Content | string;
  /** Tools/functions available to model */
  tools?: ToolListUnion;
  /** Tool configuration */
  toolConfig?: ToolConfig;
  /** Safety filter settings */
  safetySettings?: SafetySetting[];
  /** Cached content reference */
  cachedContent?: string;
  /** Automatic function calling configuration */
  automaticFunctionCalling?: AutomaticFunctionCallingConfig;
  /** Thinking configuration for extended reasoning */
  thinkingConfig?: ThinkingConfig;
  /** Schema for structured output */
  responseSchema?: Schema;
  /** JSON schema for structured output */
  responseJsonSchema?: unknown;
  /** Response MIME type (e.g., 'application/json') */
  responseMimeType?: string;
  /** HTTP options */
  httpOptions?: HttpOptions;
  /** Abort signal for cancellation */
  abortSignal?: AbortSignal;
}
```

### Candidate

Single response candidate from generation.

```typescript { .api }
interface Candidate {
  /** Generated content */
  content?: Content;
  /** Reason generation stopped */
  finishReason?: FinishReason;
  /** Safety ratings */
  safetyRatings?: SafetyRating[];
  /** Citation metadata */
  citationMetadata?: CitationMetadata;
  /** Token count */
  tokenCount?: number;
  /** Candidate index */
  index?: number;
}
```

### PromptFeedback

Feedback about the prompt evaluation.

```typescript { .api }
interface PromptFeedback {
  /** Block reason if prompt was blocked */
  blockReason?: BlockedReason;
  /** Safety ratings for prompt */
  safetyRatings?: SafetyRating[];
}
```

### UsageMetadata

Token usage information.

```typescript { .api }
interface UsageMetadata {
  /** Prompt token count */
  promptTokenCount?: number;
  /** Response candidates token count */
  candidatesTokenCount?: number;
  /** Total token count */
  totalTokenCount?: number;
  /** Cached content token count */
  cachedContentTokenCount?: number;
}
```

### Content and Part Types

Content structure for inputs and outputs.

```typescript { .api }
interface Content {
  /** List of content parts */
  parts?: Part[];
  /** Role ('user' or 'model') */
  role?: string;
}

interface Part {
  /** Text content */
  text?: string;
  /** Inline binary data (base64) */
  inlineData?: Blob;
  /** URI reference to file */
  fileData?: FileData;
  /** Function call from model */
  functionCall?: FunctionCall;
  /** Function response from user */
  functionResponse?: FunctionResponse;
  /** Video-specific metadata */
  videoMetadata?: VideoMetadata;
  /** Executable code */
  executableCode?: ExecutableCode;
  /** Code execution result */
  codeExecutionResult?: CodeExecutionResult;
  /** Whether part is model's internal thought */
  thought?: boolean;
  /** Media tokenization settings */
  mediaResolution?: PartMediaResolution;
}

interface Blob {
  /** MIME type */
  mimeType?: string;
  /** Base64-encoded data */
  data?: string;
}

interface FileData {
  /** File URI (e.g., 'gs://bucket/file.jpg' or Gemini file URI) */
  fileUri?: string;
  /** MIME type */
  mimeType?: string;
}

interface VideoMetadata {
  /** Video duration (e.g., '3.5s') */
  videoDuration?: string;
}
```

### Union Types

Flexible input types for convenience.

```typescript { .api }
/** Part or string */
type PartUnion = Part | string;

/** Array or single Part/string */
type PartListUnion = PartUnion[] | PartUnion;

/** Content, Part array, or Part/string */
type ContentUnion = Content | PartUnion[] | PartUnion;

/** Content array or any PartUnion combination */
type ContentListUnion = Content | Content[] | PartUnion | PartUnion[];
```

### Safety Types

Safety filter configuration.

```typescript { .api }
interface SafetySetting {
  /** Harm category to filter */
  category?: HarmCategory;
  /** Blocking threshold level */
  threshold?: HarmBlockThreshold;
  /** Blocking method */
  method?: HarmBlockMethod;
}

interface SafetyRating {
  /** Harm category */
  category?: HarmCategory;
  /** Harm probability */
  probability?: HarmProbability;
  /** Whether content was blocked */
  blocked?: boolean;
  /** Probability score (0.0-1.0) */
  probabilityScore?: number;
  /** Harm severity */
  severity?: HarmSeverity;
  /** Severity score */
  severityScore?: number;
}

enum HarmCategory {
  HARM_CATEGORY_UNSPECIFIED = 'HARM_CATEGORY_UNSPECIFIED',
  HARM_CATEGORY_HATE_SPEECH = 'HARM_CATEGORY_HATE_SPEECH',
  HARM_CATEGORY_SEXUALLY_EXPLICIT = 'HARM_CATEGORY_SEXUALLY_EXPLICIT',
  HARM_CATEGORY_HARASSMENT = 'HARM_CATEGORY_HARASSMENT',
  HARM_CATEGORY_DANGEROUS_CONTENT = 'HARM_CATEGORY_DANGEROUS_CONTENT',
  HARM_CATEGORY_CIVIC_INTEGRITY = 'HARM_CATEGORY_CIVIC_INTEGRITY'
}

enum HarmBlockThreshold {
  HARM_BLOCK_THRESHOLD_UNSPECIFIED = 'HARM_BLOCK_THRESHOLD_UNSPECIFIED',
  BLOCK_LOW_AND_ABOVE = 'BLOCK_LOW_AND_ABOVE',
  BLOCK_MEDIUM_AND_ABOVE = 'BLOCK_MEDIUM_AND_ABOVE',
  BLOCK_ONLY_HIGH = 'BLOCK_ONLY_HIGH',
  BLOCK_NONE = 'BLOCK_NONE',
  OFF = 'OFF'
}

enum HarmProbability {
  HARM_PROBABILITY_UNSPECIFIED = 'HARM_PROBABILITY_UNSPECIFIED',
  NEGLIGIBLE = 'NEGLIGIBLE',
  LOW = 'LOW',
  MEDIUM = 'MEDIUM',
  HIGH = 'HIGH'
}
```

### AutomaticFunctionCallingConfig

Configuration for automatic function calling.

```typescript { .api }
interface AutomaticFunctionCallingConfig {
  /** Disable automatic function calling */
  disable?: boolean;
  /** Maximum number of remote calls (default: 10) */
  maximumRemoteCalls?: number;
  /** Include AFC history in response */
  shouldAppendHistory?: boolean;
}
```

### ThinkingConfig

Configuration for extended reasoning.

```typescript { .api }
interface ThinkingConfig {
  /** Whether to include thinking process in response */
  includeThoughts?: boolean;
}
```

### Enumerations

```typescript { .api }
enum FinishReason {
  FINISH_REASON_UNSPECIFIED = 'FINISH_REASON_UNSPECIFIED',
  STOP = 'STOP',
  MAX_TOKENS = 'MAX_TOKENS',
  SAFETY = 'SAFETY',
  RECITATION = 'RECITATION',
  OTHER = 'OTHER',
  BLOCKLIST = 'BLOCKLIST',
  PROHIBITED_CONTENT = 'PROHIBITED_CONTENT',
  SPII = 'SPII',
  MALFORMED_FUNCTION_CALL = 'MALFORMED_FUNCTION_CALL'
}

enum BlockedReason {
  BLOCKED_REASON_UNSPECIFIED = 'BLOCKED_REASON_UNSPECIFIED',
  SAFETY = 'SAFETY',
  OTHER = 'OTHER',
  BLOCKLIST = 'BLOCKLIST',
  PROHIBITED_CONTENT = 'PROHIBITED_CONTENT'
}

enum Modality {
  TEXT = 'TEXT',
  IMAGE = 'IMAGE',
  AUDIO = 'AUDIO'
}
```

## Advanced Examples

### Structured Output with JSON Schema

```typescript
// Generate structured output
const structuredResponse = await client.models.generateContent({
  model: 'gemini-2.0-flash',
  contents: 'List three popular programming languages',
  config: {
    responseMimeType: 'application/json',
    responseSchema: {
      type: Type.OBJECT,
      properties: {
        languages: {
          type: Type.ARRAY,
          items: {
            type: Type.OBJECT,
            properties: {
              name: { type: Type.STRING },
              yearCreated: { type: Type.INTEGER },
              paradigm: { type: Type.STRING }
            },
            required: ['name', 'yearCreated']
          }
        }
      },
      required: ['languages']
    }
  }
});

const data = JSON.parse(structuredResponse.text || '{}');
console.log(data.languages);
```

### Multi-turn Context

```typescript
// Multi-turn conversation without chat session
const response1 = await client.models.generateContent({
  model: 'gemini-2.0-flash',
  contents: [
    { role: 'user', parts: [{ text: 'What is the capital of France?' }] }
  ]
});

const response2 = await client.models.generateContent({
  model: 'gemini-2.0-flash',
  contents: [
    { role: 'user', parts: [{ text: 'What is the capital of France?' }] },
    { role: 'model', parts: [{ text: response1.text || '' }] },
    { role: 'user', parts: [{ text: 'What is its population?' }] }
  ]
});
```

### Safety Settings

```typescript
// Custom safety settings
const safeResponse = await client.models.generateContent({
  model: 'gemini-2.0-flash',
  contents: 'Write a story about adventure',
  config: {
    safetySettings: [
      {
        category: HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT,
        threshold: HarmBlockThreshold.BLOCK_LOW_AND_ABOVE
      },
      {
        category: HarmCategory.HARM_CATEGORY_HATE_SPEECH,
        threshold: HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE
      }
    ]
  }
});

// Check if response was blocked
if (safeResponse.promptFeedback?.blockReason) {
  console.log('Prompt was blocked:', safeResponse.promptFeedback.blockReason);
}
```

### Abort Generation

```typescript
// Cancel generation after timeout
const controller = new AbortController();

setTimeout(() => {
  controller.abort();
}, 5000); // Abort after 5 seconds

try {
  const response = await client.models.generateContent({
    model: 'gemini-2.0-flash',
    contents: 'Write a very long essay',
    config: {
      abortSignal: controller.signal
    }
  });
} catch (error) {
  if (error.name === 'AbortError') {
    console.log('Generation was aborted');
  }
}
```

### Using Cached Content

```typescript
// Create cache first
const cache = await client.caches.create({
  model: 'gemini-2.0-flash',
  contents: [
    { role: 'user', parts: [{ text: 'Large context document...' }] }
  ],
  config: {
    ttl: '3600s'
  }
});

// Use cached content in generation
const cachedResponse = await client.models.generateContent({
  model: 'gemini-2.0-flash',
  contents: 'Summarize the document',
  config: {
    cachedContent: cache.name
  }
});
```