# Model Management

The Models module provides capabilities for listing, retrieving, updating, and deleting models, especially useful for managing tuned models.

## Capabilities

### list

List available models with pagination.

```typescript { .api }
/**
 * List available models with pagination
 * @param params - List parameters
 * @returns Promise resolving to pager of models
 */
function list(
  params?: ListModelsParameters
): Promise<Pager<Model>>;

interface ListModelsParameters {
  /** Page size */
  pageSize?: number;
  /** Page token for pagination */
  pageToken?: string;
}
```

**Usage Examples:**

```typescript
import { GoogleGenAI } from '@google/genai';

const client = new GoogleGenAI({ apiKey: 'YOUR_API_KEY' });

// List all available models
const pager = await client.models.list({
  pageSize: 50
});

for await (const model of pager) {
  console.log(`Model: ${model.name}`);
  console.log(` Display Name: ${model.displayName}`);
  console.log(` Description: ${model.description}`);
  console.log(` Input Token Limit: ${model.inputTokenLimit}`);
  console.log(` Output Token Limit: ${model.outputTokenLimit}`);
  console.log(` Supported Methods: ${model.supportedGenerationMethods?.join(', ')}`);
  console.log('');
}

// Manual pagination
const page1 = await client.models.list({ pageSize: 10 });
console.log('First page:', page1.page);

if (page1.hasNextPage()) {
  const page2 = await page1.nextPage();
  console.log('Second page:', page2);
}
```

### get

Get detailed information about a specific model.

```typescript { .api }
/**
 * Get model information by name
 * @param params - Get parameters
 * @returns Promise resolving to model
 */
function get(
  params: GetModelParameters
): Promise<Model>;

interface GetModelParameters {
  /** Model name (e.g., 'gemini-2.0-flash', 'models/gemini-2.0-flash', 'tunedModels/my-model-xyz') */
  model: string;
}
```

**Usage Examples:**

```typescript
// Get model details
const model = await client.models.get({
  model: 'gemini-2.0-flash'
});

console.log('Model:', model.name);
console.log('Version:', model.version);
console.log('Display Name:', model.displayName);
console.log('Description:', model.description);
console.log('Base Model ID:', model.baseModelId);
console.log('Input Token Limit:', model.inputTokenLimit);
console.log('Output Token Limit:', model.outputTokenLimit);
console.log('Default Temperature:', model.temperature);
console.log('Default Top-P:', model.topP);
console.log('Default Top-K:', model.topK);
console.log('Supported Methods:', model.supportedGenerationMethods);

// Get tuned model
const tunedModel = await client.models.get({
  model: 'tunedModels/my-custom-model-abc123'
});

console.log('Tuned Model:', tunedModel.name);
console.log('Base Model:', tunedModel.baseModelId);
```

### update

Update tuned model metadata (display name, description).

```typescript { .api }
/**
 * Update tuned model metadata
 * @param params - Update parameters
 * @returns Promise resolving to updated model
 */
function update(
  params: UpdateModelParameters
): Promise<Model>;

interface UpdateModelParameters {
  /** Model name */
  model: string;
  /** New display name */
  displayName?: string;
  /** New description */
  description?: string;
}
```

**Usage Examples:**

```typescript
// Update tuned model metadata
const updated = await client.models.update({
  model: 'tunedModels/my-model-abc123',
  displayName: 'My Updated Model',
  description: 'Updated description with new information'
});

console.log('Updated model:', updated.name);
console.log('New display name:', updated.displayName);
console.log('New description:', updated.description);
```

### delete

Delete a tuned model.

```typescript { .api }
/**
 * Delete tuned model
 * @param params - Delete parameters
 * @returns Promise resolving to deletion response
 */
function delete(
  params: DeleteModelParameters
): Promise<DeleteModelResponse>;

interface DeleteModelParameters {
  /** Model name */
  model: string;
}

interface DeleteModelResponse {
  /** Empty response on success */
}
```

**Usage Examples:**

```typescript
// Delete tuned model
await client.models.delete({
  model: 'tunedModels/my-old-model-xyz'
});

console.log('Model deleted successfully');
```

## Types

### Model

Model information and metadata.

```typescript { .api }
interface Model {
  /** Model name (unique identifier) */
  name?: string;
  /** Base model ID */
  baseModelId?: string;
  /** Model version */
  version?: string;
  /** Display name */
  displayName?: string;
  /** Description */
  description?: string;
  /** Input token limit */
  inputTokenLimit?: number;
  /** Output token limit */
  outputTokenLimit?: number;
  /** Supported generation methods */
  supportedGenerationMethods?: string[];
  /** Default temperature */
  temperature?: number;
  /** Default top-P */
  topP?: number;
  /** Default top-K */
  topK?: number;
}
```

## Complete Examples

### List All Base Models

```typescript
import { GoogleGenAI } from '@google/genai';

const client = new GoogleGenAI({ apiKey: 'YOUR_API_KEY' });

// Get all base models
const models = await client.models.list();

const baseModels: Model[] = [];

for await (const model of models) {
  // Filter for base models (not tuned models)
  if (!model.name?.includes('tunedModels/')) {
    baseModels.push(model);
  }
}

console.log(`Found ${baseModels.length} base models\n`);

// Display by category
const textModels = baseModels.filter(m =>
  m.supportedGenerationMethods?.includes('generateContent')
);

const imageModels = baseModels.filter(m =>
  m.supportedGenerationMethods?.includes('generateImages')
);

const videoModels = baseModels.filter(m =>
  m.supportedGenerationMethods?.includes('generateVideos')
);

const embeddingModels = baseModels.filter(m =>
  m.supportedGenerationMethods?.includes('embedContent')
);

console.log('Text Generation Models:');
textModels.forEach(m => {
  console.log(` - ${m.displayName || m.name}`);
  console.log(` Tokens: ${m.inputTokenLimit} in / ${m.outputTokenLimit} out`);
});

console.log('\nImage Generation Models:');
imageModels.forEach(m => console.log(` - ${m.displayName || m.name}`));

console.log('\nVideo Generation Models:');
videoModels.forEach(m => console.log(` - ${m.displayName || m.name}`));

console.log('\nEmbedding Models:');
embeddingModels.forEach(m => console.log(` - ${m.displayName || m.name}`));
```

### List Tuned Models

```typescript
// Get all tuned models
const models = await client.models.list();

const tunedModels: Model[] = [];

for await (const model of models) {
  if (model.name?.includes('tunedModels/')) {
    tunedModels.push(model);
  }
}

console.log(`Found ${tunedModels.length} tuned models\n`);

tunedModels.forEach(model => {
  console.log(`Tuned Model: ${model.displayName || model.name}`);
  console.log(` Name: ${model.name}`);
  console.log(` Base Model: ${model.baseModelId}`);
  console.log(` Description: ${model.description}`);
  console.log('');
});
```

### Compare Model Capabilities

```typescript
// Compare different models
const modelNames = [
  'gemini-2.0-flash',
  'gemini-1.5-pro',
  'gemini-1.5-flash'
];

const modelDetails = await Promise.all(
  modelNames.map(name =>
    client.models.get({ model: name })
  )
);

console.log('Model Comparison:\n');

// Create comparison table
const headers = ['Model', 'Input Tokens', 'Output Tokens', 'Temp', 'Top-P', 'Top-K'];
console.log(headers.join(' | '));
console.log('-'.repeat(80));

modelDetails.forEach(model => {
  const row = [
    model.displayName || model.name,
    model.inputTokenLimit?.toString() || 'N/A',
    model.outputTokenLimit?.toString() || 'N/A',
    model.temperature?.toString() || 'N/A',
    model.topP?.toString() || 'N/A',
    model.topK?.toString() || 'N/A'
  ];
  console.log(row.join(' | '));
});
```

### Model Selection Helper

```typescript
interface ModelRequirements {
  minInputTokens?: number;
  minOutputTokens?: number;
  supportedMethods?: string[];
}

async function findModels(requirements: ModelRequirements): Promise<Model[]> {
  const allModels = await client.models.list();
  const matching: Model[] = [];

  for await (const model of allModels) {
    let matches = true;

    // Check input token requirement
    if (requirements.minInputTokens &&
        (!model.inputTokenLimit || model.inputTokenLimit < requirements.minInputTokens)) {
      matches = false;
    }

    // Check output token requirement
    if (requirements.minOutputTokens &&
        (!model.outputTokenLimit || model.outputTokenLimit < requirements.minOutputTokens)) {
      matches = false;
    }

    // Check supported methods
    if (requirements.supportedMethods) {
      const hasAllMethods = requirements.supportedMethods.every(method =>
        model.supportedGenerationMethods?.includes(method)
      );
      if (!hasAllMethods) {
        matches = false;
      }
    }

    if (matches) {
      matching.push(model);
    }
  }

  return matching;
}

// Find models with specific requirements
const suitableModels = await findModels({
  minInputTokens: 100000,
  minOutputTokens: 8000,
  supportedMethods: ['generateContent']
});

console.log('Models meeting requirements:');
suitableModels.forEach(m => {
  console.log(` - ${m.displayName || m.name}`);
  console.log(` Tokens: ${m.inputTokenLimit} in / ${m.outputTokenLimit} out`);
});
```

### Manage Tuned Models

```typescript
// List all tuned models and manage them
const models = await client.models.list();

const tunedModels: Model[] = [];
for await (const model of models) {
  if (model.name?.includes('tunedModels/')) {
    tunedModels.push(model);
  }
}

console.log(`Managing ${tunedModels.length} tuned models\n`);

for (const model of tunedModels) {
  console.log(`Model: ${model.name}`);
  console.log(` Display Name: ${model.displayName}`);
  console.log(` Base Model: ${model.baseModelId}`);

  // Update if needed
  if (!model.description) {
    console.log(' Updating with description...');
    const updated = await client.models.update({
      model: model.name!,
      description: `Tuned from ${model.baseModelId}`
    });
    console.log(' Description added');
  }

  // Delete old models (example criteria)
  if (model.displayName?.includes('test-')) {
    console.log(' Deleting test model...');
    await client.models.delete({ model: model.name! });
    console.log(' Deleted');
  }

  console.log('');
}
```

### Model Information Display

```typescript
async function displayModelInfo(modelName: string) {
  const model = await client.models.get({ model: modelName });

  console.log('═'.repeat(80));
  console.log(`Model: ${model.displayName || model.name}`);
  console.log('═'.repeat(80));

  console.log('\nIdentification:');
  console.log(` Name: ${model.name}`);
  console.log(` Version: ${model.version || 'N/A'}`);
  console.log(` Base Model: ${model.baseModelId || 'N/A'}`);

  console.log('\nCapabilities:');
  console.log(` Supported Methods: ${model.supportedGenerationMethods?.join(', ') || 'N/A'}`);

  console.log('\nLimits:');
  console.log(` Input Tokens: ${model.inputTokenLimit || 'N/A'}`);
  console.log(` Output Tokens: ${model.outputTokenLimit || 'N/A'}`);

  console.log('\nDefault Parameters:');
  console.log(` Temperature: ${model.temperature ?? 'N/A'}`);
  console.log(` Top-P: ${model.topP ?? 'N/A'}`);
  console.log(` Top-K: ${model.topK ?? 'N/A'}`);

  if (model.description) {
    console.log('\nDescription:');
    console.log(` ${model.description}`);
  }

  console.log('\n' + '═'.repeat(80));
}

// Display info for specific model
await displayModelInfo('gemini-2.0-flash');
```

### Find Best Model for Task

```typescript
interface TaskRequirements {
  taskType: 'text' | 'image' | 'video' | 'embedding';
  contextSize?: number;
  outputSize?: number;
  preferSpeed?: boolean;
}

async function findBestModel(requirements: TaskRequirements): Promise<Model | null> {
  const allModels = await client.models.list();
  const candidates: Model[] = [];

  // Method mapping
  const methodMap = {
    text: 'generateContent',
    image: 'generateImages',
    video: 'generateVideos',
    embedding: 'embedContent'
  };

  const requiredMethod = methodMap[requirements.taskType];

  for await (const model of allModels) {
    // Must support required method
    if (!model.supportedGenerationMethods?.includes(requiredMethod)) {
      continue;
    }

    // Check context size
    if (requirements.contextSize &&
        (!model.inputTokenLimit || model.inputTokenLimit < requirements.contextSize)) {
      continue;
    }

    // Check output size
    if (requirements.outputSize &&
        (!model.outputTokenLimit || model.outputTokenLimit < requirements.outputSize)) {
      continue;
    }

    candidates.push(model);
  }

  if (candidates.length === 0) {
    return null;
  }

  // Sort by preference
  if (requirements.preferSpeed) {
    // Prefer flash models for speed
    candidates.sort((a, b) => {
      const aIsFlash = a.name?.includes('flash') ? 1 : 0;
      const bIsFlash = b.name?.includes('flash') ? 1 : 0;
      return bIsFlash - aIsFlash;
    });
  } else {
    // Prefer pro models for quality
    candidates.sort((a, b) => {
      const aIsPro = a.name?.includes('pro') ? 1 : 0;
      const bIsPro = b.name?.includes('pro') ? 1 : 0;
      return bIsPro - aIsPro;
    });
  }

  return candidates[0];
}

// Find best model for task
const bestModel = await findBestModel({
  taskType: 'text',
  contextSize: 50000,
  outputSize: 8000,
  preferSpeed: true
});

if (bestModel) {
  console.log('Recommended model:', bestModel.displayName || bestModel.name);
  console.log('Reasoning: Meets requirements and optimized for speed');
} else {
  console.log('No suitable model found');
}
```

### Export Model Catalog

```typescript
// Export all model information to JSON
const allModels = await client.models.list();
const modelCatalog: Model[] = [];

for await (const model of allModels) {
  modelCatalog.push(model);
}

const catalog = {
  exportDate: new Date().toISOString(),
  modelCount: modelCatalog.length,
  models: modelCatalog
};

// Save to file (Node.js)
require('fs').writeFileSync(
  './model-catalog.json',
  JSON.stringify(catalog, null, 2)
);

console.log(`Exported ${modelCatalog.length} models to model-catalog.json`);
```

### Validate Model Before Use

```typescript
async function validateModel(modelName: string, requirements: {
  minInputTokens: number;
  minOutputTokens: number;
  method: string;
}): Promise<boolean> {
  try {
    const model = await client.models.get({ model: modelName });

    // Check method support
    if (!model.supportedGenerationMethods?.includes(requirements.method)) {
      console.error(`Model ${modelName} does not support ${requirements.method}`);
      return false;
    }

    // Check input tokens
    if (model.inputTokenLimit && model.inputTokenLimit < requirements.minInputTokens) {
      console.error(`Model ${modelName} input limit (${model.inputTokenLimit}) is below required (${requirements.minInputTokens})`);
      return false;
    }

    // Check output tokens
    if (model.outputTokenLimit && model.outputTokenLimit < requirements.minOutputTokens) {
      console.error(`Model ${modelName} output limit (${model.outputTokenLimit}) is below required (${requirements.minOutputTokens})`);
      return false;
    }

    console.log(`Model ${modelName} validated successfully`);
    return true;
  } catch (error) {
    console.error(`Model ${modelName} not found or error:`, error);
    return false;
  }
}

// Validate before using
const isValid = await validateModel('gemini-2.0-flash', {
  minInputTokens: 10000,
  minOutputTokens: 2000,
  method: 'generateContent'
});

if (isValid) {
  // Proceed with generation
  const response = await client.models.generateContent({
    model: 'gemini-2.0-flash',
    contents: 'Your prompt here'
  });
}
```