Google Gen AI JavaScript SDK for building applications powered by Gemini, with content generation, image/video generation, function calling, caching, and real-time live sessions.
The Models module provides capabilities for listing, retrieving, updating, and deleting models, especially useful for managing tuned models.
List available models with pagination.
/**
 * List available models with pagination.
 * The returned Pager can be iterated with `for await...of` to walk every
 * model, or paged manually via `page`, `hasNextPage()` and `nextPage()`.
 * @param params - List parameters
 * @returns Promise resolving to pager of models
 */
function list(
params?: ListModelsParameters
): Promise<Pager<Model>>;
interface ListModelsParameters {
/** Maximum number of models returned per page */
pageSize?: number;
/** Page token for pagination */
pageToken?: string;
}

Usage Examples:
// Example: list all available models, then demonstrate manual pagination.
import { GoogleGenAI } from '@google/genai';
const client = new GoogleGenAI({ apiKey: 'YOUR_API_KEY' });
// List all available models
const pager = await client.models.list({
pageSize: 50
});
// `for await` iterates every model the pager yields, page by page.
for await (const model of pager) {
console.log(`Model: ${model.name}`);
console.log(` Display Name: ${model.displayName}`);
console.log(` Description: ${model.description}`);
console.log(` Input Token Limit: ${model.inputTokenLimit}`);
console.log(` Output Token Limit: ${model.outputTokenLimit}`);
console.log(` Supported Methods: ${model.supportedGenerationMethods?.join(', ')}`);
console.log('');
}
// Manual pagination: `page` holds the current page's models;
// `hasNextPage()` / `nextPage()` step through pages explicitly.
const page1 = await client.models.list({ pageSize: 10 });
console.log('First page:', page1.page);
if (page1.hasNextPage()) {
const page2 = await page1.nextPage();
console.log('Second page:', page2);
}

Get detailed information about a specific model.
/**
 * Get model information by name.
 * Accepts bare names ('gemini-2.0-flash'), fully qualified names
 * ('models/gemini-2.0-flash'), and tuned-model names ('tunedModels/...').
 * @param params - Get parameters
 * @returns Promise resolving to model
 */
function get(
params: GetModelParameters
): Promise<Model>;
interface GetModelParameters {
/** Model name (e.g., 'gemini-2.0-flash', 'models/gemini-2.0-flash', 'tunedModels/my-model-xyz') */
model: string;
}

Usage Examples:
// Example: fetch metadata for a base model, then for a tuned model.
// Get model details
const model = await client.models.get({
model: 'gemini-2.0-flash'
});
console.log('Model:', model.name);
console.log('Version:', model.version);
console.log('Display Name:', model.displayName);
console.log('Description:', model.description);
console.log('Base Model ID:', model.baseModelId);
console.log('Input Token Limit:', model.inputTokenLimit);
console.log('Output Token Limit:', model.outputTokenLimit);
console.log('Default Temperature:', model.temperature);
console.log('Default Top-P:', model.topP);
console.log('Default Top-K:', model.topK);
console.log('Supported Methods:', model.supportedGenerationMethods);
// Get tuned model (tuned models live under the 'tunedModels/' prefix)
const tunedModel = await client.models.get({
model: 'tunedModels/my-custom-model-abc123'
});
console.log('Tuned Model:', tunedModel.name);
console.log('Base Model:', tunedModel.baseModelId);

Update tuned model metadata (display name, description).
/**
 * Update tuned model metadata.
 * Intended for tuned models (names under 'tunedModels/'); only display
 * name and description are updatable here.
 * @param params - Update parameters
 * @returns Promise resolving to updated model
 */
function update(
params: UpdateModelParameters
): Promise<Model>;
interface UpdateModelParameters {
/** Model name (e.g., 'tunedModels/my-model-abc123') */
model: string;
/** New display name */
displayName?: string;
/** New description */
description?: string;
}

Usage Examples:
// Example: change a tuned model's display name and description.
// Update tuned model metadata
const updated = await client.models.update({
model: 'tunedModels/my-model-abc123',
displayName: 'My Updated Model',
description: 'Updated description with new information'
});
console.log('Updated model:', updated.name);
console.log('New display name:', updated.displayName);
console.log('New description:', updated.description);

Delete a tuned model.
/**
 * Delete tuned model.
 * NOTE(review): `delete` is a reserved word in JavaScript/TypeScript, so a
 * free-standing `function delete(...)` declaration is not valid syntax; in
 * practice this is invoked as the method `client.models.delete(...)` as
 * shown in the usage example.
 * @param params - Delete parameters
 * @returns Promise resolving to deletion response
 */
function delete(
params: DeleteModelParameters
): Promise<DeleteModelResponse>;
interface DeleteModelParameters {
/** Model name (e.g., 'tunedModels/my-old-model-xyz') */
model: string;
}
interface DeleteModelResponse {
/** Empty response on success */
}

Usage Examples:
// Example: delete a tuned model by its 'tunedModels/...' name.
// Delete tuned model
await client.models.delete({
model: 'tunedModels/my-old-model-xyz'
});
console.log('Model deleted successfully');

Model information and metadata.
interface Model {
/** Model name (unique identifier, e.g. 'models/...' or 'tunedModels/...') */
name?: string;
/** Base model ID (for tuned models, the model it was tuned from) */
baseModelId?: string;
/** Model version */
version?: string;
/** Human-readable display name */
displayName?: string;
/** Description */
description?: string;
/** Input token limit */
inputTokenLimit?: number;
/** Output token limit */
outputTokenLimit?: number;
/** Supported generation methods (e.g. 'generateContent', 'generateImages', 'embedContent') */
supportedGenerationMethods?: string[];
/** Default temperature */
temperature?: number;
/** Default top-P */
topP?: number;
/** Default top-K */
topK?: number;
}

import { GoogleGenAI } from '@google/genai';
// Example: catalog base models and group them by supported generation method.
const client = new GoogleGenAI({ apiKey: 'YOUR_API_KEY' });
// Get all base models
const models = await client.models.list();
const baseModels: Model[] = [];
for await (const model of models) {
// Filter for base models (not tuned models)
if (!model.name?.includes('tunedModels/')) {
baseModels.push(model);
}
}
console.log(`Found ${baseModels.length} base models\n`);
// Display by category: bucket by the generation method each model advertises.
const textModels = baseModels.filter(m =>
m.supportedGenerationMethods?.includes('generateContent')
);
const imageModels = baseModels.filter(m =>
m.supportedGenerationMethods?.includes('generateImages')
);
const videoModels = baseModels.filter(m =>
m.supportedGenerationMethods?.includes('generateVideos')
);
const embeddingModels = baseModels.filter(m =>
m.supportedGenerationMethods?.includes('embedContent')
);
console.log('Text Generation Models:');
textModels.forEach(m => {
console.log(` - ${m.displayName || m.name}`);
console.log(` Tokens: ${m.inputTokenLimit} in / ${m.outputTokenLimit} out`);
});
console.log('\nImage Generation Models:');
imageModels.forEach(m => console.log(` - ${m.displayName || m.name}`));
console.log('\nVideo Generation Models:');
videoModels.forEach(m => console.log(` - ${m.displayName || m.name}`));
console.log('\nEmbedding Models:');
embeddingModels.forEach(m => console.log(` - ${m.displayName || m.name}`));

// Get all tuned models
// Example: list tuned models only.
// NOTE(review): redeclares `models`/`tunedModels` from the previous example;
// each snippet is meant to run independently, not concatenated.
const models = await client.models.list();
const tunedModels: Model[] = [];
for await (const model of models) {
if (model.name?.includes('tunedModels/')) {
tunedModels.push(model);
}
}
console.log(`Found ${tunedModels.length} tuned models\n`);
tunedModels.forEach(model => {
console.log(`Tuned Model: ${model.displayName || model.name}`);
console.log(` Name: ${model.name}`);
console.log(` Base Model: ${model.baseModelId}`);
console.log(` Description: ${model.description}`);
console.log('');
});

// Compare different models
// Example: fetch several models in parallel and print a comparison table.
const modelNames = [
'gemini-2.0-flash',
'gemini-1.5-pro',
'gemini-1.5-flash'
];
// Promise.all fetches all model records concurrently.
const modelDetails = await Promise.all(
modelNames.map(name =>
client.models.get({ model: name })
)
);
console.log('Model Comparison:\n');
// Create comparison table
const headers = ['Model', 'Input Tokens', 'Output Tokens', 'Temp', 'Top-P', 'Top-K'];
console.log(headers.join(' | '));
console.log('-'.repeat(80));
modelDetails.forEach(model => {
// 'N/A' stands in for any unreported field.
const row = [
model.displayName || model.name,
model.inputTokenLimit?.toString() || 'N/A',
model.outputTokenLimit?.toString() || 'N/A',
model.temperature?.toString() || 'N/A',
model.topP?.toString() || 'N/A',
model.topK?.toString() || 'N/A'
];
console.log(row.join(' | '));
});

interface ModelRequirements {
/** Minimum input-token limit a matching model must report */
minInputTokens?: number;
/** Minimum output-token limit a matching model must report */
minOutputTokens?: number;
/** Generation methods a matching model must all support */
supportedMethods?: string[];
}
/**
 * Collect every listed model that satisfies the given requirements.
 * A model with an unreported token limit fails the corresponding
 * minimum-token requirement.
 * @param requirements - Minimum token limits and required generation methods
 * @returns Promise resolving to all matching models
 */
async function findModels(requirements: ModelRequirements): Promise<Model[]> {
  const { minInputTokens, minOutputTokens, supportedMethods } = requirements;
  const matching: Model[] = [];
  const pager = await client.models.list();
  for await (const candidate of pager) {
    // Skip models whose input limit is unknown or below the minimum.
    if (minInputTokens &&
        !(candidate.inputTokenLimit && candidate.inputTokenLimit >= minInputTokens)) {
      continue;
    }
    // Skip models whose output limit is unknown or below the minimum.
    if (minOutputTokens &&
        !(candidate.outputTokenLimit && candidate.outputTokenLimit >= minOutputTokens)) {
      continue;
    }
    // Skip models missing any required generation method.
    if (supportedMethods &&
        !supportedMethods.every(method => candidate.supportedGenerationMethods?.includes(method))) {
      continue;
    }
    matching.push(candidate);
  }
  return matching;
}
// Example: query for models meeting concrete capacity and method needs.
// Find models with specific requirements
const suitableModels = await findModels({
minInputTokens: 100000,
minOutputTokens: 8000,
supportedMethods: ['generateContent']
});
console.log('Models meeting requirements:');
suitableModels.forEach(m => {
console.log(` - ${m.displayName || m.name}`);
console.log(` Tokens: ${m.inputTokenLimit} in / ${m.outputTokenLimit} out`);
});

// List all tuned models and manage them
// Example: walk all tuned models, backfill missing descriptions, and
// delete models whose display name marks them as test models.
const models = await client.models.list();
const tunedModels: Model[] = [];
for await (const model of models) {
if (model.name?.includes('tunedModels/')) {
tunedModels.push(model);
}
}
console.log(`Managing ${tunedModels.length} tuned models\n`);
for (const model of tunedModels) {
console.log(`Model: ${model.name}`);
console.log(` Display Name: ${model.displayName}`);
console.log(` Base Model: ${model.baseModelId}`);
// Update if needed: add a description derived from the base model.
if (!model.description) {
console.log(' Updating with description...');
// `updated` is intentionally unused here; the call's side effect is the point.
const updated = await client.models.update({
model: model.name!,
description: `Tuned from ${model.baseModelId}`
});
console.log(' Description added');
}
// Delete old models (example criteria: 'test-' in the display name)
if (model.displayName?.includes('test-')) {
console.log(' Deleting test model...');
await client.models.delete({ model: model.name! });
console.log(' Deleted');
}
console.log('');
}

/**
 * Fetch a model by name and print a formatted summary of its metadata.
 * @param modelName - Model identifier, e.g. 'gemini-2.0-flash'
 */
async function displayModelInfo(modelName: string) {
  const model = await client.models.get({ model: modelName });
  const divider = '═'.repeat(80);
  const show = (text: string) => console.log(text);
  show(divider);
  show(`Model: ${model.displayName || model.name}`);
  show(divider);
  show('\nIdentification:');
  show(` Name: ${model.name}`);
  show(` Version: ${model.version || 'N/A'}`);
  show(` Base Model: ${model.baseModelId || 'N/A'}`);
  show('\nCapabilities:');
  show(` Supported Methods: ${model.supportedGenerationMethods?.join(', ') || 'N/A'}`);
  show('\nLimits:');
  show(` Input Tokens: ${model.inputTokenLimit || 'N/A'}`);
  show(` Output Tokens: ${model.outputTokenLimit || 'N/A'}`);
  show('\nDefault Parameters:');
  // `??` (not `||`) so a legitimate 0 default is still printed.
  show(` Temperature: ${model.temperature ?? 'N/A'}`);
  show(` Top-P: ${model.topP ?? 'N/A'}`);
  show(` Top-K: ${model.topK ?? 'N/A'}`);
  if (model.description) {
    show('\nDescription:');
    show(` ${model.description}`);
  }
  show('\n' + divider);
}
// Display info for specific model
await displayModelInfo('gemini-2.0-flash');

interface TaskRequirements {
/** Kind of task the model will be used for */
taskType: 'text' | 'image' | 'video' | 'embedding';
/** Minimum input-token limit required, if any */
contextSize?: number;
/** Minimum output-token limit required, if any */
outputSize?: number;
/** Prefer fast ('flash') models over quality ('pro') models */
preferSpeed?: boolean;
}
/**
 * Pick the best model for a task: filter the catalog by the required
 * generation method and token limits, then rank by a name keyword —
 * 'flash' when speed is preferred, otherwise 'pro'.
 * @param requirements - Task type, optional size constraints, speed preference
 * @returns Promise resolving to the top-ranked model, or null if none qualify
 */
async function findBestModel(requirements: TaskRequirements): Promise<Model | null> {
  // Task type -> generation method the model must advertise.
  const methodByTask = {
    text: 'generateContent',
    image: 'generateImages',
    video: 'generateVideos',
    embedding: 'embedContent'
  } as const;
  const neededMethod = methodByTask[requirements.taskType];
  const qualified: Model[] = [];
  const pager = await client.models.list();
  for await (const candidate of pager) {
    // A model qualifies when it supports the method and its known token
    // limits (unknown limits disqualify) meet any requested sizes.
    const supportsMethod = candidate.supportedGenerationMethods?.includes(neededMethod);
    const inputOk = !requirements.contextSize ||
      Boolean(candidate.inputTokenLimit && candidate.inputTokenLimit >= requirements.contextSize);
    const outputOk = !requirements.outputSize ||
      Boolean(candidate.outputTokenLimit && candidate.outputTokenLimit >= requirements.outputSize);
    if (supportsMethod && inputOk && outputOk) {
      qualified.push(candidate);
    }
  }
  if (qualified.length === 0) {
    return null;
  }
  // Rank by name keyword: 'flash' models first for speed, 'pro' first for quality.
  const keyword = requirements.preferSpeed ? 'flash' : 'pro';
  const score = (m: Model) => (m.name?.includes(keyword) ? 1 : 0);
  qualified.sort((a, b) => score(b) - score(a));
  return qualified[0];
}
// Example: ask for the fastest text model with given capacity.
// Find best model for task
const bestModel = await findBestModel({
taskType: 'text',
contextSize: 50000,
outputSize: 8000,
preferSpeed: true
});
if (bestModel) {
console.log('Recommended model:', bestModel.displayName || bestModel.name);
console.log('Reasoning: Meets requirements and optimized for speed');
} else {
console.log('No suitable model found');
}

// Export all model information to JSON
const allModels = await client.models.list();
const modelCatalog: Model[] = [];
for await (const model of allModels) {
modelCatalog.push(model);
}
const catalog = {
exportDate: new Date().toISOString(),
modelCount: modelCatalog.length,
models: modelCatalog
};
// Save to file (Node.js)
require('fs').writeFileSync(
'./model-catalog.json',
JSON.stringify(catalog, null, 2)
);
console.log(`Exported ${modelCatalog.length} models to model-catalog.json`);

/**
 * Check that a named model supports a generation method and meets minimum
 * token limits before it is used.
 * Note: a model whose token limit is unreported passes the limit checks.
 * @param modelName - Model identifier to validate
 * @param requirements - Required method and minimum token limits
 * @returns Promise resolving to true when the model passes all checks
 */
async function validateModel(modelName: string, requirements: {
  minInputTokens: number;
  minOutputTokens: number;
  method: string;
}): Promise<boolean> {
  try {
    const model = await client.models.get({ model: modelName });
    // Check method support.
    const methods = model.supportedGenerationMethods ?? [];
    if (!methods.includes(requirements.method)) {
      console.error(`Model ${modelName} does not support ${requirements.method}`);
      return false;
    }
    // Check input tokens (only when the limit is reported and truthy).
    const inputLimit = model.inputTokenLimit;
    if (inputLimit && inputLimit < requirements.minInputTokens) {
      console.error(`Model ${modelName} input limit (${model.inputTokenLimit}) is below required (${requirements.minInputTokens})`);
      return false;
    }
    // Check output tokens (only when the limit is reported and truthy).
    const outputLimit = model.outputTokenLimit;
    if (outputLimit && outputLimit < requirements.minOutputTokens) {
      console.error(`Model ${modelName} output limit (${model.outputTokenLimit}) is below required (${requirements.minOutputTokens})`);
      return false;
    }
    console.log(`Model ${modelName} validated successfully`);
    return true;
  } catch (err) {
    console.error(`Model ${modelName} not found or error:`, err);
    return false;
  }
}
// Example: validate a model's capabilities before generating with it.
// Validate before using
const isValid = await validateModel('gemini-2.0-flash', {
minInputTokens: 10000,
minOutputTokens: 2000,
method: 'generateContent'
});
if (isValid) {
// Proceed with generation
const response = await client.models.generateContent({
model: 'gemini-2.0-flash',
contents: 'Your prompt here'
});
}

Install with Tessl CLI
npx tessl i tessl/npm-google--genai