Google Gen AI JavaScript SDK for building applications powered by Gemini with content generation, image/video generation, function calling, caching, and real-time live sessions
The Models module provides video generation capabilities using Veo models, supporting text-to-video, image-to-video, and video-to-video generation as long-running operations.
Generate videos from text, images, or existing videos using Veo models. This is a long-running operation that requires polling for completion.
/**
 * Generate videos from text/images (returns long-running operation)
 *
 * The returned operation is not finished when this promise resolves — poll it
 * with `operations.getVideosOperation` until `done` is true (see examples below).
 * @param params - Video generation parameters
 * @returns Promise resolving to video generation operation
 */
function generateVideos(
  params: GenerateVideosParameters
): Promise<GenerateVideosOperation>;
interface GenerateVideosParameters {
  /** Model name (e.g., 'veo-2.0-generate-001') */
  model: string;
  /** Text prompt for video generation */
  prompt?: string;
  /** Source image for image-to-video */
  image?: BlobImageUnion;
  /** Source video for video-to-video */
  video?: BlobVideoUnion;
  /**
   * Source reference — the upload example below passes `{ videoUri }`.
   * NOTE(review): `SourceUnion` is not declared anywhere in this document; verify.
   */
  source?: SourceUnion;
  /** Video generation configuration */
  config?: GenerateVideosConfig;
}
interface GenerateVideosOperation {
  /** Operation name (used for polling status) */
  name?: string;
  /** Whether operation is complete */
  done?: boolean;
  /** Response when operation completes successfully (examples read this only after `done` is true) */
  response?: GenerateVideosResponse;
  /** Error if operation failed */
  error?: Status;
  /** Operation metadata (e.g., progress — see GenerateVideosMetadata) */
  metadata?: GenerateVideosMetadata;
}
Usage Examples:
import { GoogleGenAI } from '@google/genai';
const client = new GoogleGenAI({ apiKey: 'YOUR_API_KEY' });
// Text-to-video generation
const operation = await client.models.generateVideos({
  model: 'veo-2.0-generate-001',
  prompt: 'A serene lake with mountains in the background, sunrise time lapse'
});
console.log('Operation started:', operation.name);
// Poll for completion
while (!operation.done) {
  await new Promise((resolve) => setTimeout(resolve, 5000)); // Wait 5 seconds
  const latest = await client.operations.getVideosOperation({
    operation: operation.name!
  });
  if (!latest.done) continue;
  if (latest.response) {
    console.log('Video generated successfully');
    const videoUri = latest.response.generatedVideos?.[0]?.video?.uri;
    console.log('Video URI:', videoUri);
  } else if (latest.error) {
    console.error('Video generation failed:', latest.error);
  }
  break;
}
// With configuration
const configuredOp = await client.models.generateVideos({
  model: 'veo-2.0-generate-001',
  prompt: 'A robot walking through a futuristic city',
  config: {
    aspectRatio: '16:9',
    durationSeconds: 8
  }
});
Poll and manage video generation operations.
/**
 * Get video generation operation status
 *
 * Call repeatedly (with a delay between calls) until the returned
 * operation's `done` field is true.
 * @param params - Operation parameters
 * @returns Promise resolving to operation status
 */
function getVideosOperation(
  params: OperationGetParameters<GenerateVideosResponse, GenerateVideosOperation>
): Promise<GenerateVideosOperation>;
interface OperationGetParameters<T, U> {
  // NOTE(review): type parameters T and U are not referenced by any member
  // shown here — verify against the full SDK declaration before removing them.
  /** Operation name from initial generateVideos call */
  operation: string;
}
Usage Examples:
// Start video generation
const operation = await client.models.generateVideos({
  model: 'veo-2.0-generate-001',
  prompt: 'Waves crashing on a beach'
});
// Poll operation status
const pollOperation = async (opName: string) => {
  const fetchStatus = () =>
    client.operations.getVideosOperation({ operation: opName });
  let current = await fetchStatus();
  while (!current.done) {
    console.log('Operation in progress...');
    if (current.metadata) {
      console.log('Progress:', current.metadata);
    }
    await new Promise((resolve) => setTimeout(resolve, 10000)); // Wait 10s
    current = await fetchStatus();
  }
  return current;
};
const result = await pollOperation(operation.name!);
if (result.response?.generatedVideos) {
  for (const [index, genVideo] of result.response.generatedVideos.entries()) {
    console.log(`Video ${index}:`, genVideo.video?.uri);
  }
}
Configuration options for video generation.
interface GenerateVideosConfig {
  /** Aspect ratio (e.g., '16:9', '9:16', '1:1') */
  aspectRatio?: string;
  /** Video duration in seconds */
  durationSeconds?: number;
  /** Number of videos to generate */
  numberOfVideos?: number;
  /** Negative prompt (things to avoid) */
  negativePrompt?: string;
  /** Frame rate (fps) */
  frameRate?: number;
  /** Guidance scale */
  guidanceScale?: number;
  /** Random seed for reproducibility */
  seed?: number;
  /** Safety filter level */
  safetyFilterLevel?: SafetyFilterLevel;
  /** Include RAI (Responsible AI) information */
  includeRaiReason?: boolean;
  /** Output video type — the example below passes a MIME type ('video/mp4'), not a codec name */
  outputVideoType?: string;
}
Response from completed video generation operation.
interface GenerateVideosResponse {
  /** Generated videos produced by the operation */
  generatedVideos?: GeneratedVideo[];
}
interface GeneratedVideo {
  /** Video data */
  video?: Video;
  /** RAI (Responsible AI) information */
  // NOTE(review): the `Rai` type is not declared anywhere in this document; verify.
  rai?: Rai;
}
interface Video {
  /** Video URI (GCS path or download URI) */
  uri?: string;
  /** Processing state (see FileState) */
  state?: FileState;
}
Metadata about the video generation operation.
interface GenerateVideosMetadata {
  /** Operation create time */
  createTime?: string;
  /** Operation start time */
  startTime?: string;
  /** Operation update time */
  updateTime?: string;
  /** Progress percentage (0-100) */
  progressPercentage?: number;
}
Error information if operation failed.
interface Status {
  /** Error code */
  code?: number;
  /** Error message */
  message?: string;
  /** Additional error details */
  details?: unknown[];
}
Input media types.
/** Image blob for image-to-video */
type BlobImageUnion = Blob;
/** Video blob for video-to-video */
type BlobVideoUnion = Blob;
// NOTE(review): this local `Blob` declaration shadows the global (DOM/Node)
// Blob type — confirm the shadowing is intentional.
interface Blob {
  /** MIME type (e.g., 'image/png', 'video/mp4') */
  mimeType?: string;
  /** Base64-encoded data */
  data?: string;
}
enum FileState {
  /** State not set */
  STATE_UNSPECIFIED = 'STATE_UNSPECIFIED',
  /** File is still being processed */
  PROCESSING = 'PROCESSING',
  /** File is ready for use (the upload example waits for this state) */
  ACTIVE = 'ACTIVE',
  /** Processing failed */
  FAILED = 'FAILED'
}
enum SafetyFilterLevel {
  // Levels are ordered from strictest to most permissive
  BLOCK_LOW_AND_ABOVE = 'BLOCK_LOW_AND_ABOVE',
  BLOCK_MEDIUM_AND_ABOVE = 'BLOCK_MEDIUM_AND_ABOVE',
  BLOCK_ONLY_HIGH = 'BLOCK_ONLY_HIGH',
  /** Disables blocking */
  BLOCK_NONE = 'BLOCK_NONE'
}
import { GoogleGenAI, GenerateVideosOperation } from '@google/genai';
const client = new GoogleGenAI({ apiKey: 'YOUR_API_KEY' });
/**
 * Poll a video generation operation until it reports done.
 * @param client - SDK client
 * @param operationName - name returned by generateVideos
 * @param pollIntervalMs - delay between status checks (default 10000 ms)
 * @returns the completed operation
 */
async function pollVideoOperation(
  client: GoogleGenAI,
  operationName: string,
  pollIntervalMs: number = 10000
): Promise<GenerateVideosOperation> {
  for (;;) {
    const current = await client.operations.getVideosOperation({
      operation: operationName
    });
    if (current.done) {
      return current;
    }
    console.log(`Polling operation... ${current.metadata?.progressPercentage || 0}%`);
    await new Promise((resolve) => setTimeout(resolve, pollIntervalMs));
  }
}
// Generate video
const operation = await client.models.generateVideos({
  model: 'veo-2.0-generate-001',
  prompt: 'A time-lapse of a flower blooming in spring',
  config: {
    aspectRatio: '16:9',
    durationSeconds: 5,
    numberOfVideos: 1
  }
});
console.log('Video generation started:', operation.name);
// Wait for completion
const completed = await pollVideoOperation(client, operation.name!);
const readyUri = completed.response?.generatedVideos?.[0]?.video?.uri;
if (readyUri) {
  console.log('Video ready:', readyUri);
} else if (completed.error) {
  console.error('Video generation failed:', completed.error.message);
}
import { GoogleGenAI } from '@google/genai';
import * as fs from 'fs';
const client = new GoogleGenAI({ apiKey: 'YOUR_API_KEY' });
// Read image as base64
const imageData = fs.readFileSync('./start_frame.png', 'base64');
// Generate video from image
const startFrame = {
  data: imageData,
  mimeType: 'image/png'
};
const operation = await client.models.generateVideos({
  model: 'veo-2.0-generate-001',
  prompt: 'Animate this scene with gentle camera movement',
  image: startFrame,
  config: {
    aspectRatio: '16:9',
    durationSeconds: 8,
    frameRate: 24
  }
});
console.log('Image-to-video generation started:', operation.name);
const videoData = fs.readFileSync('./source_video.mp4', 'base64');
const sourceVideo = {
  data: videoData,
  mimeType: 'video/mp4'
};
const operation = await client.models.generateVideos({
  model: 'veo-2.0-generate-001',
  prompt: 'Transform this video into an animated cartoon style',
  video: sourceVideo,
  config: {
    durationSeconds: 10,
    guidanceScale: 7.5
  }
});
console.log('Video-to-video generation started:', operation.name);
// Generate multiple variations
const operation = await client.models.generateVideos({
  model: 'veo-2.0-generate-001',
  prompt: 'A cat playing with a ball of yarn',
  config: {
    aspectRatio: '9:16',
    durationSeconds: 6,
    numberOfVideos: 4,
    negativePrompt: 'blurry, low quality, distorted'
  }
});
// Wait for completion
const completed = await pollVideoOperation(client, operation.name!);
// Process all generated videos
const videos = completed.response?.generatedVideos ?? [];
for (const [index, genVideo] of videos.entries()) {
  if (genVideo.video?.uri) {
    console.log(`Video ${index + 1}:`, genVideo.video.uri);
    if (genVideo.rai) {
      console.log(`  RAI info:`, genVideo.rai);
    }
  }
}
// Generate with specific seed for reproducibility
// Build a fresh request object per call so both runs use the identical seed/config
const makeSunsetRequest = () => ({
  model: 'veo-2.0-generate-001',
  prompt: 'A sunset over the ocean',
  config: {
    seed: 12345,
    aspectRatio: '16:9',
    durationSeconds: 5
  }
});
const operation1 = await client.models.generateVideos(makeSunsetRequest());
// Same seed will produce similar results
const operation2 = await client.models.generateVideos(makeSunsetRequest());
import * as fs from 'fs';
import * as https from 'https';
// Wait for video generation
const completed = await pollVideoOperation(client, operation.name!);
const videoUri = completed.response?.generatedVideos?.[0]?.video?.uri;
if (videoUri) {
  // Download video from URI
  const downloadVideo = (uri: string, outputPath: string): Promise<void> =>
    new Promise((resolve, reject) => {
      const out = fs.createWriteStream(outputPath);
      https
        .get(uri, (response) => {
          response.pipe(out);
          out.on('finish', () => {
            out.close();
            resolve();
          });
        })
        .on('error', (err) => {
          // Remove the partial file before surfacing the error
          fs.unlink(outputPath, () => {});
          reject(err);
        });
    });
  await downloadVideo(videoUri, './generated_video.mp4');
  console.log('Video downloaded to ./generated_video.mp4');
}
const operation = await client.models.generateVideos({
  model: 'veo-2.0-generate-001',
  prompt: 'A bustling street market with colorful stalls and people',
  config: {
    // Video settings
    aspectRatio: '16:9',
    durationSeconds: 10,
    frameRate: 30,
    // Quality and style
    guidanceScale: 8.0,
    negativePrompt: 'static, boring, low quality, artifacts',
    // Safety
    safetyFilterLevel: SafetyFilterLevel.BLOCK_MEDIUM_AND_ABOVE,
    includeRaiReason: true,
    // Generation
    numberOfVideos: 2,
    seed: 42,
    // Output (MIME type)
    outputVideoType: 'video/mp4'
  }
});
// Retry helper: regenerates with exponential backoff when an attempt fails
async function generateVideoWithRetry(
  client: GoogleGenAI,
  params: GenerateVideosParameters,
  maxRetries: number = 3
): Promise<GenerateVideosOperation> {
  let lastError: Error | null = null;
  for (let attempt = 1; attempt <= maxRetries; attempt++) {
    try {
      console.log(`Attempt ${attempt}/${maxRetries}`);
      const operation = await client.models.generateVideos(params);
      const completed = await pollVideoOperation(client, operation.name!);
      // A done operation with `error` set is treated as a failed attempt
      if (completed.error) {
        throw new Error(`Operation failed: ${completed.error.message}`);
      }
      // Success requires at least one video with a URI
      if (completed.response?.generatedVideos?.[0]?.video?.uri) {
        console.log('Video generation successful');
        return completed;
      }
      throw new Error('No video generated');
    } catch (error) {
      lastError = error as Error;
      console.error(`Attempt ${attempt} failed:`, error);
      if (attempt < maxRetries) {
        // Exponential backoff: 2s, 4s, 8s, ...
        const backoffMs = Math.pow(2, attempt) * 1000;
        console.log(`Retrying in ${backoffMs}ms...`);
        await new Promise(resolve => setTimeout(resolve, backoffMs));
      }
    }
  }
  // All attempts exhausted — surface the last error seen
  throw lastError || new Error('Max retries exceeded');
}
// Use with retry logic
try {
  const result = await generateVideoWithRetry(client, {
    model: 'veo-2.0-generate-001',
    prompt: 'A peaceful forest scene',
    config: {
      aspectRatio: '16:9',
      durationSeconds: 5
    }
  });
  console.log('Final video:', result.response?.generatedVideos?.[0]?.video?.uri);
} catch (error) {
  console.error('Video generation failed after retries:', error);
}
// Start multiple video generations
const prompts = [
  'A sunrise over mountains',
  'A waterfall in a tropical forest',
  'City traffic at night'
];
// Kick off all generations concurrently
const operations = await Promise.all(
  prompts.map((prompt) =>
    client.models.generateVideos({ model: 'veo-2.0-generate-001', prompt })
  )
);
console.log(`Started ${operations.length} video generation operations`);
// Poll all operations
const results = await Promise.all(
  operations.map((op) => pollVideoOperation(client, op.name!))
);
// Process results
for (const [index, result] of results.entries()) {
  const uri = result.response?.generatedVideos?.[0]?.video?.uri;
  if (uri) {
    console.log(`Video ${index + 1}: ${uri}`);
  } else {
    console.log(`Video ${index + 1}: Failed`);
  }
}
// For video-to-video with large source files, upload first
const uploadedVideo = await client.files.upload({
  file: './large_video.mp4',
  mimeType: 'video/mp4'
});
// Wait for processing.
// BUG FIX: the original loop tested the stale `uploadedVideo.state` (never
// reassigned) and only broke on ACTIVE, so a file that ended in FAILED would
// spin forever. Track the re-fetched file and stop on any terminal state.
let sourceFile = uploadedVideo;
while (sourceFile.state === FileState.PROCESSING) {
  await new Promise(resolve => setTimeout(resolve, 5000));
  sourceFile = await client.files.get({ file: uploadedVideo.name! });
}
if (sourceFile.state === FileState.FAILED) {
  throw new Error(`File processing failed: ${uploadedVideo.name}`);
}
// Use uploaded video URI
const operation = await client.models.generateVideos({
  model: 'veo-2.0-generate-001',
  prompt: 'Add dramatic music visualization effects',
  source: {
    videoUri: uploadedVideo.uri
  },
  config: {
    durationSeconds: 15
  }
});
Install with Tessl CLI
npx tessl i tessl/npm-google--genai