Google Gen AI JavaScript SDK for building applications powered by Gemini with content generation, image/video generation, function calling, caching, and real-time live sessions
The Batches module provides batch processing capabilities for processing multiple requests efficiently using GCS or BigQuery integration, supporting both content generation and embeddings.
Create a batch generation job.
/**
* Create batch generation job
* @param params - Batch job creation parameters
* @returns Promise resolving to batch job
*/
function create(
params: CreateBatchJobParameters
): Promise<BatchJob>;
interface CreateBatchJobParameters {
/** Model name */
model: string;
/** Source (GCS URI, BigQuery URI, or inline requests) */
src: BatchJobSourceUnion;
/** Batch configuration */
config?: BatchJobConfig;
}
interface BatchJob {
/** Job name (unique identifier) */
name?: string;
/** Display name */
displayName?: string;
/** Model name */
model?: string;
/** Job state */
state?: JobState;
/** Creation timestamp */
createTime?: string;
/** Start timestamp */
startTime?: string;
/** End timestamp */
endTime?: string;
/** Source configuration */
src?: BatchJobSource;
/** Job configuration */
config?: BatchJobConfig;
}

Usage Examples:
import { GoogleGenAI } from '@google/genai';
const client = new GoogleGenAI({ apiKey: 'YOUR_API_KEY' });
// Create batch job with GCS source
const batchJob = await client.batches.create({
model: 'gemini-2.0-flash',
src: 'gs://my-bucket/batch-requests.jsonl',
config: {
destination: 'gs://my-bucket/batch-results/'
}
});
console.log('Batch job created:', batchJob.name);
console.log('State:', batchJob.state);
// With inline requests
const inlineBatch = await client.batches.create({
model: 'gemini-2.0-flash',
src: [
{
contents: [{ role: 'user', parts: [{ text: 'Question 1' }] }]
},
{
contents: [{ role: 'user', parts: [{ text: 'Question 2' }] }]
},
{
contents: [{ role: 'user', parts: [{ text: 'Question 3' }] }]
}
],
config: {
destination: 'gs://my-bucket/results/'
}
});

Create embeddings batch job (experimental, Gemini API only).
/**
* Create embeddings batch job
* @param params - Embeddings batch job creation parameters
* @returns Promise resolving to batch job
*/
function createEmbeddings(
params: CreateEmbeddingsBatchJobParameters
): Promise<BatchJob>;
interface CreateEmbeddingsBatchJobParameters {
/** Model name (e.g., 'text-embedding-004') */
model: string;
/** Source data */
src: BatchJobSourceUnion;
/** Batch configuration */
config?: BatchJobConfig;
}

Usage Examples:
// Create embeddings batch job
const embeddingsBatch = await client.batches.createEmbeddings({
model: 'text-embedding-004',
src: 'gs://my-bucket/texts.jsonl',
config: {
destination: 'gs://my-bucket/embeddings/'
}
});
console.log('Embeddings batch job:', embeddingsBatch.name);

List batch jobs with pagination.
/**
* List batch jobs with pagination
* @param params - List parameters
* @returns Promise resolving to pager of batch jobs
*/
function list(
params?: ListBatchJobsParameters
): Promise<Pager<BatchJob>>;
interface ListBatchJobsParameters {
/** Page size */
pageSize?: number;
/** Page token for pagination */
pageToken?: string;
}

Usage Examples:
// List all batch jobs
const pager = await client.batches.list({
pageSize: 10
});
for await (const job of pager) {
console.log(`Job: ${job.name}`);
console.log(` State: ${job.state}`);
console.log(` Model: ${job.model}`);
console.log(` Created: ${job.createTime}`);
}
// Manual pagination
const page1 = await client.batches.list({ pageSize: 5 });
console.log('First page:', page1.page);
if (page1.hasNextPage()) {
const page2 = await page1.nextPage();
console.log('Second page:', page2);
}

Get batch job status and details.
/**
* Get batch job status
* @param params - Get parameters
* @returns Promise resolving to batch job
*/
function get(
params: GetBatchJobParameters
): Promise<BatchJob>;
interface GetBatchJobParameters {
/** Batch job name */
batchJob: string;
}

Usage Examples:
// Get job status
const job = await client.batches.get({
batchJob: 'projects/123/locations/us-central1/batchJobs/abc'
});
console.log('Job state:', job.state);
console.log('Progress:', job);
// Poll until complete — re-fetch into a mutable variable so the loop
// condition always checks the latest state (checking the original `job`
// object would loop on a stale snapshot)
let polled = job;
while (polled.state === JobState.JOB_STATE_RUNNING) {
  await new Promise(resolve => setTimeout(resolve, 10000));
  polled = await client.batches.get({
    batchJob: job.name!
  });
}
if (polled.state === JobState.JOB_STATE_SUCCEEDED) {
  console.log('Job completed successfully');
} else if (polled.state === JobState.JOB_STATE_FAILED) {
  console.error('Job failed');
}

Cancel a running batch job.
/**
* Cancel running batch job
* @param params - Cancel parameters
* @returns Promise resolving when cancellation is initiated
*/
function cancel(
params: CancelBatchJobParameters
): Promise<void>;
interface CancelBatchJobParameters {
/** Batch job name */
batchJob: string;
}

Usage Examples:
// Cancel job
await client.batches.cancel({
batchJob: 'projects/123/locations/us-central1/batchJobs/abc'
});
console.log('Cancellation requested');
// Verify cancellation
const cancelled = await client.batches.get({
batchJob: 'projects/123/locations/us-central1/batchJobs/abc'
});
console.log('Job state:', cancelled.state);

Delete a batch job.
/**
* Delete batch job
* @param params - Delete parameters
* @returns Promise resolving to delete response
*/
function delete(
params: DeleteBatchJobParameters
): Promise<DeleteResourceJob>;
interface DeleteBatchJobParameters {
/** Batch job name */
batchJob: string;
}
interface DeleteResourceJob {
/** Empty response on success */
}

Usage Examples:
// Delete completed job
await client.batches.delete({
batchJob: 'projects/123/locations/us-central1/batchJobs/abc'
});
console.log('Batch job deleted');

Configuration for batch jobs.
interface BatchJobConfig {
/** Destination (GCS URI or BigQuery URI) */
destination?: BatchJobDestinationUnion;
/** Display name */
displayName?: string;
/** Generation configuration */
generationConfig?: GenerateContentConfig;
}

Source configuration for batch jobs.
interface BatchJobSource {
/** GCS URI (e.g., 'gs://bucket/file.jsonl') */
gcsUri?: string;
/** BigQuery URI (e.g., 'bq://project.dataset.table') */
bigqueryUri?: string;
}
/** GCS URI, BigQuery URI, or inline requests array */
type BatchJobSourceUnion = BatchJobSource | InlinedRequest[] | string;

Destination configuration for batch results.
interface BatchJobDestination {
/** GCS URI (e.g., 'gs://bucket/results/') */
gcsUri?: string;
/** BigQuery URI (e.g., 'bq://project.dataset.table') */
bigqueryUri?: string;
}
/** GCS URI, BigQuery URI string, or destination object */
type BatchJobDestinationUnion = BatchJobDestination | string;

Single request in inline batch.
interface InlinedRequest {
/** Request contents */
contents: ContentListUnion;
/** Request-specific config */
config?: GenerateContentConfig;
}

Batch job states.
enum JobState {
JOB_STATE_UNSPECIFIED = 'JOB_STATE_UNSPECIFIED',
JOB_STATE_QUEUED = 'JOB_STATE_QUEUED',
JOB_STATE_PENDING = 'JOB_STATE_PENDING',
JOB_STATE_RUNNING = 'JOB_STATE_RUNNING',
JOB_STATE_SUCCEEDED = 'JOB_STATE_SUCCEEDED',
JOB_STATE_FAILED = 'JOB_STATE_FAILED',
JOB_STATE_CANCELLING = 'JOB_STATE_CANCELLING',
JOB_STATE_CANCELLED = 'JOB_STATE_CANCELLED',
JOB_STATE_PAUSED = 'JOB_STATE_PAUSED',
JOB_STATE_EXPIRED = 'JOB_STATE_EXPIRED',
JOB_STATE_UPDATING = 'JOB_STATE_UPDATING',
JOB_STATE_PARTIALLY_SUCCEEDED = 'JOB_STATE_PARTIALLY_SUCCEEDED'
}

import { GoogleGenAI } from '@google/genai';
import * as fs from 'fs';
const client = new GoogleGenAI({ apiKey: 'YOUR_API_KEY' });
// Prepare JSONL file with requests
const requests = [
{ contents: [{ role: 'user', parts: [{ text: 'Translate to French: Hello' }] }] },
{ contents: [{ role: 'user', parts: [{ text: 'Translate to French: Goodbye' }] }] },
{ contents: [{ role: 'user', parts: [{ text: 'Translate to French: Thank you' }] }] }
];
const jsonl = requests.map(r => JSON.stringify(r)).join('\n');
fs.writeFileSync('./batch-requests.jsonl', jsonl);
// Upload to GCS (using Google Cloud Storage client)
// await uploadToGCS('./batch-requests.jsonl', 'gs://my-bucket/batch-requests.jsonl');
// Create batch job
const job = await client.batches.create({
model: 'gemini-2.0-flash',
src: 'gs://my-bucket/batch-requests.jsonl',
config: {
destination: 'gs://my-bucket/batch-results/',
displayName: 'Translation Batch'
}
});
console.log('Batch job created:', job.name);
// Poll for completion
const pollBatchJob = async (jobName: string) => {
let currentJob = await client.batches.get({ batchJob: jobName });
while (
currentJob.state === JobState.JOB_STATE_RUNNING ||
currentJob.state === JobState.JOB_STATE_PENDING ||
currentJob.state === JobState.JOB_STATE_QUEUED
) {
console.log('Job state:', currentJob.state);
await new Promise(resolve => setTimeout(resolve, 30000)); // Wait 30s
currentJob = await client.batches.get({ batchJob: jobName });
}
return currentJob;
};
const completed = await pollBatchJob(job.name!);
if (completed.state === JobState.JOB_STATE_SUCCEEDED) {
console.log('Batch job completed successfully');
console.log('Results at:', completed.config?.destination);
} else {
console.error('Batch job failed:', completed.state);
}

// Create batch with inline requests
const requests = [];
for (let i = 1; i <= 100; i++) {
requests.push({
contents: [{ role: 'user', parts: [{ text: `Summarize topic ${i}` }] }]
});
}
const batchJob = await client.batches.create({
model: 'gemini-2.0-flash',
src: requests,
config: {
destination: 'gs://my-bucket/summaries/',
displayName: 'Topic Summaries Batch',
generationConfig: {
temperature: 0.7,
maxOutputTokens: 512
}
}
});
console.log('Processing', requests.length, 'requests');

// Batch job with BigQuery source and destination
const batchJob = await client.batches.create({
model: 'gemini-2.0-flash',
src: 'bq://my-project.my-dataset.input_table',
config: {
destination: 'bq://my-project.my-dataset.output_table',
displayName: 'BigQuery Batch Processing'
}
});
console.log('BigQuery batch job:', batchJob.name);
// Poll and wait
const completed = await pollBatchJob(batchJob.name!);
if (completed.state === JobState.JOB_STATE_SUCCEEDED) {
console.log('Results written to BigQuery table');
}

// Prepare texts for embedding
const texts = [
'Text document 1',
'Text document 2',
'Text document 3',
// ... many more
];
// Write to JSONL
const jsonl = texts.map(text =>
JSON.stringify({ contents: [{ role: 'user', parts: [{ text }] }] })
).join('\n');
// Upload to GCS
// await uploadToGCS(jsonl, 'gs://my-bucket/texts.jsonl');
// Create embeddings batch
const embeddingJob = await client.batches.createEmbeddings({
model: 'text-embedding-004',
src: 'gs://my-bucket/texts.jsonl',
config: {
destination: 'gs://my-bucket/embeddings/',
displayName: 'Text Embeddings Batch'
}
});
console.log('Embeddings job:', embeddingJob.name);
// Wait for completion
const completed = await pollBatchJob(embeddingJob.name!);
console.log('Embeddings ready at:', completed.config?.destination);

// List all running jobs
const jobs = await client.batches.list();
for await (const job of jobs) {
console.log(`\nJob: ${job.name}`);
console.log(` Display Name: ${job.displayName}`);
console.log(` State: ${job.state}`);
console.log(` Model: ${job.model}`);
console.log(` Created: ${job.createTime}`);
// Cancel long-running jobs
if (job.state === JobState.JOB_STATE_RUNNING) {
const created = new Date(job.createTime!);
const now = new Date();
const hoursRunning = (now.getTime() - created.getTime()) / (1000 * 60 * 60);
if (hoursRunning > 2) {
console.log(' Cancelling long-running job...');
await client.batches.cancel({ batchJob: job.name! });
}
}
// Delete completed jobs
if (
job.state === JobState.JOB_STATE_SUCCEEDED ||
job.state === JobState.JOB_STATE_FAILED ||
job.state === JobState.JOB_STATE_CANCELLED
) {
const ended = new Date(job.endTime!);
const now = new Date();
const daysOld = (now.getTime() - ended.getTime()) / (1000 * 60 * 60 * 24);
if (daysOld > 7) {
console.log(' Deleting old job...');
await client.batches.delete({ batchJob: job.name! });
}
}
}

async function createBatchWithRetry(
params: CreateBatchJobParameters,
maxRetries: number = 3
): Promise<BatchJob> {
for (let attempt = 1; attempt <= maxRetries; attempt++) {
try {
const job = await client.batches.create(params);
console.log(`Batch job created on attempt ${attempt}`);
return job;
} catch (error) {
console.error(`Attempt ${attempt} failed:`, error);
if (attempt === maxRetries) {
throw error;
}
const backoffMs = Math.pow(2, attempt) * 1000;
await new Promise(resolve => setTimeout(resolve, backoffMs));
}
}
throw new Error('Max retries exceeded');
}
// Use with retry logic
const job = await createBatchWithRetry({
model: 'gemini-2.0-flash',
src: 'gs://my-bucket/requests.jsonl',
config: {
destination: 'gs://my-bucket/results/'
}
});

// Create multiple batch jobs in parallel
const batches = [
{
model: 'gemini-2.0-flash',
src: 'gs://my-bucket/batch1.jsonl',
config: { destination: 'gs://my-bucket/results1/' }
},
{
model: 'gemini-2.0-flash',
src: 'gs://my-bucket/batch2.jsonl',
config: { destination: 'gs://my-bucket/results2/' }
},
{
model: 'gemini-2.0-flash',
src: 'gs://my-bucket/batch3.jsonl',
config: { destination: 'gs://my-bucket/results3/' }
}
];
const jobs = await Promise.all(
batches.map(params => client.batches.create(params))
);
console.log(`Created ${jobs.length} batch jobs`);
// Wait for all to complete
const results = await Promise.all(
jobs.map(job => pollBatchJob(job.name!))
);
// Check results
results.forEach((result, index) => {
console.log(`Batch ${index + 1}:`, result.state);
});

import { Storage } from '@google-cloud/storage';
const storage = new Storage();
// Wait for batch to complete
const completed = await pollBatchJob(batchJob.name!);
if (completed.state === JobState.JOB_STATE_SUCCEEDED) {
// Parse GCS destination
const destUri = completed.config?.destination as string;
// First segment after stripping 'gs://' is the bucket; the rest is the object prefix
const [bucket, ...pathParts] = destUri.replace('gs://', '').split('/');
const prefix = pathParts.join('/');
// List result files
const [files] = await storage.bucket(bucket).getFiles({ prefix });
// Download and process each result file
for (const file of files) {
const [content] = await file.download();
const lines = content.toString().split('\n').filter(l => l.trim());
lines.forEach(line => {
const result = JSON.parse(line);
console.log('Result:', result);
});
}
}

import { HarmCategory, HarmBlockThreshold } from '@google/genai';
const batchJob = await client.batches.create({
model: 'gemini-2.0-flash',
src: 'gs://my-bucket/requests.jsonl',
config: {
destination: 'gs://my-bucket/results/',
displayName: 'Configured Batch Job',
generationConfig: {
temperature: 0.9,
topP: 0.95,
topK: 40,
maxOutputTokens: 2048,
safetySettings: [
{
category: HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT,
threshold: HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE
}
],
systemInstruction: 'You are a helpful assistant.'
}
}
});

Install with Tessl CLI
npx tessl i tessl/npm-google--genai