The official TypeScript library for the OpenAI API
—
Quality
Pending
Does it follow best practices?
Impact
Pending
No eval scenarios have been run
Comprehensive guide to OpenAI's image generation and editing capabilities including text-to-image generation, image editing with masks, creating variations, and streaming support.
The Images resource provides three core methods for working with images:
Creates images from text descriptions using state-of-the-art models. Supports both standard and streaming modes.
/**
* Creates an image given a prompt
* @param params - Generation parameters including model, prompt, size, quality
* @returns Image response with generated images; when `params.stream` is true (gpt-image-1 only), a stream of partial-image events instead
*/
generate(
params: ImageGenerateParams
): Promise<ImagesResponse> | Promise<Stream<ImageGenStreamEvent>>;Edits or extends images given a source image and prompt description. Supports mask-based editing to specify regions for modification.
/**
* Creates an edited or extended image given source images and a prompt
* @param params - Edit parameters including image, prompt, mask, model
* @returns Image response with edited images; when `params.stream` is true (gpt-image-1 only), a stream of partial-image events instead
*/
edit(
params: ImageEditParams
): Promise<ImagesResponse> | Promise<Stream<ImageEditStreamEvent>>;Creates variations of an existing image. Only supported for dall-e-2.
/**
* Creates variations of a given image
* @param params - Variation parameters including image and model
* @returns Image response with variation images (no streaming support; dall-e-2 only)
*/
createVariation(
params: ImageCreateVariationParams
): Promise<ImagesResponse>;Represents a generated or edited image object in the response.
interface Image {
/** Base64-encoded image data (present with the b64_json response format; gpt-image-1 always returns this) */
b64_json?: string;
/** URL of the generated image (for dall-e-2 and dall-e-3 with url response format) */
url?: string;
/** For dall-e-3 only: the revised prompt used to generate the image */
revised_prompt?: string;
}The response object containing generated or edited images and metadata.
interface ImagesResponse {
/** Unix timestamp (in seconds) when the image was created */
created: number;
/** Array of generated Image objects. Optional — guard before indexing/iterating under strict null checks */
data?: Array<Image>;
/** Background setting: 'transparent' or 'opaque' (gpt-image-1 only) */
background?: 'transparent' | 'opaque';
/** Output format of images: 'png', 'webp', or 'jpeg' */
output_format?: 'png' | 'webp' | 'jpeg';
/** Quality of generated images: 'low', 'medium', or 'high' */
quality?: 'low' | 'medium' | 'high';
/** Size of generated images: '1024x1024', '1024x1536', or '1536x1024' */
size?: '1024x1024' | '1024x1536' | '1536x1024';
/** Token usage information (gpt-image-1 only) */
usage?: ImagesResponse.Usage;
}
namespace ImagesResponse {
interface Usage {
/** Number of tokens in the input prompt */
input_tokens: number;
/** Detailed input token breakdown */
input_tokens_details: {
/** Number of image tokens in input */
image_tokens: number;
/** Number of text tokens in input */
text_tokens: number;
};
/** Number of tokens in the output image */
output_tokens: number;
/** Total tokens used */
total_tokens: number;
}
}Supported image generation models.
type ImageModel = 'dall-e-2' | 'dall-e-3' | 'gpt-image-1' | 'gpt-image-1-mini';Streaming is supported for generate() and edit() methods for gpt-image-1. Events are emitted as partial images are generated and when generation completes.
Union of all streaming events for image generation.
type ImageGenStreamEvent = ImageGenPartialImageEvent | ImageGenCompletedEvent;Emitted when a partial image is available during generation streaming.
interface ImageGenPartialImageEvent {
/** Type of event: always 'image_generation.partial_image' */
type: 'image_generation.partial_image';
/** Base64-encoded partial image data */
b64_json: string;
/** 0-based index for the partial image in the sequence */
partial_image_index: number;
/** Size of the image: '1024x1024', '1024x1536', or '1536x1024' */
size: '1024x1024' | '1024x1536' | '1536x1024' | 'auto';
/** Quality setting: 'low', 'medium', 'high', or 'auto' */
quality: 'low' | 'medium' | 'high' | 'auto';
/** Output format: 'png', 'webp', or 'jpeg' */
output_format: 'png' | 'webp' | 'jpeg';
/** Background setting: 'transparent', 'opaque', or 'auto' */
background: 'transparent' | 'opaque' | 'auto';
/** Unix timestamp when the event was created */
created_at: number;
}Emitted when image generation completes and the final image is available.
interface ImageGenCompletedEvent {
/** Type of event: always 'image_generation.completed' */
type: 'image_generation.completed';
/** Base64-encoded final image data */
b64_json: string;
/** Size of the image */
size: '1024x1024' | '1024x1536' | '1536x1024' | 'auto';
/** Quality setting */
quality: 'low' | 'medium' | 'high' | 'auto';
/** Output format */
output_format: 'png' | 'webp' | 'jpeg';
/** Background setting */
background: 'transparent' | 'opaque' | 'auto';
/** Unix timestamp when the event was created */
created_at: number;
/** Token usage information (gpt-image-1 only) */
usage: {
input_tokens: number;
input_tokens_details: {
image_tokens: number;
text_tokens: number;
};
output_tokens: number;
total_tokens: number;
};
}Union of all streaming events for image editing.
type ImageEditStreamEvent = ImageEditPartialImageEvent | ImageEditCompletedEvent;Emitted when a partial image is available during editing streaming.
interface ImageEditPartialImageEvent {
/** Type of event: always 'image_edit.partial_image' */
type: 'image_edit.partial_image';
/** Base64-encoded partial edited image data */
b64_json: string;
/** 0-based index for the partial image in the sequence */
partial_image_index: number;
/** Size of the edited image */
size: '1024x1024' | '1024x1536' | '1536x1024' | 'auto';
/** Quality setting */
quality: 'low' | 'medium' | 'high' | 'auto';
/** Output format */
output_format: 'png' | 'webp' | 'jpeg';
/** Background setting */
background: 'transparent' | 'opaque' | 'auto';
/** Unix timestamp when the event was created */
created_at: number;
}Emitted when image editing completes and the final image is available.
interface ImageEditCompletedEvent {
/** Type of event: always 'image_edit.completed' */
type: 'image_edit.completed';
/** Base64-encoded final edited image data */
b64_json: string;
/** Size of the edited image */
size: '1024x1024' | '1024x1536' | '1536x1024' | 'auto';
/** Quality setting */
quality: 'low' | 'medium' | 'high' | 'auto';
/** Output format */
output_format: 'png' | 'webp' | 'jpeg';
/** Background setting */
background: 'transparent' | 'opaque' | 'auto';
/** Unix timestamp when the event was created */
created_at: number;
/** Token usage information (gpt-image-1 only) */
usage: {
input_tokens: number;
input_tokens_details: {
image_tokens: number;
text_tokens: number;
};
output_tokens: number;
total_tokens: number;
};
}Parameters for the generate() method.
interface ImageGenerateParams {
/** Text description of desired image(s). Max 32000 chars for gpt-image-1, 1000 for dall-e-2, 4000 for dall-e-3 */
prompt: string;
/** Model to use: 'dall-e-2', 'dall-e-3', 'gpt-image-1', or 'gpt-image-1-mini' (default: 'dall-e-2') */
model?: (string & {}) | ImageModel | null;
/** Number of images to generate (1-10, default: 1). Note: dall-e-3 only supports n=1 */
n?: number | null;
/** Image size. For gpt-image-1: '1024x1024', '1536x1024', '1024x1536', or 'auto' */
size?: 'auto' | '1024x1024' | '1536x1024' | '1024x1536' | '256x256' | '512x512' | '1792x1024' | '1024x1792' | null;
/** Quality level: 'auto', 'standard' (dall-e-2/3), 'hd' (dall-e-3), 'high'/'medium'/'low' (gpt-image-1) */
quality?: 'standard' | 'hd' | 'low' | 'medium' | 'high' | 'auto' | null;
/** Response format: 'url' or 'b64_json' (default: 'url'). dall-e-2/3 only — gpt-image-1 always returns b64_json and does not accept this parameter */
response_format?: 'url' | 'b64_json' | null;
/** Style (dall-e-3 only): 'vivid' or 'natural' */
style?: 'vivid' | 'natural' | null;
/** Background setting (gpt-image-1 only): 'transparent', 'opaque', or 'auto' (default) */
background?: 'transparent' | 'opaque' | 'auto' | null;
/** Moderation level for gpt-image-1: 'low' or 'auto' (default) */
moderation?: 'low' | 'auto' | null;
/** Enable streaming mode (gpt-image-1 only, default: false) */
stream?: boolean | null;
/** Number of partial images to generate in streaming (0-3, default: 1) */
partial_images?: number | null;
/** Output compression level 0-100% (gpt-image-1 with webp/jpeg only, default: 100) */
output_compression?: number | null;
/** Output format (gpt-image-1 only): 'png', 'jpeg', or 'webp' */
output_format?: 'png' | 'jpeg' | 'webp' | null;
/** End-user identifier for monitoring abuse */
user?: string;
}Parameters for the edit() method.
interface ImageEditParams {
/** Source image(s) to edit. For gpt-image-1: png/webp/jpg up to 50MB each, max 16 images */
image: Uploadable | Array<Uploadable>;
/** Text description of desired edits. Max 1000 chars for dall-e-2, 32000 for gpt-image-1 */
prompt: string;
/** Optional mask image with transparent areas (PNG, same dimensions as image) */
mask?: Uploadable;
/** Model to use: 'dall-e-2', 'gpt-image-1' (default: 'dall-e-2') */
model?: (string & {}) | ImageModel | null;
/** Number of images to generate (1-10, default: 1) */
n?: number | null;
/** Image size. For gpt-image-1: '1024x1024', '1536x1024', '1024x1536', or 'auto' */
size?: '256x256' | '512x512' | '1024x1024' | '1536x1024' | '1024x1536' | 'auto' | null;
/** Quality level: 'standard', 'low', 'medium', 'high' (gpt-image-1), 'auto' */
quality?: 'standard' | 'low' | 'medium' | 'high' | 'auto' | null;
/** Response format: 'url' or 'b64_json' (dall-e-2 only) */
response_format?: 'url' | 'b64_json' | null;
/** Background setting (gpt-image-1 only): 'transparent', 'opaque', or 'auto' */
background?: 'transparent' | 'opaque' | 'auto' | null;
/** Input fidelity control (gpt-image-1 only): 'high' or 'low' (default). Unsupported for gpt-image-1-mini */
input_fidelity?: 'high' | 'low' | null;
/** Enable streaming mode (default: false) */
stream?: boolean | null;
/** Number of partial images to generate in streaming (0-3) */
partial_images?: number | null;
/** Output compression level 0-100% (gpt-image-1 with webp/jpeg, default: 100) */
output_compression?: number | null;
/** Output format (gpt-image-1 only): 'png', 'jpeg', or 'webp' */
output_format?: 'png' | 'jpeg' | 'webp' | null;
/** End-user identifier for monitoring abuse */
user?: string;
}Parameters for the createVariation() method.
interface ImageCreateVariationParams {
/** Source image to create variations from. Must be square PNG < 4MB (dall-e-2 only) */
image: Uploadable;
/** Model to use: 'dall-e-2' only (default: 'dall-e-2') */
model?: (string & {}) | ImageModel | null;
/** Number of variations to generate (1-10, default: 1) */
n?: number | null;
/** Image size: '256x256', '512x512', or '1024x1024' (default: '1024x1024') */
size?: '256x256' | '512x512' | '1024x1024' | null;
/** Response format: 'url' or 'b64_json' (default: 'url') */
response_format?: 'url' | 'b64_json' | null;
/** End-user identifier for monitoring abuse */
user?: string;
}Generate a single image with dall-e-3 using vivid style.
import OpenAI from 'openai';
import fs from 'fs';
const client = new OpenAI();
/**
 * Generates a single HD image with dall-e-3 and saves it to disk.
 * dall-e-3 supports only n=1 and automatically rewrites prompts;
 * the rewritten text is returned in `revised_prompt`.
 */
async function generateImageDallE3() {
  const response = await client.images.generate({
    model: 'dall-e-3',
    prompt: 'A futuristic cityscape at sunset with flying vehicles, neon lights, and detailed architecture',
    n: 1,
    size: '1024x1024',
    quality: 'hd',
    style: 'vivid',
    response_format: 'b64_json'
  });
  // `data` is optional on ImagesResponse — guard before indexing
  const image = response.data?.[0];
  if (image?.b64_json) {
    const imageBuffer = Buffer.from(image.b64_json, 'base64');
    fs.writeFileSync('generated_image.png', imageBuffer);
    console.log('Image saved as generated_image.png');
  }
  // Display revised prompt (dall-e-3 automatically refines prompts)
  console.log('Revised prompt:', image?.revised_prompt);
  console.log('Created at:', new Date(response.created * 1000).toISOString());
}
generateImageDallE3().catch(console.error);Generate multiple variations with gpt-image-1.
import OpenAI from 'openai';
import fs from 'fs';
const client = new OpenAI();
/**
 * Generates four images with gpt-image-1 and saves each to disk.
 */
async function generateMultipleImages() {
  const response = await client.images.generate({
    model: 'gpt-image-1',
    prompt: 'A serene beach scene with crystal clear water, white sand, and tropical islands',
    n: 4,
    size: '1024x1024',
    quality: 'high'
    // gpt-image-1 always returns base64 images; the response_format
    // parameter is only accepted by dall-e-2 and dall-e-3.
  });
  // `data` is optional on ImagesResponse — default to an empty list
  const images = response.data ?? [];
  images.forEach((image, index) => {
    if (image.b64_json) {
      const buffer = Buffer.from(image.b64_json, 'base64');
      fs.writeFileSync(`beach_${index + 1}.png`, buffer);
    }
  });
  console.log(`Generated ${images.length} images`);
  console.log('Token usage:', response.usage);
}
generateMultipleImages().catch(console.error);Generate images and receive URLs instead of base64.
import OpenAI from 'openai';
const client = new OpenAI();
/**
 * Generates images with dall-e-2 and returns hosted URLs.
 * URLs expire after 60 minutes — download or persist them promptly.
 */
async function generateWithUrls() {
  const response = await client.images.generate({
    model: 'dall-e-2',
    prompt: 'A majestic mountain landscape with snow peaks, alpine meadows, and clear blue sky',
    n: 2,
    size: '1024x1024',
    quality: 'standard',
    response_format: 'url' // URLs expire after 60 minutes
  });
  // `data` is optional on ImagesResponse — guard before iterating
  const images = response.data ?? [];
  images.forEach((image, index) => {
    console.log(`Image ${index + 1} URL: ${image.url}`);
  });
  // Note: Store URLs immediately as they expire in 60 minutes
  return images.map(img => img.url);
}
generateWithUrls().catch(console.error);Stream partial images as they're generated for progressive rendering.
import OpenAI from 'openai';
import fs from 'fs';
const client = new OpenAI();
/**
 * Streams partial images during generation (gpt-image-1 only) for
 * progressive rendering, then saves the final image.
 */
async function streamImageGeneration() {
  console.log('Starting image generation stream...');
  // Keep the requested partial count in one place so progress logging
  // can report "k of N" against the real total.
  const requestedPartials = 2;
  const stream = await client.images.generate({
    model: 'gpt-image-1',
    prompt: 'An intricate steampunk airship flying over clouds with brass gears and mechanical wings',
    n: 1,
    size: '1024x1024',
    quality: 'high',
    stream: true,
    partial_images: requestedPartials // Request 2 partial images before final
  });
  let finalImage: string | undefined;
  for await (const event of stream) {
    if (event.type === 'image_generation.partial_image') {
      // partial_image_index is 0-based; the original code divided by a
      // running counter, which always printed "1/1", "2/2", etc.
      console.log(`Received partial image ${event.partial_image_index + 1}/${requestedPartials}`);
      console.log(`  Size: ${event.size}`);
      console.log(`  Quality: ${event.quality}`);
      console.log(`  Format: ${event.output_format}`);
      // In a real app, you might render this progressively
    } else if (event.type === 'image_generation.completed') {
      console.log('Image generation completed!');
      console.log(`  Final size: ${event.size}`);
      console.log(`  Token usage: ${event.usage.total_tokens} total`);
      finalImage = event.b64_json;
      // Save final image
      if (finalImage) {
        const buffer = Buffer.from(finalImage, 'base64');
        fs.writeFileSync('streamed_image.png', buffer);
        console.log('Final image saved as streamed_image.png');
      }
    }
  }
}
streamImageGeneration().catch(console.error);Edit an image by providing a prompt and source image.
import OpenAI from 'openai';
import fs from 'fs';
const client = new OpenAI();
/**
 * Edits an image with gpt-image-1 from a text prompt and saves the result.
 */
async function editImage() {
  // Pass a file stream: the SDK's Uploadable type accepts fs.ReadStream,
  // File, or toFile(...) — not a raw Buffer from readFileSync.
  const imageStream = fs.createReadStream('original_image.png');
  const response = await client.images.edit({
    image: imageStream,
    prompt: 'Replace the background with a vibrant sunset over mountains',
    model: 'gpt-image-1',
    n: 1,
    size: '1024x1024',
    quality: 'high'
    // gpt-image-1 always returns base64 data; the response_format
    // parameter is only accepted by dall-e-2.
  });
  // `data` is optional on ImagesResponse — guard before indexing
  const edited = response.data?.[0];
  if (edited?.b64_json) {
    const buffer = Buffer.from(edited.b64_json, 'base64');
    fs.writeFileSync('edited_image.png', buffer);
    console.log('Edited image saved');
  }
}
editImage().catch(console.error);Use a mask to specify exactly which regions should be edited.
import OpenAI from 'openai';
import fs from 'fs';
const client = new OpenAI();
/**
 * Edits only the regions marked transparent in the mask PNG.
 * The mask must have the same dimensions as the source image.
 */
async function editImageWithMask() {
  // Pass file streams (Uploadable); a raw Buffer from readFileSync is not accepted.
  const imageStream = fs.createReadStream('photo.png');
  const maskStream = fs.createReadStream('mask.png');
  const response = await client.images.edit({
    image: imageStream,
    mask: maskStream, // Transparent areas indicate where to edit
    prompt: 'Add a realistic bird flying in the edited region with wings spread',
    model: 'gpt-image-1',
    n: 1,
    size: '1024x1024',
    quality: 'high',
    input_fidelity: 'high' // Preserve more of the original image
    // gpt-image-1 always returns base64 data; the response_format
    // parameter is only accepted by dall-e-2.
  });
  // `data` is optional on ImagesResponse — guard before indexing
  const edited = response.data?.[0];
  if (edited?.b64_json) {
    const buffer = Buffer.from(edited.b64_json, 'base64');
    fs.writeFileSync('masked_edited_image.png', buffer);
    console.log('Masked edit completed');
  }
}
editImageWithMask().catch(console.error);Edit multiple images at once (gpt-image-1 supports up to 16 images).
import OpenAI from 'openai';
import fs from 'fs';
const client = new OpenAI();
/**
 * Edits using multiple reference images (gpt-image-1 accepts up to 16).
 */
async function editMultipleImages() {
  // Use file streams: the SDK's Uploadable type accepts fs.ReadStream,
  // File, or toFile(...) — not raw Buffers from readFileSync.
  const images = [
    fs.createReadStream('photo1.png'),
    fs.createReadStream('photo2.png'),
    fs.createReadStream('photo3.png')
  ];
  const response = await client.images.edit({
    image: images,
    prompt: 'Enhance the colors and add vibrant lighting, increase contrast and saturation',
    model: 'gpt-image-1',
    n: 3, // Number of output images generated from the combined inputs
    size: '1024x1024',
    quality: 'medium',
    output_format: 'png'
    // gpt-image-1 always returns base64 data; the response_format
    // parameter is only accepted by dall-e-2.
  });
  // `data` is optional on ImagesResponse — default to an empty list
  const edited = response.data ?? [];
  edited.forEach((image, index) => {
    if (image.b64_json) {
      const buffer = Buffer.from(image.b64_json, 'base64');
      fs.writeFileSync(`enhanced_${index + 1}.png`, buffer);
    }
  });
  console.log(`Edited ${edited.length} images`);
}
editMultipleImages().catch(console.error);Stream edited images with partial previews.
import OpenAI from 'openai';
import fs from 'fs';
const client = new OpenAI();
/**
 * Streams an image edit, logging partial previews and saving the final result.
 */
async function streamImageEdit() {
  // Uploadable accepts a file stream, not a raw Buffer from readFileSync.
  const imageStream = fs.createReadStream('original.png');
  console.log('Starting image edit stream...');
  const stream = await client.images.edit({
    image: imageStream,
    prompt: 'Transform the scene to show the same location during a magical winter night with snow and glowing aurora borealis',
    model: 'gpt-image-1',
    n: 1,
    size: '1024x1024',
    quality: 'high',
    stream: true,
    partial_images: 3
    // gpt-image-1 always streams base64 data; the response_format
    // parameter is only accepted by dall-e-2.
  });
  let eventCount = 0;
  for await (const event of stream) {
    if (event.type === 'image_edit.partial_image') {
      eventCount++;
      console.log(`Partial edit ${event.partial_image_index + 1} received`);
      console.log(`  Background: ${event.background}`);
      // Could render progressive preview here
    } else if (event.type === 'image_edit.completed') {
      console.log('Edit completed!');
      console.log(`  Quality applied: ${event.quality}`);
      console.log(`  Format: ${event.output_format}`);
      if (event.b64_json) {
        const buffer = Buffer.from(event.b64_json, 'base64');
        fs.writeFileSync('winter_edit.png', buffer);
        console.log('Final edited image saved');
      }
    }
  }
  console.log(`Total events received: ${eventCount}`);
}
streamImageEdit().catch(console.error);Create variations of an existing image (dall-e-2 only).
import OpenAI from 'openai';
import fs from 'fs';
const client = new OpenAI();
/**
 * Creates four variations of an existing image (dall-e-2 only).
 * The source must be a square PNG under 4MB.
 */
async function createVariations() {
  // Pass a file stream (Uploadable); a raw Buffer from readFileSync is not accepted.
  const imageStream = fs.createReadStream('reference_image.png');
  const response = await client.images.createVariation({
    image: imageStream,
    model: 'dall-e-2',
    n: 4,
    size: '1024x1024',
    response_format: 'b64_json'
  });
  // `data` is optional on ImagesResponse — default to an empty list
  const variations = response.data ?? [];
  variations.forEach((image, index) => {
    if (image.b64_json) {
      const buffer = Buffer.from(image.b64_json, 'base64');
      fs.writeFileSync(`variation_${index + 1}.png`, buffer);
    }
  });
  console.log(`Created ${variations.length} variations`);
  console.log('Created at:', new Date(response.created * 1000).toISOString());
}
createVariations().catch(console.error);Create variations of different sizes.
import OpenAI from 'openai';
import fs from 'fs';
const client = new OpenAI();
/**
 * Creates variations at three sizes (dall-e-2 only), running the three
 * independent requests concurrently.
 */
async function createVariationsMultipleSizes() {
  // Each request needs its own stream: an fs.ReadStream can only be
  // consumed once, and a raw Buffer is not a valid Uploadable.
  const sizes = ['256x256', '512x512', '1024x1024'] as const;
  const [smallVariations, mediumVariations, largeVariations] = await Promise.all(
    sizes.map(size =>
      client.images.createVariation({
        image: fs.createReadStream('portrait.png'),
        size,
        n: 2,
        response_format: 'url'
      })
    )
  );
  // `data` is optional on ImagesResponse — default to empty lists
  console.log('Small (256x256):', (smallVariations.data ?? []).map(img => img.url));
  console.log('Medium (512x512):', (mediumVariations.data ?? []).map(img => img.url));
  console.log('Large (1024x1024):', (largeVariations.data ?? []).map(img => img.url));
  // Remember: URLs expire after 60 minutes - download immediately if needed
}
createVariationsMultipleSizes().catch(console.error);Image dimensions determine level of detail and computational cost.
| Size | Models | Use Case |
|---|---|---|
| 256x256 | dall-e-2 | Thumbnails, quick previews |
| 512x512 | dall-e-2 | Small images, mobile-friendly |
| 1024x1024 | dall-e-2, dall-e-3, gpt-image-1 | Standard size, balanced quality |
| 1536x1024 | gpt-image-1 | Landscape, wider content |
| 1024x1536 | gpt-image-1 | Portrait, taller content |
| 1792x1024 | dall-e-3 | Wide landscape (dall-e-3 only) |
| 1024x1792 | dall-e-3 | Tall portrait (dall-e-3 only) |
| auto | gpt-image-1 | Let model determine optimal size |
Controls detail level and processing time.
| Quality | Models | Notes |
|---|---|---|
| auto | gpt-image-1 | Automatically selects best quality |
| low | gpt-image-1 | Faster generation, lower detail |
| medium | gpt-image-1 | Balanced generation and quality |
| high | gpt-image-1 | Higher detail, longer processing |
| standard | dall-e-2, dall-e-3 | Default for DALL-E models |
| hd | dall-e-3 | Enhanced detail (DALL-E 3 only) |
Artistic direction (dall-e-3 only).
| Style | Description |
|---|---|
| vivid | Hyper-real, dramatic, saturated colors |
| natural | Natural, less stylized appearance |
How images are returned.
| Format | Return Type | Expiration | Notes |
|---|---|---|---|
| url | HTTP URL | 60 minutes | Default for DALL-E, easier to distribute |
| b64_json | Base64 string | No expiration | Better for long-term storage, slightly larger payload |
Background handling (gpt-image-1 only).
| Background | Description |
|---|---|
| auto | Model determines best background (default) |
| transparent | Transparent background (requires png/webp format) |
| opaque | Solid opaque background |
| Model | Best For | Strengths | Limitations |
|---|---|---|---|
| dall-e-2 | Classic image generation | Established, reliable | Limited size options |
| dall-e-3 | High-quality artistic images | Detailed prompts, vivid style | Only n=1 supported |
| gpt-image-1 | Multi-image, streaming, editing | Streaming, masks, multiple images | Newer, still being optimized |
| gpt-image-1-mini | Fast, lightweight generation | Low latency | Reduced quality, fewer features |
Example:
// Good prompt
prompt: 'A cozy cabin in snowy forest at twilight, warm golden light from windows, detailed architectural style, photorealistic, film noir lighting'
// Poor prompt
prompt: 'A house in snow'import OpenAI from 'openai';
const client = new OpenAI();
// Wraps image generation with typed API error handling.
async function safeImageGeneration() {
try {
const response = await client.images.generate({
model: 'gpt-image-1',
prompt: 'Your creative prompt here',
n: 1,
size: '1024x1024'
});
return response;
} catch (error) {
// OpenAI.APIError carries the HTTP status code and server message.
if (error instanceof OpenAI.APIError) {
console.error('API Error:', error.message);
console.error('Status:', error.status);
if (error.status === 400) {
console.error('Invalid parameters provided');
} else if (error.status === 429) {
console.error('Rate limited - too many requests');
} else if (error.status === 500) {
console.error('Server error - please retry');
}
}
// Re-throw so callers can apply their own retry/backoff policy.
throw error;
}
}import OpenAI from 'openai';
const client = new OpenAI();
/**
 * Downloads generated image URLs before they expire (~60 minutes).
 */
async function manageImageUrls() {
  const response = await client.images.generate({
    model: 'dall-e-2',
    prompt: 'A beautiful landscape',
    response_format: 'url'
  });
  // URLs expire after 60 minutes - download immediately.
  // `data` is optional on ImagesResponse — default to an empty list.
  for (const image of response.data ?? []) {
    if (image.url) {
      // Download and save immediately or store permanently.
      // Named `fetchResponse` to avoid shadowing the outer `response`.
      const fetchResponse = await fetch(image.url);
      const buffer = await fetchResponse.arrayBuffer();
      // Save buffer to disk or convert to base64
    }
  }
}import OpenAI from 'openai';
import fs from 'fs';
const client = new OpenAI();
// Streams generation events, collecting progress updates and saving the final image.
async function efficientStreaming() {
const stream = await client.images.generate({
model: 'gpt-image-1',
prompt: 'Your image prompt',
stream: true,
partial_images: 2 // Balance preview updates and final quality
});
const progressUpdates: string[] = [];
for await (const event of stream) {
if (event.type === 'image_generation.partial_image') {
// Update UI with partial image (partial_image_index is 0-based)
progressUpdates.push(`Partial ${event.partial_image_index + 1}`);
} else if (event.type === 'image_generation.completed') {
// Process final image
if (event.b64_json) {
const buffer = Buffer.from(event.b64_json, 'base64');
fs.writeFileSync('final_image.png', buffer);
}
console.log('Generation complete with updates:', progressUpdates);
}
}
}
efficientStreaming().catch(console.error);import OpenAI from 'openai';
const client = new OpenAI();
// Use gpt-image-1-mini for fast, low-cost generation
async function costOptimized() {
  return client.images.generate({
    model: 'gpt-image-1-mini', // Smaller model, lower cost
    prompt: 'Your prompt',
    n: 1,
    size: '1024x1024',
    quality: 'low' // Lower quality = lower cost
  });
}
// Use batch processing for multiple images
async function batchGeneration() {
  const prompts = [
    'A peaceful garden',
    'A mountain landscape',
    'An urban skyline'
  ];
  const results = await Promise.all(
    prompts.map(prompt =>
      client.images.generate({
        model: 'gpt-image-1-mini',
        prompt,
        n: 1,
        // gpt-image-1 models support only 1024x1024, 1536x1024,
        // 1024x1536, or 'auto' — 512x512 is dall-e-2 only.
        size: '1024x1024',
        quality: 'low' // Lower quality = lower cost
      })
    )
  );
  return results;
}Install with Tessl CLI
npx tessl i tessl/npm-openai