Image Models

Interface for image generation models that create images from text prompts with support for customization options like size, aspect ratio, and seed values.

Capabilities

ImageModelV2 Type

Core type definition for image generation model implementations.

/**
 * Core image generation model type
 */
type ImageModelV2 = {
  /** API specification version */
  specificationVersion: 'v2';
  /** Provider identifier (e.g., 'openai', 'stability') */
  provider: string;
  /** Model identifier (e.g., 'dall-e-3', 'stable-diffusion') */
  modelId: string;
  /** Maximum number of images that can be generated in a single call */
  maxImagesPerCall: number | undefined | GetMaxImagesPerCallFunction;
  
  /** Generate images from text prompt */
  doGenerate(options: ImageModelV2CallOptions): PromiseLike<ImageModelV2Result>;
};

/**
 * Function to determine max images per call based on model
 */
type GetMaxImagesPerCallFunction = (options: { modelId: string }) => PromiseLike<number | undefined> | number | undefined;
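A provider implements this type by mapping its own API onto the option and result shapes above. Below is a minimal sketch for a hypothetical `example` provider; the endpoint, request payload, and response fields are assumptions, and `ImageModelV2CallOptions` and `ImageModelV2CallWarning` are assumed to be exported alongside `ImageModelV2`.

import {
  ImageModelV2,
  ImageModelV2CallOptions,
  ImageModelV2CallWarning,
} from "@ai-sdk/provider";

// Hypothetical implementation; the endpoint, request body, and response shape
// are illustrative and do not correspond to a real backend.
const exampleImageModel: ImageModelV2 = {
  specificationVersion: 'v2',
  provider: 'example',
  modelId: 'example-image-1',
  maxImagesPerCall: 4,

  async doGenerate(options: ImageModelV2CallOptions) {
    const warnings: ImageModelV2CallWarning[] = [];

    // Surface settings this backend does not understand instead of failing.
    if (options.seed !== undefined) {
      warnings.push({ type: 'unsupported-setting', setting: 'seed' });
    }

    const response = await fetch('https://api.example.com/v1/images', {
      method: 'POST',
      // A real implementation would also merge `options.headers`
      // (dropping undefined values) into the request headers.
      headers: { 'content-type': 'application/json' },
      body: JSON.stringify({
        prompt: options.prompt,
        n: options.n,
        size: options.size,
      }),
      signal: options.abortSignal,
    });
    const body = await response.json();

    return {
      images: body.images as string[], // assumed: backend returns base64 strings
      warnings,
      response: {
        timestamp: new Date(),
        modelId: 'example-image-1',
        headers: Object.fromEntries(response.headers.entries()),
      },
    };
  },
};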

Call Options

Configuration options for image generation calls.

/**
 * Configuration options for image generation
 */
interface ImageModelV2CallOptions {
  /** Text prompt describing the desired image */
  prompt: string;
  /** Number of images to generate */
  n: number;
  /** Image dimensions as "width x height" */
  size?: `${number}x${number}` | undefined;
  /** Aspect ratio as "width:height" */
  aspectRatio?: `${number}:${number}` | undefined;
  /** Random seed for reproducible generation */
  seed?: number | undefined;
  /** Provider-specific options */
  providerOptions: SharedV2ProviderOptions;
  /** Abort signal for cancellation */
  abortSignal?: AbortSignal;
  /** Custom HTTP headers */
  headers?: Record<string, string | undefined>;
}
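Only `prompt`, `n`, and `providerOptions` are required; the sketch below shows a call that also wires up cancellation and custom headers. The header name and timeout value are illustrative, and `model` is assumed to come from a provider as in the usage examples further down.

import { ImageModelV2 } from "@ai-sdk/provider";

declare const model: ImageModelV2; // obtained from a provider, e.g. provider.imageModel(...)

const controller = new AbortController();
const timeout = setTimeout(() => controller.abort(), 30_000); // cancel after 30 seconds

const generation = await model.doGenerate({
  prompt: 'A watercolor painting of a lighthouse',
  n: 2,
  size: '1024x1024',
  seed: 42,
  providerOptions: {},
  abortSignal: controller.signal,
  headers: { 'x-request-id': 'img-gen-001' }, // illustrative header name
});

clearTimeout(timeout);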

Generation Results

Response structure containing generated images and metadata.

/**
 * Result from image generation
 */
interface ImageModelV2Result {
  /** Generated images as base64 strings or binary data */
  images: Array<string> | Array<Uint8Array>;
  /** Warnings from the generation */
  warnings: ImageModelV2CallWarning[];
  /** Provider-specific metadata */
  providerMetadata?: ImageModelV2ProviderMetadata;
  /** Response details (required) */
  response: {
    timestamp: Date;
    modelId: string;
    headers: Record<string, string> | undefined;
  };
}

/**
 * Provider metadata for image generation
 */
type ImageModelV2ProviderMetadata = Record<string, { images: JSONArray } & JSONValue>;
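Continuing the sketch above, the `response` block is always present on the result, while `providerMetadata` is optional and keyed by provider name (the `example` key below is hypothetical):

// Response details are always present on the result.
console.log(
  'Generated by', generation.response.modelId,
  'at', generation.response.timestamp.toISOString(),
);

// Provider metadata is optional and keyed by provider name.
const exampleMetadata = generation.providerMetadata?.['example'];
if (exampleMetadata) {
  // Each entry carries an `images` array plus arbitrary provider-specific JSON.
  console.log('Per-image metadata entries:', exampleMetadata.images.length);
}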

Warning Types

Warnings that can be returned from image generation calls.

/**
 * Warning types for image generation calls
 */
type ImageModelV2CallWarning =
  | { type: 'unsupported-setting'; setting: keyof ImageModelV2CallOptions; details?: string }
  | { type: 'other'; message: string };
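On the provider side, warnings are typically collected while translating call options into a backend request. A minimal sketch, assuming `ImageModelV2CallOptions` and `ImageModelV2CallWarning` are exported from the package:

import { ImageModelV2CallOptions, ImageModelV2CallWarning } from "@ai-sdk/provider";

// Hypothetical helper: collect warnings for options this backend cannot honor.
function collectWarnings(options: ImageModelV2CallOptions): ImageModelV2CallWarning[] {
  const warnings: ImageModelV2CallWarning[] = [];

  if (options.aspectRatio !== undefined) {
    warnings.push({
      type: 'unsupported-setting',
      setting: 'aspectRatio',
      details: 'This backend only supports explicit sizes; use `size` instead.',
    });
  }

  if (options.n > 4) {
    warnings.push({ type: 'other', message: 'Requests above 4 images may be truncated.' });
  }

  return warnings;
}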

Usage Examples:

import { ImageModelV2 } from "@ai-sdk/provider";

// Basic image generation
const model: ImageModelV2 = provider.imageModel('dall-e-3');

const result = await model.doGenerate({
  prompt: 'A futuristic cityscape at sunset with flying cars',
  n: 1,
  size: '1024x1024',
  providerOptions: {}
});

// Handle the generated image (a base64 string or binary data, per ImageModelV2Result)
const image = result.images[0];
if (typeof image === 'string') {
  // Base64-encoded image data: decode it into bytes
  const imageData = atob(image);
  const uint8Array = new Uint8Array(imageData.length);
  for (let i = 0; i < imageData.length; i++) {
    uint8Array[i] = imageData.charCodeAt(i);
  }
  const blob = new Blob([uint8Array], { type: 'image/png' });
} else {
  // Already binary data (Uint8Array)
  const blob = new Blob([image], { type: 'image/png' });
}

// Generate multiple images with different settings
const multipleImages = await model.doGenerate({
  prompt: 'Abstract art with vibrant colors',
  n: 3,
  aspectRatio: '16:9',
  seed: 12345,
  providerOptions: {
    stability: {
      style: 'photographic',
      cfg_scale: 7
    }
  }
});

console.log(`Generated ${multipleImages.images.length} images`);

// Check for warnings
if (result.warnings && result.warnings.length > 0) {
  result.warnings.forEach(warning => {
    if (warning.type === 'unsupported-setting') {
      console.warn(`Setting '${warning.setting}' is not supported:`, warning.details);
    } else {
      console.warn('Warning:', warning.message);
    }
  });
}

// Handle generation limits (a batching helper sketch follows at the end of these examples)
const requestedImages = 10;
const maxImages = typeof model.maxImagesPerCall === 'function'
  ? await model.maxImagesPerCall({ modelId: model.modelId })
  : model.maxImagesPerCall;

if (maxImages !== undefined && requestedImages > maxImages) {
  console.log(`Can only generate ${maxImages} images per call, splitting request`);
  // Split into multiple calls
}

// Advanced usage with aspect ratio
const landscapeImage = await model.doGenerate({
  prompt: 'Mountain landscape with a clear lake',
  n: 1,
  aspectRatio: '21:9', // Ultra-wide format
  providerOptions: {
    openai: {
      quality: 'hd',
      style: 'natural'
    }
  }
});
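
// Batching sketch (hypothetical helper, not part of the package): split a large
// request into sequential calls that respect maxImagesPerCall.
async function generateInBatches(
  imageModel: ImageModelV2,
  prompt: string,
  total: number,
): Promise<Array<string | Uint8Array>> {
  const limit =
    (typeof imageModel.maxImagesPerCall === 'function'
      ? await imageModel.maxImagesPerCall({ modelId: imageModel.modelId })
      : imageModel.maxImagesPerCall) ?? total;

  const images: Array<string | Uint8Array> = [];
  for (let remaining = total; remaining > 0; remaining -= limit) {
    const batch = await imageModel.doGenerate({
      prompt,
      n: Math.min(limit, remaining),
      providerOptions: {},
    });
    images.push(...batch.images);
  }
  return images;
}

const allImages = await generateInBatches(model, 'Abstract art with vibrant colors', 10);
console.log(`Generated ${allImages.length} images in total`);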