The official TypeScript library for the OpenAI API
npx @tessl/cli install tessl/npm-openai@6.9.1
The official TypeScript/JavaScript client library for interacting with the OpenAI API. This library provides comprehensive access to OpenAI's AI models including GPT-4, GPT-3.5, DALL-E, Whisper, and more, with full TypeScript support, streaming capabilities, and both Node.js and edge runtime compatibility.
npm install openai
import OpenAI from "openai";
For CommonJS:
const OpenAI = require("openai");
Named imports for specific functionality:
import OpenAI, { AzureOpenAI, toFile } from "openai";
import OpenAI from "openai";
// Initialize client
const client = new OpenAI({
apiKey: process.env.OPENAI_API_KEY, // This is the default and can be omitted
});
// Create a chat completion
const completion = await client.chat.completions.create({
model: "gpt-4",
messages: [{ role: "user", content: "Say hello!" }],
});
console.log(completion.choices[0].message.content);
The OpenAI library is organized around several key components:
OpenAI (main client) and AzureOpenAI (Azure-specific client)
The legacy text completions endpoint for non-chat use cases. This API is deprecated in favor of Chat Completions for new projects.
function completions.create(
params: CompletionCreateParams
): Promise<Completion> | Stream<Completion>;
interface CompletionCreateParams {
model: string;
prompt: string | string[];
max_tokens?: number;
temperature?: number;
stream?: boolean;
// ... additional parameters
}
Note: This API is deprecated. Use Chat Completions for all new projects. The Chat Completions API provides better performance, supports conversations, and includes newer features like function calling.
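For existing integrations that still target the legacy endpoint, a minimal request looks like this (a sketch; the instruct model name and prompt are illustrative, and an initialized client is assumed):
const legacy = await client.completions.create({
  model: "gpt-3.5-turbo-instruct", // illustrative legacy-compatible model
  prompt: "Write a one-line tagline for an ice cream shop.",
  max_tokens: 32,
});
console.log(legacy.choices[0].text);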
The standard interface for conversational AI using GPT models. Supports function calling, vision, streaming, and stored completions.
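For example, declaring a tool the model may call (a minimal sketch; the get_weather tool and its JSON schema are illustrative):
const completion = await client.chat.completions.create({
  model: "gpt-4o",
  messages: [{ role: "user", content: "What's the weather in Paris?" }],
  tools: [
    {
      type: "function",
      function: {
        name: "get_weather", // hypothetical tool
        description: "Get the current weather for a city",
        parameters: {
          type: "object",
          properties: { city: { type: "string" } },
          required: ["city"],
        },
      },
    },
  ],
});
// If the model chose to call the tool, its arguments arrive as a JSON string.
const toolCall = completion.choices[0].message.tool_calls?.[0];
if (toolCall?.type === "function") {
  console.log(toolCall.function.name, toolCall.function.arguments);
}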
// Create completion
function create(
params: ChatCompletionCreateParams
): Promise<ChatCompletion> | Stream<ChatCompletionChunk>;
// Type-safe parsing with JSON schemas or Zod
function parse<Params extends ChatCompletionParseParams, ParsedT>(
params: Params
): Promise<ParsedChatCompletion<ParsedT>>;
// Streaming helper
function stream<Params extends ChatCompletionCreateParamsStreaming, ParsedT>(
body: Params,
options?: RequestOptions
): ChatCompletionStream<ParsedT>;
// Tool calling automation
function runTools<Params extends ChatCompletionToolRunnerParams<any>, ParsedT>(
body: Params,
options?: RunnerOptions
): ChatCompletionRunner<ParsedT> | ChatCompletionStreamingRunner<ParsedT>;
// Stored completions management
function retrieve(completionID: string): Promise<ChatCompletion>;
function update(
completionID: string,
params: ChatCompletionUpdateParams
): Promise<ChatCompletion>;
function list(params?: ChatCompletionListParams): Promise<ChatCompletionsPage>;
function delete(completionID: string): Promise<ChatCompletionDeleted>;
// Stored completion messages
function messages.list(
completionID: string,
params?: MessageListParams
): Promise<ChatCompletionStoreMessagesPage>;
interface MessageListParams {
order?: 'asc' | 'desc'; // Sort by timestamp
limit?: number;
after?: string;
before?: string;
}
interface ChatCompletionCreateParams {
model: string;
messages: ChatCompletionMessageParam[];
stream?: boolean;
temperature?: number;
max_tokens?: number;
tools?: ChatCompletionTool[];
store?: boolean; // Set to true to enable retrieval later
// ... additional parameters
}
OpenAI's primary interface for multi-turn conversations, with advanced features including persistent sessions, tool use, and built-in computer use and web search tools.
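A minimal call looks like this (a sketch; output_text is the SDK's convenience aggregation of the response's output items):
const response = await client.responses.create({
  model: "gpt-4o",
  input: "Summarize the plot of Hamlet in one sentence.",
});
console.log(response.output_text);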
// Response management
function create(
params: ResponseCreateParams
): Promise<Response> | Stream<ResponseStreamEvent>;
function retrieve(responseID: string): Promise<Response>;
function cancel(responseID: string): Promise<Response>;
function delete(responseID: string): Promise<void>;
// Type-safe parsing helper
function parse<Params extends ResponseParseParams, ParsedT>(
body: Params
): Promise<ParsedResponse<ParsedT>>;
// Streaming helper
function stream<Params extends ResponseCreateParamsStreaming, ParsedT>(
body: Params,
options?: RequestOptions
): ResponseStream<ParsedT>;
// Input items - list input items for a response
function inputItems.list(responseID: string, params?: InputItemListParams): Promise<ResponseItemsPage>;
// Input tokens - count input tokens before creating a response
function inputTokens.count(params?: InputTokenCountParams): Promise<InputTokenCountResponse>;
interface ResponseCreateParams {
model: string;
input: ResponseInput;
instructions?: string;
tools?: Tool[];
// ... additional parameters
}
Generate vector embeddings from text for semantic search, clustering, and recommendations.
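For example (a minimal sketch; the embedding model name is illustrative):
const embedding = await client.embeddings.create({
  model: "text-embedding-3-small",
  input: "The quick brown fox jumps over the lazy dog",
});
console.log(embedding.data[0].embedding.length); // vector dimensionality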
function create(
params: EmbeddingCreateParams
): Promise<CreateEmbeddingResponse>;
interface EmbeddingCreateParams {
model: string;
input: string | string[];
encoding_format?: "float" | "base64";
}
File management for fine-tuning, assistants, and other API features. Supports both simple and multipart uploads.
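For example, uploading a local JSONL file and waiting for it to be processed (a sketch; the file path is hypothetical):
import fs from "node:fs";
const file = await client.files.create({
  file: fs.createReadStream("training_data.jsonl"), // hypothetical local file
  purpose: "fine-tune",
});
const processed = await client.files.waitForProcessing(file.id);
console.log(processed.status);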
// Files API
function create(params: FileCreateParams): Promise<FileObject>;
function retrieve(fileID: string): Promise<FileObject>;
function list(params?: FileListParams): Promise<FileObjectsPage>;
function delete(fileID: string): Promise<FileDeleted>;
function content(fileID: string): Promise<Response>;
// Wait for file processing to complete
function waitForProcessing(
id: string,
options?: { pollInterval?: number; maxWait?: number }
): Promise<FileObject>;
// Uploads API for large files (multipart upload)
function uploads.create(params: UploadCreateParams): Promise<Upload>;
function uploads.cancel(uploadID: string): Promise<Upload>;
function uploads.complete(uploadID: string, params: UploadCompleteParams): Promise<Upload>;
function uploads.parts.create(uploadID: string, params: PartCreateParams): Promise<UploadPart>;
Generate, edit, and create variations of images using DALL-E models.
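For example (a minimal sketch; the prompt and size are illustrative):
const images = await client.images.generate({
  model: "dall-e-3",
  prompt: "A watercolor painting of a lighthouse at dusk",
  size: "1024x1024",
});
// Depending on the model, results are returned as URLs or base64 data.
console.log(images.data?.[0]?.url);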
function generate(params: ImageGenerateParams): Promise<ImagesResponse>;
function edit(params: ImageEditParams): Promise<ImagesResponse>;
function createVariation(
params: ImageCreateVariationParams
): Promise<ImagesResponse>;
Speech-to-text transcription, translation, and text-to-speech generation.
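For example, generating speech and transcribing it back (a sketch; file names and model choices are illustrative):
import fs from "node:fs";
// Text-to-speech: the response body is the raw audio.
const speech = await client.audio.speech.create({
  model: "tts-1",
  voice: "alloy",
  input: "Hello from the OpenAI SDK.",
});
await fs.promises.writeFile("hello.mp3", Buffer.from(await speech.arrayBuffer()));
// Speech-to-text transcription of a local audio file.
const transcription = await client.audio.transcriptions.create({
  file: fs.createReadStream("hello.mp3"),
  model: "whisper-1",
});
console.log(transcription.text);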
// Text-to-speech
function speech.create(params: SpeechCreateParams): Promise<Response>;
// Speech-to-text transcription
function transcriptions.create(
params: TranscriptionCreateParams
): Promise<Transcription>;
// Translation to English
function translations.create(
params: TranslationCreateParams
): Promise<Translation>;
Build AI assistants with persistent threads, code interpreter, file search, and function calling capabilities. Complete CRUD operations for assistants, threads, messages, and runs with streaming support.
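A typical flow creates an assistant, a thread, and a run, then polls for completion (a sketch; the name and instructions are illustrative):
const assistant = await client.beta.assistants.create({
  model: "gpt-4o",
  name: "Math Tutor", // illustrative
  instructions: "Answer math questions concisely.",
});
const thread = await client.beta.threads.create();
await client.beta.threads.messages.create(thread.id, {
  role: "user",
  content: "What is 12 * 9?",
});
// createAndPoll waits until the run reaches a terminal state.
const run = await client.beta.threads.runs.createAndPoll(thread.id, {
  assistant_id: assistant.id,
});
if (run.status === "completed") {
  const messages = await client.beta.threads.messages.list(thread.id);
  console.log(messages.data[0].content);
}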
// Assistant management
function beta.assistants.create(params: AssistantCreateParams): Promise<Assistant>;
function beta.assistants.retrieve(assistantID: string): Promise<Assistant>;
function beta.assistants.update(assistantID: string, params: AssistantUpdateParams): Promise<Assistant>;
function beta.assistants.list(query?: AssistantListParams): PagePromise<AssistantsPage, Assistant>;
function beta.assistants.delete(assistantID: string): Promise<AssistantDeleted>;
// Thread management
function beta.threads.create(params?: ThreadCreateParams): Promise<Thread>;
function beta.threads.retrieve(threadID: string): Promise<Thread>;
function beta.threads.update(threadID: string, params: ThreadUpdateParams): Promise<Thread>;
function beta.threads.delete(threadID: string): Promise<ThreadDeleted>;
function beta.threads.createAndRun(params: ThreadCreateAndRunParams): Promise<Run | Stream<AssistantStreamEvent>>;
// Message management
function beta.threads.messages.create(threadID: string, params: MessageCreateParams): Promise<Message>;
function beta.threads.messages.retrieve(threadID: string, messageID: string): Promise<Message>;
function beta.threads.messages.update(threadID: string, messageID: string, params: MessageUpdateParams): Promise<Message>;
function beta.threads.messages.list(threadID: string, params?: MessageListParams): PagePromise<MessagesPage, Message>;
function beta.threads.messages.delete(threadID: string, messageID: string): Promise<MessageDeleted>;
// Run management
function beta.threads.runs.create(threadID: string, params: RunCreateParams): Promise<Run | Stream<AssistantStreamEvent>>;
function beta.threads.runs.retrieve(threadID: string, runID: string): Promise<Run>;
function beta.threads.runs.update(threadID: string, runID: string, params: RunUpdateParams): Promise<Run>;
function beta.threads.runs.list(threadID: string, params?: RunListParams): PagePromise<RunsPage, Run>;
function beta.threads.runs.cancel(threadID: string, runID: string): Promise<Run>;
function beta.threads.runs.submitToolOutputs(threadID: string, runID: string, params: RunSubmitToolOutputsParams): Promise<Run | Stream<AssistantStreamEvent>>;
// Run steps
function beta.threads.runs.steps.retrieve(threadID: string, runID: string, stepID: string): Promise<RunStep>;
function beta.threads.runs.steps.list(threadID: string, runID: string, params?: StepListParams): PagePromise<RunStepsPage, RunStep>;
Build conversational interfaces with OpenAI's ChatKit framework. ChatKit provides session management and thread-based conversations with workflow support.
// Create ChatKit session
function beta.chatkit.sessions.create(
params: SessionCreateParams
): Promise<ChatSession>;
// Cancel session
function beta.chatkit.sessions.cancel(sessionID: string): Promise<ChatSession>;
// Manage threads
function beta.chatkit.threads.retrieve(threadID: string): Promise<ChatKitThread>;
function beta.chatkit.threads.list(
params?: ThreadListParams
): Promise<ChatKitThreadsPage>;
function beta.chatkit.threads.delete(
threadID: string
): Promise<ThreadDeleteResponse>;
function beta.chatkit.threads.listItems(
threadID: string,
params?: ThreadListItemsParams
): Promise<ChatKitThreadItemListDataPage>;
interface SessionCreateParams {
user: string;
workflow: ChatSessionWorkflowParam;
configuration?: ChatSessionChatKitConfigurationParam;
expires_after?: ChatSessionExpiresAfterParam;
rate_limits?: ChatSessionRateLimitsParam;
}
Note: ChatKit is a beta feature. The API may change. Threads are created implicitly through the session workflow; there is no direct threads.create() method. See Assistants API for related thread and message management.
WebSocket-based real-time voice conversations with low latency and streaming audio. Includes SIP call management, client secret creation, and beta session management.
// Client secrets for WebSocket auth
function realtime.clientSecrets.create(
params: ClientSecretCreateParams
): Promise<ClientSecretCreateResponse>;
// SIP call management
function realtime.calls.accept(callID: string, params: CallAcceptParams): Promise<void>;
function realtime.calls.hangup(callID: string): Promise<void>;
function realtime.calls.refer(callID: string, params: CallReferParams): Promise<void>;
function realtime.calls.reject(callID: string, params: CallRejectParams): Promise<void>;
// Beta: Ephemeral session management
function beta.realtime.sessions.create(params: SessionCreateParams): Promise<SessionCreateResponse>;
function beta.realtime.transcriptionSessions.create(params: TranscriptionSessionCreateParams): Promise<TranscriptionSession>;
// WebSocket clients
import { OpenAIRealtimeWebSocket } from "openai/realtime/websocket"; // Browser
import { OpenAIRealtimeWS } from "openai/realtime/ws"; // Node.js with 'ws'
Create and manage fine-tuning jobs to customize models on your own data. Supports supervised learning, DPO, and reinforcement learning with checkpoint and permission management.
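For example, uploading training data and starting a supervised job (a sketch; the file path and base model are illustrative):
import fs from "node:fs";
const trainingFile = await client.files.create({
  file: fs.createReadStream("training_data.jsonl"), // hypothetical dataset
  purpose: "fine-tune",
});
const job = await client.fineTuning.jobs.create({
  model: "gpt-4o-mini-2024-07-18", // illustrative base model
  training_file: trainingFile.id,
});
// Inspect progress events while the job runs.
const events = await client.fineTuning.jobs.listEvents(job.id, { limit: 10 });
console.log(events.data.map((event) => event.message));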
// Job management
function fineTuning.jobs.create(params: FineTuningJobCreateParams): Promise<FineTuningJob>;
function fineTuning.jobs.retrieve(jobID: string): Promise<FineTuningJob>;
function fineTuning.jobs.list(params?: FineTuningJobListParams): Promise<FineTuningJobsPage>;
function fineTuning.jobs.cancel(jobID: string): Promise<FineTuningJob>;
function fineTuning.jobs.pause(jobID: string): Promise<FineTuningJob>;
function fineTuning.jobs.resume(jobID: string): Promise<FineTuningJob>;
function fineTuning.jobs.listEvents(jobID: string, params?: JobEventListParams): Promise<FineTuningJobEventsPage>;
// Checkpoint access
function fineTuning.jobs.checkpoints.list(jobID: string, params?: CheckpointListParams): Promise<FineTuningJobCheckpointsPage>;
// Checkpoint permissions (nested under fineTuning.checkpoints)
function fineTuning.checkpoints.permissions.create(checkpointID: string, body: PermissionCreateParams): Promise<PermissionCreateResponsesPage>;
function fineTuning.checkpoints.permissions.retrieve(checkpointID: string, params?: PermissionRetrieveParams): Promise<PermissionRetrieveResponse>;
function fineTuning.checkpoints.permissions.delete(permissionID: string, params: PermissionDeleteParams): Promise<PermissionDeleteResponse>;
// Alpha features - grader validation
function fineTuning.alpha.graders.run(body: GraderRunParams): Promise<GraderRunResponse>;
function fineTuning.alpha.graders.validate(body: GraderValidateParams): Promise<GraderValidateResponse>;
// Methods resource (type definitions for fine-tuning methods)
// Note: fineTuning.methods provides TypeScript type definitions for supervised, DPO, and reinforcement learning configurations
interface Methods {
supervised: SupervisedMethod;
dpo: DpoMethod;
reinforcement: ReinforcementMethod;
}
Store and search embeddings for retrieval-augmented generation (RAG) with the Assistants API. Includes file and file batch management for vector stores.
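For example, creating a store, attaching an uploaded file, and searching it (a sketch; the store name and file ID are hypothetical, and the fields on each search result may differ):
const store = await client.vectorStores.create({ name: "product-docs" });
// Attach a previously uploaded file (see Files and Uploads) by ID.
await client.vectorStores.files.create(store.id, { file_id: "file_abc123" });
// Query the store directly.
const results = await client.vectorStores.search(store.id, { query: "refund policy" });
console.log(results.data.map((hit) => hit.filename));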
// Vector store management
function vectorStores.create(params: VectorStoreCreateParams): Promise<VectorStore>;
function vectorStores.retrieve(vectorStoreID: string): Promise<VectorStore>;
function vectorStores.update(vectorStoreID: string, params: VectorStoreUpdateParams): Promise<VectorStore>;
function vectorStores.list(params?: VectorStoreListParams): Promise<VectorStoresPage>;
function vectorStores.delete(vectorStoreID: string): Promise<VectorStoreDeleted>;
function vectorStores.search(storeID: string, params: VectorStoreSearchParams): Promise<VectorStoreSearchResponsesPage>;
// File management within vector stores
function vectorStores.files.create(vectorStoreID: string, body: FileCreateParams): Promise<VectorStoreFile>;
function vectorStores.files.retrieve(fileID: string, params: FileRetrieveParams): Promise<VectorStoreFile>;
function vectorStores.files.update(fileID: string, params: FileUpdateParams): Promise<VectorStoreFile>;
function vectorStores.files.list(vectorStoreID: string, params?: FileListParams): Promise<VectorStoreFilesPage>;
function vectorStores.files.delete(fileID: string, params: FileDeleteParams): Promise<VectorStoreFileDeleted>;
function vectorStores.files.content(fileID: string, params: FileContentParams): Promise<FileContentResponsesPage>;
// Batch file operations
function vectorStores.fileBatches.create(vectorStoreID: string, body: FileBatchCreateParams): Promise<VectorStoreFileBatch>;
function vectorStores.fileBatches.retrieve(batchID: string, params: FileBatchRetrieveParams): Promise<VectorStoreFileBatch>;
function vectorStores.fileBatches.cancel(batchID: string, params: FileBatchCancelParams): Promise<VectorStoreFileBatch>;
function vectorStores.fileBatches.listFiles(batchID: string, params: FileBatchListFilesParams): Promise<VectorStoreFilesPage>;
Batch processing for cost-effective async operations and an evaluation framework for testing model performance.
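For example, submitting a batch of requests from an uploaded JSONL file (a sketch; the requests file is hypothetical):
import fs from "node:fs";
const inputFile = await client.files.create({
  file: fs.createReadStream("batch_requests.jsonl"), // hypothetical requests file
  purpose: "batch",
});
const batch = await client.batches.create({
  input_file_id: inputFile.id,
  endpoint: "/v1/chat/completions",
  completion_window: "24h",
});
// Later: check status and download results once the batch completes.
const status = await client.batches.retrieve(batch.id);
if (status.status === "completed" && status.output_file_id) {
  const output = await client.files.content(status.output_file_id);
  console.log(await output.text());
}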
// Batches
function batches.create(params: BatchCreateParams): Promise<Batch>;
function batches.retrieve(batchID: string): Promise<Batch>;
function batches.list(params?: BatchListParams): Promise<BatchesPage>;
function batches.cancel(batchID: string): Promise<Batch>;
// Evaluations
function evals.create(params: EvalCreateParams): Promise<EvalCreateResponse>;
function evals.retrieve(evalID: string): Promise<EvalRetrieveResponse>;
function evals.update(
evalID: string,
params: EvalUpdateParams
): Promise<EvalUpdateResponse>;
function evals.list(params?: EvalListParams): Promise<EvalListResponsesPage>;
function evals.delete(evalID: string): Promise<EvalDeleteResponse>;
// Evaluation Runs
function evals.runs.create(
evalID: string,
params: RunCreateParams
): Promise<RunCreateResponse>;
function evals.runs.retrieve(
runID: string,
params: RunRetrieveParams
): Promise<RunRetrieveResponse>;
function evals.runs.cancel(
runID: string,
params: RunCancelParams
): Promise<RunCancelResponse>;
function evals.runs.list(
evalID: string,
params?: RunListParams
): Promise<RunListResponsesPage>;
function evals.runs.delete(
runID: string,
params: RunDeleteParams
): Promise<RunDeleteResponse>;
// Output items from evaluation runs
function evals.runs.outputItems.retrieve(
itemID: string,
params: OutputItemRetrieveParams
): Promise<OutputItemRetrieveResponse>;
function evals.runs.outputItems.list(
runID: string,
params?: OutputItemListParams
): Promise<OutputItemListResponsesPage>;
Type definitions for evaluation grading. The graders resource provides structured type definitions used with the Evaluations API for automated assessment of model outputs.
Note: The client.graders.graderModels resource exists but contains no methods - it only provides TypeScript type definitions for use with the Evals API.
// Grader type definitions used in Evaluations API:
interface LabelModelGrader {
input: Array<LabelModelGraderInput>;
labels: string[];
model: string;
name: string;
passing_labels: string[];
type: 'label_model';
}
interface ScoreModelGrader {
input: Array<ScoreModelGraderInput>;
model: string;
name: string;
passing_threshold: number;
score_range: [number, number];
type: 'score_model';
}
interface PythonGrader {
code: string;
name: string;
type: 'python';
}
interface StringCheckGrader {
expected_strings: string[];
name: string;
type: 'string_check';
}
interface TextSimilarityGrader {
model: string;
name: string;
similarity_threshold: number;
type: 'text_similarity';
}
interface MultiGrader {
graders: Array<LabelModelGrader | ScoreModelGrader | PythonGrader | StringCheckGrader | TextSimilarityGrader>;
name: string;
type: 'multi';
}
These type definitions are used with the Evaluations API for automated grading of evaluation runs. See Batches and Evaluations for usage examples.
Classify content for safety violations using OpenAI's moderation models.
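For example (a minimal sketch):
const moderation = await client.moderations.create({
  model: "omni-moderation-latest",
  input: "I want to hurt someone.",
});
const result = moderation.results[0];
console.log(result.flagged, result.categories.violence);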
function moderations.create(
params: ModerationCreateParams
): Promise<ModerationCreateResponse>;
interface ModerationCreateParams {
model?: string;
input: string | string[] | ModerationMultiModalInput[];
}
Content categories checked: hate, hate/threatening, harassment, harassment/threatening, self-harm, self-harm/intent, self-harm/instructions, sexual, sexual/minors, violence, violence/graphic, illicit, illicit/violent.
List and retrieve available models, and delete fine-tuned models.
function models.retrieve(model: string): Promise<Model>;
function models.list(): Promise<ModelsPage>;
function models.delete(model: string): Promise<ModelDeleted>;
Initialize and configure the OpenAI client with various options.
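For example, overriding defaults globally and per request (a sketch; the values shown are illustrative):
import OpenAI from "openai";
const client = new OpenAI({
  apiKey: process.env.OPENAI_API_KEY,
  timeout: 60_000, // per-request timeout in milliseconds
  maxRetries: 3, // automatic retries on transient failures
  baseURL: "https://api.openai.com/v1", // override when routing through a proxy or gateway
});
// Options can also be supplied per request as a second argument:
const completion = await client.chat.completions.create(
  { model: "gpt-4o", messages: [{ role: "user", content: "ping" }] },
  { timeout: 10_000, maxRetries: 0 },
);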
class OpenAI {
constructor(options?: ClientOptions);
}
type ApiKeySetter = () => Promise<string>;
interface ClientOptions {
apiKey?: string | ApiKeySetter;
organization?: string;
project?: string;
webhookSecret?: string;
baseURL?: string;
timeout?: number;
maxRetries?: number;
defaultHeaders?: Record<string, string>;
defaultQuery?: Record<string, string>;
dangerouslyAllowBrowser?: boolean;
}
The library provides granular error classes for different failure scenarios:
class OpenAIError extends Error {}
class APIError extends OpenAIError {}
class APIConnectionError extends OpenAIError {}
class APIConnectionTimeoutError extends APIConnectionError {}
class APIUserAbortError extends APIConnectionError {}
class RateLimitError extends APIError {} // HTTP 429
class BadRequestError extends APIError {} // HTTP 400
class AuthenticationError extends APIError {} // HTTP 401
class PermissionDeniedError extends APIError {} // HTTP 403
class NotFoundError extends APIError {} // HTTP 404
class ConflictError extends APIError {} // HTTP 409
class UnprocessableEntityError extends APIError {} // HTTP 422
class InternalServerError extends APIError {} // HTTP 5xx
class InvalidWebhookSignatureError extends OpenAIError {} // Webhook signature verification failed
Many endpoints support streaming responses via Server-Sent Events:
// Chat completion streaming
const stream = await client.chat.completions.create({
model: "gpt-4",
messages: [{ role: "user", content: "Count to 10" }],
stream: true,
});
for await (const chunk of stream) {
const content = chunk.choices[0]?.delta?.content || "";
process.stdout.write(content);
}
List methods return paginated results with async iteration:
// Iterate all items
for await (const file of client.files.list()) {
console.log(file);
}
// Iterate pages
for await (const page of client.files.list().iterPages()) {
console.log(page.data);
}
// Manual pagination
const page = await client.files.list();
if (page.hasNextPage()) {
const nextPage = await page.getNextPage();
}
Use the AzureOpenAI client for Azure-specific deployments:
import { AzureOpenAI } from "openai";
const client = new AzureOpenAI({
apiKey: process.env.AZURE_OPENAI_API_KEY,
endpoint: process.env.AZURE_OPENAI_ENDPOINT,
apiVersion: "2024-02-01",
deployment: "gpt-4", // Your deployment name
});
interface APIPromise<T> extends Promise<T> {
withResponse(): Promise<{ data: T; response: Response }>;
asResponse(): Promise<Response>;
}
class PagePromise<PageClass, Item> extends APIPromise<PageClass> implements AsyncIterable<Item> {
// Enables async iteration over paginated items
[Symbol.asyncIterator](): AsyncIterator<Item>;
}
interface Stream<T> extends AsyncIterable<T> {
abort(): void;
done(): boolean;
tee(): [Stream<T>, Stream<T>];
}
interface RequestOptions {
headers?: Record<string, string>;
maxRetries?: number;
timeout?: number;
query?: Record<string, unknown>;
signal?: AbortSignal;
}
type Uploadable = File | Response | FsReadStream | BunFile;
interface Metadata {
[key: string]: string;
}
The SDK exports several utility types from the shared module for use across different API resources:
// Model type definitions
type AllModels = ChatModel | string;
type ChatModel =
| 'gpt-5.1'
| 'gpt-5'
| 'gpt-4.1'
| 'gpt-4o'
| 'gpt-4o-mini'
| 'gpt-4-turbo'
| 'gpt-4'
| 'gpt-3.5-turbo'
| 'o4-mini'
| 'o3'
| 'o3-mini'
| 'o1'
| 'o1-preview'
| 'o1-mini'
// ... and many more model identifiers
;
type ResponsesModel = string; // Model identifier for Responses API
// Filter types for vector store search and other operations
interface ComparisonFilter {
key: string;
op: 'eq' | 'ne' | 'gt' | 'gte' | 'lt' | 'lte' | 'in' | 'nin';
value: string | number | boolean | Array<string | number>;
}
interface CompoundFilter {
and?: Array<ComparisonFilter | CompoundFilter>;
or?: Array<ComparisonFilter | CompoundFilter>;
}
// Tool and function definitions
type CustomToolInputFormat = CustomToolInputFormat.Text | CustomToolInputFormat.Grammar;
interface FunctionDefinition {
name: string;
description?: string;
parameters?: FunctionParameters;
strict?: boolean;
}
type FunctionParameters = { [key: string]: unknown };
// Response format types
interface ResponseFormatJSONObject {
type: 'json_object';
}
interface ResponseFormatJSONSchema {
json_schema: ResponseFormatJSONSchema.JSONSchema;
type: 'json_schema';
}
interface ResponseFormatText {
type: 'text';
}
interface ResponseFormatTextGrammar {
grammar: string;
type: 'text_grammar';
}
interface ResponseFormatTextPython {
type: 'text_python';
}
// Reasoning configuration
interface Reasoning {
type: 'default' | 'extended' | 'internal';
effort?: ReasoningEffort;
content?: 'enabled' | 'disabled';
}
type ReasoningEffort = 'none' | 'minimal' | 'low' | 'medium' | 'high' | null;
// Error structure
interface ErrorObject {
code: string | null;
message: string;
param: string | null;
type: string;
}
Common use cases:
ChatModel or AllModels for type-safe model selection
ComparisonFilter and CompoundFilter for vector store search queries
FunctionDefinition and FunctionParameters when defining custom tools
ResponseFormat* types to specify desired output formats
Reasoning and ReasoningEffort for reasoning model configuration
ErrorObject type for API error responses
Utilities for verifying webhook signatures from OpenAI:
function webhooks.verifySignature(
payload: string,
headers: Record<string, string>,
secret?: string,
tolerance?: number
): void;
function webhooks.unwrap(
payload: string,
headers: Record<string, string>,
secret?: string,
tolerance?: number
): WebhookEvent;
Webhook events include: response completed/failed/cancelled, batch completed/failed, fine-tuning job status updates, eval run results, and realtime call events.
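For example, verifying and parsing an incoming event in an Express-style handler (a sketch; the route, body handling, and secret variable are assumptions, and unwrap throws when the signature is invalid):
import express from "express";
const app = express();
app.post("/openai-webhook", express.text({ type: "*/*" }), async (req, res) => {
  try {
    // Verifies the signature headers, then parses and returns the event payload.
    const event = await client.webhooks.unwrap(
      req.body,
      req.headers as Record<string, string>,
      process.env.OPENAI_WEBHOOK_SECRET,
    );
    console.log("received webhook event:", event.type);
    res.sendStatus(200);
  } catch {
    res.sendStatus(400); // signature verification failed
  }
});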
Converts various data types into File objects suitable for upload operations. This is a top-level export from the main package.
function toFile(
value: ToFileInput | PromiseLike<ToFileInput>,
name?: string | null | undefined,
options?: FilePropertyBag | undefined,
): Promise<File>;
type ToFileInput =
| Uploadable
| Exclude<BlobPart, string>
| AsyncIterable<BlobPart>
| Iterable<BlobPart>;
The toFile function handles conversion from various input types (streams, buffers, paths, etc.) into File objects that can be uploaded via the API. It automatically handles file reading, buffering, and proper MIME type detection.
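For example, wrapping an in-memory buffer and uploading it (a sketch; the file name and purpose are illustrative):
import { toFile } from "openai";
const file = await toFile(Buffer.from("hello world"), "notes.txt");
const uploaded = await client.files.create({ file, purpose: "assistants" });
console.log(uploaded.id);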
For detailed documentation, usage examples, and platform-specific behavior, see Files and Uploads - toFile Helper.
The SDK provides specialized helper modules that are imported via subpaths rather than the main openai package.
Type-safe parsing and validation using Zod schemas. Imported from openai/helpers/zod.
// Convert Zod schemas to response formats with auto-parsing
function zodResponseFormat<T>(
zodObject: z.ZodType<T>,
name: string,
props?: object
): AutoParseableResponseFormat<T>;
// Create function tools with Zod validation
function zodFunction<P>(options: {
name: string;
parameters: z.ZodType<P>;
function?: (args: P) => unknown | Promise<unknown>;
description?: string;
}): AutoParseableTool;
// Text format variant for Responses API
function zodTextFormat<T>(
zodObject: z.ZodType<T>,
name: string,
props?: object
): AutoParseableTextFormat<T>;
// Function tool variant for Responses API
function zodResponsesFunction<P>(options: {
name: string;
parameters: z.ZodType<P>;
function?: (args: P) => unknown | Promise<unknown>;
description?: string;
}): AutoParseableResponseTool;
Quick Example:
import { zodResponseFormat } from "openai/helpers/zod";
import { z } from "zod";
const schema = z.object({
name: z.string(),
age: z.number(),
});
const completion = await client.chat.completions.parse({
model: "gpt-4o-2024-08-06",
messages: [{ role: "user", content: "Generate user data" }],
response_format: zodResponseFormat(schema, "UserSchema"),
});
// Type-safe access to parsed data
const data = completion.choices[0].message.parsed;
console.log(data.name, data.age); // Fully typed
Play and record audio using ffmpeg/ffplay. Imported from openai/helpers/audio.
/**
* Play audio from a stream, Response, or File
* Requires ffplay to be installed
* Node.js only - throws error in browser
*/
function playAudio(
input: NodeJS.ReadableStream | Response | File
): Promise<void>;
/**
* Record audio from system input device
* Requires ffmpeg to be installed
* Node.js only - throws error in browser
*/
function recordAudio(options?: {
signal?: AbortSignal;
device?: number;
timeout?: number;
}): Promise<File>;
Quick Example:
import { playAudio, recordAudio } from "openai/helpers/audio";
// Record 5 seconds of audio
const audioFile = await recordAudio({ timeout: 5000 });
// Transcribe it
const transcription = await client.audio.transcriptions.create({
file: audioFile,
model: "whisper-1",
});
// Generate a response
const speech = await client.audio.speech.create({
model: "tts-1",
voice: "alloy",
input: transcription.text,
});
// Play the response
await playAudio(speech);
Several resources provide polling utilities for waiting on async operations:
// Wait for file processing
const file = await client.files.waitForProcessing(fileId);
// Create run and poll until completion
const run = await client.beta.threads.runs.createAndPoll(threadId, {
assistant_id: assistantId,
});
The library works across multiple JavaScript runtimes: Node.js, Deno, Bun, edge runtimes such as Cloudflare Workers, and browsers (when explicitly enabled with dangerouslyAllowBrowser: true).
Generate and manipulate videos using OpenAI's Sora video models.
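A rough sketch of the lifecycle (the model name, status values, and Video fields here are assumptions; check the Video type for the exact shape):
import fs from "node:fs";
const video = await client.videos.create({
  model: "sora-2", // assumed model identifier
  prompt: "A drone shot of a coastline at sunrise",
});
// Poll until processing finishes (status values are assumptions).
let current = await client.videos.retrieve(video.id);
while (current.status === "queued" || current.status === "in_progress") {
  await new Promise((resolve) => setTimeout(resolve, 5000));
  current = await client.videos.retrieve(video.id);
}
// Download the rendered content and save it locally.
const content = await client.videos.downloadContent(video.id);
await fs.promises.writeFile("video.mp4", Buffer.from(await content.arrayBuffer()));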
function videos.create(params: VideoCreateParams): Promise<Video>;
function videos.retrieve(videoID: string): Promise<Video>;
function videos.list(params?: VideoListParams): Promise<VideosPage>;
function videos.delete(videoID: string): Promise<VideoDeleteResponse>;
function videos.downloadContent(
videoID: string,
params?: VideoDownloadContentParams
): Promise<Response>;
function videos.remix(
videoID: string,
params: VideoRemixParams
): Promise<Video>;
Manage isolated execution containers for code interpreter and other tools.
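A minimal sketch (assuming name is the only required creation field):
const container = await client.containers.create({ name: "analysis-sandbox" }); // illustrative name
const details = await client.containers.retrieve(container.id);
console.log(details);
// Clean up when the container is no longer needed.
await client.containers.delete(container.id);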
function containers.create(
params: ContainerCreateParams
): Promise<ContainerCreateResponse>;
function containers.retrieve(
containerID: string
): Promise<ContainerRetrieveResponse>;
function containers.list(
params?: ContainerListParams
): Promise<ContainerListResponsesPage>;
function containers.delete(containerID: string): Promise<void>;
// Container file operations
function containers.files.create(
containerID: string,
params: FileCreateParams
): Promise<FileCreateResponse>;
function containers.files.retrieve(
fileID: string,
params: FileRetrieveParams
): Promise<FileRetrieveResponse>;
function containers.files.list(
containerID: string,
params?: FileListParams
): Promise<FileListResponsesPage>;
function containers.files.delete(
fileID: string,
params: FileDeleteParams
): Promise<void>;
function containers.files.content.retrieve(
fileID: string,
params: ContentRetrieveParams
): Promise<Response>;
Manage persistent conversation state independently of threads.
function conversations.create(
params: ConversationCreateParams
): Promise<Conversation>;
function conversations.retrieve(conversationID: string): Promise<Conversation>;
function conversations.update(
conversationID: string,
params: ConversationUpdateParams
): Promise<Conversation>;
function conversations.delete(
conversationID: string
): Promise<ConversationDeletedResource>;
// Conversation item operations
function conversations.items.create(
conversationID: string,
params: ConversationItemCreateParams
): Promise<ConversationItemList>;
function conversations.items.retrieve(
itemID: string,
params: ItemRetrieveParams
): Promise<ConversationItem>;
function conversations.items.list(
conversationID: string,
params?: ItemListParams
): Promise<ConversationItemsPage>;
function conversations.items.delete(
itemID: string,
params: ItemDeleteParams
): Promise<Conversation>;