OpenAI integrations for LangChain.js providing chat models, embeddings, tools, and Azure support.
Review status:
- Best practices: Pending — review whether this skill follows best practices.
- Impact: Pending — no eval scenarios have been run.
- Risk: Pending — the risk profile of this skill has not been assessed.
Modern conversational AI models supporting streaming, tools, structured output, and multimodal interactions. Built on OpenAI's Chat Completions API with support for the latest GPT models.
The primary chat model class providing access to OpenAI's chat completion models with comprehensive feature support.
/**
* Main OpenAI chat model integration supporting both Completions and Responses APIs
* Supports streaming, tool calling, structured output, and multimodal interactions
*/
class ChatOpenAI<CallOptions extends ChatOpenAICallOptions = ChatOpenAICallOptions>
extends BaseChatOpenAI<CallOptions> {
constructor(fields?: ChatOpenAIFields);
/** Model configuration */
model: string; // Default: "gpt-3.5-turbo"
temperature?: number; // Sampling temperature (0-2)
maxTokens?: number; // Maximum tokens to generate
topP?: number; // Nucleus sampling parameter
frequencyPenalty?: number; // Frequency penalty (-2 to 2)
presencePenalty?: number; // Presence penalty (-2 to 2)
n?: number; // Number of completions to generate
streaming: boolean; // Enable streaming (default: false)
streamUsage: boolean; // Include usage in streams (default: true)
logprobs?: boolean; // Return log probabilities
topLogprobs?: number; // Number of top log probabilities (1-20)
/** Advanced features */
useResponsesApi: boolean; // Use Responses API (default: false)
supportsStrictToolCalling?: boolean; // Enable strict tool calling
audio?: OpenAIClient.Chat.ChatCompletionAudioParam; // Audio output config
modalities?: Array<OpenAIClient.Chat.ChatCompletionModality>; // Output modalities
reasoning?: OpenAIClient.Reasoning; // Reasoning model options
zdrEnabled?: boolean; // Zero data retention mode
service_tier?: string; // "auto" | "default" | "flex" | "priority"
promptCacheKey?: string; // Cache key for prompt caching
verbosity?: OpenAIVerbosityParam; // "low" | "medium" | "high" | null
/** Generate a single response */
invoke(
input: BaseLanguageModelInput,
options?: CallOptions
): Promise<BaseMessage>;
/** Generate streaming response */
stream(
input: BaseLanguageModelInput,
options?: CallOptions
): Promise<IterableReadableStream<BaseMessageChunk>>;
/** Bind tools to the model */
bindTools(
tools: ChatOpenAIToolType[],
kwargs?: Partial<CallOptions>
): Runnable<BaseLanguageModelInput, BaseMessageLike>;
/** Enable structured output with schema validation */
withStructuredOutput<T>(
outputSchema: z.ZodType<T> | Record<string, any>,
config?: {
name?: string;
description?: string;
method?: "functionCalling" | "jsonMode" | "jsonSchema";
strict?: boolean;
}
): Runnable<BaseLanguageModelInput, T>;
/** Configure model with runtime options */
withConfig(config: RunnableConfig): Runnable;
/** Internal generation method */
_generate(
messages: BaseMessage[],
options: CallOptions,
runManager?: CallbackManagerForLLMRun
): Promise<ChatResult>;
}

Abstract base class providing common functionality for OpenAI chat models.
/**
* Abstract base class for OpenAI chat models
* Implements common OpenAI functionality, authentication, and client configuration
*/
abstract class BaseChatOpenAI<CallOptions extends BaseChatOpenAICallOptions>
extends BaseChatModel<CallOptions, AIMessageChunk> {
/** Client configuration */
openAIApiKey?: string; // OpenAI API key
organization?: string; // OpenAI organization ID
baseURL?: string; // Custom base URL
timeout?: number; // Request timeout in milliseconds
maxRetries?: number; // Maximum retry attempts
dangerouslyAllowBrowser?: boolean; // Allow browser usage
/** Advanced configuration */
completionWithRetry<T>(
request: OpenAIClient.Chat.ChatCompletionCreateParamsStreaming,
options?: OpenAICallOptions
): Promise<T>;
/** Convert messages to OpenAI format */
_convertMessagesToOpenAIParams(
messages: BaseMessage[],
model?: string
): OpenAIClient.Chat.ChatCompletionMessageParam[];
}

import { ChatOpenAI } from "@langchain/openai";
const chatModel = new ChatOpenAI({
model: "gpt-4o-mini",
temperature: 0.7,
maxTokens: 1000,
apiKey: process.env.OPENAI_API_KEY
});
// Simple message
const response = await chatModel.invoke("Explain quantum computing in simple terms");
console.log(response.content);
// Message with system prompt
import { HumanMessage, SystemMessage } from "@langchain/core/messages";
const messages = [
new SystemMessage("You are a helpful AI assistant specializing in science education."),
new HumanMessage("What is photosynthesis?")
];
const result = await chatModel.invoke(messages);

const streamingModel = new ChatOpenAI({
model: "gpt-4o",
streaming: true,
temperature: 0.3
});
// Stream tokens as they arrive
const stream = await streamingModel.stream("Write a short story about space exploration");
for await (const chunk of stream) {
process.stdout.write(chunk.content);
}

import { z } from "zod";
// Define tools
const tools = [
{
name: "calculator",
description: "Perform mathematical calculations",
schema: z.object({
operation: z.enum(["add", "subtract", "multiply", "divide"]),
a: z.number().describe("First number"),
b: z.number().describe("Second number")
})
},
{
name: "get_weather",
description: "Get current weather for a location",
schema: z.object({
location: z.string().describe("City name"),
units: z.enum(["celsius", "fahrenheit"]).optional()
})
}
];
const modelWithTools = chatModel.bindTools(tools);
// The model will automatically call tools when appropriate
const response = await modelWithTools.invoke("What's 15 * 23? Also, what's the weather in Tokyo?");
// Handle tool calls
if (response.tool_calls && response.tool_calls.length > 0) {
for (const toolCall of response.tool_calls) {
console.log(`Tool: ${toolCall.name}`);
console.log(`Arguments: ${JSON.stringify(toolCall.args)}`);
}
}

import { z } from "zod";
// Define output schema
const PersonSchema = z.object({
name: z.string().describe("Person's full name"),
age: z.number().describe("Person's age"),
occupation: z.string().describe("Person's job or profession"),
personality_traits: z.array(z.string()).describe("Key personality characteristics"),
confidence: z.number().min(0).max(1).describe("Confidence in the analysis")
});
const structuredModel = chatModel.withStructuredOutput(PersonSchema, {
name: "PersonAnalysis",
description: "Extract person information from text",
method: "functionCalling"
});
const result = await structuredModel.invoke(`
John is a 34-year-old software engineer who works at a tech startup.
He's known for being creative, analytical, and having a great sense of humor.
He enjoys solving complex problems and mentoring junior developers.
`);
console.log(result);
// Output: { name: "John", age: 34, occupation: "software engineer", ... }

import { HumanMessage } from "@langchain/core/messages";
const visionModel = new ChatOpenAI({
model: "gpt-4o",
maxTokens: 1000
});
const message = new HumanMessage({
content: [
{
type: "text",
text: "What do you see in this image? Describe it in detail."
},
{
type: "image_url",
image_url: {
url: "https://example.com/image.jpg"
// or use base64: "data:image/jpeg;base64,..."
}
}
]
});
const response = await visionModel.invoke([message]);

const audioModel = new ChatOpenAI({
model: "gpt-4o-audio-preview",
modalities: ["text", "audio"],
audio: {
voice: "alloy",
format: "wav"
}
});
const response = await audioModel.invoke("Tell me a joke and make it sound funny!");
// Response will include both text and audio
if (response.response_metadata?.audio) {
// Handle audio data
console.log("Audio data available:", response.response_metadata.audio.id);
}

const advancedModel = new ChatOpenAI({
// Model selection
model: "gpt-4o",
// Generation parameters
temperature: 0.8,
maxTokens: 2000,
topP: 0.95,
frequencyPenalty: 0.1,
presencePenalty: 0.1,
// API configuration
apiKey: process.env.OPENAI_API_KEY,
organization: process.env.OPENAI_ORG_ID,
timeout: 60000, // 60 seconds
maxRetries: 3,
// Advanced features
streaming: true,
streamUsage: true,
logprobs: true,
topLogprobs: 5,
// Performance options
service_tier: "priority", // Higher priority processing
promptCacheKey: "my-cache-key", // Enable prompt caching
// Privacy and compliance
zdrEnabled: true // Zero data retention
});

import { wrapOpenAIClientError } from "@langchain/openai";
try {
const response = await chatModel.invoke("Hello!");
} catch (error) {
// Errors are automatically wrapped with LangChain error handling
if (error.code === 'invalid_api_key') {
console.error("Invalid API key provided");
} else if (error.code === 'rate_limit_exceeded') {
console.error("Rate limit exceeded, please try again later");
} else {
console.error("Unexpected error:", error.message);
}
}

Runtime options that can be passed to generation methods.
interface ChatOpenAICallOptions extends BaseChatOpenAICallOptions {
/** Tools available to the model */
tools?: ChatOpenAIToolType[];
/** Tool selection strategy */
tool_choice?: OpenAIToolChoice | ResponsesToolChoice;
/** Response format specification */
response_format?: ChatOpenAIResponseFormat;
/** Deterministic sampling seed */
seed?: number;
/** Streaming configuration */
stream_options?: OpenAIClient.Chat.ChatCompletionStreamOptions;
/** Enable parallel tool calling */
parallel_tool_calls?: boolean;
/** Enable strict mode for tools and schemas */
strict?: boolean;
/** Output modalities (text, audio) */
modalities?: Array<OpenAIClient.Chat.ChatCompletionModality>;
/** Audio output configuration */
audio?: OpenAIClient.Chat.ChatCompletionAudioParam;
/** Predicted output for optimization */
prediction?: OpenAIClient.ChatCompletionPredictionContent;
/** Reasoning model options */
reasoning?: OpenAIClient.Reasoning;
/** Service tier for processing priority */
service_tier?: string;
/** Prompt cache key */
promptCacheKey?: string;
/** Response verbosity level */
verbosity?: OpenAIVerbosityParam;
}

The ChatOpenAI class supports all OpenAI chat models:
- gpt-4o, gpt-4o-2024-08-06, gpt-4o-2024-05-13
- gpt-4o-mini, gpt-4o-mini-2024-07-18
- gpt-4-turbo, gpt-4-turbo-2024-04-09
- gpt-4, gpt-4-0613, gpt-4-0314
- gpt-3.5-turbo, gpt-3.5-turbo-0125
- gpt-4o-audio-preview
- o1-preview, o1-mini

Different models support different features:
| Feature | GPT-4o | GPT-4o mini | GPT-4 | GPT-3.5 | Audio | o1 |
|---|---|---|---|---|---|---|
| Function Calling | ✅ | ✅ | ✅ | ✅ | ✅ | ❌ |
| Vision | ✅ | ✅ | ✅ | ❌ | ✅ | ❌ |
| Audio Output | ❌ | ❌ | ❌ | ❌ | ✅ | ❌ |
| Structured Output | ✅ | ✅ | ✅ | ✅ | ✅ | ❌ |
| Reasoning | ❌ | ❌ | ❌ | ❌ | ❌ | ✅ |