CtrlK
Blog · Docs · Log in · Get started
Tessl Logo

tessl/npm-langchain--openai

OpenAI integrations for LangChain.js providing chat models, embeddings, tools, and Azure support.

Pending
Quality

Pending

Does it follow best practices?

Impact

Pending

No eval scenarios have been run

Security by Snyk

Pending

The risk profile of this skill

Overview
Eval results
Files

docs/types-and-configuration.md

Types and Configuration

Comprehensive type system covering all configuration options, call parameters, and response formats for the @langchain/openai package.

Core Configuration Types

OpenAI Base Configuration

Base configuration interface shared across all OpenAI integrations.

/**
 * Common configuration shared by every OpenAI integration
 * (chat, completions, embeddings). Covers authentication and
 * low-level HTTP client behavior.
 */
interface OpenAIBaseInput {
  /** API key used to authenticate against the OpenAI API. */
  openAIApiKey?: string;
  /** Organization ID that API usage is attributed to. */
  organization?: string;
  /** Override for the API base URL (e.g. a proxy or gateway). */
  baseURL?: string;
  /** Per-request timeout, in milliseconds. */
  timeout?: number;
  /** How many times a failed request is retried before giving up. */
  maxRetries?: number;
  /** Opt in to running in a browser (exposes the API key to clients). */
  dangerouslyAllowBrowser?: boolean;
  /** Extra HTTP headers sent with every request. */
  defaultHeaders?: Record<string, string>;
  /** Extra query-string parameters appended to every request. */
  defaultQuery?: Record<string, string>;
}

/**
 * Constructor options for OpenAI chat models.
 * Adds generation parameters on top of the shared base configuration.
 */
interface OpenAIChatInput extends OpenAIBaseInput {
  /** Identifier of the chat model, e.g. "gpt-4o" or "gpt-3.5-turbo". */
  model?: string;
  /** Sampling temperature, between 0 (deterministic) and 2 (most random). */
  temperature?: number;
  /** Upper bound on the number of tokens generated per completion. */
  maxTokens?: number;
  /** Nucleus-sampling probability mass, between 0 and 1. */
  topP?: number;
  /** Penalty (-2 to 2) on tokens proportional to their frequency so far. */
  frequencyPenalty?: number;
  /** Penalty (-2 to 2) on tokens that have already appeared at all. */
  presencePenalty?: number;
  /** How many completions to generate per prompt. */
  n?: number;
  /** When true, tokens are delivered incrementally as a stream. */
  streaming?: boolean;
  /** When streaming, also emit token-usage metadata. */
  streamUsage?: boolean;
  /** Request per-token log probabilities in the response. */
  logprobs?: boolean;
  /** How many of the most likely alternative tokens to report per position. */
  topLogprobs?: number;
  /** Route requests through the Responses API instead of Chat Completions. */
  useResponsesApi?: boolean;
  /** Whether the selected model supports strict tool calling. */
  supportsStrictToolCalling?: boolean;
  /** Parameters for audio output. */
  audio?: OpenAIClient.Chat.ChatCompletionAudioParam;
  /** Which output modalities to request (e.g. text, audio). */
  modalities?: Array<OpenAIClient.Chat.ChatCompletionModality>;
  /** Options for reasoning-capable models. */
  reasoning?: OpenAIClient.Reasoning;
  /** Enable zero-data-retention handling. */
  zdrEnabled?: boolean;
  /** Processing-priority service tier. */
  service_tier?: string;
  /** Key used to improve prompt-cache hit rates. */
  promptCacheKey?: string;
  /** Desired verbosity of the response. */
  verbosity?: OpenAIVerbosityParam;
}

Azure Configuration

Configuration interface for Azure OpenAI services.

/**
 * Azure OpenAI-specific constructor options.
 * Layered on top of the shared base configuration; Azure deployments are
 * addressed by instance + deployment name, or by an explicit base path /
 * endpoint when URL construction must be bypassed.
 */
interface AzureOpenAIInput extends OpenAIBaseInput {
  /** API version string; Azure requires one on every request. */
  azureOpenAIApiVersion?: string;
  /** Key for the Azure OpenAI resource. */
  azureOpenAIApiKey?: string;
  /** Callback that yields an Azure AD bearer token (alternative to a key). */
  azureADTokenProvider?: () => Promise<string>;
  /** Name of the Azure OpenAI resource instance. */
  azureOpenAIApiInstanceName?: string;
  /** Name of the model deployment to target. */
  azureOpenAIApiDeploymentName?: string;
  /** Fully custom base path, overriding instance-based URL construction. */
  azureOpenAIBasePath?: string;
  /** Full endpoint URL of the Azure OpenAI resource. */
  azureOpenAIEndpoint?: string;
  /** Deployment name used specifically for embeddings requests. */
  azureOpenAIApiEmbeddingsDeploymentName?: string;
  /** Deployment name used specifically for completions requests. */
  azureOpenAIApiCompletionsDeploymentName?: string;
}

Call Options

Chat Model Call Options

Runtime options that can be passed to chat model generation methods.

/**
 * Per-call options accepted by chat model generation methods.
 * Extends the shared base options with chat-specific parameters.
 */
interface ChatOpenAICallOptions extends BaseChatOpenAICallOptions {
  /** Tools the model may call during this request. */
  tools?: ChatOpenAIToolType[];
  /** How the model should pick among the provided tools. */
  tool_choice?: OpenAIToolChoice | ResponsesToolChoice;
  /** Desired output format (text, JSON mode, or JSON schema). */
  response_format?: ChatOpenAIResponseFormat;
  /** Seed for best-effort deterministic sampling. */
  seed?: number;
  /** Options controlling streamed responses. */
  stream_options?: OpenAIClient.Chat.ChatCompletionStreamOptions;
  /** Allow the model to issue several tool calls in one turn. */
  parallel_tool_calls?: boolean;
  /** Enforce strict validation of tool and schema definitions. */
  strict?: boolean;
  /** Output modalities to request (text and/or audio). */
  modalities?: Array<OpenAIClient.Chat.ChatCompletionModality>;
  /** Parameters for audio output. */
  audio?: OpenAIClient.Chat.ChatCompletionAudioParam;
  /** Predicted output, used to speed up regeneration of known content. */
  prediction?: OpenAIClient.ChatCompletionPredictionContent;
  /** Options for reasoning-capable models. */
  reasoning?: OpenAIClient.Reasoning;
  /** Processing-priority service tier. */
  service_tier?: string;
  /** Key used to improve prompt-cache hit rates. */
  promptCacheKey?: string;
  /** Desired verbosity of the response. */
  verbosity?: OpenAIVerbosityParam;
}

/**
 * Call options common to all OpenAI chat models.
 */
interface BaseChatOpenAICallOptions extends OpenAICallOptions, BaseFunctionCallOptions {
  /** End-user identifier forwarded for abuse monitoring. */
  user?: string;
  /** Extra HTTP headers for this request only. */
  headers?: Record<string, string>;
  /** Additional low-level request options. */
  options?: RequestOptions;
}

/**
 * Core OpenAI call options
 *
 * NOTE(review): `OpenAICallOptions` is declared a second time further down
 * in this document (with `extends BaseOpenAICallOptions` and extra
 * completions-only fields such as `suffix` and `best_of`). Two declarations
 * of the same interface with different heritage clauses cannot coexist in a
 * single module — confirm which shape the published package actually exports
 * and remove the other.
 */
interface OpenAICallOptions {
  /** Stop sequences to end generation */
  stop?: string | string[];
  
  /** Custom user identifier */
  user?: string;
  
  /** Deterministic sampling seed */
  seed?: number;
  
  /** Logit bias for token probabilities */
  logit_bias?: Record<string, number>;
  
  /** Request timeout override */
  timeout?: number;
}

Language Model Call Options

Call options specific to text completion models.

/**
 * Call options for OpenAI language models (completions API)
 *
 * NOTE(review): this is the second declaration of `OpenAICallOptions` in the
 * document (the earlier one has no `extends` clause and lacks the
 * completions-only fields). Duplicate, conflicting declarations would clash
 * in a single module — confirm against the published package which one is
 * real. `BaseOpenAICallOptions` is referenced but never defined in this
 * document; presumably it comes from @langchain/core — verify.
 */
interface OpenAICallOptions extends BaseOpenAICallOptions {
  /** Stop sequences to end generation */
  stop?: string | string[];
  
  /** Custom user identifier for abuse monitoring */
  user?: string;
  
  /** Deterministic sampling seed */
  seed?: number;
  
  /** Logit bias for specific tokens */
  logit_bias?: Record<string, number>;
  
  /** Text to append after completion */
  suffix?: string;
  
  /** Echo back the prompt in response */
  echo?: boolean;
  
  /** Generate multiple completions server-side and return best */
  best_of?: number;
}

Response Format Types

Chat Response Formats

Types for specifying output formats in chat completions.

/**
 * Response format specification for chat completions.
 * Supports plain text, JSON mode, and schema-constrained JSON output.
 */
type ChatOpenAIResponseFormat = 
  | { type: "text" }
  | { type: "json_object" }
  | {
      type: "json_schema";
      json_schema: {
        /** Name identifying the schema. */
        name: string;
        /** Optional human-readable description of the schema. */
        description?: string;
        /**
         * JSON Schema document constraining the output.
         * `unknown` (not `any`) so consumers must validate before use.
         */
        schema: Record<string, unknown>;
        /** Enforce exact schema adherence (structured outputs). */
        strict?: boolean;
      };
    };

/**
 * Verbosity levels for model responses
 *
 * NOTE(review): this alias is declared again in the "Utility Types" section
 * below — duplicate type aliases clash in a single module; keep one.
 */
type OpenAIVerbosityParam = "low" | "medium" | "high" | null;

/**
 * Reasoning summary type for reasoning-capable (o-series) models.
 *
 * NOTE(review): a conflicting `ChatOpenAIReasoningSummary` type alias appears
 * in the "Utility Types" section below — confirm which shape the package
 * actually exports.
 */
interface ChatOpenAIReasoningSummary {
  /** Reasoning content */
  content: string;
  
  /** Reasoning steps */
  steps?: Array<{
    content: string;
    type?: string;
  }>;
}

Tool Types

Tool Definitions

Types for defining and working with tools in OpenAI models.

/**
 * Union type for all supported tool formats
 *
 * NOTE(review): `ResponsesTool` and `ResponsesToolChoice` are re-declared near
 * the end of this document as SDK-derived aliases with different shapes —
 * duplicate, conflicting declarations; confirm which definitions the package
 * exports and keep one set.
 */
type ChatOpenAIToolType = 
  | BindToolsInput 
  | OpenAIClient.Chat.ChatCompletionTool
  | ResponsesTool
  | CustomTool;

/**
 * Tool type for Responses API
 */
interface ResponsesTool {
  type: "function" | "file_search" | "code_interpreter";
  function?: {
    name: string;
    description?: string;
    // JSON Schema for the tool's arguments; `unknown` values would be safer
    // than `any` here (consumers should validate before use).
    parameters?: Record<string, any>;
    strict?: boolean;
  };
}

/**
 * Custom tool definition
 *
 * `schema` carries the argument shape; `func` is the runnable that receives
 * the raw string input and returns a string result.
 */
interface CustomTool<T extends Record<string, any> = Record<string, any>> {
  type: "custom_tool";
  name: string;
  description: string;
  schema: T;
  func: RunnableFunc<string, string, ToolRunnableConfig>;
}

/**
 * Tool choice specification
 * "auto" lets the model decide, "none" forbids tool calls, "required" forces
 * one, and the object form pins a specific function by name.
 */
type OpenAIToolChoice = 
  | "auto" 
  | "none" 
  | "required"
  | {
      type: "function";
      function: {
        name: string;
      };
    };

/**
 * Tool choice for Responses API
 */
type ResponsesToolChoice = 
  | "auto"
  | "none" 
  | "required"
  | {
      type: "function" | "code_interpreter" | "file_search";
      function?: {
        name: string;
      };
    };

Model Identifier Types

Model Names and IDs

Type definitions for OpenAI model identifiers.

/**
 * Type alias for OpenAI chat model identifiers.
 * Includes the well-known GPT models while still accepting custom names.
 *
 * The `(string & {})` branch keeps the union open to arbitrary model names
 * without collapsing it: a plain `| string` would absorb every literal and
 * destroy editor autocomplete for the known models. Assignability is
 * unchanged — any string is still accepted.
 */
type OpenAIChatModelId = 
  | "gpt-4o"
  | "gpt-4o-2024-08-06"
  | "gpt-4o-2024-05-13" 
  | "gpt-4o-mini"
  | "gpt-4o-mini-2024-07-18"
  | "gpt-4o-audio-preview"
  | "gpt-4-turbo"
  | "gpt-4-turbo-2024-04-09"
  | "gpt-4"
  | "gpt-4-0613"
  | "gpt-4-0314"
  | "gpt-3.5-turbo"
  | "gpt-3.5-turbo-0125"
  | "gpt-3.5-turbo-1106"
  | "o1-preview"
  | "o1-mini"
  | (string & {}); // Allow custom model names without losing autocomplete

/**
 * Embedding model identifiers
 */
type OpenAIEmbeddingModelId =
  | "text-embedding-ada-002"
  | "text-embedding-3-small" 
  | "text-embedding-3-large"
  | (string & {});

/**
 * Language model (completions) identifiers  
 */
type OpenAILanguageModelId =
  | "gpt-3.5-turbo-instruct"
  | "text-davinci-003" // Deprecated
  | "text-davinci-002" // Deprecated
  | (string & {});

/**
 * DALL-E model identifiers
 */
type OpenAIImageModelId = 
  | "dall-e-2"
  | "dall-e-3"
  | (string & {});

Embeddings Configuration

Embeddings Parameters

Configuration types for embedding models.

/**
 * Constructor parameters for OpenAI embedding models.
 */
interface OpenAIEmbeddingsParams extends EmbeddingsParams {
  /** Name of the embedding model. */
  model: string;
  /** How many input texts are sent per API request. */
  batchSize: number;
  /** Whether newlines are stripped from inputs before embedding. */
  stripNewLines: boolean;
  /** Requested vector length (supported by text-embedding-3-* models only). */
  dimensions?: number;
  /** Cap on simultaneous in-flight requests. */
  maxConcurrency?: number;
}

/**
 * Base parameters shared by all embeddings implementations.
 */
interface EmbeddingsParams extends AsyncCallerParams {
  /** Cap on simultaneous embedding calls. */
  maxConcurrency?: number;
  /** How many times a failed call is retried. */
  maxRetries?: number;
}

Usage Examples

Type-Safe Configuration

import { ChatOpenAI, OpenAI, OpenAIEmbeddings } from "@langchain/openai";
import type { 
  ChatOpenAICallOptions, 
  OpenAICallOptions,
  OpenAIChatInput, // fix: was missing from the import list but used below
  OpenAIChatModelId,
  ChatOpenAIResponseFormat 
} from "@langchain/openai";

// Type-safe model configuration.
// Partial<> because every constructor field in OpenAIChatInput is optional.
const chatConfig: Partial<OpenAIChatInput> = {
  model: "gpt-4o" as OpenAIChatModelId,
  temperature: 0.7,
  maxTokens: 1000,
  streaming: true,
  topP: 0.9
};

const chatModel = new ChatOpenAI(chatConfig);

// Type-safe per-call options (checked against ChatOpenAICallOptions).
const callOptions: ChatOpenAICallOptions = {
  seed: 42,
  stop: ["\n\n", "END"],
  user: "user-123",
  tools: [], // Will be type-checked
  response_format: { type: "json_object" } as ChatOpenAIResponseFormat
};

const response = await chatModel.invoke("Hello!", callOptions);

Response Format Configuration

// Text response (default)
const textFormat: ChatOpenAIResponseFormat = { type: "text" };

// JSON object mode
// NOTE(review): per OpenAI's API documentation, json_object mode requires the
// prompt itself to mention "JSON", otherwise the request is rejected — the
// example prompts below should be adjusted accordingly; verify against the
// current API behavior.
const jsonFormat: ChatOpenAIResponseFormat = { type: "json_object" };

// JSON schema mode with validation (structured outputs)
const schemaFormat: ChatOpenAIResponseFormat = {
  type: "json_schema",
  json_schema: {
    name: "PersonInfo",
    description: "Information about a person",
    schema: {
      type: "object",
      properties: {
        name: { type: "string" },
        age: { type: "number" },
        occupation: { type: "string" }
      },
      required: ["name", "age"],
      // strict mode requires additionalProperties: false
      additionalProperties: false
    },
    strict: true
  }
};

// NOTE(review): `model` is also the name of a const in the environment-variable
// example later in this document — they would collide if both snippets were
// placed in one module.
const model = new ChatOpenAI({
  model: "gpt-4o",
  temperature: 0
});

// Use different formats
const textResponse = await model.invoke("Hello", { response_format: textFormat });
const jsonResponse = await model.invoke("Return person info", { response_format: jsonFormat });
const schemaResponse = await model.invoke("Extract person", { response_format: schemaFormat });

Tool Type Definitions

import { z } from "zod";
import type { ChatOpenAIToolType, OpenAIToolChoice } from "@langchain/openai";

// Define tool argument schemas with zod
const weatherSchema = z.object({
  location: z.string().describe("City name"),
  units: z.enum(["celsius", "fahrenheit"]).optional()
});

const calculatorSchema = z.object({
  operation: z.enum(["add", "subtract", "multiply", "divide"]),
  a: z.number(),
  b: z.number()
});

// Create type-safe tools.
// Fix: the OpenAI wire format requires `function.parameters` to be a JSON
// Schema document, not a zod schema object. Use LangChain's structured-tool
// shape ({ name, description, schema }) instead — bindTools converts the zod
// schema to JSON Schema before sending it to the API.
const tools: ChatOpenAIToolType[] = [
  {
    name: "get_weather",
    description: "Get weather information",
    schema: weatherSchema
  },
  {
    name: "calculator",
    description: "Perform calculations",
    schema: calculatorSchema
  }
];

// Type-safe tool choice: force the model to call get_weather
const toolChoice: OpenAIToolChoice = {
  type: "function",
  function: { name: "get_weather" }
};

const modelWithTools = chatModel.bindTools(tools);
const result = await modelWithTools.invoke("Weather in NYC", { 
  tool_choice: toolChoice 
});

Advanced Configuration Types

// Audio configuration (voice + container format for audio output)
const audioConfig: OpenAIClient.Chat.ChatCompletionAudioParam = {
  voice: "alloy",
  format: "wav"
};

// Modalities configuration
const modalities: Array<OpenAIClient.Chat.ChatCompletionModality> = ["text", "audio"];

// Stream options
const streamOptions: OpenAIClient.Chat.ChatCompletionStreamOptions = {
  include_usage: true
};

// Reasoning configuration
// NOTE(review): reasoning options apply to reasoning-capable (o-series)
// models; the gpt-4o-audio-preview model used below presumably ignores or
// rejects them — confirm before shipping this combination.
const reasoningConfig: OpenAIClient.Reasoning = {
  effort: "medium" // low, medium, high
};

// Complete advanced configuration
// NOTE(review): confirm "priority" is an accepted service_tier value for the
// target account/model; tiers are account-dependent.
const advancedModel = new ChatOpenAI({
  model: "gpt-4o-audio-preview",
  temperature: 0.5,
  audio: audioConfig,
  modalities: modalities,
  reasoning: reasoningConfig,
  service_tier: "priority",
  verbosity: "medium" as OpenAIVerbosityParam,
  zdrEnabled: true
});

const advancedResponse = await advancedModel.invoke("Tell me a story", {
  stream_options: streamOptions,
  seed: 12345,
  parallel_tool_calls: false
});

Error Handling Types

import type { OpenAIError } from "openai";

/**
 * Invoke the chat model once, logging any failure with a human-readable
 * message for known error codes before re-throwing.
 */
async function handleOpenAICall() {
  try {
    return await chatModel.invoke("Hello!");
  } catch (error) {
    if (error instanceof Error) {
      // LangChain wrapped errors
      console.error("LangChain error:", error.message);

      // Map known error codes to friendly messages; anything else falls
      // through to the generic log line.
      if ('code' in error) {
        const knownCodes: Record<string, string> = {
          invalid_api_key: "Invalid API key",
          rate_limit_exceeded: "Rate limit exceeded",
          model_not_found: "Model not found"
        };
        const friendly = knownCodes[String(error.code)];
        if (friendly !== undefined) {
          console.error(friendly);
        } else {
          console.error("Unknown error code:", error.code);
        }
      }
    }
    throw error;
  }
}

Custom Type Extensions

// Extend configuration types for custom use cases
interface CustomChatConfig extends OpenAIChatInput {
  customRetryDelay?: number;
  customHeaders?: Record<string, string>;
  debugMode?: boolean;
}

interface CustomCallOptions extends ChatOpenAICallOptions {
  requestId?: string;
  priority?: "high" | "normal" | "low";
  context?: Record<string, any>;
}

// Subclass that logs request metadata when debugMode is enabled, then
// delegates to the parent implementation.
class CustomChatOpenAI extends ChatOpenAI {
  private customConfig: CustomChatConfig;

  constructor(config: CustomChatConfig) {
    super(config);
    this.customConfig = config;
  }

  // NOTE(review): the base class's invoke returns a more specific message
  // type (AIMessageChunk in current LangChain); declaring the wider
  // Promise<BaseMessage> here may fail strict override checks — confirm
  // against the installed @langchain/core version.
  async invoke(
    input: BaseLanguageModelInput,
    options?: CustomCallOptions
  ): Promise<BaseMessage> {
    if (this.customConfig.debugMode) {
      console.log("Request ID:", options?.requestId);
      console.log("Priority:", options?.priority);
    }

    return super.invoke(input, options);
  }
}

// Usage with custom types
const customModel = new CustomChatOpenAI({
  model: "gpt-4o",
  temperature: 0.7,
  debugMode: true,
  customRetryDelay: 1000
});

const customResponse = await customModel.invoke("Hello", {
  requestId: "req-123",
  priority: "high",
  context: { userId: "user-456" }
});

Environment Variable Types

Configuration from Environment

/**
 * Environment variable configuration
 * All these can be set as process.env variables
 */
interface EnvironmentConfig {
  // OpenAI configuration
  OPENAI_API_KEY?: string;
  OPENAI_ORGANIZATION?: string;

  // Azure OpenAI configuration
  AZURE_OPENAI_API_KEY?: string;
  AZURE_OPENAI_API_INSTANCE_NAME?: string;
  AZURE_OPENAI_API_DEPLOYMENT_NAME?: string;
  AZURE_OPENAI_API_VERSION?: string;
  AZURE_OPENAI_BASE_PATH?: string;
  AZURE_OPENAI_ENDPOINT?: string;
  AZURE_OPENAI_API_EMBEDDINGS_DEPLOYMENT_NAME?: string;
  AZURE_OPENAI_API_COMPLETIONS_DEPLOYMENT_NAME?: string;
}

/**
 * Type-safe environment variable loading.
 * Fix: now covers every variable declared in EnvironmentConfig — the
 * previous version silently dropped the base-path, endpoint, and the
 * embeddings/completions deployment-name variables.
 */
function loadConfigFromEnv(): Partial<OpenAIChatInput & AzureOpenAIInput> {
  return {
    openAIApiKey: process.env.OPENAI_API_KEY,
    organization: process.env.OPENAI_ORGANIZATION,
    azureOpenAIApiKey: process.env.AZURE_OPENAI_API_KEY,
    azureOpenAIApiInstanceName: process.env.AZURE_OPENAI_API_INSTANCE_NAME,
    azureOpenAIApiDeploymentName: process.env.AZURE_OPENAI_API_DEPLOYMENT_NAME,
    azureOpenAIApiVersion: process.env.AZURE_OPENAI_API_VERSION,
    azureOpenAIBasePath: process.env.AZURE_OPENAI_BASE_PATH,
    azureOpenAIEndpoint: process.env.AZURE_OPENAI_ENDPOINT,
    azureOpenAIApiEmbeddingsDeploymentName:
      process.env.AZURE_OPENAI_API_EMBEDDINGS_DEPLOYMENT_NAME,
    azureOpenAIApiCompletionsDeploymentName:
      process.env.AZURE_OPENAI_API_COMPLETIONS_DEPLOYMENT_NAME
  };
}

const envConfig = loadConfigFromEnv();
const model = new ChatOpenAI(envConfig);

Utility Types

Core Utility Types

Additional utility types exported by the package for advanced usage.

/**
 * Tiktoken model type for token counting
 *
 * NOTE(review): a bare `string` alias provides no literal checking; the real
 * `TiktokenModel` from js-tiktoken is a union of known model names — confirm
 * which the package re-exports.
 */
type TiktokenModel = string;

/**
 * OpenAI verbosity parameter for reasoning models
 *
 * NOTE(review): duplicate of the `OpenAIVerbosityParam` alias declared in the
 * "Response Format Types" section above — a module may only declare it once.
 */
type OpenAIVerbosityParam = "low" | "medium" | "high" | null;

/**
 * Custom tool fields for Responses API
 *
 * NOTE(review): this section uses the `OpenAI.` namespace while earlier
 * sections use `OpenAIClient.` — presumably aliases of the same SDK namespace;
 * verify and use one consistently.
 */
type CustomToolFields = Omit<OpenAI.Responses.CustomTool, "type">;

/**
 * OpenAI endpoint configuration for Azure
 * Used when assembling the request URL from instance/deployment parts.
 */
interface OpenAIEndpointConfig {
  azureOpenAIApiDeploymentName?: string;
  azureOpenAIApiInstanceName?: string;
  azureOpenAIApiKey?: string;
  azureOpenAIBasePath?: string;
  azureOpenAIApiVersion?: string;
  baseURL?: string;
}

/**
 * Response format configuration union type
 * Selects the mechanism used to obtain structured output.
 */
type ResponseFormatConfiguration = 
  | { method: "functionCalling" }
  | { method: "jsonMode" };

/**
 * Chat reasoning summary type for o1 models
 *
 * NOTE(review): conflicts with the `ChatOpenAIReasoningSummary` interface
 * declared earlier in this document; also, `ChatCompletionReasoningSummary`
 * may not exist under `OpenAI.Chat` in the SDK — verify both.
 */
type ChatOpenAIReasoningSummary = Omit<
  OpenAI.Chat.ChatCompletionReasoningSummary,
  "output"
>;

Headers and Tools Utility Types

/**
 * Headers-like type for HTTP requests
 * Accepts a Headers object, a plain record, or entry tuples.
 */
type HeadersLike = 
  | Headers
  | Record<string, string | string[]>
  | [string, string][];

/**
 * Custom tool call extension
 * A ToolCall narrowed with the "custom_tool_call" discriminant.
 */
type CustomToolCall = ToolCall & {
  name: string;
  args: Record<string, any>;
  type: "custom_tool_call";
};

/**
 * Responses tool type from OpenAI SDK
 *
 * NOTE(review): conflicts with the hand-written `ResponsesTool` interface
 * declared earlier in this document. Also, despite the "Responses" name this
 * aliases a Chat Completions type; Responses API tool types live under
 * `OpenAI.Responses` in the SDK — verify. `NonNullable<>` on an already
 * non-nullable type is a no-op.
 */
type ResponsesTool = NonNullable<
  OpenAI.Chat.ChatCompletionTool
>;

/**
 * Responses tool choice type
 *
 * NOTE(review): conflicts with the hand-written `ResponsesToolChoice` union
 * declared earlier in this document — keep one definition.
 */
type ResponsesToolChoice = NonNullable<
  OpenAI.Chat.ChatCompletionToolChoiceOption
>;

/**
 * LangChain error codes for enhanced error handling
 */
type LangChainErrorCodes =
  | "invalid_api_key"
  | "rate_limit_exceeded"
  | "model_not_found"
  | "context_length_exceeded"
  | "insufficient_quota"
  | "server_error";

This comprehensive type system ensures type safety across all @langchain/openai functionality while providing clear interfaces for configuration and usage.

docs

azure-integration.md

chat-models.md

embeddings.md

index.md

language-models.md

tools.md

types-and-configuration.md

tile.json