# Types and Configuration

Comprehensive type system covering all configuration options, call parameters, and response formats for the @langchain/openai package.

## Core Configuration Types

### OpenAI Base Configuration

Base configuration interface shared across all OpenAI integrations.
```typescript { .api }
/**
 * Base configuration interface for OpenAI models.
 * Provides the authentication and HTTP-client settings shared by every
 * OpenAI integration (chat, completions, embeddings, images).
 */
interface OpenAIBaseInput {
  /** OpenAI API key used for authentication. */
  openAIApiKey?: string;

  /** OpenAI organization ID to attribute requests to. */
  organization?: string;

  /** Custom base URL for API requests (e.g. a proxy or gateway). */
  baseURL?: string;

  /** Request timeout in milliseconds. */
  timeout?: number;

  /** Maximum number of retry attempts for failed requests. */
  maxRetries?: number;

  /** Allow usage in browser environments (exposes the API key to clients). */
  dangerouslyAllowBrowser?: boolean;

  /** Custom HTTP headers sent with every request. */
  defaultHeaders?: Record<string, string>;

  /** Custom query parameters appended to every request. */
  defaultQuery?: Record<string, string>;
}
/**
 * Extended configuration for OpenAI chat models.
 * Inherits authentication/client settings from OpenAIBaseInput and adds
 * chat-completion sampling and feature parameters.
 */
interface OpenAIChatInput extends OpenAIBaseInput {
  /** Model name (e.g., "gpt-4o", "gpt-3.5-turbo"). */
  model?: string;

  /** Sampling temperature (0-2); higher values produce more random output. */
  temperature?: number;

  /** Maximum number of tokens to generate. */
  maxTokens?: number;

  /** Nucleus sampling parameter (0-1); alternative to temperature. */
  topP?: number;

  /** Frequency penalty (-2 to 2); discourages repeated tokens. */
  frequencyPenalty?: number;

  /** Presence penalty (-2 to 2); encourages new topics. */
  presencePenalty?: number;

  /** Number of completions to generate per prompt. */
  n?: number;

  /** Enable streaming responses. */
  streaming?: boolean;

  /** Include usage metadata in streamed responses. */
  streamUsage?: boolean;

  /** Return log probabilities of output tokens. */
  logprobs?: boolean;

  /** Number of top log probabilities to return per token. */
  topLogprobs?: number;

  /** Use the Responses API by default instead of Chat Completions. */
  useResponsesApi?: boolean;

  /** Enable strict tool calling (schema-validated arguments). */
  supportsStrictToolCalling?: boolean;

  /** Audio output configuration. */
  audio?: OpenAIClient.Chat.ChatCompletionAudioParam;

  /** Output modalities (e.g., text, audio). */
  modalities?: Array<OpenAIClient.Chat.ChatCompletionModality>;

  /** Reasoning configuration for reasoning (o-series) models. */
  reasoning?: OpenAIClient.Reasoning;

  /** Zero data retention mode. */
  zdrEnabled?: boolean;

  /** Service tier for processing priority. */
  service_tier?: string;

  /** Key used to group requests for prompt caching. */
  promptCacheKey?: string;

  /** Response verbosity level. */
  verbosity?: OpenAIVerbosityParam;
}
```
### Azure Configuration

Configuration interface for Azure OpenAI services.
```typescript { .api }
/**
 * Configuration interface for Azure OpenAI services.
 * Extends the base OpenAI configuration with Azure-specific
 * authentication and deployment settings.
 */
interface AzureOpenAIInput extends OpenAIBaseInput {
  /** Azure API version (required for Azure). */
  azureOpenAIApiVersion?: string;

  /** Azure OpenAI API key. */
  azureOpenAIApiKey?: string;

  /** Azure AD token provider, used instead of an API key. */
  azureADTokenProvider?: () => Promise<string>;

  /** Azure OpenAI instance name. */
  azureOpenAIApiInstanceName?: string;

  /** Azure deployment name. */
  azureOpenAIApiDeploymentName?: string;

  /** Azure base path for custom endpoints. */
  azureOpenAIBasePath?: string;

  /** Azure endpoint URL. */
  azureOpenAIEndpoint?: string;

  /** Deployment name used specifically for embeddings. */
  azureOpenAIApiEmbeddingsDeploymentName?: string;

  /** Deployment name used specifically for completions. */
  azureOpenAIApiCompletionsDeploymentName?: string;
}
```
## Call Options

### Chat Model Call Options

Runtime options that can be passed to chat model generation methods.
```typescript { .api }
/**
 * Runtime call options for ChatOpenAI generation methods.
 * Combines the base options with chat-specific parameters such as tools,
 * response formats, and audio/reasoning configuration.
 */
interface ChatOpenAICallOptions extends BaseChatOpenAICallOptions {
  /** Tools available to the model for this call. */
  tools?: ChatOpenAIToolType[];

  /** Tool selection strategy ("auto", "none", "required", or a specific tool). */
  tool_choice?: OpenAIToolChoice | ResponsesToolChoice;

  /** Response format specification (text, JSON mode, or JSON schema). */
  response_format?: ChatOpenAIResponseFormat;

  /** Seed for best-effort deterministic sampling. */
  seed?: number;

  /** Streaming configuration options (e.g., include_usage). */
  stream_options?: OpenAIClient.Chat.ChatCompletionStreamOptions;

  /** Allow the model to call several tools in parallel. */
  parallel_tool_calls?: boolean;

  /** Enable strict mode for tool schemas and structured output. */
  strict?: boolean;

  /** Output modalities (text, audio). */
  modalities?: Array<OpenAIClient.Chat.ChatCompletionModality>;

  /** Audio output configuration. */
  audio?: OpenAIClient.Chat.ChatCompletionAudioParam;

  /** Predicted output content for latency optimization. */
  prediction?: OpenAIClient.ChatCompletionPredictionContent;

  /** Reasoning options for reasoning (o-series) models. */
  reasoning?: OpenAIClient.Reasoning;

  /** Service tier for processing priority. */
  service_tier?: string;

  /** Key used to group requests for prompt caching. */
  promptCacheKey?: string;

  /** Response verbosity level. */
  verbosity?: OpenAIVerbosityParam;
}
/**
 * Base call options shared across OpenAI chat models.
 * Layers per-request customization on top of the core and
 * function-calling option sets.
 */
interface BaseChatOpenAICallOptions extends OpenAICallOptions, BaseFunctionCallOptions {
  /** End-user identifier for abuse monitoring. */
  user?: string;

  /** Custom headers for this request only. */
  headers?: Record<string, string>;

  /** Additional low-level request options passed to the OpenAI client. */
  options?: RequestOptions;
}
/**
 * Core OpenAI call options common to chat and completion requests.
 */
interface OpenAICallOptions {
  /** Stop sequence(s) that end generation when produced. */
  stop?: string | string[];

  /** End-user identifier for abuse monitoring. */
  user?: string;

  /** Seed for best-effort deterministic sampling. */
  seed?: number;

  /** Per-token logit bias, keyed by token ID. */
  logit_bias?: Record<string, number>;

  /** Per-request timeout override in milliseconds. */
  timeout?: number;
}
```
### Language Model Call Options

Call options specific to text completion models.
```typescript { .api }
/**
 * Call options for OpenAI language models (legacy Completions API).
 *
 * NOTE(review): the original snippet extended `BaseOpenAICallOptions`,
 * which is not defined anywhere in this document; the shared fields are
 * declared inline here instead so the snippet is self-contained.
 */
interface OpenAICallOptions {
  /** Stop sequence(s) that end generation when produced. */
  stop?: string | string[];

  /** End-user identifier for abuse monitoring. */
  user?: string;

  /** Seed for best-effort deterministic sampling. */
  seed?: number;

  /** Per-token logit bias, keyed by token ID. */
  logit_bias?: Record<string, number>;

  /** Text appended after the generated completion. */
  suffix?: string;

  /** Echo the prompt back in the response. */
  echo?: boolean;

  /** Generate this many completions server-side and return the best one. */
  best_of?: number;
}
```
## Response Format Types

### Chat Response Formats

Types for specifying output formats in chat completions.
```typescript { .api }
275
/**
276
* Response format specification for chat completions
277
* Supports JSON mode, JSON schema, and text formats
278
*/
279
type ChatOpenAIResponseFormat =
280
| { type: "text" }
281
| { type: "json_object" }
282
| {
283
type: "json_schema";
284
json_schema: {
285
name: string;
286
description?: string;
287
schema: Record<string, any>;
288
strict?: boolean;
289
};
290
};
291
292
/**
293
* Verbosity levels for model responses
294
*/
295
type OpenAIVerbosityParam = "low" | "medium" | "high" | null;
296
297
/**
298
* Reasoning summary type for o1 models
299
*/
300
interface ChatOpenAIReasoningSummary {
301
/** Reasoning content */
302
content: string;
303
304
/** Reasoning steps */
305
steps?: Array<{
306
content: string;
307
type?: string;
308
}>;
309
}
310
```
## Tool Types

### Tool Definitions

Types for defining and working with tools in OpenAI models.
```typescript { .api }
319
/**
320
* Union type for all supported tool formats
321
*/
322
type ChatOpenAIToolType =
323
| BindToolsInput
324
| OpenAIClient.Chat.ChatCompletionTool
325
| ResponsesTool
326
| CustomTool;
327
328
/**
329
* Tool type for Responses API
330
*/
331
interface ResponsesTool {
332
type: "function" | "file_search" | "code_interpreter";
333
function?: {
334
name: string;
335
description?: string;
336
parameters?: Record<string, any>;
337
strict?: boolean;
338
};
339
}
340
341
/**
342
* Custom tool definition
343
*/
344
interface CustomTool<T extends Record<string, any> = Record<string, any>> {
345
type: "custom_tool";
346
name: string;
347
description: string;
348
schema: T;
349
func: RunnableFunc<string, string, ToolRunnableConfig>;
350
}
351
352
/**
353
* Tool choice specification
354
*/
355
type OpenAIToolChoice =
356
| "auto"
357
| "none"
358
| "required"
359
| {
360
type: "function";
361
function: {
362
name: string;
363
};
364
};
365
366
/**
367
* Tool choice for Responses API
368
*/
369
type ResponsesToolChoice =
370
| "auto"
371
| "none"
372
| "required"
373
| {
374
type: "function" | "code_interpreter" | "file_search";
375
function?: {
376
name: string;
377
};
378
};
379
```
## Model Identifier Types

### Model Names and IDs

Type definitions for OpenAI model identifiers.
```typescript { .api }
/**
 * Type alias for OpenAI chat model identifiers.
 * Known models are listed as literals so editors can autocomplete them;
 * the trailing `(string & {})` keeps the union open to custom/fine-tuned
 * model names without collapsing the literal members into plain `string`.
 */
type OpenAIChatModelId =
  | "gpt-4o"
  | "gpt-4o-2024-08-06"
  | "gpt-4o-2024-05-13"
  | "gpt-4o-mini"
  | "gpt-4o-mini-2024-07-18"
  | "gpt-4o-audio-preview"
  | "gpt-4-turbo"
  | "gpt-4-turbo-2024-04-09"
  | "gpt-4"
  | "gpt-4-0613"
  | "gpt-4-0314"
  | "gpt-3.5-turbo"
  | "gpt-3.5-turbo-0125"
  | "gpt-3.5-turbo-1106"
  | "o1-preview"
  | "o1-mini"
  | (string & {}); // allow custom model names, keep literal autocomplete

/**
 * Embedding model identifiers.
 */
type OpenAIEmbeddingModelId =
  | "text-embedding-ada-002"
  | "text-embedding-3-small"
  | "text-embedding-3-large"
  | (string & {});

/**
 * Language model (legacy Completions API) identifiers.
 */
type OpenAILanguageModelId =
  | "gpt-3.5-turbo-instruct"
  | "text-davinci-003" // deprecated
  | "text-davinci-002" // deprecated
  | (string & {});

/**
 * DALL-E image model identifiers.
 */
type OpenAIImageModelId =
  | "dall-e-2"
  | "dall-e-3"
  | (string & {});
```
## Embeddings Configuration

### Embeddings Parameters

Configuration types for embedding models.
```typescript { .api }
/**
 * Configuration parameters for OpenAI embeddings.
 */
interface OpenAIEmbeddingsParams extends EmbeddingsParams {
  /** Embedding model name. */
  model: string;

  /** Number of texts to batch into a single API call. */
  batchSize: number;

  /** Strip newlines from input text before embedding. */
  stripNewLines: boolean;

  /** Output dimensionality (text-embedding-3-* models only). */
  dimensions?: number;

  /** Maximum number of concurrent requests (also inherited from EmbeddingsParams). */
  maxConcurrency?: number;
}

/**
 * Base embeddings configuration shared by all embeddings integrations.
 */
interface EmbeddingsParams extends AsyncCallerParams {
  /** Maximum number of concurrent API calls. */
  maxConcurrency?: number;

  /** Maximum number of retries per request. */
  maxRetries?: number;
}
```
## Usage Examples

### Type-Safe Configuration
```typescript
482
import { ChatOpenAI, OpenAI, OpenAIEmbeddings } from "@langchain/openai";
483
import type {
484
ChatOpenAICallOptions,
485
OpenAICallOptions,
486
OpenAIChatModelId,
487
ChatOpenAIResponseFormat
488
} from "@langchain/openai";
489
490
// Type-safe model configuration
491
const chatConfig: Partial<OpenAIChatInput> = {
492
model: "gpt-4o" as OpenAIChatModelId,
493
temperature: 0.7,
494
maxTokens: 1000,
495
streaming: true,
496
topP: 0.9
497
};
498
499
const chatModel = new ChatOpenAI(chatConfig);
500
501
// Type-safe call options
502
const callOptions: ChatOpenAICallOptions = {
503
seed: 42,
504
stop: ["\n\n", "END"],
505
user: "user-123",
506
tools: [], // Will be type-checked
507
response_format: { type: "json_object" } as ChatOpenAIResponseFormat
508
};
509
510
const response = await chatModel.invoke("Hello!", callOptions);
511
```
### Response Format Configuration
```typescript
516
// Text response (default)
517
const textFormat: ChatOpenAIResponseFormat = { type: "text" };
518
519
// JSON object mode
520
const jsonFormat: ChatOpenAIResponseFormat = { type: "json_object" };
521
522
// JSON schema mode with validation
523
const schemaFormat: ChatOpenAIResponseFormat = {
524
type: "json_schema",
525
json_schema: {
526
name: "PersonInfo",
527
description: "Information about a person",
528
schema: {
529
type: "object",
530
properties: {
531
name: { type: "string" },
532
age: { type: "number" },
533
occupation: { type: "string" }
534
},
535
required: ["name", "age"],
536
additionalProperties: false
537
},
538
strict: true
539
}
540
};
541
542
const model = new ChatOpenAI({
543
model: "gpt-4o",
544
temperature: 0
545
});
546
547
// Use different formats
548
const textResponse = await model.invoke("Hello", { response_format: textFormat });
549
const jsonResponse = await model.invoke("Return person info", { response_format: jsonFormat });
550
const schemaResponse = await model.invoke("Extract person", { response_format: schemaFormat });
551
```
### Tool Type Definitions
```typescript
556
import { z } from "zod";
557
import type { ChatOpenAIToolType, OpenAIToolChoice } from "@langchain/openai";
558
559
// Define tool schemas with proper types
560
const weatherSchema = z.object({
561
location: z.string().describe("City name"),
562
units: z.enum(["celsius", "fahrenheit"]).optional()
563
});
564
565
const calculatorSchema = z.object({
566
operation: z.enum(["add", "subtract", "multiply", "divide"]),
567
a: z.number(),
568
b: z.number()
569
});
570
571
// Create type-safe tools
572
const tools: ChatOpenAIToolType[] = [
573
{
574
type: "function",
575
function: {
576
name: "get_weather",
577
description: "Get weather information",
578
parameters: weatherSchema
579
}
580
},
581
{
582
type: "function",
583
function: {
584
name: "calculator",
585
description: "Perform calculations",
586
parameters: calculatorSchema
587
}
588
}
589
];
590
591
// Type-safe tool choice
592
const toolChoice: OpenAIToolChoice = {
593
type: "function",
594
function: { name: "get_weather" }
595
};
596
597
const modelWithTools = chatModel.bindTools(tools);
598
const result = await modelWithTools.invoke("Weather in NYC", {
599
tool_choice: toolChoice
600
});
601
```
### Advanced Configuration Types
```typescript
606
// Audio configuration
607
const audioConfig: OpenAIClient.Chat.ChatCompletionAudioParam = {
608
voice: "alloy",
609
format: "wav"
610
};
611
612
// Modalities configuration
613
const modalities: Array<OpenAIClient.Chat.ChatCompletionModality> = ["text", "audio"];
614
615
// Stream options
616
const streamOptions: OpenAIClient.Chat.ChatCompletionStreamOptions = {
617
include_usage: true
618
};
619
620
// Reasoning configuration (for o1 models)
621
const reasoningConfig: OpenAIClient.Reasoning = {
622
effort: "medium" // low, medium, high
623
};
624
625
// Complete advanced configuration
626
const advancedModel = new ChatOpenAI({
627
model: "gpt-4o-audio-preview",
628
temperature: 0.5,
629
audio: audioConfig,
630
modalities: modalities,
631
reasoning: reasoningConfig,
632
service_tier: "priority",
633
verbosity: "medium" as OpenAIVerbosityParam,
634
zdrEnabled: true
635
});
636
637
const advancedResponse = await advancedModel.invoke("Tell me a story", {
638
stream_options: streamOptions,
639
seed: 12345,
640
parallel_tool_calls: false
641
});
642
```
### Error Handling Types
```typescript
647
import type { OpenAIError } from "openai";
648
649
// Type-safe error handling
650
async function handleOpenAICall() {
651
try {
652
const response = await chatModel.invoke("Hello!");
653
return response;
654
} catch (error) {
655
if (error instanceof Error) {
656
// LangChain wrapped errors
657
console.error("LangChain error:", error.message);
658
659
// Check for specific error codes
660
if ('code' in error) {
661
switch (error.code) {
662
case 'invalid_api_key':
663
console.error("Invalid API key");
664
break;
665
case 'rate_limit_exceeded':
666
console.error("Rate limit exceeded");
667
break;
668
case 'model_not_found':
669
console.error("Model not found");
670
break;
671
default:
672
console.error("Unknown error code:", error.code);
673
}
674
}
675
}
676
throw error;
677
}
678
}
679
```
### Custom Type Extensions
```typescript
684
// Extend configuration types for custom use cases
685
interface CustomChatConfig extends OpenAIChatInput {
686
customRetryDelay?: number;
687
customHeaders?: Record<string, string>;
688
debugMode?: boolean;
689
}
690
691
interface CustomCallOptions extends ChatOpenAICallOptions {
692
requestId?: string;
693
priority?: "high" | "normal" | "low";
694
context?: Record<string, any>;
695
}
696
697
class CustomChatOpenAI extends ChatOpenAI {
698
private customConfig: CustomChatConfig;
699
700
constructor(config: CustomChatConfig) {
701
super(config);
702
this.customConfig = config;
703
}
704
705
async invoke(
706
input: BaseLanguageModelInput,
707
options?: CustomCallOptions
708
): Promise<BaseMessage> {
709
if (this.customConfig.debugMode) {
710
console.log("Request ID:", options?.requestId);
711
console.log("Priority:", options?.priority);
712
}
713
714
return super.invoke(input, options);
715
}
716
}
717
718
// Usage with custom types
719
const customModel = new CustomChatOpenAI({
720
model: "gpt-4o",
721
temperature: 0.7,
722
debugMode: true,
723
customRetryDelay: 1000
724
});
725
726
const customResponse = await customModel.invoke("Hello", {
727
requestId: "req-123",
728
priority: "high",
729
context: { userId: "user-456" }
730
});
731
```
## Environment Variable Types

### Configuration from Environment
```typescript
738
/**
739
* Environment variable configuration
740
* All these can be set as process.env variables
741
*/
742
interface EnvironmentConfig {
743
// OpenAI configuration
744
OPENAI_API_KEY?: string;
745
OPENAI_ORGANIZATION?: string;
746
747
// Azure OpenAI configuration
748
AZURE_OPENAI_API_KEY?: string;
749
AZURE_OPENAI_API_INSTANCE_NAME?: string;
750
AZURE_OPENAI_API_DEPLOYMENT_NAME?: string;
751
AZURE_OPENAI_API_VERSION?: string;
752
AZURE_OPENAI_BASE_PATH?: string;
753
AZURE_OPENAI_ENDPOINT?: string;
754
AZURE_OPENAI_API_EMBEDDINGS_DEPLOYMENT_NAME?: string;
755
AZURE_OPENAI_API_COMPLETIONS_DEPLOYMENT_NAME?: string;
756
}
757
758
// Type-safe environment variable loading
759
function loadConfigFromEnv(): Partial<OpenAIChatInput & AzureOpenAIInput> {
760
return {
761
openAIApiKey: process.env.OPENAI_API_KEY,
762
organization: process.env.OPENAI_ORGANIZATION,
763
azureOpenAIApiKey: process.env.AZURE_OPENAI_API_KEY,
764
azureOpenAIApiInstanceName: process.env.AZURE_OPENAI_API_INSTANCE_NAME,
765
azureOpenAIApiDeploymentName: process.env.AZURE_OPENAI_API_DEPLOYMENT_NAME,
766
azureOpenAIApiVersion: process.env.AZURE_OPENAI_API_VERSION
767
};
768
}
769
770
const envConfig = loadConfigFromEnv();
771
const model = new ChatOpenAI(envConfig);
772
```
## Utility Types

### Core Utility Types

Additional utility types exported by the package for advanced usage.
```typescript { .api }
781
/**
782
* Tiktoken model type for token counting
783
*/
784
type TiktokenModel = string;
785
786
/**
787
* OpenAI verbosity parameter for reasoning models
788
*/
789
type OpenAIVerbosityParam = "low" | "medium" | "high" | null;
790
791
/**
792
* Custom tool fields for Responses API
793
*/
794
type CustomToolFields = Omit<OpenAI.Responses.CustomTool, "type">;
795
796
/**
797
* OpenAI endpoint configuration for Azure
798
*/
799
interface OpenAIEndpointConfig {
800
azureOpenAIApiDeploymentName?: string;
801
azureOpenAIApiInstanceName?: string;
802
azureOpenAIApiKey?: string;
803
azureOpenAIBasePath?: string;
804
azureOpenAIApiVersion?: string;
805
baseURL?: string;
806
}
807
808
/**
809
* Response format configuration union type
810
*/
811
type ResponseFormatConfiguration =
812
| { method: "functionCalling" }
813
| { method: "jsonMode" };
814
815
/**
816
* Chat reasoning summary type for o1 models
817
*/
818
type ChatOpenAIReasoningSummary = Omit<
819
OpenAI.Chat.ChatCompletionReasoningSummary,
820
"output"
821
>;
822
```
### Headers and Tools Utility Types
```typescript { .api }
827
/**
828
* Headers-like type for HTTP requests
829
*/
830
type HeadersLike =
831
| Headers
832
| Record<string, string | string[]>
833
| [string, string][];
834
835
/**
836
* Custom tool call extension
837
*/
838
type CustomToolCall = ToolCall & {
839
name: string;
840
args: Record<string, any>;
841
type: "custom_tool_call";
842
};
843
844
/**
845
* Responses tool type from OpenAI SDK
846
*/
847
type ResponsesTool = NonNullable<
848
OpenAI.Chat.ChatCompletionTool
849
>;
850
851
/**
852
* Responses tool choice type
853
*/
854
type ResponsesToolChoice = NonNullable<
855
OpenAI.Chat.ChatCompletionToolChoiceOption
856
>;
857
858
/**
859
* LangChain error codes for enhanced error handling
860
*/
861
type LangChainErrorCodes =
862
| "invalid_api_key"
863
| "rate_limit_exceeded"
864
| "model_not_found"
865
| "context_length_exceeded"
866
| "insufficient_quota"
867
| "server_error";
868
```
This comprehensive type system ensures type safety across all @langchain/openai functionality while providing clear interfaces for configuration and usage.