# Language Models

Abstract base classes and interfaces for language models and chat models with unified APIs. These abstractions enable any provider to implement LLM functionality that integrates seamlessly with the broader LangChain ecosystem.

## Capabilities

### Base Language Model

Abstract base class for all language model implementations.

```typescript { .api }
/**
 * Abstract base class for all language models
 * @template RunOutput - Type of model output (typically string or BaseMessage)
 * @template CallOptions - Options for model invocation
 */
abstract class BaseLanguageModel<RunOutput = any, CallOptions extends BaseLanguageModelCallOptions = BaseLanguageModelCallOptions> extends Runnable<BaseLanguageModelInput, RunOutput, CallOptions> {
  /** Async caller for managing API calls */
  caller: AsyncCaller;

  /** Generate text from prompts */
  abstract generate(prompts: string[], options?: string[] | CallOptions, callbacks?: Callbacks): Promise<LLMResult>;

  /** Generate from PromptValue objects */
  generatePrompt(promptValues: BasePromptValue[], options?: string[] | CallOptions, callbacks?: Callbacks): Promise<LLMResult>;

  /** Get the number of tokens in a piece of text */
  getNumTokens(text: string): Promise<number>;

  /** Predict a single string output from a single text input */
  predict(text: string, options?: string[] | CallOptions, callbacks?: Callbacks): Promise<string>;

  /** Predict an output message from input messages */
  predictMessages(messages: BaseMessage[], options?: string[] | CallOptions, callbacks?: Callbacks): Promise<BaseMessage>;

  /** Get type identifier for serialization */
  _modelType(): string;

  /** Get model parameters for LangSmith tracing */
  getLsParams(options?: this["ParsedCallOptions"]): LangSmithParams;
}
```
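
**Usage Example:**

These helpers are available on every concrete model. A minimal sketch, assuming some provider implementation is already in scope (the `model` binding here is hypothetical):

```typescript
// Sketch only: stands in for any concrete BaseLanguageModel implementation
declare const model: BaseLanguageModel<string>;

// Estimate prompt size before sending, e.g. to respect a context window
const tokenCount = await model.getNumTokens("How many tokens is this sentence?");

// Single-input, single-output convenience call with a stop sequence
const answer = await model.predict("Q: What is 2+2?\nA:", { stop: ["\n"] });
```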

### Base Chat Model

Abstract base class for chat-based language models.

```typescript { .api }
/**
 * Abstract base class for chat models
 * @template CallOptions - Options for chat model calls
 * @template OutputMessageType - Type of output messages
 */
abstract class BaseChatModel<CallOptions extends BaseChatModelCallOptions = BaseChatModelCallOptions, OutputMessageType extends BaseMessage = BaseMessage> extends BaseLanguageModel<OutputMessageType, CallOptions> {
  /** Bind tools to the chat model */
  bindTools(tools: BindToolsInput, kwargs?: Record<string, unknown>): BaseChatModel<CallOptions, OutputMessageType>;

  /** Enable structured output with schema validation */
  withStructuredOutput<T>(outputSchema: z.ZodSchema<T> | Record<string, unknown>, config?: StructuredOutputConfig): Runnable<BaseMessage[], T>;

  /** Core generation method - must be implemented by subclasses */
  abstract _generate(messages: BaseMessage[], options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun): Promise<ChatResult>;

  /** Stream response chunks */
  _streamResponseChunks(messages: BaseMessage[], options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun): AsyncGenerator<ChatGenerationChunk>;

  /** Get model identifier */
  _modelType(): "base_chat_model";

  /** Get LangSmith parameters */
  getLsParams(options?: this["ParsedCallOptions"]): LangSmithParams;
}
```

**Usage Examples:**

```typescript
import { BaseChatModel } from "@langchain/core/language_models/chat_models";
import { AIMessage, HumanMessage, SystemMessage, type BaseMessage } from "@langchain/core/messages";

// Example chat model implementation
class MyChatModel extends BaseChatModel {
  async _generate(messages: BaseMessage[], options: this["ParsedCallOptions"]): Promise<ChatResult> {
    // Implementation-specific logic lives in the provider call
    const response = await this.callAPI(messages, options);
    return {
      generations: [{
        message: new AIMessage(response.content),
        generationInfo: response.metadata
      }]
    };
  }

  // Stand-in for a real provider client call
  private async callAPI(messages: BaseMessage[], options: this["ParsedCallOptions"]) {
    return { content: "4", metadata: {} };
  }
}

// Using a chat model
const model = new MyChatModel();
const messages = [
  new SystemMessage("You are a helpful assistant"),
  new HumanMessage("What is 2+2?")
];

const result = await model.invoke(messages);
console.log(result.content); // AI response

// Bind tools to the model
const modelWithTools = model.bindTools([
  {
    name: "calculator",
    description: "Perform calculations",
    parameters: {
      type: "object",
      properties: {
        operation: { type: "string" },
        a: { type: "number" },
        b: { type: "number" }
      }
    }
  }
]);
```

### Simple Chat Model

Simplified chat model base class for easier implementation.

```typescript { .api }
/**
 * Simplified chat model base class
 * @template CallOptions - Call options type
 */
abstract class SimpleChatModel<CallOptions extends BaseChatModelCallOptions = BaseChatModelCallOptions> extends BaseChatModel<CallOptions> {
  /** Simplified call method returning a string */
  abstract _call(messages: BaseMessage[], options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun): Promise<string>;

  /** Convert the string response to a chat result */
  async _generate(messages: BaseMessage[], options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun): Promise<ChatResult>;
}
```

**Usage Examples:**

```typescript
// Simplified implementation
class MySimpleChatModel extends SimpleChatModel {
  async _call(messages: BaseMessage[]): Promise<string> {
    // Just return a string - the framework wraps it in a ChatResult
    return "This is a simple response";
  }
}
```

### Base LLM

Abstract base class for completion-style language models.

```typescript { .api }
/**
 * Abstract base class for completion-style LLMs
 * @template CallOptions - Call options type
 */
abstract class BaseLLM<CallOptions extends BaseLanguageModelCallOptions = BaseLanguageModelCallOptions> extends BaseLanguageModel<string, CallOptions> {
  /** Generate completions from text prompts */
  abstract _generate(prompts: string[], options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun): Promise<LLMResult>;

  /** Stream completion chunks */
  _streamResponseChunks(prompts: string[], options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun): AsyncGenerator<GenerationChunk>;

  /** Get model type */
  _modelType(): "base_llm";
}
```

### LLM

Simplified LLM base class.

```typescript { .api }
/**
 * Simplified LLM base class
 * @template CallOptions - Call options type
 */
abstract class LLM<CallOptions extends BaseLanguageModelCallOptions = BaseLanguageModelCallOptions> extends BaseLLM<CallOptions> {
  /** Simplified call method */
  abstract _call(prompt: string, options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun): Promise<string>;

  /** Convert the call result to a generation result */
  async _generate(prompts: string[], options: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun): Promise<LLMResult>;
}
```
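
**Usage Examples:**

As with `SimpleChatModel`, a subclass only implements `_call`; the inherited `_generate` wraps the returned string in an `LLMResult`. A minimal sketch (the echo body is a stand-in for a real provider call):

```typescript
// Minimal completion-style model
class MyLLM extends LLM {
  async _call(prompt: string): Promise<string> {
    // Placeholder for a provider API call
    return `Echo: ${prompt}`;
  }
}

const llm = new MyLLM();
const completion = await llm.invoke("Write a haiku about the sea");
```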

## Tool Binding

### Tool Binding Input

```typescript { .api }
/**
 * Input for binding tools to models
 */
type BindToolsInput = (StructuredToolInterface | Record<string, unknown> | RunnableToolLike)[];

interface ToolDefinition {
  name: string;
  description: string;
  parameters: Record<string, unknown>;
}

interface RunnableToolLike {
  name: string;
  description?: string;
  parameters?: Record<string, unknown>;
  invoke(input: unknown): Promise<unknown>;
}
```
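
For illustration, a hand-rolled tool matching the `RunnableToolLike` shape might look like the sketch below; the weather tool itself is hypothetical:

```typescript
const weatherTool: RunnableToolLike = {
  name: "get_weather",
  description: "Look up the current weather for a city",
  parameters: {
    type: "object",
    properties: { city: { type: "string" } },
    required: ["city"]
  },
  async invoke(input: unknown): Promise<unknown> {
    const { city } = input as { city: string };
    return { city, forecast: "sunny" }; // placeholder result
  }
};

// Any mix of BindToolsInput entries can then be bound to a chat model
const toolModel = model.bindTools([weatherTool]);
```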

### Structured Output Configuration

```typescript { .api }
interface StructuredOutputConfig {
  /** Method for structured output */
  method?: "function_calling" | "json_mode";
  /** Whether to include raw output */
  includeRaw?: boolean;
  /** JSON schema for validation */
  schema?: Record<string, unknown>;
}
```
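
A sketch of structured output with a Zod schema, assuming a tool-calling-capable chat model `model` is in scope:

```typescript
import { z } from "zod";

// The returned runnable parses and validates the model's reply into this shape
const Recipe = z.object({
  title: z.string(),
  ingredients: z.array(z.string())
});

const structured = model.withStructuredOutput(Recipe, {
  method: "function_calling",
  includeRaw: false
});

const recipe = await structured.invoke([
  new HumanMessage("Give me a pancake recipe")
]);
// recipe is typed as { title: string; ingredients: string[] }
```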

## Model Results

### LLM Result

```typescript { .api }
interface LLMResult {
  /** Generated completions, indexed [prompt][candidate] */
  generations: Generation[][];
  /** Provider-specific output such as token usage */
  llmOutput?: Record<string, unknown>;
  /** Run information for callbacks */
  run?: Run[];
}

interface Generation {
  /** Generated text */
  text: string;
  /** Additional generation info */
  generationInfo?: Record<string, unknown>;
}

interface GenerationChunk extends Generation {
  /** Combine with another chunk */
  concat(chunk: GenerationChunk): GenerationChunk;
}
```
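
Note that `generations` is nested: the outer index matches the input prompt, the inner index the candidate completion. A brief sketch, reusing the `llm` from the example above:

```typescript
// One inner array per prompt; each may hold several candidate completions
const result: LLMResult = await llm.generate(["Prompt A", "Prompt B"]);
const firstCompletionForPromptB = result.generations[1][0].text;
```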

### Chat Result

```typescript { .api }
interface ChatResult {
  /** Generated chat completions */
  generations: ChatGeneration[];
  /** Model output metadata */
  llmOutput?: Record<string, unknown>;
}

interface ChatGeneration {
  /** Generated message */
  message: BaseMessage;
  /** Additional generation info */
  generationInfo?: Record<string, unknown>;
}

interface ChatGenerationChunk extends ChatGeneration {
  /** Generated message chunk */
  message: BaseMessageChunk;
  /** Combine with another chunk */
  concat(chunk: ChatGenerationChunk): ChatGenerationChunk;
}
```
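
Chunks fold together with `concat`. A sketch of accumulating a streamed reply via the internal streaming method (call options elided for brevity; `model` and `messages` are from the earlier chat model example):

```typescript
// Accumulate streamed chunks into one final generation
let full: ChatGenerationChunk | undefined;
for await (const chunk of model._streamResponseChunks(messages, {})) {
  full = full ? full.concat(chunk) : chunk;
}
console.log(full?.message.content);
```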

## Call Options

```typescript { .api }
interface BaseLanguageModelCallOptions extends RunnableConfig {
  /** Stop sequences for generation */
  stop?: string[];
  /** Timeout for model calls, in milliseconds */
  timeout?: number;
  /** Additional model-specific options */
  [key: string]: unknown;
}

interface BaseChatModelCallOptions extends BaseLanguageModelCallOptions {
  /** Tool choice strategy */
  tool_choice?: ToolChoice;
  /** Tools available to the model */
  tools?: ToolDefinition[];
}

type ToolChoice = "auto" | "none" | "required" | { type: "function"; function: { name: string } };
```
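
Call options are passed per invocation as the second argument to `invoke` (or `stream`/`generate`). A sketch, reusing `model` and `messages` from earlier:

```typescript
// Stop sequences, a 10-second timeout, and a tool-choice strategy;
// provider-specific keys ride along via the index signature
const reply = await model.invoke(messages, {
  stop: ["\nObservation:"],
  timeout: 10_000,
  tool_choice: "auto"
});
```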

## Types

```typescript { .api }
type BaseLanguageModelInput = string | BaseMessage[];

interface LangSmithParams {
  /** LangSmith run name */
  ls_name?: string;
  /** LangSmith run type */
  ls_type?: string;
  /** Provider name */
  ls_provider?: string;
  /** Model name */
  ls_model_name?: string;
  /** Model type */
  ls_model_type?: string;
  /** Temperature setting */
  ls_temperature?: number;
  /** Max tokens */
  ls_max_tokens?: number;
  /** Stop sequences */
  ls_stop?: string[];
}

interface BasePromptValue {
  toString(): string;
  toChatMessages(): BaseMessage[];
}
```
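
`BaseLanguageModelInput` means a model accepts either a raw string or a message array, and a `BasePromptValue` can be rendered either way. A short sketch:

```typescript
// Both forms are valid inputs to the same model
await model.invoke("What is 2+2?");
await model.invoke([new HumanMessage("What is 2+2?")]);

// A prompt value (e.g. produced by a prompt template) exposes both views
declare const promptValue: BasePromptValue;
promptValue.toString();        // string form, for completion-style LLMs
promptValue.toChatMessages();  // message form, for chat models
```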