Universal chat model initialization supporting 18+ providers, with automatic provider inference from the model name and runtime configuration.
async function initChatModel<
  RunInput extends BaseLanguageModelInput = BaseLanguageModelInput,
  CallOptions extends ConfigurableChatModelCallOptions = ConfigurableChatModelCallOptions
>(
  model?: string,
  fields?: InitChatModelFields
): Promise<ConfigurableModel<RunInput, CallOptions>>;
interface InitChatModelFields {
  modelProvider?: ChatModelProvider;
  configurableFields?: string[] | "any";
  configPrefix?: string;
  profile?: ModelProfile;
  [key: string]: any;
}
interface ModelProfile {
  packageName: string;
  className: string;
}
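A sketch of the profile field, which, per the ModelProfile shape above, names the package and class to load explicitly (values are illustrative; behavior is assumed from the interface, not confirmed by the source):
// Bypass name-based inference by naming package and class directly
// (assumed behavior, based on the ModelProfile interface above):
const custom = await initChatModel("my-model", {
  profile: {
    packageName: "@langchain/openai",
    className: "ChatOpenAI",
  },
});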
type ChatModelProvider =
| "openai" | "anthropic" | "azure_openai" | "cohere"
| "google-vertexai" | "google-vertexai-web" | "google-genai" | "ollama"
| "mistralai" | "mistral" | "groq" | "cerebras" | "bedrock" | "deepseek"
| "xai" | "fireworks" | "together" | "perplexity";Examples:
import { initChatModel } from "langchain";
// Automatic provider inference
const model1 = await initChatModel("gpt-4o");
const model2 = await initChatModel("claude-3-5-sonnet");

// Explicit provider
const model3 = await initChatModel("openai:gpt-4o");

// With configuration
const model4 = await initChatModel("gpt-4o", {
  temperature: 0.7,
  maxTokens: 1000,
});
// Azure OpenAI
const model5 = await initChatModel("my-deployment", {
  modelProvider: "azure_openai",
  azureOpenAIApiKey: process.env.AZURE_OPENAI_API_KEY,
  azureOpenAIApiInstanceName: "my-instance",
  azureOpenAIApiDeploymentName: "my-deployment",
  azureOpenAIApiVersion: "2024-02-15-preview",
});
// Ollama (local)
const model6 = await initChatModel("llama2", {
  modelProvider: "ollama",
  baseUrl: "http://localhost:11434",
});

class ConfigurableModel extends BaseChatModel {
  model: BaseChatModel;

  invoke(
    input: BaseMessage[],
    options?: ConfigurableChatModelCallOptions
  ): Promise<AIMessage>;

  stream(
    input: BaseMessage[],
    options?: ConfigurableChatModelCallOptions
  ): Promise<IterableReadableStream<AIMessageChunk>>;
}
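A minimal streaming sketch against this class (assuming OPENAI_API_KEY is set; HumanMessage comes from @langchain/core/messages):
import { initChatModel } from "langchain";
import { HumanMessage } from "@langchain/core/messages";

const chat = await initChatModel("gpt-4o");
const stream = await chat.stream([new HumanMessage("Tell me a joke")]);
for await (const chunk of stream) {
  // Each AIMessageChunk carries a content delta (typically a string)
  process.stdout.write(chunk.content as string);
}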
interface ConfigurableChatModelCallOptions extends RunnableConfig {
  temperature?: number;
  maxTokens?: number;
  [key: string]: any;
}

| Provider | Package | Class | Inference Pattern |
|---|---|---|---|
| openai | @langchain/openai | ChatOpenAI | gpt-3/4/5, o1, o3, o4 |
| anthropic | @langchain/anthropic | ChatAnthropic | claude |
| azure_openai | @langchain/openai | AzureChatOpenAI | - |
| cohere | @langchain/cohere | ChatCohere | command |
| google-vertexai | @langchain/google-vertexai | ChatVertexAI | gemini |
| google-vertexai-web | @langchain/google-vertexai-web | ChatVertexAI | - |
| google-genai | @langchain/google-genai | ChatGoogleGenerativeAI | - |
| ollama | @langchain/ollama | ChatOllama | - |
| mistralai | @langchain/mistralai | ChatMistralAI | mistral |
| mistral | @langchain/mistralai | ChatMistralAI | (alias) |
| groq | @langchain/groq | ChatGroq | - |
| cerebras | @langchain/cerebras | ChatCerebras | - |
| bedrock | @langchain/aws | ChatBedrockConverse | amazon. |
| deepseek | @langchain/deepseek | ChatDeepSeek | - |
| xai | @langchain/xai | ChatXAI | - |
| fireworks | @langchain/community/chat_models/fireworks | ChatFireworks | accounts/fireworks |
| together | @langchain/community/chat_models/togetherai | ChatTogetherAI | - |
| perplexity | @langchain/community/chat_models/perplexity | ChatPerplexity | sonar, pplx |
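For rows whose Inference Pattern is "-", the provider cannot be inferred from the model name; name it explicitly via the provider:model prefix or the modelProvider field (model names below are illustrative):
// Groq has no inference pattern, so name the provider in the string:
const groqModel = await initChatModel("groq:llama-3.1-8b-instant");

// Equivalent, via the modelProvider field:
const deepseekModel = await initChatModel("deepseek-chat", {
  modelProvider: "deepseek",
});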
function _inferModelProvider(model: string): ChatModelProvider | undefined;
async function getChatModelByClassName(className: string): Promise<typeof BaseChatModel | undefined>;

When no API key is passed explicitly, it is read from the provider's environment variable: OPENAI_API_KEY, ANTHROPIC_API_KEY, GOOGLE_APPLICATION_CREDENTIALS, GOOGLE_API_KEY, COHERE_API_KEY, GROQ_API_KEY, MISTRAL_API_KEY, DEEPSEEK_API_KEY, XAI_API_KEY.

// Uses OPENAI_API_KEY from environment
const model = await initChatModel("gpt-4o");

// Explicit API key
const model2 = await initChatModel("gpt-4o", {
  openAIApiKey: "sk-...",
});

Usage with createAgent:
import { createAgent, initChatModel } from "langchain";
const model = await initChatModel("anthropic:claude-3-5-sonnet", {
  temperature: 0.5,
  maxTokens: 2000,
});
const agent = createAgent({
  model,
  tools: [searchTool],
  systemPrompt: "You are a research assistant.",
});
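Invoking the resulting agent follows the usual messages-in shape (a sketch; the exact input schema depends on your createAgent version):
const result = await agent.invoke({
  messages: [{ role: "user", content: "Summarize recent RLHF papers." }],
});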
Runtime configuration:
import { initChatModel } from "langchain";

const model = await initChatModel("gpt-4o");
// Override temperature at runtime
const creative = await model.invoke(messages, { temperature: 0.9 });
const precise = await model.invoke(messages, { temperature: 0.1 });
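The configurableFields and configPrefix options in InitChatModelFields scope which of these runtime overrides are allowed; a sketch, assuming prefixed keys are joined with an underscore (e.g. chat_temperature), which is an assumption rather than documented behavior:
const scoped = await initChatModel("gpt-4o", {
  configurableFields: ["temperature", "maxTokens"],
  configPrefix: "chat",
});

// Only whitelisted, prefixed fields may be overridden at runtime
// (underscore-joined key names are assumed):
await scoped.invoke(messages, {
  configurable: { chat_temperature: 0.2, chat_maxTokens: 500 },
});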