This guide covers universal model initialization and configuration across 18+ LLM providers.
## Basic usage

```typescript
import { initChatModel } from "langchain";

// Initialize with provider:model format
const model = initChatModel("openai:gpt-4o");
```
const model = initChatModel("openai:gpt-4o", {
  temperature: 0.7,
  maxTokens: 1000,
  timeout: 30000,
});
```

## Use with `createAgent`

```typescript
import { createAgent, initChatModel } from "langchain";
// Option 1: Pass string directly
const agent1 = createAgent({
model: "openai:gpt-4o",
tools: [],
});

// Option 2: Initialize the model first
const model = initChatModel("anthropic:claude-3-5-sonnet-20241022", {
  temperature: 0.5,
});

const agent2 = createAgent({
  model: model,
  tools: [],
});
```
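Both agents are then invoked identically. A minimal sketch, mirroring the invocation shown later in this guide:

```typescript
const result = await agent1.invoke({
  messages: [{ role: "user", content: "Hello!" }],
});
console.log(result);
```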
## Supported providers

### OpenAI

```typescript
const gpt4o = initChatModel("openai:gpt-4o");
const gpt4oMini = initChatModel("openai:gpt-4o-mini");
const gpt4Turbo = initChatModel("openai:gpt-4-turbo");
const gpt35 = initChatModel("openai:gpt-3.5-turbo");
```

### Anthropic

```typescript
const sonnet = initChatModel("anthropic:claude-3-5-sonnet-20241022");
const haiku = initChatModel("anthropic:claude-3-5-haiku-20241022");
const opus = initChatModel("anthropic:claude-3-opus-20240229");
```

### Google

```typescript
// Vertex AI
const vertexPro = initChatModel("google-vertexai:gemini-1.5-pro");
const vertexFlash = initChatModel("google-vertexai:gemini-1.5-flash");

// Vertex AI (web)
const vertexWeb = initChatModel("google-vertexai-web:gemini-1.5-pro");

// Generative AI
const genaiPro = initChatModel("google-genai:gemini-1.5-pro");
const genaiFlash = initChatModel("google-genai:gemini-1.5-flash");
```

### Other providers

```typescript
// Cohere
const commandR = initChatModel("cohere:command-r-plus");

// Mistral AI
const mistral = initChatModel("mistralai:mistral-large-latest");

// AWS Bedrock
const bedrock = initChatModel("bedrock:anthropic.claude-3-5-sonnet-20241022-v2:0");

// Ollama (local)
const llama = initChatModel("ollama:llama3.1");

// Groq
const groq = initChatModel("groq:llama-3.3-70b-versatile");

// Cerebras
const cerebras = initChatModel("cerebras:llama3.1-70b");

// DeepSeek
const deepseek = initChatModel("deepseek:deepseek-chat");

// X.AI
const grok = initChatModel("xai:grok-2-latest");

// Fireworks
const fireworks = initChatModel("fireworks:accounts/fireworks/models/llama-v3p1-70b-instruct");

// Together AI
const together = initChatModel("together:meta-llama/Llama-3-70b-chat-hf");

// Perplexity
const perplexity = initChatModel("perplexity:llama-3.1-sonar-large-128k-online");
```

## Common configuration options

```typescript
const model = initChatModel("openai:gpt-4o", {
  // Temperature (0.0 to 2.0)
  temperature: 0.7,
  // Maximum tokens to generate
  maxTokens: 1000,
  // Request timeout (milliseconds)
  timeout: 30000,
  // Enable streaming
  streaming: true,
  // Top-p sampling
  topP: 0.9,
  // Frequency penalty
  frequencyPenalty: 0.5,
  // Presence penalty
  presencePenalty: 0.5,
  // Stop sequences
  stop: ["\n\n", "END"],
});
```

## Provider-specific options

```typescript
// OpenAI-specific
const openai = initChatModel("openai:gpt-4o", {
  temperature: 0.7,
  logitBias: { "50256": -100 },
  user: "user-123",
});

// Anthropic-specific
const anthropic = initChatModel("anthropic:claude-3-5-sonnet-20241022", {
  temperature: 0.5,
  topK: 40,
});

// Ollama-specific
const ollama = initChatModel("ollama:llama3.1", {
  temperature: 0.8,
  baseUrl: "http://localhost:11434",
});
```

## Switching providers

Because every model exposes the same interface, switching providers only changes the model identifier:

```typescript
import { createAgent } from "langchain";
// Start with OpenAI
let agent = createAgent({
model: "openai:gpt-4o",
tools: [],
});

// Switch to Anthropic (same interface)
agent = createAgent({
model: "anthropic:claude-3-5-sonnet-20241022",
tools: [],
});

// Switch to local Ollama (same interface)
agent = createAgent({
model: "ollama:llama3.1",
tools: [],
});

// All agents work the same way
const result = await agent.invoke({
messages: [{ role: "user", content: "Hello!" }],
});
```

## Dynamic model selection

Instead of a fixed model, `createAgent` also accepts a function that picks a model from the current state:

```typescript
import { createAgent, initChatModel } from "langchain";
const agent = createAgent({
  model: (state) => {
    // Select model based on task complexity
    if (state.messages.length <= 2) {
      return initChatModel("openai:gpt-4o-mini"); // Fast model
    }
    return initChatModel("openai:gpt-4o"); // Powerful model
  },
  tools: [],
});
```
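The message-count check is just one heuristic; any property of the state can drive the selection. A sketch of an alternative rule (the length threshold is an illustrative assumption, not part of the API):

```typescript
import { createAgent, initChatModel } from "langchain";

const agent = createAgent({
  model: (state) => {
    // Route long requests to the more capable model
    const last = state.messages[state.messages.length - 1];
    const text = typeof last?.content === "string" ? last.content : "";
    return initChatModel(text.length > 400 ? "openai:gpt-4o" : "openai:gpt-4o-mini");
  },
  tools: [],
});
```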
## Fallback models

```typescript
import { initChatModel } from "langchain";

async function getModel() {
  try {
    // Try primary model
    return initChatModel("openai:gpt-4o");
  } catch (error) {
    console.log("Primary model unavailable, using fallback");
    // Fall back to an alternative
    return initChatModel("anthropic:claude-3-5-sonnet-20241022");
  }
}

const model = await getModel();
```
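Note that the `try`/`catch` above only guards initialization, not individual requests. For per-request fallbacks, LangChain runnables expose a `withFallbacks` method; a sketch (treating `initChatModel` as returning the model directly, as elsewhere in this guide, and assuming the object form of the options):

```typescript
import { initChatModel } from "langchain";

const primary = initChatModel("openai:gpt-4o");
const fallback = initChatModel("anthropic:claude-3-5-sonnet-20241022");

// If a call to the primary model throws, the same input is retried on the fallback
const modelWithFallback = primary.withFallbacks({ fallbacks: [fallback] });

const response = await modelWithFallback.invoke([
  { role: "user", content: "Hello" },
]);
```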
## Environment variables

Most providers read their credentials from environment variables:

```bash
# OpenAI
export OPENAI_API_KEY="sk-..."

# Anthropic
export ANTHROPIC_API_KEY="sk-ant-..."

# Google
export GOOGLE_APPLICATION_CREDENTIALS="/path/to/credentials.json"

# Cohere
export COHERE_API_KEY="..."

# Groq
export GROQ_API_KEY="gsk_..."

# Mistral
export MISTRAL_API_KEY="..."

# DeepSeek
export DEEPSEEK_API_KEY="..."

# X.AI
export XAI_API_KEY="..."
```

### Azure OpenAI

```bash
export AZURE_OPENAI_API_KEY="..."
export AZURE_OPENAI_API_INSTANCE_NAME="..."
export AZURE_OPENAI_API_DEPLOYMENT_NAME="..."
export AZURE_OPENAI_API_VERSION="..."
```

### AWS Bedrock

```bash
export AWS_ACCESS_KEY_ID="..."
export AWS_SECRET_ACCESS_KEY="..."
export AWS_REGION="us-east-1"
```

## Direct model instantiation

For full control over client options, instantiate the provider's model class directly and pass it to `createAgent`:

```typescript
import { ChatOpenAI } from "@langchain/openai";
import { createAgent } from "langchain";
// Create fully customized model instance
const customModel = new ChatOpenAI({
modelName: "gpt-4o",
temperature: 0.9,
apiKey: process.env.CUSTOM_OPENAI_KEY,
organization: "org-123",
maxRetries: 5,
});

// Use with agent
const agent = createAgent({
  model: customModel,
  tools: [],
});
```
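The same pattern works for other providers' packages. A sketch for Anthropic, assuming `@langchain/anthropic` is installed:

```typescript
import { ChatAnthropic } from "@langchain/anthropic";

const customClaude = new ChatAnthropic({
  model: "claude-3-5-sonnet-20241022",
  temperature: 0.5,
  apiKey: process.env.CUSTOM_ANTHROPIC_KEY, // hypothetical variable name
  maxRetries: 5,
});
```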
## Per-call configuration

Defaults set at initialization can be overridden for individual calls:

```typescript
const model = initChatModel("openai:gpt-4o");

// Override configuration for a specific call
const response = await model.invoke(
[{ role: "user", content: "Hello" }],
{
temperature: 0.9, // Override default
maxTokens: 500,
}
);
```

## Streaming

```typescript
const model = initChatModel("openai:gpt-4o", {
  streaming: true,
});

const stream = await model.stream([
{ role: "user", content: "Tell me a story" },
]);

for await (const chunk of stream) {
  process.stdout.write(chunk.content);
}
```
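A variant of the loop that also accumulates the full text while streaming (a sketch, assuming `chunk.content` is a string as in the loop above):

```typescript
let full = "";
for await (const chunk of stream) {
  process.stdout.write(chunk.content);
  full += chunk.content;
}
console.log(`\nStreamed ${full.length} characters`);
```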
## Batch processing

```typescript
const model = initChatModel("openai:gpt-4o");

const results = await model.batch([
[{ role: "user", content: "What is 2+2?" }],
[{ role: "user", content: "What is the capital of France?" }],
[{ role: "user", content: "Name a color" }],
]);
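
// batch also accepts an options argument; maxConcurrency (a standard
// RunnableConfig field) can cap the number of parallel requests, e.g.:
// const limited = await model.batch(sameInputsAsAbove, { maxConcurrency: 2 });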

results.forEach((result, i) => {
  console.log(`Result ${i}:`, result.content);
});
```

See the Model API Reference for complete API documentation.