OpenAI integrations for LangChain.js providing chat models, embeddings, tools, and Azure support.
—
Does it follow best practices? — Pending
Impact — Pending (no eval scenarios have been run)
The risk profile of this skill — Pending
Complete Azure OpenAI service support with custom endpoints, API versions, and authentication methods including Azure AD. Provides the same functionality as OpenAI models but deployed on Azure infrastructure.
Azure OpenAI chat model integration with all the features of ChatOpenAI plus Azure-specific configuration.
/**
 * Azure OpenAI chat model integration (reference declaration — the
 * implementation lives in @langchain/openai).
 * Extends ChatOpenAI with Azure-specific configuration; authentication is via
 * either an API key or an Azure AD token provider.
 */
class AzureChatOpenAI<CallOptions extends ChatOpenAICallOptions = ChatOpenAICallOptions>
extends ChatOpenAI<CallOptions> {
/** All fields are optional at the type level; unset values may be read from AZURE_OPENAI_* environment variables. */
constructor(fields?: AzureOpenAIInput & ChatOpenAIFields);
/** Azure-specific configuration */
azureOpenAIApiVersion?: string; // Azure API version (e.g., "2024-02-01")
azureOpenAIApiKey?: string; // Azure API key
azureADTokenProvider?: () => Promise<string>; // Azure AD token provider function (alternative to the API key)
azureOpenAIApiInstanceName?: string; // Azure instance name
azureOpenAIApiDeploymentName?: string; // Azure deployment name
azureOpenAIBasePath?: string; // Azure base path
azureOpenAIEndpoint?: string; // Azure endpoint URL
/** Inherited from ChatOpenAI */
invoke(input: BaseLanguageModelInput, options?: CallOptions): Promise<BaseMessage>;
stream(input: BaseLanguageModelInput, options?: CallOptions): AsyncIterable<BaseMessageChunk>;
bindTools(tools: ChatOpenAIToolType[], kwargs?: Partial<CallOptions>): Runnable;
withStructuredOutput<T>(outputSchema: z.ZodType<T> | Record<string, any>, config?: any): Runnable<BaseLanguageModelInput, T>;
}

Azure OpenAI text completion model wrapper for legacy completions API.
/**
 * Azure OpenAI text completion model wrapper (reference declaration).
 * Extends the OpenAI LLM class with Azure-specific configuration; targets the
 * legacy completions API rather than chat completions.
 */
class AzureOpenAI extends OpenAI {
constructor(fields?: AzureOpenAIInput);
/** Azure configuration inherited from AzureOpenAIInput */
azureOpenAIApiVersion?: string; // e.g., "2024-02-01"
azureOpenAIApiKey?: string;
azureADTokenProvider?: () => Promise<string>; // alternative to the API key
azureOpenAIApiInstanceName?: string;
azureOpenAIApiDeploymentName?: string;
azureOpenAIBasePath?: string;
azureOpenAIEndpoint?: string;
/** Inherited LLM functionality */
_generate(prompts: string[], options: OpenAICallOptions): Promise<LLMResult>;
_streamResponseChunks(input: string, options: OpenAICallOptions): AsyncIterable<GenerationChunk>;
}

Azure OpenAI embeddings integration with deployment-specific configuration.
/**
 * Azure OpenAI embeddings integration (reference declaration).
 * Extends OpenAIEmbeddings with Azure-specific configuration and per-task
 * deployment names.
 */
class AzureOpenAIEmbeddings extends OpenAIEmbeddings {
constructor(fields?: AzureOpenAIInput & Partial<OpenAIEmbeddingsParams>);
/** Azure-specific embedding configuration */
azureOpenAIApiEmbeddingsDeploymentName?: string; // Specific deployment for embeddings
azureOpenAIApiCompletionsDeploymentName?: string; // Completions deployment fallback
/** Inherited embedding functionality */
embedDocuments(texts: string[]): Promise<number[][]>; // one vector per input text
embedQuery(text: string): Promise<number[]>; // single vector for a query string
}

Configuration interface for Azure OpenAI services.
/**
 * Configuration options shared by all Azure OpenAI integrations.
 * Every field is optional at the type level; values omitted here may be read
 * from AZURE_OPENAI_* environment variables at runtime.
 */
interface AzureOpenAIInput extends OpenAIBaseInput {
/** Azure API version, e.g. "2024-02-01" (optional in the type, but effectively required at runtime — see validateAzureConfig usage below) */
azureOpenAIApiVersion?: string;
/** Azure API key */
azureOpenAIApiKey?: string;
/** Azure AD token provider for authentication (alternative to the API key) */
azureADTokenProvider?: () => Promise<string>;
/** Azure OpenAI instance name */
azureOpenAIApiInstanceName?: string;
/** Azure deployment name */
azureOpenAIApiDeploymentName?: string;
/** Azure base path for custom endpoints */
azureOpenAIBasePath?: string;
/** Azure endpoint URL */
azureOpenAIEndpoint?: string;
/** Embeddings-specific deployment name */
azureOpenAIApiEmbeddingsDeploymentName?: string;
/** Completions-specific deployment name */
azureOpenAIApiCompletionsDeploymentName?: string;
}

import { AzureChatOpenAI } from "@langchain/openai";
// Method 1: Using instance name and deployment
// (endpoint is derived as https://<instance>.openai.azure.com)
const azureModel = new AzureChatOpenAI({
azureOpenAIApiKey: "your-azure-key",
azureOpenAIApiInstanceName: "your-instance-name",
azureOpenAIApiDeploymentName: "your-deployment-name",
azureOpenAIApiVersion: "2024-02-01",
temperature: 0.7,
maxTokens: 1000
});
// Method 2: Using direct endpoint
// (explicit endpoint URL replaces the instance name; deployment + version still required)
const azureModelDirect = new AzureChatOpenAI({
azureOpenAIApiKey: process.env.AZURE_OPENAI_API_KEY,
azureOpenAIEndpoint: "https://your-instance.openai.azure.com/",
azureOpenAIApiDeploymentName: "gpt-4o-deployment",
azureOpenAIApiVersion: "2024-02-01"
});
// Basic usage - same as ChatOpenAI
const response = await azureModel.invoke("Hello, how are you?");
console.log(response.content);

// Set these environment variables:
// AZURE_OPENAI_API_KEY=your-azure-key
// AZURE_OPENAI_API_INSTANCE_NAME=your-instance
// AZURE_OPENAI_API_DEPLOYMENT_NAME=your-deployment
// AZURE_OPENAI_API_VERSION=2024-02-01
import { AzureChatOpenAI } from "@langchain/openai";
// Configuration will be loaded from environment variables
// (only model-behavior options need to be passed explicitly)
const azureModel = new AzureChatOpenAI({
temperature: 0.5
// Azure config loaded from env vars automatically
});
const result = await azureModel.invoke("What is Azure OpenAI?");

import { DefaultAzureCredential } from "@azure/identity";
// Using Azure AD with managed identity
/**
 * Builds an azureADTokenProvider for AzureChatOpenAI using DefaultAzureCredential
 * (managed identity, Azure CLI, environment credentials, ...).
 *
 * @returns an async function that fetches an Azure AD access token scoped to
 *          the Azure Cognitive Services resource on each call.
 * @throws Error (from the returned provider) when no credential in the chain
 *         can obtain a token.
 */
async function createAzureADTokenProvider() {
  const credential = new DefaultAzureCredential();
  return async (): Promise<string> => {
    const token = await credential.getToken("https://cognitiveservices.azure.com/.default");
    // getToken resolves to null when authentication fails; fail loudly with a
    // descriptive error instead of letting `token.token` throw a bare TypeError.
    if (!token) {
      throw new Error("Failed to acquire Azure AD token for https://cognitiveservices.azure.com/.default");
    }
    return token.token;
  };
}
// No API key needed when a token provider is supplied
const azureModelWithAD = new AzureChatOpenAI({
azureADTokenProvider: await createAzureADTokenProvider(),
azureOpenAIApiInstanceName: "your-instance",
azureOpenAIApiDeploymentName: "your-deployment",
azureOpenAIApiVersion: "2024-02-01"
});
const response = await azureModelWithAD.invoke("Hello from Azure AD authenticated model!");

// Configure different models for different use cases
// Same instance, two deployments: low temperature for factual answers
const chatModel = new AzureChatOpenAI({
azureOpenAIApiKey: process.env.AZURE_OPENAI_API_KEY,
azureOpenAIApiInstanceName: "my-instance",
azureOpenAIApiDeploymentName: "gpt-4o-deployment", // For chat
azureOpenAIApiVersion: "2024-02-01",
temperature: 0.3
});
// Higher temperature for creative generation
const creativeModel = new AzureChatOpenAI({
azureOpenAIApiKey: process.env.AZURE_OPENAI_API_KEY,
azureOpenAIApiInstanceName: "my-instance",
azureOpenAIApiDeploymentName: "gpt-4o-creative", // Different deployment
azureOpenAIApiVersion: "2024-02-01",
temperature: 0.9
});
// Use different models for different tasks
const factualResponse = await chatModel.invoke("What is the capital of France?");
const creativeResponse = await creativeModel.invoke("Write a poem about Paris");

import { AzureOpenAIEmbeddings } from "@langchain/openai";
// Embeddings use a dedicated deployment-name field, separate from chat deployments
const azureEmbeddings = new AzureOpenAIEmbeddings({
azureOpenAIApiKey: process.env.AZURE_OPENAI_API_KEY,
azureOpenAIApiInstanceName: "my-instance",
azureOpenAIApiEmbeddingsDeploymentName: "text-embedding-ada-002", // Specific embedding deployment
azureOpenAIApiVersion: "2024-02-01",
batchSize: 512
});
// Usage same as OpenAIEmbeddings
const query = "What is machine learning?";
const queryEmbedding = await azureEmbeddings.embedQuery(query);
const documents = [
"Machine learning is a subset of AI",
"Deep learning uses neural networks",
"Natural language processing handles text"
];
const docEmbeddings = await azureEmbeddings.embedDocuments(documents);

import { AzureOpenAI } from "@langchain/openai";
// Legacy text-completion model (AzureOpenAI wraps the completions API, not chat)
const azureLLM = new AzureOpenAI({
azureOpenAIApiKey: process.env.AZURE_OPENAI_API_KEY,
azureOpenAIApiInstanceName: "my-instance",
azureOpenAIApiDeploymentName: "text-davinci-003-deployment",
azureOpenAIApiVersion: "2024-02-01",
temperature: 0.7,
maxTokens: 200
});
// Text completion
const completion = await azureLLM.invoke("The future of artificial intelligence is");
console.log(completion);

// For custom Azure endpoints or special configurations
const customAzureModel = new AzureChatOpenAI({
azureOpenAIApiKey: "your-key",
azureOpenAIBasePath: "/openai/deployments/your-deployment",
azureOpenAIEndpoint: "https://your-custom-endpoint.com",
azureOpenAIApiVersion: "2024-02-01",
// No deployment name needed when using basePath
});
// Alternative with full URL construction
// (baseURL embeds instance and deployment directly)
const fullUrlModel = new AzureChatOpenAI({
azureOpenAIApiKey: "your-key",
baseURL: "https://your-instance.openai.azure.com/openai/deployments/your-deployment",
azureOpenAIApiVersion: "2024-02-01"
});

const azureStreamingModel = new AzureChatOpenAI({
azureOpenAIApiKey: process.env.AZURE_OPENAI_API_KEY,
azureOpenAIApiInstanceName: "my-instance",
azureOpenAIApiDeploymentName: "gpt-4o-deployment",
azureOpenAIApiVersion: "2024-02-01",
streaming: true, // opt in to streamed responses
temperature: 0.5
});
// Stream responses
// (each chunk carries a partial .content; write without newlines to join them)
const stream = await azureStreamingModel.stream("Tell me a story about a robot");
for await (const chunk of stream) {
process.stdout.write(chunk.content);
}

import { z } from "zod";
// Temperature 0 for deterministic tool selection
const azureToolModel = new AzureChatOpenAI({
azureOpenAIApiKey: process.env.AZURE_OPENAI_API_KEY,
azureOpenAIApiInstanceName: "my-instance",
azureOpenAIApiDeploymentName: "gpt-4o-tools", // Deployment supporting tools
azureOpenAIApiVersion: "2024-02-01",
temperature: 0
});
// Tool definitions: name + description + zod schema for the arguments
const tools = [
{
name: "search_database",
description: "Search company database for information",
schema: z.object({
query: z.string().describe("Search query"),
department: z.string().optional().describe("Department to search in")
})
}
];
const modelWithTools = azureToolModel.bindTools(tools);
const response = await modelWithTools.invoke("Find information about sales performance");
// Handle tool calls
if (response.tool_calls) {
console.log("Model wants to call tools:", response.tool_calls);
}

import { z } from "zod";
// Zod schema describing the required output shape
const ResponseSchema = z.object({
summary: z.string().describe("Brief summary"),
keyPoints: z.array(z.string()).describe("Main points"),
sentiment: z.enum(["positive", "negative", "neutral"]).describe("Overall sentiment"),
confidence: z.number().min(0).max(1).describe("Confidence score")
});
const azureStructuredModel = new AzureChatOpenAI({
azureOpenAIApiKey: process.env.AZURE_OPENAI_API_KEY,
azureOpenAIApiInstanceName: "my-instance",
azureOpenAIApiDeploymentName: "gpt-4o-deployment",
azureOpenAIApiVersion: "2024-02-01"
});
// withStructuredOutput returns a runnable whose result matches ResponseSchema
const structuredModel = azureStructuredModel.withStructuredOutput(ResponseSchema, {
name: "DocumentAnalysis"
});
const analysis = await structuredModel.invoke(`
Analyze this customer feedback: "I love the new features in your app!
The user interface is much cleaner and the performance is significantly better.
However, I wish there were more customization options available."
`);
console.log(analysis);
// Output: { summary: "...", keyPoints: [...], sentiment: "positive", confidence: 0.85 }

# Basic Azure OpenAI configuration
# Read automatically by the Azure integrations when constructor fields are omitted
AZURE_OPENAI_API_KEY=your-azure-api-key
AZURE_OPENAI_API_INSTANCE_NAME=your-instance-name
AZURE_OPENAI_API_DEPLOYMENT_NAME=your-deployment-name
AZURE_OPENAI_API_VERSION=2024-02-01
# Alternative endpoint configuration
AZURE_OPENAI_ENDPOINT=https://your-instance.openai.azure.com/
AZURE_OPENAI_BASE_PATH=/openai/deployments/your-deployment
# Specialized deployments
AZURE_OPENAI_API_EMBEDDINGS_DEPLOYMENT_NAME=text-embedding-deployment
AZURE_OPENAI_API_COMPLETIONS_DEPLOYMENT_NAME=completions-deployment

The Azure utilities use this priority order for configuration:
// Constructor parameters take priority over environment variables
const model = new AzureChatOpenAI({
azureOpenAIApiKey: "constructor-key", // Used
// Even if AZURE_OPENAI_API_KEY is set in environment
});
// This will use environment variable
const model2 = new AzureChatOpenAI({
// azureOpenAIApiKey not specified, will use AZURE_OPENAI_API_KEY from env
temperature: 0.7
});

Azure utilities construct endpoints in this order:
import { getEndpoint } from "@langchain/openai";
// Example endpoint construction
const config = {
azureOpenAIApiInstanceName: "my-instance",
azureOpenAIApiDeploymentName: "gpt-4o-deployment",
azureOpenAIApiVersion: "2024-02-01"
};
// getEndpoint builds the request URL from instance + deployment name
const endpoint = getEndpoint(config);
// Result: "https://my-instance.openai.azure.com/openai/deployments/gpt-4o-deployment"

import { AzureChatOpenAI } from "@langchain/openai";
// Model with client-side retry and timeout settings for the error-handling demo
const azureModel = new AzureChatOpenAI({
azureOpenAIApiKey: process.env.AZURE_OPENAI_API_KEY,
azureOpenAIApiInstanceName: "my-instance",
azureOpenAIApiDeploymentName: "gpt-4o-deployment",
azureOpenAIApiVersion: "2024-02-01",
maxRetries: 3,
timeout: 60000
});
try {
  const response = await azureModel.invoke("Hello!");
} catch (error) {
  // `catch` variables are `unknown` under strict TS — narrow before reading .message.
  const message = error instanceof Error ? error.message : String(error);
  console.error("Azure OpenAI error:", error);
  // Common Azure-specific errors
  if (message.includes('DeploymentNotFound')) {
    console.error("Deployment not found - check deployment name");
  } else if (message.includes('InvalidApiVersionParameter')) {
    console.error("Invalid API version - check azureOpenAIApiVersion");
  } else if (message.includes('Unauthorized')) {
    console.error("Authentication failed - check API key or Azure AD token");
  }
}

// Create a factory for consistent Azure model creation
/** Factory that stamps out Azure models sharing one base configuration. */
class AzureModelFactory {
  private readonly baseConfig: Partial<AzureOpenAIInput>;

  constructor(baseConfig: Partial<AzureOpenAIInput>) {
    // Environment-derived defaults are applied first so explicit values win.
    const defaults: Partial<AzureOpenAIInput> = {
      azureOpenAIApiKey: process.env.AZURE_OPENAI_API_KEY,
      azureOpenAIApiInstanceName: process.env.AZURE_OPENAI_API_INSTANCE_NAME,
      azureOpenAIApiVersion: "2024-02-01",
    };
    this.baseConfig = { ...defaults, ...baseConfig };
  }

  /** Chat model bound to a specific deployment; extra options override the shared base. */
  createChatModel(deploymentName: string, options?: Partial<AzureOpenAIInput>) {
    const merged = {
      ...this.baseConfig,
      azureOpenAIApiDeploymentName: deploymentName,
      ...options,
    };
    return new AzureChatOpenAI(merged);
  }

  /** Embeddings client targeting a dedicated embeddings deployment. */
  createEmbeddings(deploymentName: string) {
    return new AzureOpenAIEmbeddings({
      ...this.baseConfig,
      azureOpenAIApiEmbeddingsDeploymentName: deploymentName,
    });
  }
}
// Usage: one factory shares credentials/instance/version across deployments
const factory = new AzureModelFactory({
azureOpenAIApiInstanceName: "my-instance"
});
const chatModel = factory.createChatModel("gpt-4o-deployment", { temperature: 0.3 });
const embeddings = factory.createEmbeddings("text-embedding-deployment");

/**
 * Fail-fast validation of an Azure OpenAI configuration object.
 * Throws when the API version is missing, when neither authentication
 * mechanism is set, or when no endpoint information is provided; returns
 * nothing on success.
 */
function validateAzureConfig(config: Partial<AzureOpenAIInput>): void {
  const isSet = (field: string): boolean =>
    Boolean(config[field as keyof AzureOpenAIInput]);

  // Fields that must always be present.
  for (const field of ['azureOpenAIApiVersion']) {
    if (!isSet(field)) {
      throw new Error(`Missing required field: ${field}`);
    }
  }

  // At least one authentication mechanism.
  if (!['azureOpenAIApiKey', 'azureADTokenProvider'].some(isSet)) {
    throw new Error("Must provide either azureOpenAIApiKey or azureADTokenProvider");
  }

  // At least one way to locate the service endpoint.
  const endpointFields = [
    'azureOpenAIApiInstanceName',
    'azureOpenAIEndpoint',
    'azureOpenAIBasePath'
  ];
  if (!endpointFields.some(isSet)) {
    throw new Error("Must provide endpoint configuration");
  }
}
// Usage: validate before constructing, so misconfiguration fails with a clear message
const config = {
azureOpenAIApiKey: process.env.AZURE_OPENAI_API_KEY,
azureOpenAIApiInstanceName: "my-instance",
azureOpenAIApiVersion: "2024-02-01"
};
validateAzureConfig(config);
const model = new AzureChatOpenAI(config);

// Support multiple Azure regions for reliability
/**
 * Round-robin failover across Azure OpenAI deployments in multiple regions.
 * On any invocation error the pool rotates to the next region and retries,
 * up to maxRetries attempts in total.
 */
class MultiRegionAzureModel {
  private regions: AzureChatOpenAI[] = [];
  private currentRegionIndex = 0;

  /** @throws Error when no region configurations are supplied. */
  constructor(regionConfigs: AzureOpenAIInput[]) {
    if (regionConfigs.length === 0) {
      // Fail fast: with zero regions, invoke() would index into nothing and
      // the modulo rotation below would produce NaN.
      throw new Error("MultiRegionAzureModel requires at least one region configuration");
    }
    this.regions = regionConfigs.map(config => new AzureChatOpenAI(config));
  }

  /**
   * Invoke the model, failing over to the next region on error.
   * @throws Error when every attempt fails; the message carries the last error.
   */
  async invoke(input: string, maxRetries = 3): Promise<BaseMessage> {
    let lastError: Error | null = null;
    for (let attempt = 0; attempt < maxRetries; attempt++) {
      const model = this.regions[this.currentRegionIndex];
      try {
        return await model.invoke(input);
      } catch (error) {
        // `catch` variables are `unknown` under strict TS — normalize before use.
        lastError = error instanceof Error ? error : new Error(String(error));
        console.warn(`Region ${this.currentRegionIndex} failed:`, lastError.message);
        // Switch to next region
        this.currentRegionIndex = (this.currentRegionIndex + 1) % this.regions.length;
      }
    }
    throw new Error(`All regions failed. Last error: ${lastError?.message}`);
  }
}
// Usage: failover pool spanning US East and EU West instances of the same deployment
const multiRegionModel = new MultiRegionAzureModel([
{
azureOpenAIApiKey: process.env.AZURE_OPENAI_API_KEY,
azureOpenAIApiInstanceName: "us-east-instance",
azureOpenAIApiDeploymentName: "gpt-4o-deployment",
azureOpenAIApiVersion: "2024-02-01"
},
{
azureOpenAIApiKey: process.env.AZURE_OPENAI_API_KEY,
azureOpenAIApiInstanceName: "eu-west-instance",
azureOpenAIApiDeploymentName: "gpt-4o-deployment",
azureOpenAIApiVersion: "2024-02-01"
}
]);
const response = await multiRegionModel.invoke("Hello from multi-region setup!");