Data framework for your LLM application
—
Pending
Does it follow best practices?
Impact
Pending
No eval scenarios have been run
Pending
The risk profile of this skill
Comprehensive integration with large language models including OpenAI, Anthropic, and other providers in LlamaIndex.TS.
import { OpenAI, Settings } from "llamaindex";
// Or specific LLM providers
import { OpenAI, Anthropic, Groq } from "llamaindex/llms";

LlamaIndex.TS provides extensive support for various LLM providers with unified interfaces for chat, completion, streaming, and tool calling capabilities. The framework abstracts provider differences while exposing provider-specific features.
/**
 * Unified contract implemented by every LLM provider adapter.
 * NOTE(review): streaming and tool-calling entry points are not part of
 * this interface as shown — confirm against the full provider surface.
 */
interface LLM {
/** Multi-turn chat completion over a message history. */
chat(messages: ChatMessage[], options?: LLMChatParams): Promise<ChatResponse>;
/** Single-prompt text completion. */
complete(prompt: string, options?: LLMCompletionParams): Promise<CompletionResponse>;
/** Static model/configuration information for this provider instance. */
metadata: LLMMetadata;
}
/**
 * Static description of a configured model: its identity, sampling
 * parameters, and context-size limit.
 */
interface LLMMetadata {
  model: string;
  temperature?: number;
  topP?: number;
  topK?: number;
  maxTokens?: number;
  /** Maximum number of tokens the model can attend to. */
  contextWindow: number;
  /** Optional tokenizer, used for token counting when provided. */
  tokenizer?: (text: string) => string[];
}

/** A single turn in a conversation. */
interface ChatMessage {
  role: MessageType;
  content: MessageContent;
}

/** Conversation roles understood by the framework. */
type MessageType = "system" | "user" | "assistant" | "tool";

/** Either plain text or a list of multimodal content parts. */
type MessageContent = string | MessageContentDetail[];
/**
 * One part of a multimodal message payload. The optional payload fields
 * correspond to `type`: `text` for "text", `image_url` for "image_url",
 * `audio` for "audio".
 * NOTE(review): "video" and "file" appear in the `type` union but have no
 * matching payload field here — confirm against the library's full
 * definition before relying on those variants.
 */
interface MessageContentDetail {
type: "text" | "image_url" | "audio" | "video" | "file";
text?: string;
image_url?: { url: string };
audio?: { data: string };
}
/**
 * Result of a chat call.
 */
interface ChatResponse {
  /** The message produced by the model. */
  message: ChatMessage;
  /** Raw provider payload; shape varies by provider — verify before use. */
  raw?: any;
  /** Incremental text chunk when streaming. */
  delta?: string;
}

/**
 * OpenAI provider adapter — type surface only; implementations live in
 * the library. Marked `declare` so the bodiless members form a valid
 * ambient declaration instead of a missing-implementation error (TS2391).
 */
declare class OpenAI implements LLM {
  constructor(options?: {
    model?: string;
    temperature?: number;
    topP?: number;
    maxTokens?: number;
    apiKey?: string;
    /** Provider-specific options forwarded verbatim to the chat endpoint. */
    additionalChatOptions?: any;
  });
  chat(messages: ChatMessage[], options?: LLMChatParams): Promise<ChatResponse>;
  complete(prompt: string, options?: LLMCompletionParams): Promise<CompletionResponse>;
  metadata: LLMMetadata;
}

import { OpenAI, Settings } from "llamaindex";
// Install a shared, application-wide LLM instance.
const globalLlm = new OpenAI({
  model: "gpt-4",
  temperature: 0.1,
  maxTokens: 2048,
});
Settings.llm = globalLlm;

// Run a single chat exchange through the global LLM.
const response = await Settings.llm.chat([
  { role: "system", content: "You are a helpful assistant." },
  { role: "user", content: "Explain quantum computing" },
]);
console.log(response.message.content);

import { Anthropic } from "llamaindex";
// Use Anthropic's Claude as the globally-configured model.
const claude = new Anthropic({
  maxTokens: 4096,
  model: "claude-3-opus-20240229",
  temperature: 0.0,
});
Settings.llm = claude;

// LLMs with tool calling support
// A single function tool advertised to the model, in OpenAI's
// function-calling schema format.
const weatherTool = {
  type: "function",
  function: {
    name: "get_weather",
    description: "Get weather information",
    parameters: {
      type: "object",
      properties: {
        location: { type: "string" },
      },
    },
  },
};

// An LLM instance whose chat requests always carry the tool definition.
const toolCallLLM = new OpenAI({
  model: "gpt-4",
  additionalChatOptions: {
    tools: [weatherTool],
  },
});

// Environment-specific configuration
// Environment-aware model selection: deterministic GPT-4 in production,
// a cheaper GPT-3.5 configuration everywhere else.
const isProduction = process.env.NODE_ENV === "production";
Settings.llm = isProduction
  ? new OpenAI({ model: "gpt-4", temperature: 0.0 })
  : new OpenAI({ model: "gpt-3.5-turbo", temperature: 0.1 });