Data framework for your LLM application
Review status:
- Best practices: Pending — Does it follow best practices?
- Impact: Pending — No eval scenarios have been run
- Risk: Pending — The risk profile of this skill
Text embedding generation and similarity operations for semantic search and retrieval in LlamaIndex.TS.
Embeddings convert text into numerical vector representations that capture semantic meaning, enabling similarity search and retrieval operations in LlamaIndex.TS.

import { OpenAIEmbedding, Settings } from "llamaindex";
/**
 * Contract for embedding providers: converts text and queries into
 * numeric vectors for similarity search.
 */
interface BaseEmbedding {
  /** Embed a document/passage; resolves to its vector representation. */
  getTextEmbedding(text: string): Promise<number[]>;
  /** Embed a search query; resolves to its vector representation. */
  getQueryEmbedding(query: string): Promise<number[]>;
  /** Optional number of texts embedded per batch request. */
  embedBatchSize?: number;
  /** Optional output vector dimensionality. */
  dimensions?: number;
}

/**
 * OpenAI-backed implementation of BaseEmbedding.
 * Signature-only snippet: method bodies live in the llamaindex package.
 */
class OpenAIEmbedding implements BaseEmbedding {
  constructor(options?: {
    model?: string;      // e.g. "text-embedding-3-large"
    dimensions?: number; // requested output dimensionality
    apiKey?: string;     // presumably falls back to env credentials — TODO confirm
  });
  getTextEmbedding(text: string): Promise<number[]>;
  getQueryEmbedding(query: string): Promise<number[]>;
}

import { OpenAIEmbedding, Settings } from "llamaindex";
// Configure global embedding model
Settings.embedModel = new OpenAIEmbedding({
  model: "text-embedding-3-large",
  dimensions: 1536,
});

// Generate embeddings
const text = "LlamaIndex is a data framework";
const embedding = await Settings.embedModel.getTextEmbedding(text);
console.log("Embedding dimensions:", embedding.length);

// Similarity utilities (signature-only snippet):
// - similarity: score two embedding vectors against each other
// - getTopKEmbeddings: rank `embeddings` against `queryEmbedding` and return
//   the top k — return appears to be indices into `embeddings`; verify
//   against the LlamaIndex.TS API reference.
function similarity(embedding1: number[], embedding2: number[]): number;
function getTopKEmbeddings(
  queryEmbedding: number[],
  embeddings: number[][],
  k: number
): number[];

// Embeddings are automatically used by VectorStoreIndex
// Building an index embeds every document implicitly — no manual
// getTextEmbedding calls are needed for ingestion.
const index = await VectorStoreIndex.fromDocuments(documents);
// Uses Settings.embedModel to generate embeddings for documents