Google Gen AI JavaScript SDK for building applications powered by Gemini. It supports content generation, image/video generation, function calling, caching, and real-time live sessions.
The Models module provides text embedding generation capabilities for semantic search, similarity comparison, and clustering tasks.
Generate embeddings for text content.
/**
 * Generate embeddings for text content.
 *
 * @param params - Embedding parameters: the target model, the content to
 *   embed, and an optional {@link EmbedContentConfig}.
 * @returns Promise resolving to an {@link EmbedContentResponse} whose
 *   `embeddings` array holds one vector per input content.
 */
function embedContent(
  params: EmbedContentParameters
): Promise<EmbedContentResponse>;
/** Parameters accepted by {@link embedContent}. */
interface EmbedContentParameters {
  /** Model name (e.g., 'text-embedding-004') */
  model: string;
  /** Content to embed — the examples below pass a single string or an array of Content objects */
  contents: ContentListUnion;
  /** Optional embedding configuration (task type, title, output dimensionality) */
  config?: EmbedContentConfig;
}
/** Response returned by {@link embedContent}. */
interface EmbedContentResponse {
  /** Generated embeddings — one entry per input content; optional in the wire type */
  embeddings?: ContentEmbedding[];
}
/** A single embedding result. */
interface ContentEmbedding {
  /** Embedding values (dense numeric vector) */
  values?: number[];
}

Usage Examples:
// Example: basic embedding generation — a single string input and a batched
// array-of-Content input.
import { GoogleGenAI } from '@google/genai';

const client = new GoogleGenAI({ apiKey: 'YOUR_API_KEY' });

// Generate embedding for single text
const response = await client.models.embedContent({
  model: 'text-embedding-004',
  contents: 'What is machine learning?'
});
// Response fields are optional, hence the optional chaining throughout.
const embedding = response.embeddings?.[0]?.values;
console.log('Embedding dimensions:', embedding?.length);
console.log('Embedding vector:', embedding);

// Generate embeddings for multiple texts
const multiResponse = await client.models.embedContent({
  model: 'text-embedding-004',
  contents: [
    { role: 'user', parts: [{ text: 'Document 1 content' }] },
    { role: 'user', parts: [{ text: 'Document 2 content' }] },
    { role: 'user', parts: [{ text: 'Document 3 content' }] }
  ]
});
// NOTE(review): one embedding per input Content is assumed — confirm with API docs.
multiResponse.embeddings?.forEach((emb, i) => {
  console.log(`Embedding ${i}:`, emb.values?.slice(0, 5));
});

Configuration for embedding generation.
/** Configuration for embedding generation. */
interface EmbedContentConfig {
  /** Task type hint so the model can optimize the embedding for a downstream use case */
  taskType?: TaskType;
  /** Title for the content — presumably only meaningful for retrieval documents; verify against API docs */
  title?: string;
  /** Desired length of the output vector — assumed to reduce from the model default (e.g., 768); TODO confirm */
  outputDimensionality?: number;
}
/** Downstream task the embedding is intended for. */
enum TaskType {
  /** No task type specified */
  TASK_TYPE_UNSPECIFIED = 'TASK_TYPE_UNSPECIFIED',
  /** Retrieval query embedding */
  RETRIEVAL_QUERY = 'RETRIEVAL_QUERY',
  /** Retrieval document embedding */
  RETRIEVAL_DOCUMENT = 'RETRIEVAL_DOCUMENT',
  /** Semantic similarity */
  SEMANTIC_SIMILARITY = 'SEMANTIC_SIMILARITY',
  /** Classification */
  CLASSIFICATION = 'CLASSIFICATION',
  /** Clustering */
  CLUSTERING = 'CLUSTERING',
  /** Question answering */
  QUESTION_ANSWERING = 'QUESTION_ANSWERING',
  /** Fact verification */
  FACT_VERIFICATION = 'FACT_VERIFICATION'
}

Embedding result for a single piece of content.
// NOTE(review): duplicate of the ContentEmbedding declaration earlier in this
// document — kept for section completeness.
/** Embedding result for a single piece of content. */
interface ContentEmbedding {
  /** Embedding values (vector of floats) */
  values?: number[];
}

import { GoogleGenAI, TaskType } from '@google/genai';
// Example: semantic search — documents are embedded with RETRIEVAL_DOCUMENT and
// the query with RETRIEVAL_QUERY so the two sides are aligned for retrieval.
const client = new GoogleGenAI({ apiKey: 'YOUR_API_KEY' });
// Document corpus
const documents = [
  'The quick brown fox jumps over the lazy dog',
  'Machine learning is a subset of artificial intelligence',
  'Python is a popular programming language',
  'Climate change affects global weather patterns',
  'Neural networks are inspired by biological neurons'
];
// Generate document embeddings (one request per document, issued in parallel)
const docEmbeddings = await Promise.all(
  documents.map(doc =>
    client.models.embedContent({
      model: 'text-embedding-004',
      contents: doc,
      config: {
        taskType: TaskType.RETRIEVAL_DOCUMENT
      }
    })
  )
);
const docVectors = docEmbeddings.map(r => r.embeddings![0].values!);
// Query
const query = 'Tell me about AI and machine learning';
const queryResponse = await client.models.embedContent({
  model: 'text-embedding-004',
  contents: query,
  config: {
    taskType: TaskType.RETRIEVAL_QUERY
  }
});
const queryVector = queryResponse.embeddings![0].values!;
/**
 * Cosine similarity between two equal-length numeric vectors.
 *
 * @param a - First vector.
 * @param b - Second vector; must have the same length as `a`.
 * @returns Similarity in [-1, 1]; returns 0 when either vector has zero
 *   magnitude (the unguarded formula divided by zero and yielded NaN).
 */
function cosineSimilarity(a: number[], b: number[]): number {
  const dotProduct = a.reduce((sum, val, i) => sum + val * b[i], 0);
  const magnitudeA = Math.sqrt(a.reduce((sum, val) => sum + val * val, 0));
  const magnitudeB = Math.sqrt(b.reduce((sum, val) => sum + val * val, 0));
  const denominator = magnitudeA * magnitudeB;
  // Guard: a zero vector has no direction — report 0 similarity instead of NaN.
  return denominator === 0 ? 0 : dotProduct / denominator;
}
// Rank documents by similarity to the query, highest first.
const similarities = docVectors.map((docVec, i) => ({
  document: documents[i],
  similarity: cosineSimilarity(queryVector, docVec)
}));
similarities.sort((a, b) => b.similarity - a.similarity);
console.log('Query:', query);
console.log('\nMost similar documents:');
similarities.forEach((result, i) => {
  console.log(`${i + 1}. ${result.document}`);
  console.log(` Similarity: ${result.similarity.toFixed(4)}\n`);
});

import { TaskType } from '@google/genai';
// Example: cluster short documents by topic using CLUSTERING-task embeddings.
const documents = [
  'Dogs are loyal pets',
  'Cats are independent animals',
  'Python is used for data science',
  'JavaScript runs in browsers',
  'Birds can fly in the sky',
  'Fish live in water',
  'Java is object-oriented',
  'Ruby is great for web development'
];
// Generate embeddings for clustering
const embeddings = await Promise.all(
  documents.map(doc =>
    client.models.embedContent({
      model: 'text-embedding-004',
      contents: doc,
      config: {
        taskType: TaskType.CLUSTERING
      }
    })
  )
);
const vectors = embeddings.map(r => r.embeddings![0].values!);
/**
 * Naive k-means over dense vectors (at most 10 iterations, as in the original).
 * Deterministic: centroids are seeded from the first k vectors — the original
 * comment claimed "randomly", which was incorrect.
 * In production, use a proper ML library.
 *
 * @param vectors - Input points; all assumed to share the same dimensionality.
 * @param k - Number of clusters; should satisfy 0 < k <= vectors.length.
 * @returns Cluster index (0..k-1) for each input vector, in input order.
 */
function kMeansClustering(vectors: number[][], k: number): number[] {
  // Squared Euclidean distance: monotone in true distance, so the nearest
  // centroid is unchanged while avoiding a sqrt per comparison. Inlined so the
  // function has no external dependencies.
  const squaredDistance = (a: number[], b: number[]): number => {
    let sum = 0;
    for (let i = 0; i < a.length; i++) {
      const d = a[i] - b[i];
      sum += d * d;
    }
    return sum;
  };
  const assignments = new Array(vectors.length).fill(0);
  // Deterministic seeding from the first k points; copied so centroid updates
  // can never alias (and later mutate) the caller's input vectors.
  const centroids = vectors.slice(0, k).map(v => [...v]);
  for (let iter = 0; iter < 10; iter++) {
    // Assignment step: nearest centroid for every point.
    let changed = false;
    vectors.forEach((vec, i) => {
      let best = 0;
      let bestDist = Infinity;
      centroids.forEach((centroid, j) => {
        const dist = squaredDistance(vec, centroid);
        if (dist < bestDist) {
          bestDist = dist;
          best = j;
        }
      });
      if (assignments[i] !== best) changed = true;
      assignments[i] = best;
    });
    // Converged: once assignments repeat after a centroid update, further
    // iterations cannot change them — identical result, less work.
    if (!changed && iter > 0) break;
    // Update step: mean of each non-empty cluster; empty clusters keep their
    // previous centroid (matching the original behavior).
    for (let j = 0; j < centroids.length; j++) {
      const members = vectors.filter((_, i) => assignments[i] === j);
      if (members.length > 0) {
        centroids[j] = members[0].map((_, dim) =>
          members.reduce((sum, vec) => sum + vec[dim], 0) / members.length
        );
      }
    }
  }
  return assignments;
}
/** Euclidean (L2) distance between two equal-length numeric vectors. */
function euclideanDistance(a: number[], b: number[]): number {
  let sumOfSquares = 0;
  for (let i = 0; i < a.length; i++) {
    const diff = a[i] - b[i];
    sumOfSquares += diff * diff;
  }
  return Math.sqrt(sumOfSquares);
}
const clusters = kMeansClustering(vectors, 3);
// Group documents by their assigned cluster index.
const clusterGroups: { [key: number]: string[] } = {};
documents.forEach((doc, i) => {
  const cluster = clusters[i];
  if (!clusterGroups[cluster]) {
    clusterGroups[cluster] = [];
  }
  clusterGroups[cluster].push(doc);
});
console.log('Document Clusters:');
Object.entries(clusterGroups).forEach(([cluster, docs]) => {
  console.log(`\nCluster ${cluster}:`);
  docs.forEach(doc => console.log(` - ${doc}`));
});

import { TaskType } from '@google/genai';
// Example: k-NN (k=3) text classification with CLASSIFICATION-task embeddings.
// Training data (categories and examples)
const trainingData = [
  { category: 'sports', text: 'The team won the championship' },
  { category: 'sports', text: 'The player scored a goal' },
  { category: 'technology', text: 'New AI model released' },
  { category: 'technology', text: 'Software update available' },
  { category: 'food', text: 'The recipe uses fresh ingredients' },
  { category: 'food', text: 'The restaurant serves Italian cuisine' }
];
// Generate embeddings for training data
const trainingEmbeddings = await Promise.all(
  trainingData.map(item =>
    client.models.embedContent({
      model: 'text-embedding-004',
      contents: item.text,
      config: {
        taskType: TaskType.CLASSIFICATION
      }
    })
  )
);
// New texts to classify
const testTexts = [
  'The basketball game was exciting',
  'The new smartphone has great features',
  'This pasta dish is delicious'
];
// Generate embeddings for test data
const testEmbeddings = await Promise.all(
  testTexts.map(text =>
    client.models.embedContent({
      model: 'text-embedding-004',
      contents: text,
      config: {
        taskType: TaskType.CLASSIFICATION
      }
    })
  )
);
// Classify using k-NN (k=3)
testTexts.forEach((text, i) => {
  const testVec = testEmbeddings[i].embeddings![0].values!;
  // Distances to all training examples (euclideanDistance is defined earlier in this doc)
  const distances = trainingEmbeddings.map((emb, j) => ({
    category: trainingData[j].category,
    distance: euclideanDistance(testVec, emb.embeddings![0].values!)
  }));
  // Sort by distance and get top k
  distances.sort((a, b) => a.distance - b.distance);
  const topK = distances.slice(0, 3);
  // Vote: majority label among the k nearest neighbors
  const votes: { [key: string]: number } = {};
  topK.forEach(item => {
    votes[item.category] = (votes[item.category] || 0) + 1;
  });
  const prediction = Object.entries(votes).sort((a, b) => b[1] - a[1])[0][0];
  console.log(`Text: "${text}"`);
  console.log(`Predicted category: ${prediction}\n`);
});

import { TaskType } from '@google/genai';
// Example: FAQ retrieval — match a user question to the most similar stored
// question using QUESTION_ANSWERING-task embeddings.
// FAQ database
const faqs = [
  {
    question: 'How do I reset my password?',
    answer: 'Click on "Forgot Password" on the login page and follow the instructions.'
  },
  {
    question: 'What are your business hours?',
    answer: 'We are open Monday to Friday, 9 AM to 5 PM.'
  },
  {
    question: 'How can I contact support?',
    answer: 'You can reach our support team at support@example.com or call 1-800-123-4567.'
  },
  {
    question: 'What is your return policy?',
    answer: 'Items can be returned within 30 days of purchase with a receipt.'
  }
];
// Generate embeddings for FAQ questions
const faqEmbeddings = await Promise.all(
  faqs.map(faq =>
    client.models.embedContent({
      model: 'text-embedding-004',
      contents: faq.question,
      config: {
        taskType: TaskType.QUESTION_ANSWERING
      }
    })
  )
);
// User question
const userQuestion = 'I forgot my password, what should I do?';
const questionResponse = await client.models.embedContent({
  model: 'text-embedding-004',
  contents: userQuestion,
  config: {
    taskType: TaskType.QUESTION_ANSWERING
  }
});
const questionVec = questionResponse.embeddings![0].values!;
// Find most similar FAQ (cosineSimilarity is defined earlier in this doc)
const similarities = faqEmbeddings.map((emb, i) => ({
  faq: faqs[i],
  similarity: cosineSimilarity(questionVec, emb.embeddings![0].values!)
}));
similarities.sort((a, b) => b.similarity - a.similarity);
const bestMatch = similarities[0];
console.log('User Question:', userQuestion);
console.log('\nMost Similar FAQ:');
console.log('Q:', bestMatch.faq.question);
console.log('A:', bestMatch.faq.answer);
console.log('Similarity:', bestMatch.similarity.toFixed(4));

import { TaskType } from '@google/genai';
// Example: near-duplicate detection via pairwise cosine similarity.
// Documents with potential duplicates
const documents = [
  'The cat sat on the mat',
  'A feline was sitting on the rug',
  'Machine learning is amazing',
  'Dogs are great companions',
  'ML is an incredible technology',
  'Canines make wonderful friends'
];
// Generate embeddings
const embeddings = await Promise.all(
  documents.map(doc =>
    client.models.embedContent({
      model: 'text-embedding-004',
      contents: doc,
      config: {
        taskType: TaskType.SEMANTIC_SIMILARITY
      }
    })
  )
);
const vectors = embeddings.map(r => r.embeddings![0].values!);
// Find duplicates (similarity > threshold); 0.85 is a heuristic — tune per corpus.
const threshold = 0.85;
const duplicates: Array<[number, number, number]> = [];
// Upper-triangular pass: each unordered pair is compared exactly once.
for (let i = 0; i < vectors.length; i++) {
  for (let j = i + 1; j < vectors.length; j++) {
    const similarity = cosineSimilarity(vectors[i], vectors[j]);
    if (similarity > threshold) {
      duplicates.push([i, j, similarity]);
    }
  }
}
console.log('Potential Duplicates:');
duplicates.forEach(([i, j, sim]) => {
  console.log(`\nSimilarity: ${sim.toFixed(4)}`);
  console.log(` 1. "${documents[i]}"`);
  console.log(` 2. "${documents[j]}"`);
});
// Remove duplicates
const toRemove = new Set<number>();
duplicates.forEach(([i, j]) => {
  toRemove.add(j); // Keep first, remove second
});
const uniqueDocs = documents.filter((_, i) => !toRemove.has(i));
console.log('\nUnique Documents:');
uniqueDocs.forEach(doc => console.log(` - ${doc}`));
// Generate embeddings for large dataset efficiently
// Process a large dataset in fixed-size batches of concurrent requests.
const largeDataset = Array.from({ length: 1000 }, (_, i) =>
  `Document ${i} with unique content`
);
// Process in batches
// NOTE(review): each item is still a separate embedContent call; the batch only
// bounds concurrency — confirm whether the SDK exposes a true batch endpoint.
const batchSize = 50;
const allEmbeddings: number[][] = [];
for (let i = 0; i < largeDataset.length; i += batchSize) {
  const batch = largeDataset.slice(i, i + batchSize);
  const batchEmbeddings = await Promise.all(
    batch.map(doc =>
      client.models.embedContent({
        model: 'text-embedding-004',
        contents: doc
      })
    )
  );
  const vectors = batchEmbeddings.map(r => r.embeddings![0].values!);
  allEmbeddings.push(...vectors);
  console.log(`Processed ${Math.min(i + batchSize, largeDataset.length)}/${largeDataset.length}`);
}
console.log(`Generated ${allEmbeddings.length} embeddings`);
// Generate lower-dimensional embeddings for faster computation
const response = await client.models.embedContent({
model: 'text-embedding-004',
contents: 'Sample text for embedding',
config: {
outputDimensionality: 256 // Reduce from default (e.g., 768)
}
});
const embedding = response.embeddings![0].values!;
console.log('Embedding dimensions:', embedding.length); // 256import { TaskType } from '@google/genai';
// Example: embed documents and shape them for insertion into a vector database.
interface DocumentWithEmbedding {
  id: string;
  text: string;
  embedding: number[];
  // NOTE(review): Record<string, unknown> would be stricter — `any` disables
  // type checking on metadata reads.
  metadata: Record<string, any>;
}
const documents = [
  { id: '1', text: 'Document 1', category: 'tech' },
  { id: '2', text: 'Document 2', category: 'sports' },
  { id: '3', text: 'Document 3', category: 'food' }
];
// Generate and structure for storage (all documents embedded in parallel)
const documentsWithEmbeddings: DocumentWithEmbedding[] = await Promise.all(
  documents.map(async doc => {
    const response = await client.models.embedContent({
      model: 'text-embedding-004',
      contents: doc.text,
      config: {
        taskType: TaskType.RETRIEVAL_DOCUMENT
      }
    });
    return {
      id: doc.id,
      text: doc.text,
      embedding: response.embeddings![0].values!,
      metadata: { category: doc.category }
    };
  })
);
// Store in vector database (pseudo-code)
// await vectorDB.insert(documentsWithEmbeddings);
console.log('Documents indexed with embeddings');
// Compare texts in different languages
// Example: cross-lingual similarity matrix — presumably the model embeds
// translations near each other; verify against the model's documentation.
const texts = [
  'Hello, how are you?',
  'Bonjour, comment allez-vous?', // French
  'Hola, como estas?', // Spanish
  'The weather is nice today',
  'Il fait beau aujourd\'hui' // French
];
const embeddings = await Promise.all(
  texts.map(text =>
    client.models.embedContent({
      model: 'text-embedding-004',
      contents: text,
      config: {
        taskType: TaskType.SEMANTIC_SIMILARITY
      }
    })
  )
);
const vectors = embeddings.map(r => r.embeddings![0].values!);
// Full n^2 similarity matrix, including self-similarity on the diagonal.
console.log('Similarity Matrix:');
for (let i = 0; i < texts.length; i++) {
  for (let j = 0; j < texts.length; j++) {
    const similarity = cosineSimilarity(vectors[i], vectors[j]);
    console.log(`"${texts[i]}" <-> "${texts[j]}": ${similarity.toFixed(4)}`);
  }
  console.log('');
}
// Utility functions for working with embeddings
/**
 * Cosine similarity between two equal-length numeric vectors.
 *
 * @param a - First vector.
 * @param b - Second vector; must have the same length as `a`.
 * @returns Similarity in [-1, 1]; returns 0 when either vector has zero
 *   magnitude (the unguarded formula divided by zero and yielded NaN).
 */
function cosineSimilarity(a: number[], b: number[]): number {
  const dotProduct = a.reduce((sum, val, i) => sum + val * b[i], 0);
  const magnitudeA = Math.sqrt(a.reduce((sum, val) => sum + val * val, 0));
  const magnitudeB = Math.sqrt(b.reduce((sum, val) => sum + val * val, 0));
  const denominator = magnitudeA * magnitudeB;
  // Guard: a zero vector has no direction — report 0 similarity instead of NaN.
  return denominator === 0 ? 0 : dotProduct / denominator;
}
/** Euclidean (L2) distance between two equal-length numeric vectors. */
function euclideanDistance(a: number[], b: number[]): number {
  let sumOfSquares = 0;
  for (let i = 0; i < a.length; i++) {
    const diff = a[i] - b[i];
    sumOfSquares += diff * diff;
  }
  return Math.sqrt(sumOfSquares);
}
/**
 * Normalize a vector to unit (L2) length.
 *
 * @param vec - Input vector; not mutated.
 * @returns New array scaled to magnitude 1. An all-zero (or empty) input is
 *   returned as a copy unchanged — the unguarded version divided by zero and
 *   produced an array of NaNs.
 */
function normalizeVector(vec: number[]): number[] {
  const magnitude = Math.sqrt(vec.reduce((sum, val) => sum + val * val, 0));
  // Guard: a zero vector cannot be normalized; avoid 0/0 -> NaN.
  if (magnitude === 0) {
    return [...vec];
  }
  return vec.map(val => val / magnitude);
}
/** Dot (inner) product of two equal-length numeric vectors. */
function dotProduct(a: number[], b: number[]): number {
  return a.reduce((sum, val, i) => sum + val * b[i], 0);
}

Install with Tessl CLI
npx tessl i tessl/npm-google--genai