CtrlK
Blog · Docs · Log in · Get started
Tessl Logo

tessl/maven-dev-langchain4j--langchain4j-bedrock

AWS Bedrock integration for LangChain4j enabling Java applications to interact with various LLM providers through a unified interface

Overview
Eval results
Files

docs/guides/semantic-search.md

Semantic Search

Build semantic search with embeddings.

Basic Semantic Search

import dev.langchain4j.model.bedrock.BedrockCohereEmbeddingModel;
import dev.langchain4j.model.output.Response;  // was missing: embedAll(...) returns Response
import dev.langchain4j.store.embedding.EmbeddingStore;
import dev.langchain4j.store.embedding.inmemory.InMemoryEmbeddingStore;
import dev.langchain4j.store.embedding.EmbeddingMatch;
import dev.langchain4j.data.segment.TextSegment;
import dev.langchain4j.data.embedding.Embedding;
import java.util.List;  // was missing: List.of(...) is used below

// Create embedding models. Cohere V3 distinguishes how the text will be used:
// index documents with SEARCH_DOCUMENT and embed queries with SEARCH_QUERY.
BedrockCohereEmbeddingModel docModel = BedrockCohereEmbeddingModel.builder()
    .model(BedrockCohereEmbeddingModel.Model.COHERE_EMBED_ENGLISH_V3)
    .inputType(BedrockCohereEmbeddingModel.InputType.SEARCH_DOCUMENT)
    .build();

BedrockCohereEmbeddingModel queryModel = BedrockCohereEmbeddingModel.builder()
    .model(BedrockCohereEmbeddingModel.Model.COHERE_EMBED_ENGLISH_V3)
    .inputType(BedrockCohereEmbeddingModel.InputType.SEARCH_QUERY)
    .build();

// Index documents in an in-memory store (swap in a persistent store for production).
EmbeddingStore<TextSegment> store = new InMemoryEmbeddingStore<>();

List<TextSegment> documents = List.of(
    TextSegment.from("Machine learning is a subset of artificial intelligence"),
    TextSegment.from("Deep learning uses neural networks with multiple layers"),
    TextSegment.from("Natural language processing enables computers to understand text")
);

// The index-based pairing below assumes embedAll preserves input order.
Response<List<Embedding>> docEmbeddings = docModel.embedAll(documents);
for (int i = 0; i < documents.size(); i++) {
    store.add(docEmbeddings.content().get(i), documents.get(i));
}

// Search: embed the query with the query-tuned model, then fetch the top 3 matches.
String query = "neural networks";
Embedding queryEmbedding = queryModel.embed(query).content();
List<EmbeddingMatch<TextSegment>> matches = store.findRelevant(queryEmbedding, 3);

for (EmbeddingMatch<TextSegment> match : matches) {
    System.out.println("Score: " + match.score());
    System.out.println("Text: " + match.embedded().text());
}

With Metadata Filtering

// NOTE(review): Metadata is declared in dev.langchain4j.data.document,
// not dev.langchain4j.data.segment — the original import does not resolve.
import dev.langchain4j.data.document.Metadata;

// Add documents with per-segment metadata so searches can be filtered.
List<TextSegment> documentsWithMetadata = List.of(
    TextSegment.from("ML article content", Metadata.from("category", "ai")),
    TextSegment.from("Recipe content", Metadata.from("category", "cooking")),
    TextSegment.from("DL article content", Metadata.from("category", "ai"))
);

Response<List<Embedding>> metadataEmbeddings = docModel.embedAll(documentsWithMetadata);
for (int i = 0; i < documentsWithMetadata.size(); i++) {
    store.add(metadataEmbeddings.content().get(i), documentsWithMetadata.get(i));
}

// Search, keeping only matches whose "category" metadata equals "ai".
// Variables are renamed from the earlier example so the two snippets can be
// pasted into one file without redeclaring queryEmbedding/matches.
Embedding filteredQueryEmbedding = queryModel.embed("neural networks").content();
List<EmbeddingMatch<TextSegment>> filteredMatches = store.findRelevant(
    filteredQueryEmbedding,
    3,
    metadata -> "ai".equals(metadata.getString("category"))
);
// NOTE(review): confirm this predicate overload of findRelevant exists in your
// langchain4j version; newer releases express filters via EmbeddingSearchRequest.

RAG (Retrieval-Augmented Generation)

public class RAGSystem {
    private final BedrockCohereEmbeddingModel docModel;
    private final BedrockCohereEmbeddingModel queryModel;
    private final BedrockChatModel chatModel;
    private final EmbeddingStore<TextSegment> store;

    public RAGSystem() {
        this.docModel = BedrockCohereEmbeddingModel.builder()
            .model(BedrockCohereEmbeddingModel.Model.COHERE_EMBED_ENGLISH_V3)
            .inputType(BedrockCohereEmbeddingModel.InputType.SEARCH_DOCUMENT)
            .build();

        this.queryModel = BedrockCohereEmbeddingModel.builder()
            .model(BedrockCohereEmbeddingModel.Model.COHERE_EMBED_ENGLISH_V3)
            .inputType(BedrockCohereEmbeddingModel.InputType.SEARCH_QUERY)
            .build();

        this.chatModel = BedrockChatModel.builder()
            .modelId("anthropic.claude-3-5-sonnet-20241022-v2:0")
            .build();

        this.store = new InMemoryEmbeddingStore<>();
    }

    public void indexDocuments(List<String> documents) {
        List<TextSegment> segments = documents.stream()
            .map(TextSegment::from)
            .toList();

        Response<List<Embedding>> embeddings = docModel.embedAll(segments);
        for (int i = 0; i < segments.size(); i++) {
            store.add(embeddings.content().get(i), segments.get(i));
        }
    }

    public String query(String question) {
        // Retrieve relevant documents
        Embedding queryEmbedding = queryModel.embed(question).content();
        List<EmbeddingMatch<TextSegment>> matches = store.findRelevant(queryEmbedding, 3);

        // Build context from retrieved documents
        String context = matches.stream()
            .map(match -> match.embedded().text())
            .collect(Collectors.joining("\n\n"));

        // Generate answer using context
        String prompt = String.format(
            "Answer the question based on the context below.\n\nContext:\n%s\n\nQuestion: %s",
            context, question
        );

        return chatModel.generate(prompt);
    }
}

// Usage: index a corpus once, then ask grounded questions.
// loadKnowledgeBase() is a placeholder for your own List<String> document loader.
RAGSystem rag = new RAGSystem();
rag.indexDocuments(loadKnowledgeBase());
String answer = rag.query("What is machine learning?");

Similarity Comparison

// Titan V2 with normalize(true) yields unit-length vectors, so a plain
// dot product is already the cosine similarity (no magnitude division).
BedrockTitanEmbeddingModel titanModel = BedrockTitanEmbeddingModel.builder()
    .model(BedrockTitanEmbeddingModel.Types.TitanEmbedTextV2.getValue())
    .dimensions(512)
    .normalize(true)
    .build();

TextSegment firstText = TextSegment.from("The cat sat on the mat");
TextSegment secondText = TextSegment.from("A feline rested on the rug");

Embedding firstEmbedding = titanModel.embed(firstText).content();
Embedding secondEmbedding = titanModel.embed(secondText).content();

// Dot product of normalized vectors == cosine similarity.
double similarity = dotProduct(firstEmbedding.vector(), secondEmbedding.vector());
System.out.println("Similarity: " + similarity);

/**
 * Dot product of two equal-length vectors; for normalized embeddings this
 * value is the cosine similarity. Accumulates in double to limit float error.
 */
private double dotProduct(float[] left, float[] right) {
    double sum = 0.0;
    int i = 0;
    while (i < left.length) {
        sum += left[i] * right[i];
        i++;
    }
    return sum;
}

Document Clustering

// The CLUSTERING input type tunes Cohere embeddings for grouping similar documents.
BedrockCohereEmbeddingModel clusteringModel = BedrockCohereEmbeddingModel.builder()
    .model(BedrockCohereEmbeddingModel.Model.COHERE_EMBED_ENGLISH_V3)
    .inputType(BedrockCohereEmbeddingModel.InputType.CLUSTERING)
    .build();

// loadDocuments() is a placeholder for your own corpus loader.
List<TextSegment> corpus = loadDocuments();
Response<List<Embedding>> corpusEmbeddings = clusteringModel.embedAll(corpus);

// Feed the vectors to any clustering algorithm; here K-means with 5 clusters.
List<Cluster> clusters = kMeansClustering(corpusEmbeddings.content(), 5);

Text Classification

// The CLASSIFICATION input type tunes embeddings for use as classifier features.
BedrockCohereEmbeddingModel classificationModel = BedrockCohereEmbeddingModel.builder()
    .model(BedrockCohereEmbeddingModel.Model.COHERE_EMBED_ENGLISH_V3)
    .inputType(BedrockCohereEmbeddingModel.InputType.CLASSIFICATION)
    .build();

// Embed the labelled training set (loadTrainingData/loadLabels are placeholders).
List<TextSegment> trainingTexts = loadTrainingData();
List<String> labels = loadLabels();

Response<List<Embedding>> trainingEmbeddings = classificationModel.embedAll(trainingTexts);

// Fit any off-the-shelf classifier on the embedding vectors.
Classifier classifier = trainClassifier(trainingEmbeddings.content(), labels);

// Embed unseen text the same way, then predict its label.
TextSegment unseenText = TextSegment.from("This is a new document");
Embedding unseenEmbedding = classificationModel.embed(unseenText).content();
String predictedLabel = classifier.predict(unseenEmbedding);

Batch Processing

// Cohere accepts at most 96 segments per request; embedAll batches transparently.
BedrockCohereEmbeddingModel batchModel = BedrockCohereEmbeddingModel.builder()
    .model(BedrockCohereEmbeddingModel.Model.COHERE_EMBED_ENGLISH_V3)
    .maxSegmentsPerBatch(96)
    .build();

// A large dataset is split into batches of 96 behind the scenes —
// no manual chunking needed. loadThousandsOfDocuments() is a placeholder.
List<TextSegment> largeDataset = loadThousandsOfDocuments();
Response<List<Embedding>> embeddings = batchModel.embedAll(largeDataset);

Performance Optimization

// Titan V2 supports configurable output dimensions: trade quality for speed/memory.
BedrockTitanEmbeddingModel lowLatencyModel = BedrockTitanEmbeddingModel.builder()
    .model(BedrockTitanEmbeddingModel.Types.TitanEmbedTextV2.getValue())
    .dimensions(256)  // smaller vectors: faster search, smaller memory footprint
    .build();

BedrockTitanEmbeddingModel highQualityModel = BedrockTitanEmbeddingModel.builder()
    .model(BedrockTitanEmbeddingModel.Types.TitanEmbedTextV2.getValue())
    .dimensions(1024)  // larger vectors: better retrieval quality, more memory
    .build();

Related:

  • Embeddings Quick Start
  • Embedding Models API
  • Model Selection

Install with Tessl CLI

npx tessl i tessl/maven-dev-langchain4j--langchain4j-bedrock

docs

index.md

README.md

tile.json