AWS Bedrock integration for LangChain4j, enabling Java applications to interact with various LLM providers through a unified interface.
Generate vector embeddings for semantic search and similarity comparison.
import dev.langchain4j.model.bedrock.BedrockCohereEmbeddingModel;
import dev.langchain4j.data.segment.TextSegment;
import dev.langchain4j.data.embedding.Embedding;
import dev.langchain4j.model.output.Response;
import software.amazon.awssdk.regions.Region;

// Build a Cohere embedding model on Bedrock. InputType.SEARCH_DOCUMENT is the
// setting for texts that will be indexed for later retrieval (queries use
// SEARCH_QUERY — see the retrieval example further down).
BedrockCohereEmbeddingModel model = BedrockCohereEmbeddingModel.builder()
        .region(Region.US_EAST_1)
        .model(BedrockCohereEmbeddingModel.Model.COHERE_EMBED_ENGLISH_V3)
        .inputType(BedrockCohereEmbeddingModel.InputType.SEARCH_DOCUMENT)
        .build();

// Embed one text segment and read back the raw float vector.
TextSegment segment = TextSegment.from("Machine learning is a subset of AI");
Response<Embedding> response = model.embed(segment);
float[] vector = response.content().vector();

import dev.langchain4j.model.bedrock.BedrockTitanEmbeddingModel;
// Titan Text Embeddings V2: supports a configurable output dimension and
// optional L2 normalization of the returned vector (useful when the store
// computes cosine similarity via dot product).
BedrockTitanEmbeddingModel model = BedrockTitanEmbeddingModel.builder()
        .region(Region.US_EAST_1)
        .model(BedrockTitanEmbeddingModel.Types.TitanEmbedTextV2.getValue())
        .dimensions(512)   // reduced from the model default to save storage; V2 supports several sizes
        .normalize(true)   // unit-length vectors
        .build();

// embed(String) returns a Response<Embedding>; content() unwraps the embedding.
Embedding embedding = model.embed("Sample text").content();

import java.util.List;
// Batch-embed several segments in one call. The returned list lines up with
// the input list by index (presumably — confirm against your LangChain4j version
// if you rely on ordering for storage).
List<TextSegment> documents = List.of(
        TextSegment.from("Document 1 about ML"),
        TextSegment.from("Document 2 about AI"),
        TextSegment.from("Document 3 about NLP")
);
Response<List<Embedding>> embeddings = model.embedAll(documents);
for (Embedding emb : embeddings.content()) {
    float[] vector = emb.vector();
    // Store or process vector
}

import dev.langchain4j.store.embedding.EmbeddingStore;
import dev.langchain4j.store.embedding.inmemory.InMemoryEmbeddingStore;
import dev.langchain4j.store.embedding.EmbeddingMatch;

// Cohere embeddings are asymmetric: embed documents with SEARCH_DOCUMENT and
// queries with SEARCH_QUERY. Using one model for both degrades retrieval quality.
// NOTE(review): these builders omit .region(...) unlike the earlier snippets —
// confirm the default region resolution is acceptable for your deployment.

// Document model for indexing
BedrockCohereEmbeddingModel docModel = BedrockCohereEmbeddingModel.builder()
        .model(BedrockCohereEmbeddingModel.Model.COHERE_EMBED_ENGLISH_V3)
        .inputType(BedrockCohereEmbeddingModel.InputType.SEARCH_DOCUMENT)
        .build();

// Query model for searching
BedrockCohereEmbeddingModel queryModel = BedrockCohereEmbeddingModel.builder()
        .model(BedrockCohereEmbeddingModel.Model.COHERE_EMBED_ENGLISH_V3)
        .inputType(BedrockCohereEmbeddingModel.InputType.SEARCH_QUERY)
        .build();

// Index documents: embed every segment, then add each (embedding, segment) pair
// to the in-memory store, pairing them by index.
EmbeddingStore<TextSegment> store = new InMemoryEmbeddingStore<>();
List<TextSegment> docs = loadDocuments(); // user-supplied loader, defined elsewhere
Response<List<Embedding>> docEmbeddings = docModel.embedAll(docs);
for (int i = 0; i < docs.size(); i++) {
    store.add(docEmbeddings.content().get(i), docs.get(i));
}

// Search: embed the query with the query-side model, then fetch the 3 nearest
// stored segments with their similarity scores.
String query = "neural networks";
Embedding queryEmbedding = queryModel.embed(query).content();
// NOTE(review): findRelevant(...) is deprecated in recent LangChain4j releases in
// favor of store.search(EmbeddingSearchRequest) — confirm against your version.
List<EmbeddingMatch<TextSegment>> matches = store.findRelevant(queryEmbedding, 3);
for (EmbeddingMatch<TextSegment> match : matches) {
    System.out.println("Score: " + match.score());
    System.out.println("Text: " + match.embedded().text());
}

Next Steps: