LangChain4j integration for Google AI Gemini models providing chat, streaming, embeddings, image generation, and batch processing capabilities
A comprehensive Java integration library for Google AI Gemini models within the LangChain4j framework, enabling developers to build advanced AI-powered applications with enterprise Java frameworks. This package provides unified APIs for chat interactions (synchronous and streaming), embeddings, image generation, token estimation, file management, and cost-efficient batch processing.
<dependency>
<groupId>dev.langchain4j</groupId>
<artifactId>langchain4j-google-ai-gemini</artifactId>
<version>1.11.0</version>
</dependency>

import dev.langchain4j.model.googleai.GoogleAiGeminiChatModel;
import dev.langchain4j.model.googleai.GoogleAiGeminiStreamingChatModel;
import dev.langchain4j.model.googleai.GoogleAiEmbeddingModel;
import dev.langchain4j.model.googleai.GoogleAiGeminiImageModel;
import dev.langchain4j.model.googleai.GeminiFiles;
import dev.langchain4j.model.googleai.GoogleAiGeminiTokenCountEstimator;

import dev.langchain4j.model.googleai.GoogleAiGeminiChatModel;
import dev.langchain4j.data.message.ChatMessage;
import dev.langchain4j.model.output.Response;
// Create a chat model
GoogleAiGeminiChatModel model = GoogleAiGeminiChatModel.builder()
.apiKey(System.getenv("GOOGLE_AI_API_KEY"))
.modelName("gemini-2.5-pro")
.temperature(0.7)
.build();
// Send a chat message
Response<AiMessage> response = model.generate("What is the capital of France?");
System.out.println(response.content().text());
// Use streaming for real-time responses
GoogleAiGeminiStreamingChatModel streamingModel = GoogleAiGeminiStreamingChatModel.builder()
.apiKey(System.getenv("GOOGLE_AI_API_KEY"))
.modelName("gemini-2.5-flash")
.build();
streamingModel.generate("Tell me a story", new StreamingResponseHandler<AiMessage>() {
@Override
public void onNext(String token) {
System.out.print(token);
}
@Override
public void onComplete(Response<AiMessage> response) {
System.out.println("\nDone!");
}
@Override
public void onError(Throwable error) {
error.printStackTrace();
}
});

The langchain4j-google-ai-gemini package is organized around several key components:
- **Chat models**: Synchronous (GoogleAiGeminiChatModel) and streaming (GoogleAiGeminiStreamingChatModel) interfaces for conversational AI, with support for multimodal inputs (text, images, video, audio, PDFs), function calling, structured outputs, and thinking mode
- **Batch processing**: Batch variants (GoogleAiGeminiBatchChatModel, GoogleAiGeminiBatchEmbeddingModel, GoogleAiGeminiBatchImageModel) offering 50% cost reduction with 24-hour processing SLO
- **Embeddings**: Text embedding generation (GoogleAiEmbeddingModel) with configurable task types and output dimensionality
- **Image generation**: Image generation and editing (GoogleAiGeminiImageModel) with safety settings and Google Search grounding
- **File management**: Gemini Files API (GeminiFiles) supporting up to 20GB project capacity and 2GB per-file uploads with 48-hour persistence
- **Utilities**: Token count estimation (GoogleAiGeminiTokenCountEstimator) and model catalog (GoogleAiGeminiModelCatalog) for resource planning and model discovery

All models follow the Builder Pattern for construction and integrate seamlessly with LangChain4j core interfaces, enabling use with Spring Boot, Quarkus, Helidon, and Micronaut frameworks.
Synchronous chat model for Gemini API supporting multimodal inputs, function calling, structured outputs, code execution, and thinking mode.
public class GoogleAiGeminiChatModel {
public static GoogleAiGeminiChatModelBuilder builder();
public ChatRequestParameters defaultRequestParameters();
public ChatResponse doChat(ChatRequest chatRequest);
public Set<Capability> supportedCapabilities();
public List<ChatModelListener> listeners();
public ModelProvider provider();
}

Streaming chat model for Gemini API enabling real-time token-by-token responses with multimodal support and function calling.
public class GoogleAiGeminiStreamingChatModel {
public static GoogleAiGeminiStreamingChatModelBuilder builder();
public ChatRequestParameters defaultRequestParameters();
public void doChat(ChatRequest request, StreamingChatResponseHandler handler);
public List<ChatModelListener> listeners();
public ModelProvider provider();
}

Batch processing for chat requests offering 50% cost reduction with 24-hour processing SLO (experimental).
public class GoogleAiGeminiBatchChatModel {
public static Builder builder();
public BatchResponse<ChatResponse> createBatchInline(String displayName, Long priority, List<ChatRequest> requests);
public BatchResponse<ChatResponse> createBatchFromFile(String displayName, GeminiFile file);
public void writeBatchToFile(JsonLinesWriter writer, Iterable<BatchFileRequest<ChatRequest>> requests);
public BatchResponse<ChatResponse> retrieveBatchResults(BatchName name);
public void cancelBatchJob(BatchName name);
public void deleteBatchJob(BatchName name);
public BatchList<ChatResponse> listBatchJobs(Integer pageSize, String pageToken);
}

Text embedding generation with configurable task types and output dimensionality.
public class GoogleAiEmbeddingModel {
public static GoogleAiEmbeddingModelBuilder builder();
public Response<Embedding> embed(TextSegment textSegment);
public Response<Embedding> embed(String text);
public Response<List<Embedding>> embedAll(List<TextSegment> textSegments);
public String modelName();
public Integer knownDimension();
public enum TaskType {
RETRIEVAL_QUERY,
RETRIEVAL_DOCUMENT,
SEMANTIC_SIMILARITY,
CLASSIFICATION,
CLUSTERING,
QUESTION_ANSWERING,
FACT_VERIFICATION
}
}

Image generation and editing with safety settings and Google Search grounding (experimental). Includes utility class for extracting generated images from responses.
public class GoogleAiGeminiImageModel {
public static GoogleAiGeminiImageModelBuilder builder();
public String modelName();
public Response<Image> generate(String prompt);
public Response<Image> edit(Image image, String prompt);
public Response<Image> edit(Image image, Image mask, String prompt);
}
public class GeneratedImageHelper {
public static List<Image> getGeneratedImages(AiMessage aiMessage);
public static boolean hasGeneratedImages(AiMessage aiMessage);
}

Gemini Files API for uploading and managing files (images, videos, audio, PDFs) with up to 20GB project capacity and 2GB per-file limit.
public class GeminiFiles {
public static Builder builder();
public GeminiFile uploadFile(Path filePath, String displayName);
public GeminiFile uploadFile(byte[] fileBytes, String mimeType, String name);
public GeminiFile getMetadata(String name);
public List<GeminiFile> listFiles();
public void deleteFile(String name);
}
public record GeminiFile(
String name,
String displayName,
String mimeType,
Long sizeBytes,
String createTime,
String updateTime,
String expirationTime,
String sha256Hash,
String uri,
String state
) {
public boolean isActive();
public boolean isProcessing();
public boolean isFailed();
}

Token count estimation for text, messages, and tool specifications to manage API costs and context limits.
public class GoogleAiGeminiTokenCountEstimator {
public static Builder builder();
public int estimateTokenCountInText(String text);
public int estimateTokenCountInMessage(ChatMessage message);
public int estimateTokenCountInMessages(Iterable<ChatMessage> messages);
public int estimateTokenCountInToolExecutionRequests(Iterable<ToolExecutionRequest> requests);
public int estimateTokenCountInToolSpecifications(Iterable<ToolSpecification> specifications);
}

List available Gemini models and their capabilities.
public class GoogleAiGeminiModelCatalog {
public static Builder builder();
public List<ModelDescription> listModels();
public ModelProvider provider();
}

Content safety filtering configuration to control harmful content blocking.
public class GeminiSafetySetting {
public GeminiSafetySetting(GeminiHarmCategory category, GeminiHarmBlockThreshold threshold);
public GeminiHarmCategory getCategory();
public GeminiHarmBlockThreshold getThreshold();
public void setCategory(GeminiHarmCategory category);
public void setThreshold(GeminiHarmBlockThreshold threshold);
}
public enum GeminiHarmCategory {
HARM_CATEGORY_HATE_SPEECH,
HARM_CATEGORY_SEXUALLY_EXPLICIT,
HARM_CATEGORY_DANGEROUS_CONTENT,
HARM_CATEGORY_HARASSMENT,
HARM_CATEGORY_CIVIC_INTEGRITY
}
public enum GeminiHarmBlockThreshold {
HARM_BLOCK_THRESHOLD_UNSPECIFIED,
BLOCK_LOW_AND_ABOVE,
BLOCK_MEDIUM_AND_ABOVE,
BLOCK_ONLY_HIGH,
BLOCK_NONE
}

Fine-grained control over generation behavior including structured output schemas, stop sequences, penalties, and response modalities.
public record GeminiGenerationConfig(
List<String> stopSequences,
String responseMimeType,
Object responseSchema,
Map<String, Object> responseJsonSchema,
Integer candidateCount,
Integer maxOutputTokens,
Double temperature,
Integer topK,
Integer seed,
Double topP,
Double presencePenalty,
Double frequencyPenalty,
GeminiThinkingConfig thinkingConfig,
Boolean responseLogprobs,
Boolean enableEnhancedCivicAnswers,
List<GeminiResponseModality> responseModalities,
GeminiImageConfig imageConfig,
Integer logprobs,
GeminiMediaResolutionLevel mediaResolution
) {
public static Builder builder();
}

Chat response metadata including grounding information from web search, Google Maps, and retrieval operations.
public class GoogleAiGeminiChatResponseMetadata {
public static Builder builder();
public GroundingMetadata groundingMetadata();
public Builder toBuilder();
}
public record GroundingMetadata(
List<GroundingChunk> groundingChunks,
List<GroundingSupport> groundingSupports,
List<String> webSearchQueries,
SearchEntryPoint searchEntryPoint,
RetrievalMetadata retrievalMetadata,
String googleMapsWidgetContextToken
) {
public static Builder builder();
}

Sealed interfaces and records for batch job management and result retrieval.
public sealed interface BatchResponse<T> permits BatchIncomplete, BatchSuccess, BatchError {
// Permitted implementations in sub-doc
}
public record BatchList<T>(String pageToken, List<BatchResponse<T>> responses) {}
public record BatchName(String value) {}
public record BatchFileRequest<REQ>(String key, REQ request) {}

Interface for writing batch request files in JSON Lines format.
public interface JsonLinesWriter {
void write(Object object);
void write(Iterable<?> objects);
void flush();
void close();
}
public class JsonLinesWriters {
public static JsonLinesWriter streaming(Path path);
public static JsonLinesWriter streaming(OutputStream outputStream);
}

Install with Tessl CLI
npx tessl i tessl/maven-dev-langchain4j--langchain4j-google-ai-gemini