LangChain4j integration for Google AI Gemini models providing chat, streaming, embeddings, image generation, and batch processing capabilities
Model catalog for discovering and listing available Google AI Gemini models with their capabilities, input/output token limits, supported features, and version information. Essential for programmatically discovering model options and their specifications.
Main catalog class for listing and discovering available Gemini models.
/**
* Model catalog for listing available Google AI Gemini models.
* Provides programmatic access to model metadata and capabilities.
*/
public class GoogleAiGeminiModelCatalog {
/**
* Creates a new builder for configuring the model catalog.
* @return Builder instance
*/
public static Builder builder();
/**
* Lists all available Gemini models.
* Returns detailed information about each model including capabilities,
* token limits, supported features, and version information.
* @return List of ModelDescription objects
*/
public List<ModelDescription> listModels();
/**
* Returns the model provider (GOOGLE).
* @return ModelProvider enum value
*/
public ModelProvider provider();
}

Builder class for constructing GoogleAiGeminiModelCatalog instances.
/**
* Builder for GoogleAiGeminiModelCatalog.
*/
public static class Builder {
/**
* Sets the HTTP client builder for customizing requests.
* @param httpClientBuilder HTTP client builder instance
* @return Builder instance for chaining
*/
public Builder httpClientBuilder(HttpClientBuilder httpClientBuilder);
/**
* Sets the base URL for the API endpoint.
* @param baseUrl Custom base URL (optional)
* @return Builder instance for chaining
*/
public Builder baseUrl(String baseUrl);
/**
* Sets the API key for authentication (required).
* @param apiKey Google AI API key
* @return Builder instance for chaining
*/
public Builder apiKey(String apiKey);
/**
* Enables logging of both requests and responses.
* @param logRequestsAndResponses True to enable full logging
* @return Builder instance for chaining
*/
public Builder logRequestsAndResponses(Boolean logRequestsAndResponses);
/**
* Enables logging of requests only.
* @param logRequests True to enable request logging
* @return Builder instance for chaining
*/
public Builder logRequests(Boolean logRequests);
/**
* Enables logging of responses only.
* @param logResponses True to enable response logging
* @return Builder instance for chaining
*/
public Builder logResponses(Boolean logResponses);
/**
* Sets a custom logger for the catalog.
* @param logger Logger instance
* @return Builder instance for chaining
*/
public Builder logger(Logger logger);
/**
* Sets the request timeout duration.
* @param timeout Timeout duration
* @return Builder instance for chaining
*/
public Builder timeout(Duration timeout);
/**
* Builds the GoogleAiGeminiModelCatalog instance.
* @return Configured GoogleAiGeminiModelCatalog
* @throws IllegalArgumentException if required fields are missing
*/
public GoogleAiGeminiModelCatalog build();
}

import dev.langchain4j.model.googleai.GoogleAiGeminiModelCatalog;
import dev.langchain4j.model.chat.ModelDescription;
// Create model catalog
GoogleAiGeminiModelCatalog catalog = GoogleAiGeminiModelCatalog.builder()
.apiKey(System.getenv("GOOGLE_AI_API_KEY"))
.build();
// List all available models
List<ModelDescription> models = catalog.listModels();
System.out.println("Available Gemini models: " + models.size());
for (ModelDescription model : models) {
System.out.println("- " + model.name());
}

GoogleAiGeminiModelCatalog catalog = GoogleAiGeminiModelCatalog.builder()
.apiKey(System.getenv("GOOGLE_AI_API_KEY"))
.build();
List<ModelDescription> models = catalog.listModels();
for (ModelDescription model : models) {
System.out.println("\nModel: " + model.name());
System.out.println(" Display Name: " + model.displayName());
System.out.println(" Description: " + model.description());
System.out.println(" Input Token Limit: " + model.inputTokenLimit());
System.out.println(" Output Token Limit: " + model.outputTokenLimit());
System.out.println(" Supported Generation Methods: " + model.supportedGenerationMethods());
System.out.println(" Version: " + model.version());
}GoogleAiGeminiModelCatalog catalog = GoogleAiGeminiModelCatalog.builder()
.apiKey(System.getenv("GOOGLE_AI_API_KEY"))
.build();
String targetModel = "gemini-2.5-pro";
List<ModelDescription> models = catalog.listModels();
ModelDescription geminiPro = models.stream()
.filter(m -> m.name().contains(targetModel))
.findFirst()
.orElse(null);
if (geminiPro != null) {
System.out.println("Found " + geminiPro.name());
System.out.println("Input limit: " + geminiPro.inputTokenLimit());
System.out.println("Output limit: " + geminiPro.outputTokenLimit());
} else {
System.out.println("Model not found: " + targetModel);
}GoogleAiGeminiModelCatalog catalog = GoogleAiGeminiModelCatalog.builder()
.apiKey(System.getenv("GOOGLE_AI_API_KEY"))
.build();
List<ModelDescription> models = catalog.listModels();
// Find models supporting streaming
List<ModelDescription> streamingModels = models.stream()
.filter(m -> m.supportedGenerationMethods().contains("generateContentStream"))
.toList();
System.out.println("Models with streaming support:");
streamingModels.forEach(m -> System.out.println("- " + m.name()));
// Find models with large context windows
List<ModelDescription> largeContextModels = models.stream()
.filter(m -> m.inputTokenLimit() != null && m.inputTokenLimit() > 100000)
.toList();
System.out.println("\nModels with >100K token context:");
largeContextModels.forEach(m ->
System.out.println("- " + m.name() + " (" + m.inputTokenLimit() + " tokens)")
);GoogleAiGeminiModelCatalog catalog = GoogleAiGeminiModelCatalog.builder()
.apiKey(System.getenv("GOOGLE_AI_API_KEY"))
.build();
/**
 * Picks the smallest-capacity model that satisfies the given requirements.
 * "Best" here means the lowest input token limit that still meets the
 * required capacity (and streaming support, if requested) — typically the
 * most cost-effective choice.
 *
 * @param requiredInputTokens minimum input token capacity the model must offer
 * @param needsStreaming      when true, only models whose generation methods
 *                            include "generateContentStream" qualify
 * @return the qualifying model with the smallest input token limit, or null
 *         if no model qualifies
 */
public ModelDescription selectBestModel(int requiredInputTokens, boolean needsStreaming) {
    ModelDescription best = null;
    for (ModelDescription candidate : catalog.listModels()) {
        var limit = candidate.inputTokenLimit();
        if (limit == null || limit < requiredInputTokens) {
            continue; // unknown or insufficient capacity
        }
        if (needsStreaming
                && !candidate.supportedGenerationMethods().contains("generateContentStream")) {
            continue; // streaming required but not supported
        }
        // Strict '<' keeps the first-listed model on ties, matching Stream.min().
        if (best == null || limit < best.inputTokenLimit()) {
            best = candidate;
        }
    }
    return best;
}
// Use the helper
ModelDescription selected = selectBestModel(50000, true);
if (selected != null) {
System.out.println("Selected model: " + selected.name());
} else {
System.out.println("No suitable model found");
}GoogleAiGeminiModelCatalog catalog = GoogleAiGeminiModelCatalog.builder()
.apiKey(System.getenv("GOOGLE_AI_API_KEY"))
.build();
List<ModelDescription> models = catalog.listModels();
// Compare Pro vs Flash models
ModelDescription pro = models.stream()
.filter(m -> m.name().contains("gemini-2.5-pro"))
.findFirst()
.orElse(null);
ModelDescription flash = models.stream()
.filter(m -> m.name().contains("gemini-2.5-flash"))
.findFirst()
.orElse(null);
if (pro != null && flash != null) {
System.out.println("Gemini Pro vs Flash Comparison:");
System.out.println("\nPro Model:");
System.out.println(" Input: " + pro.inputTokenLimit());
System.out.println(" Output: " + pro.outputTokenLimit());
System.out.println("\nFlash Model:");
System.out.println(" Input: " + flash.inputTokenLimit());
System.out.println(" Output: " + flash.outputTokenLimit());
}import dev.langchain4j.model.googleai.GoogleAiGeminiChatModel;
GoogleAiGeminiModelCatalog catalog = GoogleAiGeminiModelCatalog.builder()
.apiKey(System.getenv("GOOGLE_AI_API_KEY"))
.build();
// Get the latest Pro model
List<ModelDescription> models = catalog.listModels();
ModelDescription latestPro = models.stream()
.filter(m -> m.name().contains("gemini") && m.name().contains("pro"))
.max(Comparator.comparing(ModelDescription::version))
.orElse(null);
if (latestPro != null) {
// Create chat model with discovered model
GoogleAiGeminiChatModel chatModel = GoogleAiGeminiChatModel.builder()
.apiKey(System.getenv("GOOGLE_AI_API_KEY"))
.modelName(latestPro.name())
.maxOutputTokens(latestPro.outputTokenLimit())
.build();
System.out.println("Using model: " + latestPro.name());
}GoogleAiGeminiModelCatalog catalog = GoogleAiGeminiModelCatalog.builder()
.apiKey(System.getenv("GOOGLE_AI_API_KEY"))
.build();
List<ModelDescription> models = catalog.listModels();
for (ModelDescription model : models) {
System.out.println("\n" + model.name() + " capabilities:");
if (model.supportedGenerationMethods() != null) {
System.out.println(" Generation methods:");
for (String method : model.supportedGenerationMethods()) {
System.out.println(" - " + method);
}
}
System.out.println(" Context window: " + model.inputTokenLimit() + " tokens");
System.out.println(" Max output: " + model.outputTokenLimit() + " tokens");
}GoogleAiGeminiModelCatalog catalog = GoogleAiGeminiModelCatalog.builder()
.apiKey(System.getenv("GOOGLE_AI_API_KEY"))
.build();
/**
 * Checks whether any available model's name contains the given identifier.
 * A substring match lets both full names (e.g. "models/gemini-2.5-pro") and
 * shorthand identifiers (e.g. "gemini-2.5-pro") resolve.
 *
 * @param modelName full or partial model name to look for (must not be null)
 * @return true if at least one available model matches
 */
public boolean isModelAvailable(String modelName) {
    // contains(modelName) is already true for an exact match, so the
    // separate equals() check in the original was redundant.
    return catalog.listModels().stream()
            .anyMatch(m -> m.name().contains(modelName));
}
// Check availability
if (isModelAvailable("gemini-2.5-pro")) {
System.out.println("Gemini 2.5 Pro is available");
} else {
System.out.println("Gemini 2.5 Pro is not available");
}GoogleAiGeminiModelCatalog catalog = GoogleAiGeminiModelCatalog.builder()
.apiKey(System.getenv("GOOGLE_AI_API_KEY"))
.build();
/**
 * Returns all models whose input token limit is known and at least the given
 * minimum, ordered from smallest to largest context window.
 *
 * @param minInputTokens minimum acceptable input token capacity
 * @return unmodifiable list of qualifying models, ascending by input token limit
 */
public List<ModelDescription> findModelsWithMinTokens(int minInputTokens) {
    Comparator<ModelDescription> byContextSize =
            Comparator.comparing(ModelDescription::inputTokenLimit);
    return catalog.listModels().stream()
            .filter(m -> m.inputTokenLimit() != null
                    && m.inputTokenLimit() >= minInputTokens)
            .sorted(byContextSize)
            .toList();
}
// Find models with at least 500K token context
List<ModelDescription> largeContextModels = findModelsWithMinTokens(500000);
System.out.println("Models with 500K+ token context:");
largeContextModels.forEach(m ->
System.out.println("- " + m.displayName() + ": " + m.inputTokenLimit() + " tokens")
);GoogleAiGeminiModelCatalog catalog = GoogleAiGeminiModelCatalog.builder()
.apiKey(System.getenv("GOOGLE_AI_API_KEY"))
.build();
List<ModelDescription> models = catalog.listModels();
// Group models by base name
Map<String, List<ModelDescription>> modelFamilies = models.stream()
.collect(Collectors.groupingBy(m -> {
// Extract base name (e.g., "gemini-2.5-pro" from "gemini-2.5-pro-001")
String name = m.name();
int lastDash = name.lastIndexOf('-');
if (lastDash > 0 && name.substring(lastDash + 1).matches("\\d+")) {
return name.substring(0, lastDash);
}
return name;
}));
System.out.println("Model families:");
modelFamilies.forEach((family, versions) -> {
System.out.println("\n" + family + " (" + versions.size() + " versions):");
versions.forEach(v -> System.out.println(" - " + v.name() + " (v" + v.version() + ")"));
});import com.google.gson.Gson;
import com.google.gson.GsonBuilder;
import java.nio.file.Files;
import java.nio.file.Path;
GoogleAiGeminiModelCatalog catalog = GoogleAiGeminiModelCatalog.builder()
.apiKey(System.getenv("GOOGLE_AI_API_KEY"))
.build();
List<ModelDescription> models = catalog.listModels();
// Export to JSON
Gson gson = new GsonBuilder().setPrettyPrinting().create();
String json = gson.toJson(models);
Files.writeString(Path.of("gemini-models.json"), json);
System.out.println("Model catalog exported to gemini-models.json");GoogleAiGeminiModelCatalog catalog = GoogleAiGeminiModelCatalog.builder()
.apiKey(System.getenv("GOOGLE_AI_API_KEY"))
.build();
/**
 * Suggests a model for a named use case.
 * Recognized use cases (matched case-insensitively):
 *   "fast"         — first model whose name contains "flash"
 *   "quality"      — "pro" model with the largest input token limit
 *   "lightweight"  — first model whose name contains "8b"
 *   "long-context" — model with the largest known input token limit
 * Any other value falls back to the first listed model.
 *
 * @param useCase use-case keyword
 * @return recommended model, or null when no candidate matches
 */
public ModelDescription recommendModel(String useCase) {
    List<ModelDescription> available = catalog.listModels();
    String key = useCase.toLowerCase();
    if (key.equals("fast")) {
        return available.stream()
                .filter(m -> m.name().contains("flash"))
                .findFirst()
                .orElse(null);
    }
    if (key.equals("quality")) {
        return available.stream()
                .filter(m -> m.name().contains("pro"))
                .max(Comparator.comparing(ModelDescription::inputTokenLimit))
                .orElse(null);
    }
    if (key.equals("lightweight")) {
        return available.stream()
                .filter(m -> m.name().contains("8b"))
                .findFirst()
                .orElse(null);
    }
    if (key.equals("long-context")) {
        return available.stream()
                .filter(m -> m.inputTokenLimit() != null)
                .max(Comparator.comparing(ModelDescription::inputTokenLimit))
                .orElse(null);
    }
    return available.stream().findFirst().orElse(null);
}
// Get recommendations
ModelDescription fastModel = recommendModel("fast");
ModelDescription qualityModel = recommendModel("quality");
ModelDescription longContextModel = recommendModel("long-context");
System.out.println("Recommendations:");
if (fastModel != null) {
System.out.println(" Fast: " + fastModel.name());
}
if (qualityModel != null) {
System.out.println(" Quality: " + qualityModel.name());
}
if (longContextModel != null) {
System.out.println(" Long Context: " + longContextModel.name() +
" (" + longContextModel.inputTokenLimit() + " tokens)");
}

import java.util.concurrent.TimeUnit;
/**
 * Caches the Gemini model catalog to avoid repeated API calls.
 * The cached list is refreshed at most once per 24 hours. Access is
 * serialized through the synchronized getModels(), making the class safe to
 * share across threads — the unsynchronized original had a check-then-act
 * race on cachedModels/lastFetchTime that could trigger duplicate fetches
 * or expose a partially updated cache.
 */
public class CachedModelCatalog {
    private final GoogleAiGeminiModelCatalog catalog;
    private List<ModelDescription> cachedModels; // guarded by "this"
    private long lastFetchTime;                  // guarded by "this"
    private static final long CACHE_DURATION_MS = TimeUnit.HOURS.toMillis(24);

    /**
     * @param apiKey Google AI API key used for catalog requests
     */
    public CachedModelCatalog(String apiKey) {
        this.catalog = GoogleAiGeminiModelCatalog.builder()
            .apiKey(apiKey)
            .build();
    }

    /**
     * Returns the model list, fetching from the API only when the cache is
     * empty or older than CACHE_DURATION_MS.
     *
     * @return a defensive copy of the cached model list
     */
    public synchronized List<ModelDescription> getModels() {
        long now = System.currentTimeMillis();
        if (cachedModels == null || (now - lastFetchTime) > CACHE_DURATION_MS) {
            cachedModels = catalog.listModels();
            lastFetchTime = now;
            System.out.println("Refreshed model catalog cache");
        }
        // Copy so callers cannot mutate the cached state.
        return new ArrayList<>(cachedModels);
    }
}
// Usage
CachedModelCatalog cachedCatalog = new CachedModelCatalog(
System.getenv("GOOGLE_AI_API_KEY")
);
// First call fetches from API
List<ModelDescription> models1 = cachedCatalog.getModels();
// Subsequent calls use cache
List<ModelDescription> models2 = cachedCatalog.getModels();

import java.time.Duration;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
Logger logger = LoggerFactory.getLogger("ModelCatalog");
GoogleAiGeminiModelCatalog catalog = GoogleAiGeminiModelCatalog.builder()
.apiKey(System.getenv("GOOGLE_AI_API_KEY"))
.timeout(Duration.ofSeconds(15))
.logRequestsAndResponses(true)
.logger(logger)
.build();
List<ModelDescription> models = catalog.listModels();
System.out.println("Fetched " + models.size() + " models");import dev.langchain4j.model.provider.ModelProvider;
GoogleAiGeminiModelCatalog catalog = GoogleAiGeminiModelCatalog.builder()
.apiKey(System.getenv("GOOGLE_AI_API_KEY"))
.build();
ModelProvider provider = catalog.provider();
System.out.println("Provider: " + provider); // GOOGLE
List<ModelDescription> models = catalog.listModels();
System.out.println("Available " + provider + " models: " + models.size());

The ModelDescription class contains:
GoogleAiGeminiModelCatalog catalog = GoogleAiGeminiModelCatalog.builder()
.apiKey(System.getenv("GOOGLE_AI_API_KEY"))
.build();
// Select production model based on requirements
ModelDescription productionModel = catalog.listModels().stream()
.filter(m -> !m.name().contains("exp")) // Exclude experimental
.filter(m -> m.inputTokenLimit() >= 100000) // Need large context
.filter(m -> m.supportedGenerationMethods().contains("generateContentStream"))
.findFirst()
.orElseThrow(() -> new RuntimeException("No suitable model found"));
System.out.println("Production model: " + productionModel.name());GoogleAiGeminiModelCatalog catalog = GoogleAiGeminiModelCatalog.builder()
.apiKey(System.getenv("GOOGLE_AI_API_KEY"))
.build();
// Choose most cost-effective model for use case
List<ModelDescription> models = catalog.listModels();
// Flash models are typically more cost-effective
ModelDescription costEffective = models.stream()
.filter(m -> m.name().contains("flash"))
.filter(m -> !m.name().contains("exp"))
.findFirst()
.orElse(null);
if (costEffective != null) {
System.out.println("Cost-effective model: " + costEffective.name());
}GoogleAiGeminiModelCatalog catalog = GoogleAiGeminiModelCatalog.builder()
.apiKey(System.getenv("GOOGLE_AI_API_KEY"))
.build();
/**
 * Builds an ordered fallback chain of stable Gemini model names, largest
 * context window first. Experimental ("exp") models are excluded, as are
 * models without a known input token limit — without that filter,
 * Comparator.comparing(ModelDescription::inputTokenLimit) throws a
 * NullPointerException on a null limit (the other examples in this file all
 * apply the same null guard before comparing on this field).
 *
 * @return model names ordered by descending input token limit
 */
public List<String> getModelFallbackChain() {
    List<ModelDescription> models = catalog.listModels();
    return models.stream()
        .filter(m -> m.name().contains("gemini"))
        .filter(m -> !m.name().contains("exp"))
        .filter(m -> m.inputTokenLimit() != null) // avoid NPE in comparator
        .sorted(Comparator.comparing(ModelDescription::inputTokenLimit).reversed())
        .map(ModelDescription::name)
        .toList();
}
// Use fallback chain
List<String> fallbackChain = getModelFallbackChain();
System.out.println("Model fallback order:");
fallbackChain.forEach(m -> System.out.println("  " + m));

GoogleAiGeminiModelCatalog integrates with LangChain4j's model discovery infrastructure:
import dev.langchain4j.model.catalog.ModelCatalog;
ModelCatalog catalog = GoogleAiGeminiModelCatalog.builder()
.apiKey(System.getenv("GOOGLE_AI_API_KEY"))
.build();
// Use with LangChain4j's model abstraction
List<ModelDescription> models = catalog.listModels();

Install with Tessl CLI
npx tessl i tessl/maven-dev-langchain4j--langchain4j-google-ai-gemini@1.11.0