Quarkus deployment extension for LangChain4j integration providing build-time processing, BuildItem APIs, and configuration for integrating Large Language Models into Quarkus applications
This document covers all build-time configuration classes for the Quarkus LangChain4j deployment extension.
Quarkus uses SmallRye Config for configuration management. Build-time configuration uses:
- @ConfigRoot(phase = BUILD_TIME) - Marks a root configuration interface
- @ConfigMapping(prefix = "...") - Maps to a configuration prefix
- @ConfigGroup - Groups related configuration properties
- @WithDefault("...") - Provides default values
- @WithParentName - Uses parent's prefix instead of adding a new level

Configuration is injected into BuildStep methods as parameters:
@BuildStep
void useBuildConfig(LangChain4jBuildConfig config) {
// Access build-time configuration
boolean devServicesEnabled = config.devservices().enabled();
String provider = config.defaultConfig().chatModel().provider().orElse("default");
}

Package: io.quarkiverse.langchain4j.deployment.config
Root build-time configuration for LangChain4j.
@ConfigRoot(phase = BUILD_TIME)
@ConfigMapping(prefix = "quarkus.langchain4j")
public interface LangChain4jBuildConfig {
/**
* Default model configuration.
*/
@WithParentName
@ConfigDocSection
BaseConfig defaultConfig();
/**
* Named model configurations.
* Map key is the model name.
*/
@WithParentName
@ConfigDocMapKey("model-name")
@ConfigDocSection
Map<String, BaseConfig> namedConfig();
/**
* DevServices related configuration.
*/
DevServicesConfig devservices();
/**
* Enable or disable the {response schema} placeholder
* in @SystemMessage/@UserMessage.
* Default: true
*/
@WithDefault("true")
boolean responseSchema();
}

Configuration Example:
# Default model configuration
quarkus.langchain4j.chat-model.provider=openai
# Named model configuration
quarkus.langchain4j.my-model.chat-model.provider=ollama
# DevServices
quarkus.langchain4j.devservices.enabled=true
quarkus.langchain4j.devservices.port=11434
# Response schema placeholder
quarkus.langchain4j.response-schema=true

Usage in BuildStep:
@BuildStep
void processConfig(LangChain4jBuildConfig config) {
// Access default configuration
BaseConfig defaultConfig = config.defaultConfig();
Optional<String> provider = defaultConfig.chatModel().provider();
// Access named configurations
Map<String, BaseConfig> namedConfigs = config.namedConfig();
for (Map.Entry<String, BaseConfig> entry : namedConfigs.entrySet()) {
String name = entry.getKey();
BaseConfig modelConfig = entry.getValue();
// Process named configuration
}
// DevServices configuration
DevServicesConfig devServices = config.devservices();
boolean enabled = devServices.enabled();
int port = devServices.port();
}

Nested interface within LangChain4jBuildConfig for model configuration.
interface BaseConfig {
/**
* Chat model configuration.
*/
ChatModelConfig chatModel();
/**
* Rerank/scoring model configuration.
*/
ScoringModelConfig scoringModel();
/**
* Embedding model configuration.
*/
EmbeddingModelConfig embeddingModel();
/**
* Moderation model configuration.
*/
ModerationModelConfig moderationModel();
/**
* Image model configuration.
*/
ImageModelConfig imageModel();
}

Usage:
BaseConfig config = buildConfig.defaultConfig();
// Get chat model provider
Optional<String> chatProvider = config.chatModel().provider();
// Get embedding model provider
Optional<String> embeddingProvider = config.embeddingModel().provider();
// Get all model types
Optional<String> imageProvider = config.imageModel().provider();
Optional<String> moderationProvider = config.moderationModel().provider();
Optional<String> scoringProvider = config.scoringModel().provider();

Nested interface for DevServices configuration.
@ConfigGroup
interface DevServicesConfig {
/**
* Enable or disable DevServices.
* DevServices is generally enabled by default unless
* existing configuration is present.
* Default: true
*/
@WithDefault("true")
boolean enabled();
/**
* Default port where the inference server listens.
* Default: 11434 (Ollama default)
*/
@WithDefault("11434")
Integer port();
/**
* Instructs Ollama to preload models for faster response times.
* Default: true
*/
@WithDefault("true")
boolean preload();
}

Configuration Example:
quarkus.langchain4j.devservices.enabled=true
quarkus.langchain4j.devservices.port=11434
quarkus.langchain4j.devservices.preload=true

Package: io.quarkiverse.langchain4j.deployment
Chat memory build-time configuration.
@ConfigRoot(phase = BUILD_TIME)
@ConfigMapping(prefix = "quarkus.langchain4j.chat-memory")
public interface ChatMemoryBuildConfig {
/**
* The type of chat memory to use.
* Options: MESSAGE_WINDOW, TOKEN_WINDOW
*/
Type type();
/**
* Chat memory type enumeration.
*/
enum Type {
MESSAGE_WINDOW,
TOKEN_WINDOW
}
}

Configuration Example:
quarkus.langchain4j.chat-memory.type=MESSAGE_WINDOW

Usage in BuildStep:
@BuildStep
void configureChatMemory(ChatMemoryBuildConfig config) {
ChatMemoryBuildConfig.Type memoryType = config.type();
switch (memoryType) {
case MESSAGE_WINDOW:
// Configure message window memory
break;
case TOKEN_WINDOW:
// Configure token window memory
break;
}
}

Configuration groups are reusable configuration fragments that can be nested within root configurations.
Package: io.quarkiverse.langchain4j.deployment.config
Chat model provider configuration.
@ConfigGroup
public interface ChatModelConfig {
/**
* The chat model provider to use.
* Examples: "openai", "ollama", "huggingface", etc.
*/
Optional<String> provider();
}

Configuration Example:
# Default chat model
quarkus.langchain4j.chat-model.provider=openai
# Named chat model
quarkus.langchain4j.my-model.chat-model.provider=ollama

Usage:
BaseConfig config = buildConfig.defaultConfig();
Optional<String> provider = config.chatModel().provider();
if (provider.isPresent()) {
String providerName = provider.get();
// Configure for this provider
}

Package: io.quarkiverse.langchain4j.deployment.config
Embedding model provider configuration.
@ConfigGroup
public interface EmbeddingModelConfig {
/**
* The embedding model provider to use.
* Examples: "openai", "ollama", "huggingface", etc.
*/
Optional<String> provider();
}

Configuration Example:
quarkus.langchain4j.embedding-model.provider=openai
quarkus.langchain4j.my-model.embedding-model.provider=huggingface

Package: io.quarkiverse.langchain4j.deployment.config
Image model provider configuration.
@ConfigGroup
public interface ImageModelConfig {
/**
* The image model provider to use.
* Examples: "openai", "stability", etc.
*/
Optional<String> provider();
}

Configuration Example:
quarkus.langchain4j.image-model.provider=openai

Package: io.quarkiverse.langchain4j.deployment.config
Moderation model provider configuration.
@ConfigGroup
public interface ModerationModelConfig {
/**
* The moderation model provider to use.
* Examples: "openai", etc.
*/
Optional<String> provider();
}

Configuration Example:
quarkus.langchain4j.moderation-model.provider=openai

Package: io.quarkiverse.langchain4j.deployment.config
Scoring/rerank model provider configuration.
@ConfigGroup
public interface ScoringModelConfig {
/**
* The scoring/rerank model provider to use.
* Examples: "cohere", etc.
*/
Optional<String> provider();
}

Configuration Example:
quarkus.langchain4j.scoring-model.provider=cohere

LangChain4j supports both default and named model configurations:
# Default configuration (unnamed)
quarkus.langchain4j.chat-model.provider=openai
quarkus.langchain4j.embedding-model.provider=openai
# Named configuration
quarkus.langchain4j.customer-service.chat-model.provider=ollama
quarkus.langchain4j.customer-service.embedding-model.provider=huggingface
quarkus.langchain4j.analytics.chat-model.provider=openai

Accessing in BuildStep:
@BuildStep
void processConfigurations(LangChain4jBuildConfig config) {
// Default configuration
BaseConfig defaultConfig = config.defaultConfig();
processModelConfig("default", defaultConfig);
// Named configurations
Map<String, BaseConfig> namedConfigs = config.namedConfig();
for (Map.Entry<String, BaseConfig> entry : namedConfigs.entrySet()) {
String name = entry.getKey();
BaseConfig modelConfig = entry.getValue();
processModelConfig(name, modelConfig);
}
}
void processModelConfig(String name, BaseConfig config) {
config.chatModel().provider().ifPresent(provider -> {
// Process chat model provider for this config
});
config.embeddingModel().provider().ifPresent(provider -> {
// Process embedding model provider for this config
});
}

Use configuration to determine which provider to use:
@BuildStep
void selectProvider(
LangChain4jBuildConfig config,
List<ChatModelProviderCandidateBuildItem> candidates,
BuildProducer<SelectedChatModelProviderBuildItem> selected) {
// Get configured provider
Optional<String> configuredProvider =
config.defaultConfig().chatModel().provider();
if (configuredProvider.isPresent()) {
String provider = configuredProvider.get();
// Verify provider is registered
boolean isRegistered = candidates.stream()
.anyMatch(c -> provider.equals(c.getProvider()));
if (isRegistered) {
selected.produce(new SelectedChatModelProviderBuildItem(
provider, "default"
));
} else {
throw new IllegalStateException(
"Provider '" + provider + "' not found"
);
}
}
}

Check DevServices configuration to conditionally start services:
@BuildStep(onlyIfNot = IsNormal.class) // Only in dev/test mode
void maybeStartDevServices(
LangChain4jBuildConfig config,
BuildProducer<DevServicesResultBuildItem> devServices) {
DevServicesConfig devServicesConfig = config.devservices();
if (!devServicesConfig.enabled()) {
// DevServices disabled
return;
}
int port = devServicesConfig.port();
boolean preload = devServicesConfig.preload();
// Start DevServices container
// ...
}

Process all model types in a single BuildStep:
@BuildStep
void configureAllModels(LangChain4jBuildConfig config) {
BaseConfig baseConfig = config.defaultConfig();
// Chat model
baseConfig.chatModel().provider().ifPresent(provider -> {
configureChatModel(provider);
});
// Embedding model
baseConfig.embeddingModel().provider().ifPresent(provider -> {
configureEmbeddingModel(provider);
});
// Image model
baseConfig.imageModel().provider().ifPresent(provider -> {
configureImageModel(provider);
});
// Moderation model
baseConfig.moderationModel().provider().ifPresent(provider -> {
configureModerationModel(provider);
});
// Scoring model
baseConfig.scoringModel().provider().ifPresent(provider -> {
configureScoringModel(provider);
});
}

Here is a comprehensive example showing all configuration options:
# Default model configuration
quarkus.langchain4j.chat-model.provider=openai
quarkus.langchain4j.embedding-model.provider=openai
quarkus.langchain4j.image-model.provider=openai
quarkus.langchain4j.moderation-model.provider=openai
# Named configuration for customer service
quarkus.langchain4j.customer-service.chat-model.provider=ollama
quarkus.langchain4j.customer-service.embedding-model.provider=huggingface
# Named configuration for analytics
quarkus.langchain4j.analytics.chat-model.provider=openai
quarkus.langchain4j.analytics.scoring-model.provider=cohere
# Chat memory configuration
quarkus.langchain4j.chat-memory.type=MESSAGE_WINDOW
# DevServices configuration
quarkus.langchain4j.devservices.enabled=true
quarkus.langchain4j.devservices.port=11434
quarkus.langchain4j.devservices.preload=true
# Response schema
quarkus.langchain4j.response-schema=true

Complete example showing how to use all configuration classes:
import io.quarkiverse.langchain4j.deployment.ChatMemoryBuildConfig;
import io.quarkiverse.langchain4j.deployment.config.LangChain4jBuildConfig;
import io.quarkus.deployment.annotations.BuildStep;
public class MyProcessor {
@BuildStep
void processConfiguration(
LangChain4jBuildConfig langchainConfig,
ChatMemoryBuildConfig memoryConfig) {
// Process default configuration
processBaseConfig("default", langchainConfig.defaultConfig());
// Process named configurations
langchainConfig.namedConfig().forEach((name, config) -> {
processBaseConfig(name, config);
});
// Process DevServices
LangChain4jBuildConfig.DevServicesConfig devServices =
langchainConfig.devservices();
if (devServices.enabled()) {
configureDevServices(devServices.port(), devServices.preload());
}
// Process chat memory
ChatMemoryBuildConfig.Type memoryType = memoryConfig.type();
configureChatMemory(memoryType);
// Check response schema setting
boolean responseSchema = langchainConfig.responseSchema();
}
private void processBaseConfig(String name, LangChain4jBuildConfig.BaseConfig config) {
config.chatModel().provider().ifPresent(provider -> {
System.out.println("Chat model for " + name + ": " + provider);
});
config.embeddingModel().provider().ifPresent(provider -> {
System.out.println("Embedding model for " + name + ": " + provider);
});
config.imageModel().provider().ifPresent(provider -> {
System.out.println("Image model for " + name + ": " + provider);
});
config.moderationModel().provider().ifPresent(provider -> {
System.out.println("Moderation model for " + name + ": " + provider);
});
config.scoringModel().provider().ifPresent(provider -> {
System.out.println("Scoring model for " + name + ": " + provider);
});
}
private void configureDevServices(int port, boolean preload) {
// DevServices setup
}
private void configureChatMemory(ChatMemoryBuildConfig.Type type) {
// Memory configuration
}
}

Install with Tessl CLI
npx tessl i tessl/maven-io-quarkiverse-langchain4j--quarkus-langchain4j-core-deployment@1.7.0