tessl install tessl/maven-io-quarkiverse-langchain4j--quarkus-langchain4j-core@1.5.0

Quarkus LangChain4j Core provides runtime integration for LangChain4j with the Quarkus framework, enabling declarative AI service creation through CDI annotations.
Configuration capabilities enable customization of LangChain4j behavior through Microprofile Config, including logging, timeouts, temperature, guardrails, and tracing settings.
Main configuration interface for LangChain4j settings.
// Package: io.quarkiverse.langchain4j.runtime.config
/**
 * Main configuration interface for Quarkus LangChain4j.
 * Mapped to properties under the quarkus.langchain4j prefix
 * (e.g. quarkus.langchain4j.log-requests, quarkus.langchain4j.timeout).
 */
@ConfigMapping(prefix = "quarkus.langchain4j")
public interface LangChain4jConfig {
/**
 * Enable/disable request logging.
 * Property: quarkus.langchain4j.log-requests. Empty when the property is not set.
 */
Optional<Boolean> logRequests();
/**
 * Enable/disable response logging.
 * Property: quarkus.langchain4j.log-responses. Empty when the property is not set.
 */
Optional<Boolean> logResponses();
/**
 * Global timeout for model requests.
 * Property: quarkus.langchain4j.timeout, using duration syntax such as "60s".
 * Empty when the property is not set.
 */
Optional<Duration> timeout();
/**
 * Global temperature setting for model requests.
 * Typically ranges from 0.0 (deterministic) to 2.0 (creative).
 * Property: quarkus.langchain4j.temperature. Returned as an OptionalDouble,
 * so callers use orElse(...) to supply a fallback value.
 */
OptionalDouble temperature();
/**
 * Guardrails configuration, nested under the
 * quarkus.langchain4j.guardrails.* properties.
 */
GuardrailsConfig guardrails();
/**
 * Tracing configuration, nested under the
 * quarkus.langchain4j.tracing.* properties.
 */
TracingConfig tracing();
}Configuration for guardrail behavior.
// Package: io.quarkiverse.langchain4j.runtime.config
/**
 * Configuration for guardrails, bound under the
 * quarkus.langchain4j.guardrails prefix.
 */
public interface GuardrailsConfig {
/**
 * Maximum retry attempts for guardrail failures.
 * Property: quarkus.langchain4j.guardrails.max-retries.
 * Default: 3
 * NOTE(review): a bare primitive int mapping carries no implicit default —
 * the documented default of 3 presumably comes from a @WithDefault("3")
 * annotation in the real source; confirm, otherwise the property would be
 * mandatory at startup.
 */
int maxRetries();
}Configuration for OpenTelemetry tracing.
// Package: io.quarkiverse.langchain4j.runtime.config
/**
 * Configuration for OpenTelemetry tracing, bound under the
 * quarkus.langchain4j.tracing prefix.
 * NOTE(review): the accessors return boxed Boolean rather than
 * Optional&lt;Boolean&gt; or a defaulted primitive — presumably defaults are
 * declared via annotations in the real source; verify before relying on a
 * non-null return.
 */
public interface TracingConfig {
/**
 * Include prompt text in OpenTelemetry spans.
 * Property: quarkus.langchain4j.tracing.include-prompt.
 * May expose sensitive data in traces.
 */
Boolean includePrompt();
/**
 * Include completion text in OpenTelemetry spans.
 * Property: quarkus.langchain4j.tracing.include-completion.
 * May expose sensitive data in traces.
 */
Boolean includeCompletion();
/**
 * Include tool arguments in OpenTelemetry spans.
 * Property: quarkus.langchain4j.tracing.include-tool-arguments.
 * May expose sensitive data in traces.
 */
Boolean includeToolArguments();
/**
 * Include tool results in OpenTelemetry spans.
 * Property: quarkus.langchain4j.tracing.include-tool-result.
 * May expose sensitive data in traces.
 */
Boolean includeToolResult();
}Configuration for chat memory behavior.
// Package: io.quarkiverse.langchain4j.runtime.aiservice
/**
 * Configuration for chat memory, bound under the
 * quarkus.langchain4j.chat-memory prefix (e.g. chat-memory.type,
 * chat-memory.max-messages, chat-memory.redis.host/port).
 */
public interface ChatMemoryConfig {
// Configuration properties for chat memory behavior
// (member declarations elided in this documentation snippet).
}# Request/Response Logging
quarkus.langchain4j.log-requests=true
quarkus.langchain4j.log-responses=true
# Global Timeout
quarkus.langchain4j.timeout=60s
# Global Temperature
quarkus.langchain4j.temperature=0.7
# Guardrails
quarkus.langchain4j.guardrails.max-retries=5
# Tracing (use with caution - may expose sensitive data)
quarkus.langchain4j.tracing.include-prompt=false
quarkus.langchain4j.tracing.include-completion=false
quarkus.langchain4j.tracing.include-tool-arguments=true
quarkus.langchain4j.tracing.include-tool-result=true

# OpenAI GPT-4
quarkus.langchain4j.openai.gpt-4.api-key=${OPENAI_API_KEY}
quarkus.langchain4j.openai.gpt-4.model-name=gpt-4
quarkus.langchain4j.openai.gpt-4.temperature=0.7
quarkus.langchain4j.openai.gpt-4.timeout=60s
quarkus.langchain4j.openai.gpt-4.max-tokens=2000
quarkus.langchain4j.openai.gpt-4.log-requests=true
quarkus.langchain4j.openai.gpt-4.log-responses=true
# OpenAI GPT-3.5 Turbo
quarkus.langchain4j.openai.gpt-3-5-turbo.api-key=${OPENAI_API_KEY}
quarkus.langchain4j.openai.gpt-3-5-turbo.model-name=gpt-3.5-turbo
quarkus.langchain4j.openai.gpt-3-5-turbo.temperature=0.3
quarkus.langchain4j.openai.gpt-3-5-turbo.timeout=30s
quarkus.langchain4j.openai.gpt-3-5-turbo.max-tokens=1000
# Anthropic Claude
quarkus.langchain4j.anthropic.claude.api-key=${ANTHROPIC_API_KEY}
quarkus.langchain4j.anthropic.claude.model-name=claude-3-opus-20240229
quarkus.langchain4j.anthropic.claude.temperature=1.0
quarkus.langchain4j.anthropic.claude.max-tokens=4096

# In-memory chat memory
quarkus.langchain4j.chat-memory.type=in-memory
quarkus.langchain4j.chat-memory.max-messages=100
# Redis chat memory
quarkus.langchain4j.chat-memory.type=redis
quarkus.langchain4j.chat-memory.redis.host=localhost
quarkus.langchain4j.chat-memory.redis.port=6379

# In-memory embedding store
quarkus.langchain4j.embedding-store.type=in-memory
# Chroma embedding store
quarkus.langchain4j.embedding-store.type=chroma
quarkus.langchain4j.embedding-store.chroma.base-url=http://localhost:8000
# Redis embedding store
quarkus.langchain4j.embedding-store.type=redis
quarkus.langchain4j.embedding-store.redis.host=localhost
quarkus.langchain4j.embedding-store.redis.port=6379

# Development settings
quarkus.langchain4j.log-requests=true
quarkus.langchain4j.log-responses=true
quarkus.langchain4j.timeout=120s
quarkus.langchain4j.temperature=0.8
# Use cheaper models in development
quarkus.langchain4j.openai.dev.model-name=gpt-3.5-turbo

# Production settings
quarkus.langchain4j.log-requests=false
quarkus.langchain4j.log-responses=false
quarkus.langchain4j.timeout=30s
quarkus.langchain4j.temperature=0.5
# Use production-grade models
quarkus.langchain4j.openai.prod.model-name=gpt-4

import io.quarkiverse.langchain4j.runtime.config.LangChain4jConfig;
import jakarta.enterprise.context.ApplicationScoped;
import jakarta.inject.Inject;
import org.eclipse.microprofile.config.inject.ConfigProperty;
import java.time.Duration;
import java.util.Optional;
@ApplicationScoped
public class ConfigConsumer {

    /** Type-safe config mapping injected by CDI. */
    @Inject
    LangChain4jConfig config;

    /** The same temperature value, read directly through MicroProfile Config. */
    @ConfigProperty(name = "quarkus.langchain4j.temperature")
    Optional<Double> temperature;

    /**
     * Dumps the resolved configuration values to stdout, applying the
     * fallbacks (false, 30s, 0.7) for settings that were not configured.
     */
    public void printConfig() {
        boolean logRequests = config.logRequests().orElse(false);
        Duration timeout = config.timeout().orElse(Duration.ofSeconds(30));
        double temp = config.temperature().orElse(0.7);
        int maxRetries = config.guardrails().maxRetries();

        System.out.println("Log requests: " + logRequests);
        System.out.println("Timeout: " + timeout);
        System.out.println("Temperature: " + temp);
        System.out.println("Max retries: " + maxRetries);
    }
}
import org.eclipse.microprofile.config.ConfigProvider;
public class DynamicConfig {

    /**
     * Resolves the API key and temperature for the given model name at
     * runtime by building the property key dynamically
     * (quarkus.langchain4j.&lt;modelName&gt;.api-key / .temperature).
     * The temperature falls back to 0.7 when unset; the api-key is required.
     */
    public void loadConfig(String modelName) {
        String prefix = "quarkus.langchain4j." + modelName;

        String apiKey = ConfigProvider.getConfig()
                .getValue(prefix + ".api-key", String.class);

        Double temperature = ConfigProvider.getConfig()
                .getOptionalValue(prefix + ".temperature", Double.class)
                .orElse(0.7);
    }
}
Use Quarkus profiles for different configurations:
# Default configuration
quarkus.langchain4j.temperature=0.7
# Development profile
%dev.quarkus.langchain4j.temperature=1.0
%dev.quarkus.langchain4j.log-requests=true
# Test profile
%test.quarkus.langchain4j.temperature=0.0
%test.quarkus.langchain4j.timeout=5s
# Production profile
%prod.quarkus.langchain4j.temperature=0.5
%prod.quarkus.langchain4j.log-requests=false

# Enable metrics collection
quarkus.micrometer.enabled=true
quarkus.micrometer.binder.langchain4j.enabled=true
# Metrics export
quarkus.micrometer.export.prometheus.enabled=true

# Enable OpenTelemetry tracing
quarkus.otel.enabled=true
quarkus.otel.traces.enabled=true
# Configure what to include in traces
quarkus.langchain4j.tracing.include-prompt=false
quarkus.langchain4j.tracing.include-completion=false
quarkus.langchain4j.tracing.include-tool-arguments=true
quarkus.langchain4j.tracing.include-tool-result=true
# OTLP exporter
quarkus.otel.exporter.otlp.endpoint=http://localhost:4317

# Use environment variables for secrets
quarkus.langchain4j.openai.api-key=${OPENAI_API_KEY}
quarkus.langchain4j.anthropic.api-key=${ANTHROPIC_API_KEY}
# Or use configuration encryption
quarkus.langchain4j.openai.api-key=${enc:ENCRYPTED_VALUE}

# Disable sensitive data in traces for production
%prod.quarkus.langchain4j.tracing.include-prompt=false
%prod.quarkus.langchain4j.tracing.include-completion=false
%prod.quarkus.langchain4j.tracing.include-tool-arguments=false
%prod.quarkus.langchain4j.tracing.include-tool-result=false
# Enable for debugging in development
%dev.quarkus.langchain4j.tracing.include-prompt=true
%dev.quarkus.langchain4j.tracing.include-completion=true

Configuration is essential for: