CtrlK
Blog Docs Log in Get started
Tessl Logo

tessl/maven-io-quarkiverse-langchain4j--quarkus-langchain4j-openai-deployment

Quarkus extension deployment module for OpenAI integration with LangChain4j providing build-time processing and CDI bean generation

Overview
Eval results
Files

docs/runtime-components.md

Runtime Components Reference

Runtime classes and APIs provided by this deployment module, primarily for advanced use cases and programmatic model configuration.

Capabilities

QuarkusOpenAiClient

Enhanced OpenAI HTTP client with Quarkus-specific features including TLS configuration, proxy support, and observability integration.

package io.quarkiverse.langchain4j.openai.common;

import dev.langchain4j.model.openai.spi.OpenAiClientBuilderFactory.SyncOrAsync;
import dev.langchain4j.model.openai.spi.OpenAiClientBuilderFactory.SyncOrAsyncOrStreaming;
import java.net.Proxy;
import java.time.Duration;
import java.util.Map;
import java.util.function.Supplier;

/**
 * Quarkus-enhanced OpenAI HTTP client.
 * Provides access to OpenAI REST API with Quarkus-specific enhancements.
 */
public class QuarkusOpenAiClient {

    /**
     * Builder for creating QuarkusOpenAiClient instances.
     * Supports all standard OpenAI client configuration plus Quarkus-specific features.
     */
    public static class Builder {

        /**
         * Set the OpenAI API base URL.
         * @param baseUrl Base URL (default: https://api.openai.com/v1/)
         * @return This builder
         */
        public Builder baseUrl(String baseUrl);

        /**
         * Set the OpenAI API key.
         * @param apiKey API key starting with "sk-"
         * @return This builder
         */
        public Builder openAiApiKey(String apiKey);

        /**
         * Set the organization ID.
         * @param organizationId OpenAI organization ID
         * @return This builder
         */
        public Builder organizationId(String organizationId);

        /**
         * Set the Quarkus configuration name.
         * Used for named model configurations.
         * @param configName Configuration name (e.g., "premium")
         * @return This builder
         */
        public Builder configName(String configName);

        /**
         * Set the Quarkus TLS configuration name.
         * References a TLS configuration from quarkus.tls namespace.
         * @param tlsConfigurationName TLS configuration name
         * @return This builder
         */
        public Builder tlsConfigurationName(String tlsConfigurationName);

        /**
         * Set the call timeout duration.
         * @param callTimeout Overall timeout for API calls
         * @return This builder
         */
        public Builder callTimeout(Duration callTimeout);

        /**
         * Set the connection timeout duration.
         * @param connectTimeout Timeout for establishing connections
         * @return This builder
         */
        public Builder connectTimeout(Duration connectTimeout);

        /**
         * Set the read timeout duration.
         * @param readTimeout Timeout for reading responses
         * @return This builder
         */
        public Builder readTimeout(Duration readTimeout);

        /**
         * Set the write timeout duration.
         * @param writeTimeout Timeout for writing requests
         * @return This builder
         */
        public Builder writeTimeout(Duration writeTimeout);

        /**
         * Enable request logging.
         * @param logRequests Whether to log requests
         * @return This builder
         */
        public Builder logRequests(boolean logRequests);

        /**
         * Enable response logging.
         * @param logResponses Whether to log responses
         * @return This builder
         */
        public Builder logResponses(boolean logResponses);

        /**
         * Enable streaming response logging.
         * @param logStreamingResponses Whether to log streaming responses
         * @return This builder
         */
        public Builder logStreamingResponses(boolean logStreamingResponses);

        /**
         * Enable cURL format request logging.
         * @param logCurl Whether to log requests in cURL format
         * @return This builder
         */
        public Builder logCurl(boolean logCurl);

        /**
         * Set HTTP proxy configuration.
         * @param proxy Proxy instance (HTTP, HTTPS, or SOCKS)
         * @return This builder
         */
        public Builder proxy(Proxy proxy);

        /**
         * Set custom user agent string.
         * @param userAgent User agent header value
         * @return This builder
         */
        public Builder userAgent(String userAgent);

        /**
         * Set custom headers supplier.
         * @param customHeadersSupplier Supplier of custom headers map
         * @return This builder
         */
        public Builder customHeadersSupplier(Supplier<Map<String, String>> customHeadersSupplier);

        /**
         * Build the QuarkusOpenAiClient instance.
         * @return Configured client instance
         */
        public QuarkusOpenAiClient build();
    }

    /**
     * Send a chat completion request.
     * @param request Chat completion request
     * @return Sync or async or streaming response
     */
    public SyncOrAsyncOrStreaming<ChatCompletionResponse> chatCompletion(ChatCompletionRequest request);

    /**
     * Send an embedding request.
     * @param request Embedding request
     * @return Sync or async response
     */
    public SyncOrAsync<EmbeddingResponse> embedding(EmbeddingRequest request);

    /**
     * Send a moderation request.
     * @param request Moderation request
     * @return Sync or async response
     */
    public SyncOrAsync<ModerationResponse> moderation(ModerationRequest request);

    /**
     * Send an image generation request.
     * @param request Image generation request
     * @return Sync or async response
     */
    public SyncOrAsync<ImagesGenerationResponse> imagesGeneration(ImagesGenerationRequest request);
}

Usage Example:

import io.quarkiverse.langchain4j.openai.common.QuarkusOpenAiClient;
import java.time.Duration;
import java.net.Proxy;
import java.net.InetSocketAddress;

public class CustomClientExample {

    public QuarkusOpenAiClient createClient() {
        Proxy proxy = new Proxy(
            Proxy.Type.HTTP,
            new InetSocketAddress("proxy.example.com", 8080)
        );

        return QuarkusOpenAiClient.builder()
            .baseUrl("https://api.openai.com/v1/")
            .openAiApiKey(System.getenv("OPENAI_API_KEY"))
            .organizationId("org-xxxxx")
            .configName("custom")
            .callTimeout(Duration.ofSeconds(30))
            .logRequests(true)
            .logCurl(true)
            .proxy(proxy)
            .build();
    }
}

OpenAiRecorder

Runtime recorder providing factory methods for creating model instances with Quarkus lifecycle integration.

package io.quarkiverse.langchain4j.openai.runtime;

import dev.langchain4j.model.chat.ChatLanguageModel;
import dev.langchain4j.model.chat.StreamingChatLanguageModel;
import dev.langchain4j.model.embedding.EmbeddingModel;
import dev.langchain4j.model.moderation.ModerationModel;
import dev.langchain4j.model.image.ImageModel;
import io.quarkus.arc.SyntheticCreationalContext;
import io.quarkus.runtime.ShutdownContext;
import java.util.function.Function;
import java.util.function.Supplier;

/**
 * Runtime recorder for creating OpenAI model instances.
 * Used internally by the deployment module to create CDI beans.
 */
@Recorder
public class OpenAiRecorder {

    /**
     * Create a chat model creation function.
     * @param configName Configuration name
     * @return Function that creates ChatLanguageModel instances
     */
    public Function<SyntheticCreationalContext<ChatLanguageModel>, ChatLanguageModel> chatModel(String configName);

    /**
     * Create a streaming chat model creation function.
     * @param configName Configuration name
     * @return Function that creates StreamingChatLanguageModel instances
     */
    public Function<SyntheticCreationalContext<StreamingChatLanguageModel>, StreamingChatLanguageModel> streamingChatModel(String configName);

    /**
     * Create an embedding model supplier.
     * @param configName Configuration name
     * @return Supplier that creates EmbeddingModel instances
     */
    public Supplier<EmbeddingModel> embeddingModel(String configName);

    /**
     * Create a moderation model supplier.
     * @param configName Configuration name
     * @return Supplier that creates ModerationModel instances
     */
    public Supplier<ModerationModel> moderationModel(String configName);

    /**
     * Create an image model supplier.
     * @param configName Configuration name
     * @return Supplier that creates ImageModel instances
     */
    public Supplier<ImageModel> imageModel(String configName);

    /**
     * Register cleanup handlers for application shutdown.
     * @param shutdownContext Quarkus shutdown context
     */
    public void cleanUp(ShutdownContext shutdownContext);
}

QuarkusOpenAiImageModel

Enhanced OpenAI image model with local persistence capabilities.

package io.quarkiverse.langchain4j.openai;

import dev.langchain4j.model.image.ImageModel;
import dev.langchain4j.model.output.Response;
import dev.langchain4j.data.image.Image;
import java.nio.file.Path;

/**
 * Quarkus-enhanced OpenAI image model.
 * Extends standard DALL-E functionality with local image persistence.
 */
public class QuarkusOpenAiImageModel implements ImageModel {

    /**
     * Generate a single image from a text prompt.
     * @param prompt Text description of the image to generate
     * @return Response containing the generated image
     */
    @Override
    public Response<Image> generate(String prompt);

    /**
     * Edit an existing image based on a text prompt.
     * @param image The image to edit
     * @param prompt Description of the desired changes
     * @return Response containing the edited image
     */
    @Override
    public Response<Image> edit(Image image, String prompt);

    /**
     * Builder for creating QuarkusOpenAiImageModel instances.
     */
    public static class Builder {

        /**
         * Enable persistence of generated images to disk.
         * @param persist Whether to persist images
         * @return This builder
         */
        public Builder persist(boolean persist);

        /**
         * Set the directory for persisted images.
         * @param persistDirectory Directory path for storing images
         * @return This builder
         */
        public Builder persistDirectory(Path persistDirectory);

        /**
         * Build the model instance.
         * @return Configured QuarkusOpenAiImageModel
         */
        public QuarkusOpenAiImageModel build();
    }
}

Persistence Behavior:

  • When persist=true, images are automatically saved to the configured directory
  • URL-based responses are downloaded and saved
  • Base64-encoded responses are decoded and saved
  • Filenames include timestamp for uniqueness
  • Persisted files are available for later access

Model Builder Factories

Service provider implementations that create Quarkus-enhanced model builders.

QuarkusOpenAiChatModelBuilderFactory

package io.quarkiverse.langchain4j.openai;

import dev.langchain4j.model.openai.OpenAiChatModel;
import dev.langchain4j.model.openai.spi.OpenAiChatModelBuilderFactory;

/**
 * Factory for creating Quarkus-enhanced chat model builders.
 * Registered via Java ServiceLoader mechanism.
 */
public class QuarkusOpenAiChatModelBuilderFactory implements OpenAiChatModelBuilderFactory {

    /**
     * Create a Quarkus-enhanced chat model builder.
     * @return Builder with Quarkus-specific configuration options
     */
    @Override
    public OpenAiChatModel.Builder get();
}

QuarkusOpenAiStreamingChatModelBuilderFactory

package io.quarkiverse.langchain4j.openai;

import dev.langchain4j.model.openai.OpenAiStreamingChatModel;
import dev.langchain4j.model.openai.spi.OpenAiStreamingChatModelBuilderFactory;

/**
 * Factory for creating Quarkus-enhanced streaming chat model builders.
 * Registered via Java ServiceLoader mechanism.
 */
public class QuarkusOpenAiStreamingChatModelBuilderFactory implements OpenAiStreamingChatModelBuilderFactory {

    /**
     * Create a Quarkus-enhanced streaming chat model builder.
     * @return Builder with Quarkus-specific configuration options
     */
    @Override
    public OpenAiStreamingChatModel.Builder get();
}

QuarkusOpenAiEmbeddingModelBuilderFactory

package io.quarkiverse.langchain4j.openai;

import dev.langchain4j.model.openai.OpenAiEmbeddingModel;
import dev.langchain4j.model.openai.spi.OpenAiEmbeddingModelBuilderFactory;

/**
 * Factory for creating Quarkus-enhanced embedding model builders.
 * Registered via Java ServiceLoader mechanism.
 */
public class QuarkusOpenAiEmbeddingModelBuilderFactory implements OpenAiEmbeddingModelBuilderFactory {

    /**
     * Create a Quarkus-enhanced embedding model builder.
     * @return Builder with Quarkus-specific configuration options
     */
    @Override
    public OpenAiEmbeddingModel.Builder get();
}

QuarkusOpenAiModerationModelBuilderFactory

package io.quarkiverse.langchain4j.openai;

import dev.langchain4j.model.openai.OpenAiModerationModel;
import dev.langchain4j.model.openai.spi.OpenAiModerationModelBuilderFactory;

/**
 * Factory for creating Quarkus-enhanced moderation model builders.
 * Registered via Java ServiceLoader mechanism.
 */
public class QuarkusOpenAiModerationModelBuilderFactory implements OpenAiModerationModelBuilderFactory {

    /**
     * Create a Quarkus-enhanced moderation model builder.
     * @return Builder with Quarkus-specific configuration options
     */
    @Override
    public OpenAiModerationModel.Builder get();
}

Service Provider Registration:

These factories are automatically registered via the Java ServiceLoader mechanism during native image builds. The deployment module produces ServiceProviderBuildItem instances to ensure they are available in GraalVM native executables.

Configuration Classes

Runtime configuration record classes for accessing configuration properties programmatically.

LangChain4jOpenAiConfig

package io.quarkiverse.langchain4j.openai.runtime.config;

import io.smallrye.config.ConfigMapping;
import io.smallrye.config.WithDefault;
import java.time.Duration;
import java.util.Map;
import java.util.Optional;

/**
 * Runtime configuration root for OpenAI integration.
 * Accessed via CDI injection or ConfigProvider.
 */
@ConfigMapping(prefix = "quarkus.langchain4j.openai")
public interface LangChain4jOpenAiConfig {

    /**
     * Get default configuration.
     */
    OpenAiConfig defaultConfig();

    /**
     * Get named configurations.
     * Key is the configuration name, value is the configuration.
     */
    Map<String, OpenAiConfig> namedConfig();
}

OpenAiConfig

/**
 * OpenAI configuration for a single instance (default or named).
 */
public interface OpenAiConfig {

    /**
     * OpenAI API base URL.
     */
    @WithDefault("https://api.openai.com/v1/")
    String baseUrl();

    /**
     * OpenAI API key.
     */
    String apiKey();

    /**
     * TLS configuration name.
     */
    Optional<String> tlsConfigurationName();

    /**
     * Organization ID.
     */
    Optional<String> organizationId();

    /**
     * Request timeout.
     */
    @WithDefault("10s")
    Duration timeout();

    /**
     * Maximum retry attempts.
     */
    @WithDefault("1")
    Integer maxRetries();

    /**
     * Enable request logging.
     */
    Optional<Boolean> logRequests();

    /**
     * Enable response logging.
     */
    Optional<Boolean> logResponses();

    /**
     * Enable cURL format logging.
     */
    Optional<Boolean> logRequestsCurl();

    /**
     * Enable OpenAI integration.
     */
    @WithDefault("true")
    Boolean enableIntegration();

    /**
     * Proxy type.
     */
    @WithDefault("HTTP")
    String proxyType();

    /**
     * Proxy host.
     */
    Optional<String> proxyHost();

    /**
     * Proxy port.
     */
    @WithDefault("3128")
    Integer proxyPort();

    /**
     * Chat model configuration.
     */
    ChatModelConfig chatModel();

    /**
     * Embedding model configuration.
     */
    EmbeddingModelConfig embeddingModel();

    /**
     * Moderation model configuration.
     */
    ModerationModelConfig moderationModel();

    /**
     * Image model configuration.
     */
    ImageModelConfig imageModel();
}

Usage Example:

import io.quarkiverse.langchain4j.openai.runtime.config.LangChain4jOpenAiConfig;
import jakarta.inject.Inject;

@ApplicationScoped
public class ConfigExample {

    @Inject
    LangChain4jOpenAiConfig config;

    public void printConfig() {
        OpenAiConfig defaultConfig = config.defaultConfig();
        System.out.println("Base URL: " + defaultConfig.baseUrl());
        System.out.println("Timeout: " + defaultConfig.timeout());

        // Access named configuration
        OpenAiConfig premiumConfig = config.namedConfig().get("premium");
        if (premiumConfig != null) {
            System.out.println("Premium model: " + premiumConfig.chatModel().modelName());
        }
    }
}

Cost Estimators

Built-in cost estimation for OpenAI API usage, providing token-based pricing calculations for different model variants.

package io.quarkiverse.langchain4j.openai.runtime.cost;

import dev.langchain4j.model.chat.listener.ChatModelListener;

/**
 * Cost estimators for OpenAI models.
 * Automatically integrated with ChatModelListener for usage tracking.
 */

/**
 * Cost estimator for GPT-4o model.
 * Input: $5.00 per 1M tokens
 * Output: $15.00 per 1M tokens
 */
public class BasicGpt4oCostEstimator implements CostEstimator {
    @Override
    public boolean supports(SupportsContext context);

    @Override
    public CostResult estimate(CostContext context);
}

/**
 * Cost estimator for GPT-4o-mini model.
 * Input: $0.15 per 1M tokens
 * Output: $0.60 per 1M tokens
 */
public class BasicGpt4oMiniCostEstimator implements CostEstimator {
    @Override
    public boolean supports(SupportsContext context);

    @Override
    public CostResult estimate(CostContext context);
}

/**
 * Cost estimator for O1-preview model.
 * Input: $15.00 per 1M tokens
 * Output: $60.00 per 1M tokens
 */
public class BasicO1PreviewCostEstimator implements CostEstimator {
    @Override
    public boolean supports(SupportsContext context);

    @Override
    public CostResult estimate(CostContext context);
}

/**
 * Cost estimator for O1-mini model.
 * Input: $3.00 per 1M tokens
 * Output: $12.00 per 1M tokens
 */
public class BasicO1MiniCostEstimator implements CostEstimator {
    @Override
    public boolean supports(SupportsContext context);

    @Override
    public CostResult estimate(CostContext context);
}

/**
 * Cost estimator for text-embedding-3-small model.
 * Input: $0.02 per 1M tokens
 */
public class BasicE3SmallCostEstimator implements CostEstimator {
    @Override
    public boolean supports(SupportsContext context);

    @Override
    public CostResult estimate(CostContext context);
}

/**
 * Cost estimator for text-embedding-3-large model.
 * Input: $0.13 per 1M tokens
 */
public class BasicE3BigCostEstimator implements CostEstimator {
    @Override
    public boolean supports(SupportsContext context);

    @Override
    public CostResult estimate(CostContext context);
}

Usage with ChatModelListener:

Cost estimators are automatically used when integrated with chat model listeners for tracking API costs:

import dev.langchain4j.model.chat.listener.ChatModelListener;
import dev.langchain4j.model.chat.listener.ChatModelRequest;
import dev.langchain4j.model.chat.listener.ChatModelResponse;
import io.quarkiverse.langchain4j.openai.runtime.cost.BasicGpt4oCostEstimator;
import jakarta.enterprise.context.ApplicationScoped;

@ApplicationScoped
public class CostTrackingListener implements ChatModelListener {

    private final CostEstimator costEstimator = new BasicGpt4oCostEstimator();
    private double totalCost = 0.0;

    @Override
    public void onResponse(ChatModelResponse response) {
        if (costEstimator.supports(response.context())) {
            CostResult cost = costEstimator.estimate(response.context());
            totalCost += cost.totalCost();
            System.out.println("Request cost: $" + cost.totalCost());
            System.out.println("Total cost: $" + totalCost);
        }
    }
}

Advanced Usage

Programmatic Model Creation

While CDI injection is recommended, models can be created programmatically for advanced use cases:

import dev.langchain4j.model.openai.OpenAiChatModel;
import io.quarkiverse.langchain4j.openai.QuarkusOpenAiChatModelBuilderFactory;
import java.time.Duration;

public class ProgrammaticExample {

    public ChatLanguageModel createCustomModel() {
        // Use the Quarkus-enhanced builder factory
        var factory = new QuarkusOpenAiChatModelBuilderFactory();
        var builder = factory.get();

        return builder
            .apiKey(System.getenv("OPENAI_API_KEY"))
            .modelName("gpt-4o")
            .temperature(0.7)
            .timeout(Duration.ofSeconds(30))
            .logRequests(true)
            .build();
    }
}

Accessing OpenAI REST API Directly

For advanced use cases requiring direct REST API access:

import io.quarkiverse.langchain4j.openai.common.QuarkusOpenAiClient;
import dev.langchain4j.model.openai.internal.api.ChatCompletionRequest;
import dev.langchain4j.model.openai.internal.api.ChatCompletionResponse;

public class DirectApiExample {

    public void callApiDirectly() {
        QuarkusOpenAiClient client = QuarkusOpenAiClient.builder()
            .openAiApiKey(System.getenv("OPENAI_API_KEY"))
            .logRequests(true)
            .build();

        ChatCompletionRequest request = ChatCompletionRequest.builder()
            .model("gpt-4o-mini")
            .addUserMessage("Hello, world!")
            .build();

        ChatCompletionResponse response = client.chatCompletion(request)
            .execute();

        System.out.println(response.choices().get(0).message().content());
    }
}

Install with Tessl CLI

npx tessl i tessl/maven-io-quarkiverse-langchain4j--quarkus-langchain4j-openai-deployment

docs

build-items.md

cdi-beans.md

configuration.md

dev-ui.md

index.md

runtime-components.md

tile.json