tessl install tessl/maven-io-quarkiverse-langchain4j--quarkus-langchain4j-core@1.5.0

Quarkus LangChain4j Core provides runtime integration for LangChain4j with the Quarkus framework, enabling declarative AI service creation through CDI annotations.
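For example, an interface annotated with @RegisterAiService becomes a CDI bean whose implementation is generated by the framework and can be injected like any other bean. A minimal sketch (the Assistant interface and the names used here are illustrative, not part of the library):

import io.quarkiverse.langchain4j.RegisterAiService;
import dev.langchain4j.service.UserMessage;
import jakarta.enterprise.context.ApplicationScoped;
import jakarta.inject.Inject;

// Illustrative AI service; the framework provides the implementation at runtime.
@RegisterAiService
interface Assistant {
    @UserMessage("Answer the following question: {question}")
    String answer(String question);
}

// Any CDI bean can inject and call the generated service.
@ApplicationScoped
public class AssistantClient {

    @Inject
    Assistant assistant;

    public String ask(String question) {
        return assistant.answer(question);
    }
}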
Content Annotations enable multimodal interactions by marking parameters as specific content types (audio, image, video, PDF) in AI service methods.
Marks a parameter as audio content.
// Package: io.quarkiverse.langchain4j
/**
* Mark parameter as audio content.
* Supported parameter types: String, URL, URI, Audio.
*/
@Target(ElementType.PARAMETER)
@Retention(RetentionPolicy.RUNTIME)
public @interface AudioUrl {
}

Usage Example:
import io.quarkiverse.langchain4j.RegisterAiService;
import io.quarkiverse.langchain4j.AudioUrl;
import dev.langchain4j.service.UserMessage;
import java.net.URI;
@RegisterAiService
public interface AudioAssistant {
@UserMessage("Transcribe this audio")
String transcribe(@AudioUrl String audioUrl);
@UserMessage("What is discussed in this audio?")
String analyzeAudio(@AudioUrl URI audioUri);
}

Marks a parameter as image content.
// Package: io.quarkiverse.langchain4j
/**
* Mark parameter as image content.
* Supported parameter types: String, URL, URI, Image.
*/
@Target(ElementType.PARAMETER)
@Retention(RetentionPolicy.RUNTIME)
public @interface ImageUrl {
}

Usage Example:
import io.quarkiverse.langchain4j.RegisterAiService;
import io.quarkiverse.langchain4j.ImageUrl;
import dev.langchain4j.service.UserMessage;
import java.net.URI;
import java.net.URL;
import java.util.List;
@RegisterAiService(modelName = "gpt-4-vision")
public interface VisionAssistant {
@UserMessage("Describe what you see in this image")
String describeImage(@ImageUrl String imageUrl);
@UserMessage("What objects are in this image?")
List<String> detectObjects(@ImageUrl URL imageUrl);
@UserMessage("Is there a {object} in this image?")
boolean containsObject(@ImageUrl URI imageUri, String object);
}

Marks a parameter as video content.
// Package: io.quarkiverse.langchain4j
/**
* Mark parameter as video content.
* Supported parameter types: String, URL, URI, Video.
*/
@Target(ElementType.PARAMETER)
@Retention(RetentionPolicy.RUNTIME)
public @interface VideoUrl {
}

Usage Example:
import io.quarkiverse.langchain4j.RegisterAiService;
import io.quarkiverse.langchain4j.VideoUrl;
import dev.langchain4j.service.UserMessage;
import java.net.URI;
import java.util.List;
@RegisterAiService
public interface VideoAssistant {
@UserMessage("Summarize this video")
String summarizeVideo(@VideoUrl String videoUrl);
@UserMessage("What actions occur in this video?")
List<String> detectActions(@VideoUrl URI videoUri);
}

Marks a parameter as PDF content.
// Package: io.quarkiverse.langchain4j
/**
* Mark parameter as PDF content.
* Supported parameter types: String, URL, URI, PdfFile.
*/
@Target(ElementType.PARAMETER)
@Retention(RetentionPolicy.RUNTIME)
public @interface PdfUrl {
}

Usage Example:
import io.quarkiverse.langchain4j.RegisterAiService;
import io.quarkiverse.langchain4j.PdfUrl;
import dev.langchain4j.service.UserMessage;
import java.net.URI;
import java.net.URL;
import java.util.List;
@RegisterAiService
public interface DocumentAssistant {
@UserMessage("Summarize this document")
String summarizeDocument(@PdfUrl String pdfUrl);
@UserMessage("Extract key points from this PDF")
List<String> extractKeyPoints(@PdfUrl URL pdfUrl);
@UserMessage("Find information about {topic} in this document")
String findInDocument(@PdfUrl URI pdfUri, String topic);
}

All content annotations support the same parameter types: String, java.net.URL, java.net.URI, and the corresponding LangChain4j content type (Audio, Image, Video, or PdfFile).
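Beyond URL-style parameters, a content object that has already been loaded can be passed directly. A minimal sketch, assuming the dev.langchain4j.data.image.Image type and an illustrative interface name:

import io.quarkiverse.langchain4j.RegisterAiService;
import io.quarkiverse.langchain4j.ImageUrl;
import dev.langchain4j.data.image.Image;
import dev.langchain4j.service.UserMessage;

@RegisterAiService
public interface PreloadedImageAssistant {

    // The caller supplies an Image that is already loaded (e.g. built from
    // base64 data), so the framework does not need to resolve a URL.
    @UserMessage("Describe what you see in this image")
    String describe(@ImageUrl Image image);
}

Multiple content parameters of the same type can also be combined in a single method: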
import io.quarkiverse.langchain4j.RegisterAiService;
import io.quarkiverse.langchain4j.ImageUrl;
import dev.langchain4j.service.UserMessage;
import java.net.URL;
@RegisterAiService
public interface MultiImageAssistant {
@UserMessage("Compare these two images and describe the differences")
String compareImages(
@ImageUrl String image1Url,
@ImageUrl String image2Url
);
@UserMessage("Which image contains {object}?")
String findObjectInImages(
@ImageUrl URL image1,
@ImageUrl URL image2,
@ImageUrl URL image3,
String object
);
}

Different content types can be mixed in a single method:
import io.quarkiverse.langchain4j.RegisterAiService;
import io.quarkiverse.langchain4j.ImageUrl;
import io.quarkiverse.langchain4j.PdfUrl;
import io.quarkiverse.langchain4j.AudioUrl;
import io.quarkiverse.langchain4j.VideoUrl;
import dev.langchain4j.service.UserMessage;
import java.net.URI;
@RegisterAiService
public interface MultimodalAssistant {
@UserMessage("Describe the relationship between this image and document")
String analyzeImageAndDocument(
@ImageUrl String imageUrl,
@PdfUrl String documentUrl
);
@UserMessage("Does the audio match the video content?")
boolean checkAudioVideoSync(
@AudioUrl URI audioUri,
@VideoUrl URI videoUri
);
}

Content parameters can be combined with regular template parameters in the prompt:
import io.quarkiverse.langchain4j.RegisterAiService;
import io.quarkiverse.langchain4j.ImageUrl;
import dev.langchain4j.service.UserMessage;
@RegisterAiService
public interface ContextualVisionAssistant {
@UserMessage("""
Context: {context}
Based on this context, analyze the image and answer: {question}
""")
String analyzeWithContext(
@ImageUrl String imageUrl,
String context,
String question
);
}

Content annotations can also be used in tool methods:
import dev.langchain4j.agent.tool.Tool;
import io.quarkiverse.langchain4j.ImageUrl;
import java.net.URL;
import java.util.List;
public class ImageAnalysisTool {
@Tool("Analyze an image and extract text")
public String extractText(@ImageUrl String imageUrl) {
// OCR implementation
return performOcr(imageUrl);
}
@Tool("Detect faces in an image")
public List<String> detectFaces(@ImageUrl URL imageUrl) {
// Face detection implementation
return List.of("face1", "face2");
}
private String performOcr(String imageUrl) {
// OCR logic
return "extracted text";
}
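// Tool classes like this are typically registered with an AI service via the
// tools attribute, e.g. @RegisterAiService(tools = ImageAnalysisTool.class).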
}

The framework automatically handles content loading:
import io.quarkiverse.langchain4j.RegisterAiService;
import io.quarkiverse.langchain4j.ImageUrl;
import dev.langchain4j.service.UserMessage;
@RegisterAiService
public interface SmartAssistant {
// Framework loads image from URL and passes to model
@UserMessage("What's in this image?")
String analyzeImage(@ImageUrl String imageUrl);
// Works with local file paths too
@UserMessage("Analyze this local image")
String analyzeLocalImage(@ImageUrl String filePath); // e.g., "file:///path/to/image.jpg"
}

Content annotation support depends on the model being used.
Example with Multiple Models:
import io.quarkiverse.langchain4j.RegisterAiService;
import io.quarkiverse.langchain4j.ImageUrl;
import io.quarkiverse.langchain4j.AudioUrl;
import io.quarkiverse.langchain4j.PdfUrl;
@RegisterAiService(modelName = "gpt-4-vision")
public interface VisionService {
String analyzeImage(@ImageUrl String url);
}
@RegisterAiService(modelName = "whisper")
public interface AudioService {
String transcribe(@AudioUrl String url);
}
@RegisterAiService(modelName = "claude-3-opus")
public interface MultimodalService {
String analyze(@ImageUrl String imageUrl, @PdfUrl String docUrl);
}

Handle cases where content can't be loaded:
import io.quarkiverse.langchain4j.RegisterAiService;
import io.quarkiverse.langchain4j.ImageUrl;
import jakarta.enterprise.context.ApplicationScoped;
import jakarta.inject.Inject;
@ApplicationScoped
public class SafeImageAnalyzer {
@Inject
VisionAssistant assistant;
public String safeAnalyze(String imageUrl) {
try {
return assistant.analyzeImage(imageUrl);
} catch (Exception e) {
// Handle content loading errors; guard against a null exception message
String message = e.getMessage();
if (message != null && message.contains("404")) {
return "Image not found at URL: " + imageUrl;
} else if (message != null && message.contains("invalid format")) {
return "Invalid image format";
}
throw e;
}
}
}
@RegisterAiService
interface VisionAssistant {
String analyzeImage(@ImageUrl String url);
}

Content annotations are useful for: