Core classes and interfaces of LangChain4j providing foundational abstractions for LLM interaction, RAG, embeddings, agents, and observability
Package: dev.langchain4j.model.*
Thread-Safety: Implementation-dependent
LangChain4j supports various specialized model types beyond chat and embedding models for image generation, audio transcription, moderation, and scoring.
Package: dev.langchain4j.model.image
package dev.langchain4j.model.image;
import dev.langchain4j.data.image.Image;
import dev.langchain4j.model.output.Response;
/**
 * Model for generating new images from text prompts and editing existing ones
 * (e.g. DALL-E-style text-to-image providers).
 * Thread-Safety: Implementation-dependent
 */
public interface ImageModel {
/**
 * Generate a new image from a natural-language text prompt.
 * @param prompt Text description of desired image (non-null)
 * @return Response wrapping the generated {@link Image}
 */
Response<Image> generate(String prompt);
/**
 * Edit an existing image according to natural-language instructions.
 * NOTE(review): whether the input image is modified or a fresh Image is
 * returned is provider-specific — the interface only promises a Response.
 * @param image Original image to edit (non-null)
 * @param prompt Edit instructions (non-null)
 * @return Response wrapping the edited image
 */
Response<Image> edit(Image image, String prompt);
}import dev.langchain4j.model.image.ImageModel;
import dev.langchain4j.data.image.Image;
// Usage example: generate an image, then read it back in several forms.
ImageModel imageModel = /* provider-specific initialization */;
// Generate an image from a text prompt
Response<Image> response = imageModel.generate(
"A serene landscape with mountains and a lake at sunset"
);
Image image = response.content();
// Save to file
image.saveAs("landscape.png");
// Get image URL (if available — some providers return a hosted URL instead of bytes)
String url = image.url();
// Get raw image bytes (NOTE(review): may be absent when only a URL is returned — verify per provider)
byte[] data = image.data();Package: dev.langchain4j.model.audio
package dev.langchain4j.model.audio;
import dev.langchain4j.data.audio.Audio;
import dev.langchain4j.model.output.Response;
/**
 * Model for transcribing spoken audio into text (speech-to-text),
 * e.g. backed by Whisper or a cloud speech service.
 * Thread-Safety: Implementation-dependent
 */
public interface AudioTranscriptionModel {
/**
 * Transcribe the given audio to plain text.
 * @param audio Audio data (non-null); supported formats are provider-specific
 * @return Response whose content is the full transcript as a single String
 */
Response<String> transcribe(Audio audio);
}import dev.langchain4j.model.audio.AudioTranscriptionModel;
import dev.langchain4j.data.audio.Audio;
import java.nio.file.Path;
// Usage example: load an audio file from disk and transcribe it.
AudioTranscriptionModel transcriptionModel = /* provider-specific initialization */;
// Load audio file
Audio audio = Audio.from(Path.of("meeting-recording.mp3"));
// Transcribe (synchronous call; blocks until the provider responds)
Response<String> response = transcriptionModel.transcribe(audio);
String transcript = response.content();
System.out.println("Transcript: " + transcript);Package: dev.langchain4j.model.moderation
package dev.langchain4j.model.moderation;
import dev.langchain4j.data.message.ChatMessage;
import dev.langchain4j.model.moderation.Moderation;
import dev.langchain4j.model.output.Response;
import java.util.List;
import java.util.Map;
/**
 * Model for content moderation and safety checks on user or model output.
 * Thread-Safety: Implementation-dependent
 */
public interface ModerationModel {
/**
 * Moderate a single piece of text.
 * @param text Text to moderate (non-null)
 * @return Response with moderation results (flags and per-category scores)
 */
Response<Moderation> moderate(String text);
/**
 * Moderate an entire conversation in one call.
 * @param messages List of chat messages (non-null)
 * @return Response with moderation results covering the whole conversation
 */
Response<Moderation> moderate(List<ChatMessage> messages);
}package dev.langchain4j.model.moderation;
/**
 * Moderation results with category flags and per-category scores.
 * Immutability: Immutable, thread-safe
 */
public class Moderation {
/**
 * Whether the content was flagged by any category.
 * @return true if content violates policies
 */
public boolean flagged() { /* ... */ }
/**
 * Get the score for a specific category.
 * @param category Category name (e.g., "hate", "violence")
 * @return Score 0.0-1.0 (higher = more likely violation)
 */
public double score(String category) { /* ... */ }
/**
 * Get all category flags.
 * NOTE(review): it is unclear whether the map contains an entry for every
 * category or only the flagged ones — callers should null-check get()
 * rather than relying on auto-unboxing.
 * @return Map of category to flagged status
 */
public Map<String, Boolean> flags() { /* ... */ }
}import dev.langchain4j.model.moderation.ModerationModel;
import dev.langchain4j.model.moderation.Moderation;
// Usage example: screen user-generated content before processing it.
ModerationModel moderationModel = /* provider-specific initialization */;
// Moderate text
String userInput = "Some user-generated content";
Response<Moderation> response = moderationModel.moderate(userInput);
Moderation moderation = response.content();
if (moderation.flagged()) {
// Content violates policies
System.err.println("Content flagged for moderation");
// Check specific categories.
// Boolean.TRUE.equals(...) guards against a NullPointerException from
// auto-unboxing when flags() has no "hate" entry (Map.get returns null).
if (Boolean.TRUE.equals(moderation.flags().get("hate"))) {
System.err.println("Contains hate speech");
}
// Get scores
double hateScore = moderation.score("hate");
System.err.println("Hate score: " + hateScore);
// Reject content
throw new ContentViolationException("Content not allowed");
}
// Content is safe, proceed
processContent(userInput);import dev.langchain4j.guardrail.Guardrail;
import dev.langchain4j.guardrail.GuardrailResult;
// Use moderation as input guardrail: the lambda receives the incoming
// messages and delegates to the List<ChatMessage> overload of moderate().
Guardrail inputGuardrail = (messages) -> {
Response<Moderation> response = moderationModel.moderate(messages);
Moderation moderation = response.content();
if (moderation.flagged()) {
// Block the request before it ever reaches the chat model
return GuardrailResult.blocked("Content violates safety policies");
}
return GuardrailResult.allowed();
};Package: dev.langchain4j.model.scoring
package dev.langchain4j.model.scoring;
import dev.langchain4j.model.output.Response;
/**
 * Model for scoring text relevance against a query (e.g., reranking
 * retrieved documents in a RAG pipeline).
 * NOTE(review): this snippet's import block (only Response) is missing
 * java.util.List, which the scoreAll signature below requires — confirm.
 * Thread-Safety: Implementation-dependent
 */
public interface ScoringModel {
/**
 * Score a single text against a query.
 * @param text Text to score (non-null)
 * @param query Query or reference text (non-null)
 * @return Response with relevance score
 */
Response<Double> score(String text, String query);
/**
 * Score multiple texts against a query in one batch call
 * (typically cheaper than calling score() per text).
 * @param texts List of texts to score (non-null)
 * @param query Query or reference text (non-null)
 * @return Response with list of scores (same order as texts)
 */
Response<List<Double>> scoreAll(List<String> texts, String query);
}import dev.langchain4j.model.scoring.ScoringModel;
ScoringModel scoringModel = /* provider-specific initialization */;
// Usage example: score a batch of documents against a query, then rerank.
String query = "How to configure authentication?";
List<String> documents = List.of(
"Authentication setup guide...",
"Database configuration...",
"User management API..."
);
// Batch scoring (more efficient than one call per document)
Response<List<Double>> response = scoringModel.scoreAll(documents, query);
List<Double> scores = response.content();
// Pair each document with its score — scoreAll guarantees same order as input
List<ScoredDocument> scored = new ArrayList<>();
for (int i = 0; i < documents.size(); i++) {
scored.add(new ScoredDocument(documents.get(i), scores.get(i)));
}
// Sort by relevance, highest score first
scored.sort((a, b) -> Double.compare(b.score(), a.score()));
// Return top results
List<String> topResults = scored.stream()
.limit(5)
.map(ScoredDocument::text)
.collect(Collectors.toList());import dev.langchain4j.rag.content.Content;
import dev.langchain4j.rag.content.retriever.ContentRetriever;
// Two-stage retrieval: fast embedding search first, precise reranking second.
List<Content> initialResults = contentRetriever.retrieve(query);
// Extract plain text from each retrieved content item
List<String> texts = initialResults.stream()
.map(Content::textSegment)
.map(TextSegment::text)
.collect(Collectors.toList());
// Rerank with scoring model (scores come back in the same order as texts)
Response<List<Double>> scores = scoringModel.scoreAll(texts, query);
// Combine each original Content with its relevance score
List<ScoredContent> reranked = new ArrayList<>();
for (int i = 0; i < initialResults.size(); i++) {
reranked.add(new ScoredContent(initialResults.get(i), scores.content().get(i)));
}
// Highest score first
reranked.sort((a, b) -> Double.compare(b.score(), a.score()));
// Use top reranked results
List<Content> finalResults = reranked.stream()
.limit(5)
.map(ScoredContent::content)
.collect(Collectors.toList());public class SafeChatService {
private final ChatModel chatModel;
private final ModerationModel moderationModel;

/**
 * Creates the service. A constructor is required here: the two blank
 * final fields above are never assigned otherwise, so the class would
 * not compile without it.
 * @param chatModel model used to generate replies (non-null)
 * @param moderationModel model used to screen input and output (non-null)
 */
public SafeChatService(ChatModel chatModel, ModerationModel moderationModel) {
this.chatModel = chatModel;
this.moderationModel = moderationModel;
}

/**
 * Moderates the user message, generates a reply, then moderates the reply.
 * @param userMessage raw user input (non-null)
 * @return the AI response, or an apology string if the output was flagged
 * @throws UnsafeContentException if the input violates content policies
 */
public String safeChat(String userMessage) {
// 1. Moderate input
Response<Moderation> inputModeration = moderationModel.moderate(userMessage);
if (inputModeration.content().flagged()) {
throw new UnsafeContentException("Input violates policies");
}
// 2. Generate response
String aiResponse = chatModel.chat(userMessage);
// 3. Moderate output
Response<Moderation> outputModeration = moderationModel.moderate(aiResponse);
if (outputModeration.content().flagged()) {
return "I apologize, but I cannot provide that response.";
}
return aiResponse;
}
}public Image generateWithFallback(String prompt) {
try {
Response<Image> response = imageModel.generate(prompt);
return response.content();
} catch (ContentFilteredException e) {
// Prompt triggered the provider's content filter — an expected condition,
// so log at WARN. Parameterized logging (SLF4J) avoids eager string
// concatenation when WARN is disabled, instead of "..." + e.getMessage().
log.warn("Image generation blocked: {}", e.getMessage());
return getDefaultImage();
} catch (Exception e) {
// Unexpected failure: keep the full stack trace at ERROR, degrade gracefully
log.error("Image generation failed", e);
return getDefaultImage();
}
}public String transcribeAndClean(Audio audio) {
// Run speech-to-text, then normalize the raw transcript before returning it.
Response<String> result = transcriptionModel.transcribe(audio);
String cleaned = result.content();
// Three cleanup passes: strip filler words, repair punctuation,
// then correct known transcription mistakes.
cleaned = removeFillerWords(cleaned);
cleaned = fixPunctuation(cleaned);
cleaned = correctCommonErrors(cleaned);
return cleaned;
}| Model Type | OpenAI | Anthropic | Google | Azure |
|---|---|---|---|---|
| ImageModel | ✅ DALL-E | ❌ | ✅ Imagen | ✅ |
| AudioTranscription | ✅ Whisper | ❌ | ✅ Speech-to-Text | ✅ |
| ModerationModel | ✅ | ❌ | ❌ | ✅ |
| ScoringModel | ❌ | ❌ | ✅ Vertex AI | ❌ |
// ✅ GOOD: Moderate before processing
if (moderationModel.moderate(userInput).content().flagged()) {
return "Content not allowed";
}// ✅ GOOD: Rerank for better relevance
// Two-stage retrieval: cheap vector search first, precise reranking second.
List<Content> initial = embeddingSearch(query); // Fast, less precise
List<Content> reranked = scoringRerank(initial, query); // Slower, more precise// Check supported formats (typically MP3, WAV, M4A)
List<String> supportedFormats = List.of("mp3", "wav", "m4a", "flac");
String extension = getFileExtension(audioFile);
// NOTE(review): toLowerCase() uses the default locale; prefer
// toLowerCase(Locale.ROOT) to avoid the Turkish dotless-i problem.
if (!supportedFormats.contains(extension.toLowerCase())) {
throw new UnsupportedFormatException("Audio format not supported: " + extension);
}Install with Tessl CLI
npx tessl i tessl/maven-dev-langchain4j--langchain4j-core@1.11.0