LangChain4j integration for Mistral AI providing chat completion, streaming, embedding, moderation, and code completion capabilities
Content moderation and safety classification for text and chat messages. Analyzes content across multiple harmful categories including sexual content, hate speech, harassment, violence, self-harm, dangerous content, PII, health, and legal content. The underlying Mistral moderation API returns boolean flags and confidence scores for each category; the Moderation result exposed by LangChain4j reports an overall flag (see the notes below).
Analyze text or chat conversations for harmful or inappropriate content.
public class MistralAiModerationModel implements ModerationModel {
/**
* Public constructor for MistralAiModerationModel.
* Note: Unlike other model classes, this class takes a Builder in its constructor.
*
* @param builder Configured (non-null) Builder instance
*/
public MistralAiModerationModel(Builder builder) { ... }
/**
* Moderate a single text string.
*
* @param text Text (non-null) to analyze for harmful content
* @return Response containing Moderation result with category flags and scores
*/
public Response<Moderation> moderate(String text) { ... }
/**
* Moderate a conversation (list of chat messages).
*
* @param messages List (non-null) of ChatMessage objects to analyze
* @return Response containing Moderation result for the conversation
*/
public Response<Moderation> moderate(List<ChatMessage> messages) { ... }
}
import dev.langchain4j.model.mistralai.MistralAiModerationModel;
import dev.langchain4j.model.moderation.Moderation;
import dev.langchain4j.model.output.Response;
MistralAiModerationModel moderationModel = new MistralAiModerationModel.Builder()
.apiKey(System.getenv("MISTRAL_API_KEY"))
.modelName("mistral-moderation-latest") // String, not enum
.build();
String text = "This is a sample text to check for harmful content.";
Response<Moderation> response = moderationModel.moderate(text);
Moderation moderation = response.content();
if (moderation.flagged()) {
System.out.println("Content flagged as potentially harmful!");
} else {
System.out.println("Content appears safe.");
}
import dev.langchain4j.data.message.AiMessage;
import dev.langchain4j.data.message.ChatMessage;
import dev.langchain4j.data.message.UserMessage;
import java.util.Arrays;
import java.util.List;
List<ChatMessage> conversation = Arrays.asList(
UserMessage.from("Hello, how are you?"),
AiMessage.from("I'm doing well, thank you! How can I help you today?"),
UserMessage.from("Can you help me with a programming question?")
);
Response<Moderation> response = moderationModel.moderate(conversation);
Moderation moderation = response.content();
System.out.println("Conversation flagged: " + moderation.flagged());String suspiciousText = "Example text that might contain harmful content";
Response<Moderation> response = moderationModel.moderate(suspiciousText);
Moderation moderation = response.content();
if (moderation.flagged()) {
System.out.println("Content flagged as potentially harmful!");
// Get flagged text if available
String flaggedText = moderation.flaggedText();
if (flaggedText != null) {
System.out.println("Flagged text: " + flaggedText);
}
}
// Note: The Moderation class only provides flagged() and flaggedText() methods.
// Detailed category scores (sexual, hate, violence, harassment, etc.) are available
// through internal MistralAiModerationResult APIs that are not exposed in the
// standard Moderation interface. For detailed category analysis, you would need
// to access internal implementation classes, which is not recommended.
public class ContentModerator {
private final MistralAiModerationModel moderationModel;
public ContentModerator(String apiKey) {
this.moderationModel = new MistralAiModerationModel.Builder()
.apiKey(apiKey)
.modelName("mistral-moderation-latest") // builder accepts a String model name
.build();
}
public ModerationResult checkContent(String content) {
Response<Moderation> response = moderationModel.moderate(content);
Moderation moderation = response.content();
if (!moderation.flagged()) {
return ModerationResult.safe(content);
}
// Content is flagged - block it
// Note: Detailed category scores are not available through the public API
return ModerationResult.blocked(content, moderation.flaggedText());
}
}
// Usage
ContentModerator moderator = new ContentModerator(apiKey);
ModerationResult result = moderator.checkContent(userInput);
if (result.isBlocked()) {
System.out.println("Content blocked due to: " + result.getViolations());
} else {
processContent(result.getContent());
}
public class ChatModerationService {
private final MistralAiModerationModel moderationModel;
public boolean allowMessage(List<ChatMessage> conversationHistory,
String newMessage) {
// Add new message to history
List<ChatMessage> fullConversation = new ArrayList<>(conversationHistory);
fullConversation.add(UserMessage.from(newMessage));
// Moderate the conversation
Response<Moderation> response = moderationModel.moderate(fullConversation);
Moderation moderation = response.content();
if (moderation.flagged()) {
logViolation(newMessage, moderation);
return false;
}
return true;
}
private void logViolation(String message, Moderation moderation) {
logger.warn("Message flagged: {}", message);
logger.warn("Flagged text: {}", moderation.flaggedText());
}
}
public class BatchContentModerator {
private final MistralAiModerationModel moderationModel;
public Map<String, Moderation> moderateBatch(List<String> texts) {
Map<String, Moderation> results = new HashMap<>();
for (String text : texts) {
try {
Response<Moderation> response = moderationModel.moderate(text);
results.put(text, response.content());
} catch (Exception e) {
logger.error("Failed to moderate text: {}", text, e);
}
}
return results;
}
public List<String> filterSafeContent(List<String> texts) {
return texts.stream()
.filter(text -> {
Response<Moderation> response = moderationModel.moderate(text);
return !response.content().flagged();
})
.collect(Collectors.toList());
}
}
public static class Builder {
/**
* Set the Mistral AI API key (required).
*
* @param apiKey Your (non-null) Mistral AI API key
* @return Builder instance
* @throws IllegalArgumentException if parameter validation fails
*/
public Builder apiKey(String apiKey) { ... }
/**
* Set the base URL for the Mistral AI API.
* Default: https://api.mistral.ai/v1
*
* @param baseUrl Custom (non-null) API endpoint URL
* @return Builder instance
* @throws IllegalArgumentException if parameter validation fails
*/
public Builder baseUrl(String baseUrl) { ... }
/**
* Set the model name using string.
* Default: "mistral-moderation-latest"
* Note: The builder only accepts String, not enum.
*
* @param modelName Model (non-null) identifier string (e.g., "mistral-moderation-latest")
* @return Builder instance
* @throws IllegalArgumentException if parameter validation fails
*/
public Builder modelName(String modelName) { ... }
/**
* Set request timeout.
* Default: 60 seconds
*
* @param timeout Duration (non-null) for request timeout
* @return Builder instance
* @throws IllegalArgumentException if parameter validation fails
*/
public Builder timeout(Duration timeout) { ... }
/**
* Enable request logging.
*
* @param logRequests True (non-null) to log requests
* @return Builder instance
* @throws IllegalArgumentException if parameter validation fails
*/
public Builder logRequests(Boolean logRequests) { ... }
/**
* Enable response logging.
*
* @param logResponses True (non-null) to log responses
* @return Builder instance
* @throws IllegalArgumentException if parameter validation fails
*/
public Builder logResponses(Boolean logResponses) { ... }
/**
* Set custom SLF4J logger for logging.
*
* @param logger SLF4J (non-null) Logger instance
* @return Builder instance
* @throws IllegalArgumentException if parameter validation fails
*/
public Builder logger(Logger logger) { ... }
/**
* Set maximum retry attempts on failure.
* Default: 2
*
* @param maxRetries Maximum (non-null) number of retries
* @return Builder instance
* @throws IllegalArgumentException if parameter validation fails
*/
public Builder maxRetries(int maxRetries) { ... }
/**
* Set custom HTTP client builder.
*
* @param httpClientBuilder HttpClientBuilder (non-null) instance
* @return Builder instance
* @throws IllegalArgumentException if parameter validation fails
*/
public Builder httpClientBuilder(HttpClientBuilder httpClientBuilder) { ... }
/**
* Build the MistralAiModerationModel instance.
*
* @return Configured MistralAiModerationModel
*/
public MistralAiModerationModel build() { ... }
}
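Putting the builder options together, a fully configured moderation model might look like the sketch below (the endpoint, timeout, and retry values are illustrative, and java.time.Duration is assumed to be imported):
MistralAiModerationModel configuredModel = new MistralAiModerationModel.Builder()
        .apiKey(System.getenv("MISTRAL_API_KEY"))
        .modelName("mistral-moderation-latest")   // default model name
        .baseUrl("https://api.mistral.ai/v1")     // default endpoint, shown for illustration
        .timeout(Duration.ofSeconds(30))          // default is 60 seconds
        .maxRetries(3)                            // default is 2
        .logRequests(true)                        // verbose; typically disabled in production
        .logResponses(true)
        .build();
Request and response logging is useful while integrating, but is usually switched off outside development.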
The moderation model analyzes content across these categories:
- Sexual content: content of a sexual nature, including explicit descriptions or adult content.
- Hate speech: discriminatory language or content promoting hatred toward individuals or groups based on protected characteristics.
- Harassment: content that harasses, bullies, threatens, or intimidates individuals.
- Violence: content depicting, glorifying, or promoting violence, gore, or physical harm.
- Self-harm: content promoting, encouraging, or depicting self-harm or suicide.
- Dangerous content: content related to dangerous activities, weapons, or illegal substances.
- PII: content containing personal information such as names, addresses, phone numbers, email addresses, or social security numbers.
- Health: medical advice, health-related claims, or content that could impact health decisions.
- Legal: legal advice or content related to legal matters.
Check if content should be blocked based on moderation results:
Response<Moderation> response = moderationModel.moderate(text);
Moderation moderation = response.content();
if (moderation.flagged()) {
// Content is flagged - take appropriate action
blockContent();
logger.warn("Content blocked - flagged text: {}", moderation.flaggedText());
} else {
// Content is safe
processContent(text);
}
// Note: The flagged() method returns a boolean indicating if content is flagged.
// For detailed category-level control, you would need to access internal APIs.
Apply a consistent moderation policy:
public class PolicyBasedModerator {
private final MistralAiModerationModel model;
public PolicyBasedModerator(String apiKey) {
this.model = new MistralAiModerationModel.Builder()
.apiKey(apiKey)
.build();
}
public boolean isAllowed(String content) {
Response<Moderation> response = model.moderate(content);
Moderation moderation = response.content();
if (moderation.flagged()) {
logger.warn("Content blocked - flagged text: {}", moderation.flaggedText());
return false;
}
return true;
}
}
// Note: Category-specific policies (checking individual categories like violence,
// hate, sexual, etc.) require access to internal MistralAiModerationResult APIs
// which are not exposed through the standard Moderation interface.
Collect user feedback to improve moderation:
public class FeedbackModerator {
private final MistralAiModerationModel model;
private final FalsePositiveTracker tracker;
public ModerationDecision moderate(String content, String userId) {
Response<Moderation> response = model.moderate(content);
Moderation moderation = response.content();
if (moderation.flagged()) {
// Log for review
String moderationId = logModeration(content, moderation, userId);
return ModerationDecision.blocked(moderationId);
}
return ModerationDecision.allowed();
}
public void reportFalsePositive(String moderationId, String userId) {
// Track false positives to adjust thresholds
tracker.recordFalsePositive(moderationId, userId);
}
}
Use moderation alongside other safety mechanisms:
public class ComprehensiveSafetyCheck {
private final MistralAiModerationModel moderationModel;
private final SpamDetector spamDetector;
private final ProfanityFilter profanityFilter;
public SafetyResult checkContent(String content) {
// Check 1: Moderation API
Response<Moderation> modResponse = moderationModel.moderate(content);
if (modResponse.content().flagged()) {
return SafetyResult.blocked("Content moderation");
}
// Check 2: Spam detection
if (spamDetector.isSpam(content)) {
return SafetyResult.blocked("Spam");
}
// Check 3: Profanity filter
if (profanityFilter.containsProfanity(content)) {
return SafetyResult.warning("Profanity detected");
}
return SafetyResult.safe();
}
}
Track moderation patterns and trends:
public class ModerationMetrics {
private final MistralAiModerationModel model;
private final MetricsCollector metrics;
public Moderation moderateWithMetrics(String content) {
long startTime = System.currentTimeMillis();
Response<Moderation> response = model.moderate(content);
Moderation moderation = response.content();
long duration = System.currentTimeMillis() - startTime;
// Record metrics
metrics.recordLatency("moderation", duration);
metrics.incrementCounter("moderation.total");
if (moderation.flagged()) {
metrics.incrementCounter("moderation.flagged");
} else {
metrics.incrementCounter("moderation.safe");
}
return moderation;
}
}
Cache moderation results for identical content:
public class CachedModerationModel {
private final MistralAiModerationModel model;
private final Cache<String, Moderation> cache;
public CachedModerationModel(String apiKey, Duration cacheDuration) {
this.model = new MistralAiModerationModel.Builder()
.apiKey(apiKey)
.build();
this.cache = CacheBuilder.newBuilder()
.expireAfterWrite(cacheDuration)
.maximumSize(10000)
.build();
}
public Moderation moderate(String content) {
String key = hashContent(content);
try {
return cache.get(key, () -> {
Response<Moderation> response = model.moderate(content);
return response.content();
});
} catch (ExecutionException e) {
throw new RuntimeException("Moderation failed", e);
}
}
private String hashContent(String content) {
return DigestUtils.sha256Hex(content);
}
}
// Filter user posts before publishing
public boolean canPublishPost(String postContent, String userId) {
Response<Moderation> response = moderationModel.moderate(postContent);
Moderation moderation = response.content();
if (moderation.flagged()) {
notifyUser(userId, "Your post contains content that violates our policies.");
logViolation(userId, postContent, moderation);
return false;
}
return true;
}
// Real-time message filtering
public void onMessageReceived(String message, String senderId, String chatRoomId) {
Response<Moderation> response = moderationModel.moderate(message);
if (response.content().flagged()) {
// Don't deliver the message
warnUser(senderId, "Your message was blocked due to policy violations.");
alertModerators(chatRoomId, senderId, message);
} else {
broadcastMessage(chatRoomId, message, senderId);
}
}
// Moderate user-generated content
public ContentStatus reviewContent(Content content) {
Response<Moderation> response = moderationModel.moderate(content.getText());
Moderation moderation = response.content();
if (!moderation.flagged()) {
return ContentStatus.APPROVED;
}
// Flag for human review
return ContentStatus.PENDING_REVIEW;
}
// Ensure AI responses are appropriate
public String generateSafeResponse(String userQuery) {
// Generate response
ChatResponse aiResponse = chatModel.chat(
List.of(UserMessage.from(userQuery))
);
String responseText = aiResponse.aiMessage().text();
// Check if response is appropriate
Response<Moderation> moderation = moderationModel.moderate(responseText);
if (moderation.content().flagged()) {
// Regenerate with safety prompt or return fallback
return "I apologize, but I can't provide that information. How else can I help you?";
}
return responseText;
}
Install with Tessl CLI
npx tessl i tessl/maven-dev-langchain4j--langchain4j-mistral-ai@1.11.0