Core classes and interfaces of LangChain4j providing foundational abstractions for LLM interaction, RAG, embeddings, agents, and observability
Package: dev.langchain4j.guardrail
Thread-Safety: Guardrail implementations should be thread-safe
Use Case: Input validation, output filtering, content moderation, safety checks
Guardrails provide a mechanism to validate, filter, and transform content at different stages of LLM interaction, ensuring safe and appropriate behavior.
Guardrails act as checkpoints in the LLM interaction pipeline:
package dev.langchain4j.guardrail;
import dev.langchain4j.data.message.ChatMessage;
/**
 * Base interface for guardrails.
 *
 * <p>A guardrail is a checkpoint in the LLM interaction pipeline that validates,
 * filters, or transforms a list of chat messages before or after a model call.
 *
 * <p>Thread-safety: implementations should be thread-safe, since a single
 * instance may be shared across concurrent requests.
 */
public interface Guardrail {
/**
 * Applies this guardrail to the given messages.
 *
 * @param messages the chat messages to validate (input or output of an LLM call)
 * @return the result: allowed, blocked (with a reason), or allowed with
 *         transformed messages
 */
GuardrailResult apply(List<ChatMessage> messages);
}package dev.langchain4j.guardrail;
/**
 * Result of applying a {@link Guardrail} to a list of messages.
 *
 * <p>A result is either allowed (optionally carrying transformed messages,
 * e.g. after PII redaction) or blocked with a human-readable reason.
 *
 * <p>Immutable and therefore thread-safe. Instances are created only through
 * the static factory methods.
 */
public class GuardrailResult {

    private final boolean allowed;
    private final String reason;
    private final List<ChatMessage> transformedMessages;

    private GuardrailResult(boolean allowed, String reason, List<ChatMessage> transformedMessages) {
        this.allowed = allowed;
        this.reason = reason;
        // Defensive copy keeps the result immutable even if the caller's list changes.
        this.transformedMessages =
            transformedMessages == null ? null : List.copyOf(transformedMessages);
    }

    /**
     * The request/response is allowed as-is.
     */
    public static GuardrailResult allowed() {
        return new GuardrailResult(true, null, null);
    }

    /**
     * The request/response is allowed, but the messages were transformed
     * (e.g. PII was redacted); callers should use the transformed messages.
     *
     * @param transformedMessages the messages to use instead of the originals
     */
    public static GuardrailResult allowed(List<ChatMessage> transformedMessages) {
        return new GuardrailResult(true, null, transformedMessages);
    }

    /**
     * The request/response is blocked.
     *
     * @param reason a clear, actionable explanation of why it was blocked
     */
    public static GuardrailResult blocked(String reason) {
        return new GuardrailResult(false, reason, null);
    }

    /** @return {@code true} if the content may proceed */
    public boolean isAllowed() { return allowed; }

    /** @return the block reason, or {@code null} when allowed */
    public String reason() { return reason; }

    /** @return transformed messages, or {@code null} when the originals should be used */
    public List<ChatMessage> transformedMessages() { return transformedMessages; }
}
import dev.langchain4j.model.moderation.ModerationModel;
import dev.langchain4j.model.moderation.Moderation;
/**
 * Guardrail that delegates content-safety checks to a {@link ModerationModel}.
 *
 * <p>Blocks the interaction when the moderation model flags the content;
 * never transforms messages.
 */
public class ModerationGuardrail implements Guardrail {

    private final ModerationModel moderationModel;

    /**
     * @param moderationModel the model used to moderate messages; must not be null
     * @throws IllegalArgumentException if {@code moderationModel} is null
     */
    public ModerationGuardrail(ModerationModel moderationModel) {
        if (moderationModel == null) {
            throw new IllegalArgumentException("moderationModel must not be null");
        }
        this.moderationModel = moderationModel;
    }

    @Override
    public GuardrailResult apply(List<ChatMessage> messages) {
        // Moderate all messages in a single call.
        Response<Moderation> response = moderationModel.moderate(messages);
        Moderation moderation = response.content();
        if (moderation.flagged()) {
            return GuardrailResult.blocked(
                "Content violates safety policies: " + moderation.flags()
            );
        }
        return GuardrailResult.allowed();
    }
}
import java.util.regex.Pattern;
/**
 * Guardrail that redacts personally identifiable information (PII) —
 * email addresses, US-style phone numbers, and SSNs — from message text.
 *
 * <p>Always allows the request; when PII is found, returns transformed
 * messages with the sensitive spans replaced by redaction markers.
 *
 * <p>Thread-safe: the compiled patterns are immutable and shared.
 */
public class PiiRedactionGuardrail implements Guardrail {

    private static final Pattern EMAIL_PATTERN =
        Pattern.compile("[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\\.[a-zA-Z]{2,}");
    private static final Pattern PHONE_PATTERN =
        Pattern.compile("\\b\\d{3}[-.]?\\d{3}[-.]?\\d{4}\\b");
    private static final Pattern SSN_PATTERN =
        Pattern.compile("\\b\\d{3}-\\d{2}-\\d{4}\\b");

    @Override
    public GuardrailResult apply(List<ChatMessage> messages) {
        List<ChatMessage> redacted = messages.stream()
            .map(this::redactPii)
            .collect(Collectors.toList());
        return GuardrailResult.allowed(redacted);
    }

    /**
     * Returns the message with PII redacted, or the original message unchanged
     * when it has no text or contains no PII — avoiding a needless rebuild,
     * which would otherwise drop any extra message attributes.
     */
    private ChatMessage redactPii(ChatMessage message) {
        String original = message.text();
        if (original == null) {
            return message;
        }
        String text = EMAIL_PATTERN.matcher(original).replaceAll("[EMAIL REDACTED]");
        text = PHONE_PATTERN.matcher(text).replaceAll("[PHONE REDACTED]");
        text = SSN_PATTERN.matcher(text).replaceAll("[SSN REDACTED]");
        if (text.equals(original)) {
            // Nothing was redacted - keep the original message (and its metadata) intact.
            return message;
        }
        // Reconstruct the message with redacted text.
        // NOTE(review): the from(...) factories rebuild only the text; any other
        // attributes on the original message are not carried over - confirm acceptable.
        if (message instanceof UserMessage) {
            return UserMessage.from(text);
        } else if (message instanceof AiMessage) {
            return AiMessage.from(text);
        } else if (message instanceof SystemMessage) {
            return SystemMessage.from(text);
        }
        return message;
    }
}
/**
* Enforces maximum message length
*/
public class LengthLimitGuardrail implements Guardrail {

    private final int maxLength;

    /**
     * @param maxLength maximum allowed characters per message; must be positive
     * @throws IllegalArgumentException if {@code maxLength} is not positive
     */
    public LengthLimitGuardrail(int maxLength) {
        if (maxLength <= 0) {
            throw new IllegalArgumentException("maxLength must be positive, got: " + maxLength);
        }
        this.maxLength = maxLength;
    }

    /**
     * Blocks if any message's text exceeds {@code maxLength} characters;
     * messages with no text are ignored.
     */
    @Override
    public GuardrailResult apply(List<ChatMessage> messages) {
        for (ChatMessage message : messages) {
            if (message.text() != null && message.text().length() > maxLength) {
                return GuardrailResult.blocked(
                    "Message exceeds maximum length of " + maxLength + " characters"
                );
            }
        }
        return GuardrailResult.allowed();
    }
}
import java.util.Set;
/**
 * Guardrail that blocks messages containing forbidden keywords.
 *
 * <p>Matching is by substring. In case-insensitive mode the keywords are
 * lower-cased once at construction and each message is lower-cased before
 * comparison.
 *
 * <p>Thread-safe: the keyword set is defensively copied and immutable.
 */
public class KeywordFilterGuardrail implements Guardrail {

    private final Set<String> forbiddenKeywords;
    private final boolean caseSensitive;

    /**
     * @param forbiddenKeywords keywords that must not appear in any message
     * @param caseSensitive whether matching is case-sensitive
     */
    public KeywordFilterGuardrail(Set<String> forbiddenKeywords, boolean caseSensitive) {
        // Copy defensively so later mutation of the caller's set cannot
        // change this guardrail or break its thread-safety.
        this.forbiddenKeywords = caseSensitive
            ? Set.copyOf(forbiddenKeywords)
            : forbiddenKeywords.stream()
                .map(String::toLowerCase)
                .collect(Collectors.toUnmodifiableSet());
        this.caseSensitive = caseSensitive;
    }

    @Override
    public GuardrailResult apply(List<ChatMessage> messages) {
        for (ChatMessage message : messages) {
            if (message.text() == null) continue;
            String text = caseSensitive ? message.text() : message.text().toLowerCase();
            for (String keyword : forbiddenKeywords) {
                if (text.contains(keyword)) {
                    // Deliberately vague: do not echo the matched keyword back to the user.
                    return GuardrailResult.blocked(
                        "Message contains forbidden content"
                    );
                }
            }
        }
        return GuardrailResult.allowed();
    }
}
/**
* Applies multiple guardrails in sequence
* Stops at first blocked result
*/
public class SequentialGuardrails implements Guardrail {
private final List<Guardrail> guardrails;
public SequentialGuardrails(Guardrail... guardrails) {
this.guardrails = Arrays.asList(guardrails);
}
@Override
public GuardrailResult apply(List<ChatMessage> messages) {
List<ChatMessage> currentMessages = messages;
for (Guardrail guardrail : guardrails) {
GuardrailResult result = guardrail.apply(currentMessages);
if (!result.isAllowed()) {
// First guardrail that blocks wins
return result;
}
// Use transformed messages for next guardrail
if (result.transformedMessages() != null) {
currentMessages = result.transformedMessages();
}
}
// All guardrails passed
return currentMessages == messages
? GuardrailResult.allowed()
: GuardrailResult.allowed(currentMessages);
}
}public class SafeChatService {
private final ChatModel chatModel;
private final Guardrail inputGuardrail;
private final Guardrail outputGuardrail;
public SafeChatService(ChatModel chatModel) {
this.chatModel = chatModel;
// Input validation: moderation + PII redaction + length limit
this.inputGuardrail = new SequentialGuardrails(
new ModerationGuardrail(moderationModel),
new PiiRedactionGuardrail(),
new LengthLimitGuardrail(10000)
);
// Output validation: moderation + formatting
this.outputGuardrail = new SequentialGuardrails(
new ModerationGuardrail(moderationModel),
new OutputFormattingGuardrail()
);
}
public String chat(String userInput) {
// 1. Validate input
UserMessage userMessage = UserMessage.from(userInput);
GuardrailResult inputResult = inputGuardrail.apply(List.of(userMessage));
if (!inputResult.isAllowed()) {
return "I cannot process that input: " + inputResult.reason();
}
// Use transformed messages if available (e.g., PII redacted)
List<ChatMessage> inputMessages = inputResult.transformedMessages() != null
? inputResult.transformedMessages()
: List.of(userMessage);
// 2. Get LLM response
ChatResponse response = chatModel.chat(inputMessages);
// 3. Validate output
GuardrailResult outputResult = outputGuardrail.apply(
List.of(response.aiMessage())
);
if (!outputResult.isAllowed()) {
return "I apologize, but I cannot provide that response.";
}
// Use transformed output if available
if (outputResult.transformedMessages() != null) {
return outputResult.transformedMessages().get(0).text();
}
return response.aiMessage().text();
}
}import com.google.common.util.concurrent.RateLimiter;
/**
 * Guardrail that rate-limits requests per user with one token-bucket
 * {@link RateLimiter} per user ID.
 *
 * <p>Thread-safe: limiters live in a {@link ConcurrentHashMap} and Guava's
 * {@code RateLimiter} is itself thread-safe.
 *
 * <p>NOTE(review): the limiter map grows without bound (one entry per user
 * ID, never evicted) - consider an expiring cache for long-running services.
 */
public class RateLimitGuardrail implements Guardrail {

    private final Map<String, RateLimiter> userLimiters = new ConcurrentHashMap<>();
    private final double permitsPerSecond;

    /**
     * @param permitsPerSecond allowed requests per second per user; must be positive
     * @throws IllegalArgumentException if {@code permitsPerSecond} is not positive
     */
    public RateLimitGuardrail(double permitsPerSecond) {
        if (permitsPerSecond <= 0) {
            throw new IllegalArgumentException(
                "permitsPerSecond must be positive, got: " + permitsPerSecond);
        }
        this.permitsPerSecond = permitsPerSecond;
    }

    @Override
    public GuardrailResult apply(List<ChatMessage> messages) {
        // Extract user ID from message metadata or context.
        String userId = extractUserId(messages);
        RateLimiter limiter = userLimiters.computeIfAbsent(
            userId,
            k -> RateLimiter.create(permitsPerSecond)
        );
        // tryAcquire() never blocks; it fails immediately when no permit is available.
        if (!limiter.tryAcquire()) {
            return GuardrailResult.blocked(
                "Rate limit exceeded. Please try again later."
            );
        }
        return GuardrailResult.allowed();
    }

    /**
     * Resolves the user this request belongs to.
     *
     * <p>NOTE(review): currently a placeholder returning a single shared ID,
     * which makes the limit effectively global - wire in real user tracking.
     */
    private String extractUserId(List<ChatMessage> messages) {
        // Implementation depends on how user ID is tracked
        return "default-user";
    }
}
/**
* Ensures conversation doesn't exceed context window
*/
public class ContextWindowGuardrail implements Guardrail {
private final int maxTokens;
private final Tokenizer tokenizer;
public ContextWindowGuardrail(int maxTokens, Tokenizer tokenizer) {
this.maxTokens = maxTokens;
this.tokenizer = tokenizer;
}
@Override
public GuardrailResult apply(List<ChatMessage> messages) {
int totalTokens = messages.stream()
.mapToInt(msg -> tokenizer.countTokens(msg.text()))
.sum();
if (totalTokens > maxTokens) {
// Truncate older messages
List<ChatMessage> truncated = truncateMessages(messages, maxTokens);
return GuardrailResult.allowed(truncated);
}
return GuardrailResult.allowed();
}
private List<ChatMessage> truncateMessages(List<ChatMessage> messages, int maxTokens) {
// Keep system message and recent messages
List<ChatMessage> result = new ArrayList<>();
if (!messages.isEmpty() && messages.get(0) instanceof SystemMessage) {
result.add(messages.get(0));
}
int tokens = result.isEmpty() ? 0 : tokenizer.countTokens(result.get(0).text());
// Add messages from end until we hit limit
for (int i = messages.size() - 1; i >= 0; i--) {
ChatMessage msg = messages.get(i);
int msgTokens = tokenizer.countTokens(msg.text());
if (tokens + msgTokens <= maxTokens) {
result.add(0, msg);
tokens += msgTokens;
} else {
break;
}
}
return result;
}
}/**
* Restricts LLM to specific topics using classifier
*/
public class TopicRestrictionGuardrail implements Guardrail {
private final ChatModel classifier;
private final Set<String> allowedTopics;
public TopicRestrictionGuardrail(ChatModel classifier, Set<String> allowedTopics) {
this.classifier = classifier;
this.allowedTopics = allowedTopics;
}
@Override
public GuardrailResult apply(List<ChatMessage> messages) {
// Get last user message
String userMessage = messages.stream()
.filter(m -> m instanceof UserMessage)
.reduce((first, second) -> second)
.map(ChatMessage::text)
.orElse("");
if (userMessage.isEmpty()) {
return GuardrailResult.allowed();
}
// Classify topic
String prompt = String.format("""
Classify the topic of this message.
Allowed topics: %s
Message: %s
Topic:
""", String.join(", ", allowedTopics), userMessage);
String topic = classifier.chat(prompt).toLowerCase();
if (!allowedTopics.stream().anyMatch(t -> topic.contains(t.toLowerCase()))) {
return GuardrailResult.blocked(
"This assistant is only available for topics: " +
String.join(", ", allowedTopics)
);
}
return GuardrailResult.allowed();
}
}// ✅ GOOD: Validate both input and output
GuardrailResult inputCheck = inputGuardrail.apply(userMessages);
// ... generate response ...
GuardrailResult outputCheck = outputGuardrail.apply(aiMessages);
// ❌ BAD: Only validate input
GuardrailResult inputCheck = inputGuardrail.apply(userMessages);
// No output validation - may return inappropriate content

// ✅ GOOD: Safe default on error
@Override
public GuardrailResult apply(List<ChatMessage> messages) {
try {
return performCheck(messages);
} catch (Exception e) {
log.error("Guardrail error", e);
// Fail closed (block)
return GuardrailResult.blocked("Safety check failed");
}
}

// ✅ GOOD: Helpful message
GuardrailResult.blocked("Your message is too long. Please keep it under 1000 characters.");
// ❌ BAD: Vague message
GuardrailResult.blocked("Blocked");

// ✅ GOOD: Modular, reusable guardrails
Guardrail inputGuardrail = new SequentialGuardrails(
new RateLimitGuardrail(10.0),
new LengthLimitGuardrail(10000),
new ModerationGuardrail(moderationModel),
new PiiRedactionGuardrail()
);
// Each guardrail is testable independently

// ✅ GOOD: Log for monitoring
GuardrailResult result = guardrail.apply(messages);
if (!result.isAllowed()) {
log.warn("Request blocked: {} - User: {}", result.reason(), userId);
metrics.increment("guardrail.blocked", "reason", result.reason());
}

@Test
public void testPiiRedactionGuardrail() {
Guardrail guardrail = new PiiRedactionGuardrail();
UserMessage message = UserMessage.from(
"My email is john@example.com and phone is 555-123-4567"
);
GuardrailResult result = guardrail.apply(List.of(message));
assertTrue(result.isAllowed());
assertNotNull(result.transformedMessages());
String redacted = result.transformedMessages().get(0).text();
assertFalse(redacted.contains("john@example.com"));
assertFalse(redacted.contains("555-123-4567"));
assertTrue(redacted.contains("[EMAIL REDACTED]"));
assertTrue(redacted.contains("[PHONE REDACTED]"));
}

| Pitfall | Solution |
|---|---|
| Only validating input | Validate both input and output |
| Throwing exceptions | Return GuardrailResult.blocked() |
| Not thread-safe | Use thread-safe data structures |
| Slow guardrails | Optimize or make async |
| No logging | Log blocked requests for monitoring |
| Vague block messages | Provide clear, actionable messages |
// For slow guardrails (e.g., external API calls)
CompletableFuture<GuardrailResult> future = CompletableFuture.supplyAsync(
() -> guardrail.apply(messages),
executor
);
GuardrailResult result = future.get(5, TimeUnit.SECONDS);

// Cache guardrail results for identical inputs
LoadingCache<String, GuardrailResult> cache = Caffeine.newBuilder()
.maximumSize(1000)
.expireAfterWrite(10, TimeUnit.MINUTES)
.build(key -> guardrail.apply(parseMessages(key)));

Install with Tessl CLI
npx tessl i tessl/maven-dev-langchain4j--langchain4j-core@1.11.0