AWS Bedrock integration for LangChain4j, enabling Java applications to interact with various LLM providers through a unified interface.
Process and respond to guardrail violations.
import dev.langchain4j.model.bedrock.BedrockChatResponseMetadata;
import dev.langchain4j.model.bedrock.GuardrailAssessmentSummary;
import dev.langchain4j.model.bedrock.GuardrailAssessment;
// Send the chat request; guardrail assessments (when a guardrail is configured)
// are surfaced on the response metadata rather than via a separate call.
ChatResponse response = model.chat(request);
// Bedrock-specific metadata is a subtype of the generic metadata; pattern-match to reach it.
if (response.metadata() instanceof BedrockChatResponseMetadata metadata) {
GuardrailAssessmentSummary summary = metadata.guardrailAssessmentSummary();
// The summary is null when no guardrail produced an assessment for this exchange.
if (summary != null) {
// Check input violations
List<GuardrailAssessment> inputAssessments = summary.inputAssessments();
// Check output violations
// NOTE(review): "ouputAssessments" looks like a misspelling of "outputAssessments" —
// confirm against the actual API method name before relying on it.
List<GuardrailAssessment> outputAssessments = summary.ouputAssessments();
}
}GuardrailAssessmentSummary summary = metadata.guardrailAssessmentSummary();
// Only inspect assessments when a guardrail actually ran for this exchange.
if (summary != null) {
// Input violations
for (GuardrailAssessment assessment : summary.inputAssessments()) {
GuardrailAssessment.Policy policy = assessment.policy();
GuardrailAssessment.Action action = assessment.action();
String name = assessment.name();
System.out.println("Input violation:");
System.out.println(" Policy: " + policy);
System.out.println(" Action: " + action);
System.out.println(" Name: " + name);
}
// Output violations
// NOTE(review): "ouputAssessments" — likely a typo for "outputAssessments"; verify against the API.
for (GuardrailAssessment assessment : summary.ouputAssessments()) {
System.out.println("Output violation:");
System.out.println(" Policy: " + assessment.policy());
System.out.println(" Action: " + assessment.action());
}
}for (GuardrailAssessment assessment : summary.inputAssessments()) {
// Reject the exchange outright when the guardrail blocked the input.
if (assessment.action() == GuardrailAssessment.Action.BLOCKED) {
System.err.println("Input blocked by " + assessment.policy() + " policy");
System.err.println("Policy name: " + assessment.name());
// Show error to user
throw new IllegalArgumentException("Your input violates content policy");
}
}for (GuardrailAssessment assessment : summary.inputAssessments()) {
// SENSITIVE + ANONYMIZED means PII was detected and masked rather than blocked,
// so the conversation continues with placeholders substituted in the text.
if (assessment.policy() == GuardrailAssessment.Policy.SENSITIVE) {
if (assessment.action() == GuardrailAssessment.Action.ANONYMIZED) {
System.out.println("PII detected and anonymized");
System.out.println("Policy: " + assessment.name());
// Response may contain: "My email is [EMAIL] and SSN is [SSN]"
}
}
}for (GuardrailAssessment assessment : summary.inputAssessments()) {
// Map each policy type to a user-facing message, reacting only to the actions
// that policy can produce (BLOCKED for most; ANONYMIZED also for SENSITIVE).
switch (assessment.policy()) {
case TOPIC:
if (assessment.action() == GuardrailAssessment.Action.BLOCKED) {
showUserMessage("I'm not able to discuss that topic.");
}
break;
case CONTENT:
if (assessment.action() == GuardrailAssessment.Action.BLOCKED) {
showUserMessage("Your message contains inappropriate content.");
}
break;
case WORD:
if (assessment.action() == GuardrailAssessment.Action.BLOCKED) {
showUserMessage("Please remove inappropriate language.");
}
break;
case SENSITIVE:
// SENSITIVE can either mask the PII (ANONYMIZED) or reject the message (BLOCKED).
if (assessment.action() == GuardrailAssessment.Action.ANONYMIZED) {
showUserMessage("Personal information was detected and protected.");
} else if (assessment.action() == GuardrailAssessment.Action.BLOCKED) {
showUserMessage("Please don't share personal information.");
}
break;
case CONTEXT:
if (assessment.action() == GuardrailAssessment.Action.BLOCKED) {
showUserMessage("Response not grounded in provided context.");
}
break;
}
}import org.slf4j.Logger;
/**
 * Logs every guardrail assessment in the summary at WARN level,
 * tagging each entry with its direction (input vs. output).
 * Safe to call with a null summary (no-op).
 */
private void logAssessments(GuardrailAssessmentSummary summary) {
if (summary == null) return;
// Log input violations
for (GuardrailAssessment assessment : summary.inputAssessments()) {
logger.warn("Input violation: policy={}, action={}, name={}",
assessment.policy(), assessment.action(), assessment.name());
}
// Log output violations
// NOTE(review): "ouputAssessments" — confirm this spelling matches the API.
for (GuardrailAssessment assessment : summary.ouputAssessments()) {
logger.warn("Output violation: policy={}, action={}, name={}",
assessment.policy(), assessment.action(), assessment.name());
}
}try {
ChatResponse response = model.chat(request);
// Always check for assessments; the summary lives on Bedrock-specific metadata.
if (response.metadata() instanceof BedrockChatResponseMetadata metadata) {
GuardrailAssessmentSummary summary = metadata.guardrailAssessmentSummary();
handleAssessments(summary);
}
} catch (Exception e) {
// Guardrail blocking may throw exceptions.
// Fix: Throwable.getMessage() may return null (e.g. for NPEs or wrapped
// causes), which would throw here and mask the original exception —
// guard with a null check before matching.
String message = e.getMessage();
if (message != null && message.contains("guardrail")) {
System.err.println("Content blocked by guardrail");
// Show user-friendly error
} else {
throw e;
}
}/**
 * Maps a guardrail assessment to a user-friendly message and displays it.
 * Uses a switch expression over the policy enum; all visible constants are
 * covered, so no default arm is written.
 */
private void showGuardrailError(GuardrailAssessment assessment) {
String userMessage = switch (assessment.policy()) {
case TOPIC -> "I'm not able to discuss that topic.";
case CONTENT -> "Your message contains inappropriate content.";
case WORD -> "Please rephrase without inappropriate language.";
case SENSITIVE -> "Please don't share personal information.";
case CONTEXT -> "I can only provide information based on the given context.";
};
// Display to user (UI, console, log, etc.)
System.out.println(userMessage);
}public class GuardrailHandler {
private final Logger logger = LoggerFactory.getLogger(GuardrailHandler.class);
/**
 * Inspects a chat response for guardrail assessments: logs every assessment
 * and throws {@code ContentViolationException} on the first BLOCKED one.
 * No-op for non-Bedrock metadata or when no guardrail ran.
 */
public void handleResponse(ChatResponse response) {
    // Guardrail data is only present on Bedrock-specific response metadata.
    if (!(response.metadata() instanceof BedrockChatResponseMetadata metadata)) {
        return;
    }
    GuardrailAssessmentSummary summary = metadata.guardrailAssessmentSummary();
    if (summary == null) {
        return;
    }
    // Same treatment for both directions: log everything, fail fast on BLOCKED.
    enforce("Input", summary.inputAssessments());
    enforce("Output", summary.ouputAssessments());
}

/** Logs each assessment and throws when the guardrail blocked the content. */
private void enforce(String direction, List<GuardrailAssessment> assessments) {
    for (GuardrailAssessment assessment : assessments) {
        logViolation(direction, assessment);
        if (assessment.action() == GuardrailAssessment.Action.BLOCKED) {
            throw new ContentViolationException(
                direction + " blocked by " + assessment.policy() + " policy"
            );
        }
    }
}
// Emits one WARN entry per assessment, tagged with its direction ("Input"/"Output").
private void logViolation(String direction, GuardrailAssessment assessment) {
logger.warn("{} violation: policy={}, action={}, name={}",
direction, assessment.policy(), assessment.action(), assessment.name());
}
}Related: