LangChain4j integration for Mistral AI providing chat completion, streaming, embedding, moderation, and code completion capabilities
Comprehensive reference for model name enumerations, response metadata classes, and API enumerations used throughout the langchain4j-mistral-ai library.
Predefined identifiers for Mistral AI Chat Models.
public enum MistralAiChatModelName {
/** 7B parameter open-source model - Fast and efficient */
OPEN_MISTRAL_7B("open-mistral-7b"),
/** 8x7B mixture-of-experts open-source model */
OPEN_MIXTRAL_8x7B("open-mixtral-8x7b"),
/** 8x22B mixture-of-experts open-source model - Larger variant */
OPEN_MIXTRAL_8X22B("open-mixtral-8x22b"),
/** Small production model - Latest version */
MISTRAL_SMALL_LATEST("mistral-small-latest"),
/** Medium production model - Latest version */
MISTRAL_MEDIUM_LATEST("mistral-medium-latest"),
/** Large production model - Best performance, latest version */
MISTRAL_LARGE_LATEST("mistral-large-latest"),
/** Magistral small production model - Latest version */
MAGISTRAL_SMALL_LATEST("magistral-small-latest"),
/** Magistral medium production model - Latest version */
MAGISTRAL_MEDIUM_LATEST("magistral-medium-latest"),
/** Specialized moderation model (see moderation-model.md) - Latest version */
MISTRAL_MODERATION_LATEST("mistral-moderation-latest"),
/** Open Mistral Nemo variant */
OPEN_MISTRAL_NEMO("open-mistral-nemo"),
/** Codestral model for code tasks - Latest version */
CODESTRAL_LATEST("codestral-latest");
/**
* Get the string identifier for the model.
*
* @return Model identifier string
*/
public String toString() { ... }
}
Usage:
import dev.langchain4j.model.mistralai.MistralAiChatModelName;
// Use in chat model builder
MistralAiChatModel chatModel = MistralAiChatModel.builder()
.apiKey(apiKey)
.modelName(MistralAiChatModelName.MISTRAL_LARGE_LATEST)
.build();
// Get string value
String modelId = MistralAiChatModelName.MISTRAL_LARGE_LATEST.toString();
// Returns: "mistral-large-latest"
Predefined identifiers for Mistral AI Embedding Models.
public enum MistralAiEmbeddingModelName {
/** Mistral embedding model - 1024 dimensions */
MISTRAL_EMBED("mistral-embed");
/**
* Get the string identifier for the model.
*
* @return Model identifier string
*/
public String toString() { ... }
}
Usage:
import dev.langchain4j.model.mistralai.MistralAiEmbeddingModelName;
MistralAiEmbeddingModel embeddingModel = MistralAiEmbeddingModel.builder()
.apiKey(apiKey)
.modelName(MistralAiEmbeddingModelName.MISTRAL_EMBED)
.build();
Predefined identifiers for Fill-In-Middle Code Completion models.
public enum MistralAiFimModelName {
/** Codestral for code completion - Latest version */
CODESTRAL_LATEST("codestral-latest"),
/** Open Codestral Mamba variant */
OPEN_CODESTRAL_MAMBA("open-codestral-mamba");
/**
* Get the string identifier for the model.
*
* @return Model identifier string
*/
public String toString() { ... }
}
Usage:
import dev.langchain4j.model.mistralai.MistralAiFimModelName;
MistralAiFimModel fimModel = MistralAiFimModel.builder()
.apiKey(apiKey)
.modelName(MistralAiFimModelName.CODESTRAL_LATEST)
.build();
Extended metadata for chat responses, including Mistral AI-specific information.
public class MistralAiChatResponseMetadata extends ChatResponseMetadata {
/**
* Get the raw HTTP response for debugging and auditing.
* Returns SuccessfulHttpResponse object containing status code, headers, and body.
*
* @return SuccessfulHttpResponse object containing response details
*/
public SuccessfulHttpResponse rawHttpResponse() { ... }
/**
* Get raw server-sent events for streaming responses.
* Returns List of ServerSentEvent objects received during streaming.
*
* @return List of ServerSentEvent objects (null for non-streaming responses)
*/
public List<ServerSentEvent> rawServerSentEvents() { ... }
/**
* Create a builder for modifying this metadata.
*
* @return Builder instance with current values
*/
public Builder toBuilder() { ... }
/**
* Create a new builder for constructing MistralAiChatResponseMetadata.
*
* @return Builder instance
* @throws IllegalArgumentException if parameter validation fails
*/
public static Builder builder() { ... }
}
Inherited from ChatResponseMetadata:
/**
* Get token usage information.
*
* @return TokenUsage with prompt, completion, and total token counts
*/
public TokenUsage tokenUsage() { ... }
/**
* Get the reason generation finished.
*
* @return FinishReason (STOP, LENGTH, TOOL_CALLS, CONTENT_FILTER, etc.)
*/
public FinishReason finishReason() { ... }
Builder:
public static class Builder extends ChatResponseMetadata.Builder<Builder> {
/**
* Set the raw HTTP response.
*
* @param rawHttpResponse SuccessfulHttpResponse (non-null) object
* @return Builder instance
* @throws IllegalArgumentException if parameter validation fails
*/
public Builder rawHttpResponse(SuccessfulHttpResponse rawHttpResponse) { ... }
/**
* Set the raw server-sent events.
*
* @param rawServerSentEvents List (non-null) of ServerSentEvent objects
* @return Builder instance
* @throws IllegalArgumentException if parameter validation fails
*/
public Builder rawServerSentEvents(List<ServerSentEvent> rawServerSentEvents) { ... }
/**
* Build the MistralAiChatResponseMetadata instance.
*
* @return Configured MistralAiChatResponseMetadata
*/
public MistralAiChatResponseMetadata build() { ... }
}
Usage:
ChatResponse response = chatModel.chat(messages);
// Access standard metadata
TokenUsage usage = response.tokenUsage();
FinishReason reason = response.finishReason();
// Access Mistral-specific metadata
if (response.metadata() instanceof MistralAiChatResponseMetadata) {
MistralAiChatResponseMetadata metadata =
(MistralAiChatResponseMetadata) response.metadata();
SuccessfulHttpResponse rawHttp = metadata.rawHttpResponse();
List<ServerSentEvent> sseEvents = metadata.rawServerSentEvents();
// Use for debugging or auditing
logger.debug("Raw HTTP response: {}", rawHttp);
}
Token usage information returned in API responses.
public class MistralAiUsage {
/**
* Get the number of tokens in the prompt.
*
* @return Prompt token count
*/
public Integer promptTokens() { ... }
/**
* Get the number of tokens in the completion.
*
* @return Completion token count
*/
public Integer completionTokens() { ... }
/**
* Get the total number of tokens used.
*
* @return Total token count (prompt + completion)
*/
public Integer totalTokens() { ... }
/**
* Create MistralAiUsage from LangChain4j TokenUsage.
*
* @param tokenUsage TokenUsage (non-null) from LangChain4j
* @return MistralAiUsage instance
*/
public static MistralAiUsage from(TokenUsage tokenUsage) { ... }
}
Usage:
// From response metadata
ChatResponse response = chatModel.chat(messages);
TokenUsage usage = response.tokenUsage();
System.out.println("Prompt tokens: " + usage.inputTokenCount());
System.out.println("Completion tokens: " + usage.outputTokenCount());
System.out.println("Total tokens: " + usage.totalTokenCount());
// Cost estimation
double costPerToken = 0.0001;
double totalCost = usage.totalTokenCount() * costPerToken;
System.out.println("Estimated cost: $" + totalCost);
Message roles in chat conversations.
public enum MistralAiRole {
/** User message role */
USER("user"),
/** AI assistant message role */
ASSISTANT("assistant"),
/** System message role for instructions */
SYSTEM("system");
/**
* Get the string identifier for the role.
*
* @return Role identifier string
*/
public String toString() { ... }
}
Usage:
These are internal API types typically mapped automatically from LangChain4j message types:
// LangChain4j messages are automatically mapped to MistralAiRole
UserMessage.from("Hello") // → MistralAiRole.USER
AiMessage.from("Hi there") // → MistralAiRole.ASSISTANT
SystemMessage.from("You are...") // → MistralAiRole.SYSTEM
Types of tools available for function calling.
public enum MistralAiToolType {
/** Function tool type */
FUNCTION("function");
/**
* Get the string identifier for the tool type.
*
* @return Tool type identifier string
*/
public String toString() { ... }
}
Usage:
// Typically used internally when defining tools
// LangChain4j ToolSpecification is automatically mapped to FUNCTION type
Tool choice strategies for controlling when the model calls tools/functions.
public enum MistralAiToolChoiceName {
/** Model automatically decides when to call tools */
AUTO("auto"),
/** Model must call at least one tool */
ANY("any"),
/** Model cannot call any tools */
NONE("none");
/**
* Get the string identifier for the tool choice.
*
* @return Tool choice identifier string
*/
public String toString() { ... }
}
Usage:
Tool choice is typically controlled through LangChain4j's API rather than directly:
// AUTO: Model decides (default behavior)
ChatRequestParameters params = ChatRequestParameters.builder()
.toolSpecifications(tools)
.build();
ChatRequest request = ChatRequest.builder()
.messages(messages)
.parameters(params)
.build();
ChatResponse response = chatModel.chat(request);
// ANY: Force tool call by providing tools
// Model will always attempt to call at least one tool
// NONE: Don't provide tools
ChatResponse response = chatModel.chat(messages);
Response output format types.
public enum MistralAiResponseFormatType {
/** Plain text response format */
TEXT("text"),
/** JSON object response format */
JSON_OBJECT("json_object");
/**
* Get the string identifier for the response format type.
*
* @return Response format type identifier string
*/
public String toString() { ... }
}
Usage:
import dev.langchain4j.model.mistralai.internal.api.MistralAiResponseFormat;
import dev.langchain4j.model.mistralai.internal.api.MistralAiResponseFormatType;
// Text response (default)
// No need to set explicitly
// JSON object response
MistralAiResponseFormat jsonFormat = MistralAiResponseFormat.builder()
.type(MistralAiResponseFormatType.JSON_OBJECT)
.build();
MistralAiChatModel chatModel = MistralAiChatModel.builder()
.apiKey(apiKey)
.responseFormat(jsonFormat)
.build();
These types are part of the internal API and are primarily used for mapping between LangChain4j and Mistral AI formats. They are documented here for completeness but should not typically be instantiated directly by users.
Complete model metadata class with all methods.
public class MistralAiModelCard {
/**
* Get the model identifier.
*
* @return Model ID string (e.g., "mistral-large-latest")
*/
public String getId() { ... }
/**
* Get the object type (always "model").
*
* @return Object type string
*/
public String getObject() { ... }
/**
* Get the creation timestamp (Unix timestamp).
*
* @return Creation time as Unix timestamp (Integer, not Long)
*/
public Integer getCreated() { ... }
/**
* Get the owner/organization.
*
* @return Owner identifier
*/
public String getOwnerBy() { ... }
/**
* Get root model identifier.
*
* @return Root model string
*/
public String getRoot() { ... }
/**
* Get parent model identifier.
*
* @return Parent model string
*/
public String getParent() { ... }
/**
* Get model permissions.
*
* @return List of MistralAiModelPermission objects
*/
public List<MistralAiModelPermission> getPermission() { ... }
/**
* Create a builder for constructing MistralAiModelCard instances.
*
* @return MistralAiModelCardBuilder instance
*/
public static MistralAiModelCardBuilder builder() { ... }
/**
* Check equality with another object.
*
* @param obj Object (non-null) to compare with
* @return True if equal
*/
public boolean equals(Object obj) { ... }
/**
* Get hash code.
*
* @return Hash code value
*/
public int hashCode() { ... }
/**
* Get string representation.
*
* @return String representation of model card
*/
public String toString() { ... }
}
Model permission information (used internally by model cards).
public class MistralAiModelPermission {
/**
* Get the permission identifier.
*
* @return Permission ID
*/
public String getId() { ... }
/**
* Get the object type.
*
* @return Object type string
*/
public String getObject() { ... }
/**
* Get the creation timestamp.
*
* @return Creation time as Unix timestamp (Integer, not Long)
*/
public Integer getCreated() { ... }
/**
* Check if organization can create engines.
*
* @return True if allowed
*/
public Boolean getAllowCreateEngine() { ... }
/**
* Check if sampling is allowed.
*
* @return True if allowed
*/
public Boolean getAllowSampling() { ... }
/**
* Check if logprobs are allowed.
*
* @return True if allowed
*/
public Boolean getAllowLogprobs() { ... }
/**
* Check if search indices are allowed.
*
* @return True if allowed
*/
public Boolean getAllowSearchIndices() { ... }
/**
* Check if view is allowed.
*
* @return True if allowed
*/
public Boolean getAllowView() { ... }
/**
* Check if fine-tuning is allowed.
*
* @return True if allowed
*/
public Boolean getAllowFineTuning() { ... }
/**
* Get the organization identifier.
*
* @return Organization ID
*/
public String getOrganization() { ... }
/**
* Get the group identifier.
*
* @return Group ID
*/
public String getGroup() { ... }
/**
* Check if this is a blocking permission.
*
* @return True if blocking
*/
public Boolean getIsBlocking() { ... }
/**
* Create a builder for constructing MistralAiModelPermission instances.
*
* @return MistralAiModelPermissionBuilder instance
*/
public static MistralAiModelPermissionBuilder builder() { ... }
/**
* Check equality with another object.
*
* @param obj Object (non-null) to compare with
* @return True if equal
*/
public boolean equals(Object obj) { ... }
/**
* Get hash code.
*
* @return Hash code value
*/
public int hashCode() { ... }
/**
* Get string representation.
*
* @return String representation
*/
public String toString() { ... }
}
// Typically handled automatically by LangChain4j
UserMessage userMsg = UserMessage.from("Hello");
// Internally converted to MistralAiChatMessage with MistralAiTextContent
// Multimodal content
UserMessage multimodal = UserMessage.from(
TextContent.from("What's in this image?"),
ImageContent.from(image)
);
// Internally converted to MistralAiChatMessage with multiple content items
// Define tools using LangChain4j API
// Note: JsonSchemaProperty is not available in this library
ToolSpecification weatherTool = ToolSpecification.builder()
.name("get_weather")
.description("Get weather for a location")
// Use appropriate ToolSpecification.builder() methods for parameters
.build();
// Internally converted to MistralAiTool with MistralAiFunction
ChatRequestParameters params = ChatRequestParameters.builder()
.toolSpecifications(weatherTool)
.build();
ChatRequest request = ChatRequest.builder()
.messages(messages)
.parameters(params)
.build();
ChatResponse response = chatModel.chat(request);
// Check for tool calls in response
if (response.aiMessage().hasToolExecutionRequests()) {
for (ToolExecutionRequest request : response.aiMessage().toolExecutionRequests()) {
// Tool execution request from MistralAiToolCall
String toolName = request.name();
String arguments = request.arguments();
}
}
Response<Moderation> modResponse = moderationModel.moderate(text);
Moderation moderation = modResponse.content();
if (moderation.flagged()) {
// The Moderation class only provides flagged() and flaggedText() methods
System.out.println("Content flagged: " + moderation.flagged());
System.out.println("Flagged text: " + moderation.flaggedText());
// Note: Detailed category scores (from MistralAiCategories and MistralAiCategoryScores)
// are only available through internal MistralAiModerationResult APIs, not through
// the standard Moderation interface
}
Always prefer enum constants over string literals for type safety:
// Good: Type-safe, refactor-friendly
.modelName(MistralAiChatModelName.MISTRAL_LARGE_LATEST)
// Acceptable: For custom or new models not yet in enum
.modelName("custom-model-name")
// Avoid: Error-prone
.modelName("mistral-large-latest") // Typos not caught at compile time
Always check instance type before casting:
ChatResponse response = chatModel.chat(messages);
// Safe casting
if (response.metadata() instanceof MistralAiChatResponseMetadata mistralMetadata) {
SuccessfulHttpResponse rawHttp = mistralMetadata.rawHttpResponse();
// Use Mistral-specific metadata
}
// Note: the instanceof pattern above already uses pattern matching (Java 16+).
// Record deconstruction patterns (Java 21+) cannot be used here, because
// MistralAiChatResponseMetadata is a class, not a record — access its values
// through the accessor methods rawHttpResponse() and rawServerSentEvents().
Always monitor token usage for cost control:
ChatResponse response = chatModel.chat(messages);
TokenUsage usage = response.tokenUsage();
// Log for monitoring
logger.info("Tokens used - Input: {}, Output: {}, Total: {}",
usage.inputTokenCount(),
usage.outputTokenCount(),
usage.totalTokenCount());
// Track costs
costTracker.recordUsage(usage.totalTokenCount());
// Alert on high usage
if (usage.totalTokenCount() > 10000) {
alerting.sendAlert("High token usage detected");
}
Always check finish reason to understand response completion:
ChatResponse response = chatModel.chat(messages);
switch (response.finishReason()) {
case STOP -> {
// Normal completion
processResponse(response.aiMessage());
}
case LENGTH -> {
// Hit max tokens limit
logger.warn("Response truncated due to token limit");
handleTruncatedResponse(response.aiMessage());
}
case TOOL_CALLS -> {
// Model wants to call tools
executeTools(response.aiMessage().toolExecutionRequests());
}
case CONTENT_FILTER -> {
// Content was filtered
logger.warn("Response filtered by content policy");
handleFilteredContent();
}
default -> {
logger.warn("Unknown finish reason: {}", response.finishReason());
}
}
Model Enums
├── MistralAiChatModelName (11 models)
├── MistralAiEmbeddingModelName (1 model)
└── MistralAiFimModelName (2 models)
API Enums
├── MistralAiRole (USER, ASSISTANT, SYSTEM)
├── MistralAiToolType (FUNCTION)
├── MistralAiToolChoiceName (AUTO, ANY, NONE)
└── MistralAiResponseFormatType (TEXT, JSON_OBJECT)
Response Metadata
├── MistralAiChatResponseMetadata
│ ├── rawHttpResponse: SuccessfulHttpResponse
│ └── rawServerSentEvents: List<ServerSentEvent>
└── MistralAiUsage
├── promptTokens: Integer
├── completionTokens: Integer
└── totalTokens: Integer
Internal API Types (35+ classes)
├── Requests (4 classes)
├── Responses (4 classes)
├── Messages (7 classes)
├── Tools (7 classes)
├── Models (3 classes)
├── Moderation (3 classes)
└── Embeddings (1 class)
Install with Tessl CLI
npx tessl i tessl/maven-dev-langchain4j--langchain4j-mistral-ai