CtrlK
Blog · Docs · Log in · Get started
Tessl Logo

tessl/maven-dev-langchain4j--langchain4j-mistral-ai

LangChain4j integration for Mistral AI providing chat completion, streaming, embedding, moderation, and code completion capabilities

Overview
Eval results
Files

docs/code-completion.md

Code Completion

Fill-in-the-middle (FIM) code completion capabilities for IDE integrations and code generation tasks. Mistral's Codestral models specialize in understanding code context with prefix and suffix to generate appropriate completions, making them ideal for autocomplete, code suggestion, and inline code generation features.

Capabilities

Synchronous Code Completion

Generate code completions in a single blocking API call.

public class MistralAiFimModel implements LanguageModel {
    /**
     * Create a new builder for configuring MistralAiFimModel.
     *
     * @return Builder instance
     * @throws IllegalArgumentException if parameter validation fails
     */
    public static Builder builder() { ... }

    /**
     * Generate code completion from a prompt.
     *
     * @param prompt code prefix (context before the cursor); must be non-null
     * @return Response containing the generated code string
     */
    public Response<String> generate(String prompt) { ... }

    /**
     * Generate fill-in-the-middle code completion.
     *
     * @param prompt code prefix (context before the cursor); must be non-null
     * @param suffix code suffix (context after the cursor); must be non-null
     * @return Response containing the generated code that fills the middle
     */
    public Response<String> generate(String prompt, String suffix) { ... }
}

Streaming Code Completion

Generate code completions with real-time token streaming for progressive display in IDEs.

public class MistralAiStreamingFimModel implements StreamingLanguageModel {
    /**
     * Create a new builder for configuring MistralAiStreamingFimModel.
     *
     * @return Builder instance
     * @throws IllegalArgumentException if parameter validation fails
     */
    public static Builder builder() { ... }

    /**
     * Generate streaming code completion from a prompt.
     *
     * @param prompt code prefix (context before the cursor); must be non-null
     * @param handler StreamingResponseHandler that receives tokens; must be non-null
     */
    public void generate(String prompt, StreamingResponseHandler<String> handler) { ... }

    /**
     * Generate streaming fill-in-the-middle code completion.
     *
     * @param prompt code prefix (context before the cursor); must be non-null
     * @param suffix code suffix (context after the cursor); must be non-null
     * @param handler StreamingResponseHandler that receives tokens; must be non-null
     */
    public void generate(String prompt, String suffix, StreamingResponseHandler<String> handler) { ... }
}

Usage Examples

Basic Code Completion

import dev.langchain4j.model.mistralai.MistralAiFimModel;
import dev.langchain4j.model.mistralai.MistralAiFimModelName;
import dev.langchain4j.model.output.Response;

MistralAiFimModel fimModel = MistralAiFimModel.builder()
    .apiKey(System.getenv("MISTRAL_API_KEY"))
    .modelName(MistralAiFimModelName.CODESTRAL_LATEST)
    .build();

String prompt = "def fibonacci(n):\n    ";
Response<String> response = fimModel.generate(prompt);
String completion = response.content();
System.out.println(completion);
// Possible output: "if n <= 1:\n        return n\n    return fibonacci(n-1) + fibonacci(n-2)"

Fill-in-the-Middle

Complete code with both prefix and suffix context:

String prefix = "public class Calculator {\n    public int add(int a, int b) {\n        ";
String suffix = "\n    }\n}";

Response<String> response = fimModel.generate(prefix, suffix);
String middle = response.content();
System.out.println("Generated: " + middle);
// Output: "return a + b;"

IDE Autocomplete Integration

public class CodeCompletionProvider {
    private final MistralAiFimModel fimModel;

    /**
     * Create a completion provider backed by Codestral.
     *
     * @param apiKey Mistral AI API key; must be non-null
     */
    public CodeCompletionProvider(String apiKey) {
        this.fimModel = MistralAiFimModel.builder()
            .apiKey(apiKey)
            .modelName(MistralAiFimModelName.CODESTRAL_LATEST)
            .temperature(0.2)  // Lower temperature for more deterministic completions
            .maxTokens(100)     // Limit completion length
            .build();
    }

    /**
     * Generate a completion for the code around the cursor.
     *
     * @param fileContent full text of the file being edited; must be non-null
     * @param cursorPosition cursor offset in fileContent, 0..fileContent.length()
     * @return generated code to insert at the cursor
     * @throws IllegalArgumentException if fileContent is null or cursorPosition is out of range
     */
    public String getCompletion(String fileContent, int cursorPosition) {
        // Validate up front so a stale cursor from the editor surfaces as a clear
        // IllegalArgumentException instead of a StringIndexOutOfBoundsException.
        if (fileContent == null) {
            throw new IllegalArgumentException("fileContent must not be null");
        }
        if (cursorPosition < 0 || cursorPosition > fileContent.length()) {
            throw new IllegalArgumentException("cursorPosition out of range: " + cursorPosition);
        }
        String prefix = fileContent.substring(0, cursorPosition);
        String suffix = fileContent.substring(cursorPosition);

        Response<String> response = fimModel.generate(prefix, suffix);
        return response.content();
    }
}

// Usage
CodeCompletionProvider provider = new CodeCompletionProvider(apiKey);
String fileContent = "def process_data(data):\n    result = []\n    for item in data:\n        ";
int cursor = fileContent.length();
String completion = provider.getCompletion(fileContent, cursor);
System.out.println(completion);

Streaming Completion for Real-time Display

import dev.langchain4j.model.mistralai.MistralAiStreamingFimModel;
import dev.langchain4j.model.StreamingResponseHandler;

MistralAiStreamingFimModel streamingFim = MistralAiStreamingFimModel.builder()
    .apiKey(System.getenv("MISTRAL_API_KEY"))
    .modelName(MistralAiFimModelName.CODESTRAL_LATEST)
    .build();

String prefix = "function calculateTotal(items) {\n    let total = 0;\n    ";
String suffix = "\n    return total;\n}";

StringBuilder completion = new StringBuilder();

streamingFim.generate(prefix, suffix, new StreamingResponseHandler<String>() {
    @Override
    public void onNext(String token) {
        completion.append(token);
        updateEditorDisplay(token);  // Update IDE UI progressively
    }

    @Override
    public void onComplete(Response<String> response) {
        System.out.println("\nCompletion finished");
        System.out.println("Full completion: " + completion.toString());
        System.out.println("Tokens: " + response.tokenUsage().totalTokenCount());
    }

    @Override
    public void onError(Throwable error) {
        System.err.println("Error: " + error.getMessage());
        showErrorInEditor(error);
    }
});

Function/Method Implementation

Generate complete function bodies:

String prefix = """
class DataProcessor:
    def __init__(self, data):
        self.data = data

    def filter_by_threshold(self, threshold):
        \"\"\"Filter data points above threshold\"\"\"
        """;

String suffix = """

    def calculate_statistics(self):
        \"\"\"Calculate mean and standard deviation\"\"\"
        pass
""";

Response<String> response = fimModel.generate(prefix, suffix);
System.out.println("Generated method:");
System.out.println(response.content());

Multi-language Support

Codestral supports multiple programming languages:

// Java
String javaPrefix = "public class LinkedList {\n    private Node head;\n    \n    public void add(int value) {\n        ";
Response<String> javaCompletion = fimModel.generate(javaPrefix);

// Python
String pythonPrefix = "class BinaryTree:\n    def __init__(self):\n        self.root = None\n    \n    def insert(self, value):\n        ";
Response<String> pythonCompletion = fimModel.generate(pythonPrefix);

// JavaScript
String jsPrefix = "const express = require('express');\nconst app = express();\n\napp.get('/api/users', (req, res) => {\n    ";
Response<String> jsCompletion = fimModel.generate(jsPrefix);

// TypeScript
String tsPrefix = "interface User {\n    id: number;\n    name: string;\n}\n\nfunction getUserById(id: number): User | null {\n    ";
Response<String> tsCompletion = fimModel.generate(tsPrefix);

Builder Configuration Options

Both synchronous and streaming FIM models share the same configuration options:

public static class Builder {
    /**
     * Set the Mistral AI API key (required).
     *
     * @param apiKey your Mistral AI API key; must be non-null
     * @return Builder instance
     * @throws IllegalArgumentException if parameter validation fails
     */
    public Builder apiKey(String apiKey) { ... }

    /**
     * Set the base URL for the Mistral AI API.
     * Default: https://api.mistral.ai/v1
     *
     * @param baseUrl custom API endpoint URL; must be non-null
     * @return Builder instance
     * @throws IllegalArgumentException if parameter validation fails
     */
    public Builder baseUrl(String baseUrl) { ... }

    /**
     * Set the model name using enum.
     * Default: CODESTRAL_LATEST
     *
     * @param modelName MistralAiFimModelName enum value; must be non-null
     * @return Builder instance
     * @throws IllegalArgumentException if parameter validation fails
     */
    public Builder modelName(MistralAiFimModelName modelName) { ... }

    /**
     * Set the model name using string.
     *
     * @param modelName model identifier string; must be non-null
     * @return Builder instance
     * @throws IllegalArgumentException if parameter validation fails
     */
    public Builder modelName(String modelName) { ... }

    /**
     * Set sampling temperature (0.0 to 1.0).
     * Lower values for more deterministic code completion.
     * Default: 0.0
     *
     * @param temperature temperature value; must be non-null
     * @return Builder instance
     * @throws IllegalArgumentException if parameter validation fails
     */
    public Builder temperature(Double temperature) { ... }

    /**
     * Set top-p sampling parameter (0.0 to 1.0).
     *
     * @param topP top-p value; must be non-null
     * @return Builder instance
     * @throws IllegalArgumentException if parameter validation fails
     */
    public Builder topP(Double topP) { ... }

    /**
     * Set maximum number of tokens to generate.
     * Recommended: 50-200 for inline completions.
     *
     * @param maxTokens maximum tokens to generate; must be non-null
     * @return Builder instance
     * @throws IllegalArgumentException if parameter validation fails
     */
    public Builder maxTokens(Integer maxTokens) { ... }

    /**
     * Set random seed for reproducible outputs.
     *
     * @param randomSeed random seed value; must be non-null
     * @return Builder instance
     * @throws IllegalArgumentException if parameter validation fails
     */
    public Builder randomSeed(Integer randomSeed) { ... }

    /**
     * Set minimum number of tokens to generate.
     *
     * @param minTokens minimum tokens to generate; must be non-null
     * @return Builder instance
     * @throws IllegalArgumentException if parameter validation fails
     */
    public Builder minTokens(Integer minTokens) { ... }

    /**
     * Set custom stop tokens to terminate generation.
     * Note: FIM models use the stop() method, while chat models
     * (see chat-models.md) use the stopSequences() method.
     *
     * @param stop list of stop token strings; must be non-null
     * @return Builder instance
     * @throws IllegalArgumentException if parameter validation fails
     */
    public Builder stop(List<String> stop) { ... }

    /**
     * Set request timeout.
     * Default: 60 seconds
     *
     * @param timeout request timeout duration; must be non-null
     * @return Builder instance
     * @throws IllegalArgumentException if parameter validation fails
     */
    public Builder timeout(Duration timeout) { ... }

    /**
     * Enable request logging.
     *
     * @param logRequests true to log requests; must be non-null
     * @return Builder instance
     * @throws IllegalArgumentException if parameter validation fails
     */
    public Builder logRequests(Boolean logRequests) { ... }

    /**
     * Enable response logging.
     *
     * @param logResponses true to log responses; must be non-null
     * @return Builder instance
     * @throws IllegalArgumentException if parameter validation fails
     */
    public Builder logResponses(Boolean logResponses) { ... }

    /**
     * Set custom SLF4J logger for logging.
     *
     * @param logger SLF4J Logger instance; must be non-null
     * @return Builder instance
     * @throws IllegalArgumentException if parameter validation fails
     */
    public Builder logger(Logger logger) { ... }

    /**
     * Set maximum retry attempts on failure.
     * Default: 2
     *
     * @param maxRetries maximum number of retries; must be non-null
     * @return Builder instance
     * @throws IllegalArgumentException if parameter validation fails
     */
    public Builder maxRetries(Integer maxRetries) { ... }

    /**
     * Set custom HTTP client builder.
     *
     * @param httpClientBuilder HttpClientBuilder instance; must be non-null
     * @return Builder instance
     * @throws IllegalArgumentException if parameter validation fails
     */
    public Builder httpClientBuilder(HttpClientBuilder httpClientBuilder) { ... }

    /**
     * Build the MistralAiFimModel or MistralAiStreamingFimModel instance.
     * (Each model class declares its own Builder; the return type below
     * depends on which builder you obtained.)
     *
     * @return Configured model instance
     */
    public MistralAiFimModel build() { ... } // For MistralAiFimModel
    public MistralAiStreamingFimModel build() { ... } // For MistralAiStreamingFimModel
}

Available Models

codestral-latest

Latest version of Codestral, optimized for code completion tasks:

  • Languages: Python, Java, JavaScript, TypeScript, C++, Go, Rust, and 80+ more
  • Context window: 32,768 tokens
  • Best for: IDE autocomplete, code generation, fill-in-the-middle

open-codestral-mamba

Open-source Codestral variant with Mamba architecture:

  • Languages: Same broad language support
  • Best for: Similar use cases with open-source preference

Best Practices

Temperature Settings

For code completion, use lower temperatures for more deterministic and accurate results:

MistralAiFimModel fimModel = MistralAiFimModel.builder()
    .apiKey(apiKey)
    .temperature(0.0)  // Most deterministic (recommended for production)
    .build();

// For creative code generation or examples:
MistralAiFimModel creativeFim = MistralAiFimModel.builder()
    .apiKey(apiKey)
    .temperature(0.3)  // Slightly more varied suggestions
    .build();

Token Limits

Set appropriate max tokens based on completion type:

// Inline completion (single line or expression)
.maxTokens(50)

// Function/method body
.maxTokens(200)

// Complete class or module
.maxTokens(500)

Stop Sequences

Use stop sequences to prevent over-generation:

// For Python
MistralAiFimModel pythonFim = MistralAiFimModel.builder()
    .apiKey(apiKey)
    .stop(Arrays.asList("\n\n", "\nclass ", "\ndef "))
    .build();

// For Java
MistralAiFimModel javaFim = MistralAiFimModel.builder()
    .apiKey(apiKey)
    .stop(Arrays.asList("\n\n", "\npublic ", "\nprivate "))
    .build();

Context Window Management

Provide relevant context without exceeding limits:

/**
 * Trim the file content so the prompt fits within the token budget.
 *
 * @param fileContent full text of the file being edited
 * @param cursorPos cursor offset within fileContent
 * @param maxTokens token budget for the prompt
 * @return the trimmed prefix (text immediately before the cursor)
 */
private String getRelevantContext(String fileContent, int cursorPos, int maxTokens) {
    // Rough heuristic: ~4 characters per token.
    int maxChars = maxTokens * 4;

    // Keep at most half the character budget before the cursor. The matching
    // suffix should be trimmed symmetrically by the caller, e.g.
    // fileContent.substring(cursorPos, Math.min(fileContent.length(), cursorPos + maxChars / 2)),
    // before calling generate(prefix, suffix). (The original computed that
    // suffix here but discarded it — dead code removed.)
    int prefixStart = Math.max(0, cursorPos - maxChars / 2);
    return fileContent.substring(prefixStart, cursorPos);
}

Debouncing in IDEs

Avoid excessive API calls during typing:

public class DebouncedCompletionProvider {
    private final MistralAiFimModel fimModel;
    private final ScheduledExecutorService scheduler = Executors.newScheduledThreadPool(1);
    // volatile: the field is written by the caller thread and the scheduler thread may
    // observe it; NOTE(review): if requestCompletion can be invoked concurrently from
    // multiple threads, upgrade to full synchronization around cancel-and-replace.
    private volatile ScheduledFuture<?> pendingCompletion;

    /**
     * @param fimModel configured FIM model used to produce completions; must be non-null
     */
    public DebouncedCompletionProvider(MistralAiFimModel fimModel) {
        this.fimModel = fimModel;
    }

    /**
     * Schedule a completion request after a short typing pause, cancelling any
     * previously scheduled (not-yet-started) request so rapid keystrokes
     * collapse into a single API call.
     *
     * @param prefix code before the cursor
     * @param suffix code after the cursor
     * @param callback receives the generated completion text
     */
    public void requestCompletion(String prefix, String suffix,
                                   Consumer<String> callback) {
        // Cancel any pending completion that has not started running yet.
        if (pendingCompletion != null) {
            pendingCompletion.cancel(false);
        }

        // Schedule a new completion after the debounce delay.
        pendingCompletion = scheduler.schedule(() -> {
            Response<String> response = fimModel.generate(prefix, suffix);
            callback.accept(response.content());
        }, 300, TimeUnit.MILLISECONDS);
    }
}

Caching for Performance

Cache completions for frequently seen patterns:

import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;
import java.util.Base64;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

public class CachedFimModel {
    private final MistralAiFimModel model;
    private final Map<String, String> cache = new ConcurrentHashMap<>();

    /**
     * @param model underlying FIM model to delegate cache misses to; must be non-null
     */
    public CachedFimModel(MistralAiFimModel model) {
        this.model = model;
    }

    /**
     * Return a cached completion for this prefix/suffix pair, invoking the
     * model only on a cache miss.
     *
     * NOTE(review): computeIfAbsent holds the map bin lock for the duration of
     * the API call; use a Future-based cache if contention matters. The cache
     * is also unbounded — add eviction for long-lived processes.
     */
    public String generate(String prefix, String suffix) {
        String key = hash(prefix + "|" + suffix);

        return cache.computeIfAbsent(key, k -> {
            Response<String> response = model.generate(prefix, suffix);
            return response.content();
        });
    }

    /** Stable digest of the input used as a cache key. */
    private String hash(String input) {
        try {
            // SHA-256 rather than MD5, and explicit UTF-8 so cache keys do not
            // depend on the platform default charset.
            MessageDigest md = MessageDigest.getInstance("SHA-256");
            byte[] digest = md.digest(input.getBytes(StandardCharsets.UTF_8));
            return Base64.getEncoder().encodeToString(digest);
        } catch (Exception e) {
            return input;  // best-effort fallback: use the raw input as the key
        }
    }
}

Error Handling

Handle API failures gracefully in IDE integrations:

/**
 * Fetch a completion, converting any API failure into Optional.empty() so
 * editor features degrade gracefully instead of surfacing exceptions.
 *
 * @param prefix code before the cursor (sent as the FIM prefix)
 * @param suffix code after the cursor (sent as the FIM suffix)
 * @return the generated completion, or empty if the request failed
 */
public Optional<String> getCompletionSafely(String prefix, String suffix) {
    try {
        Response<String> response = fimModel.generate(prefix, suffix);
        return Optional.of(response.content());
    } catch (Exception e) {
        // Deliberate best-effort swallow: completion is optional UX, not critical path.
        logger.warn("Code completion failed: {}", e.getMessage());
        return Optional.empty();
    }
}

// Usage
Optional<String> completion = getCompletionSafely(prefix, suffix);
completion.ifPresent(this::displayInEditor);

Quality Filtering

Filter out low-quality or irrelevant completions:

/**
 * Heuristic filter that rejects obviously low-quality completions before
 * they are shown in the editor.
 *
 * @param completion candidate completion returned by the model
 * @param prefix the code prefix that was sent to the model
 * @return true if the completion is worth displaying
 */
private boolean isQualityCompletion(String completion, String prefix) {
    // Trim once instead of re-trimming in every check.
    String trimmed = completion.trim();

    // Filter empty or whitespace-only completions.
    if (trimmed.isEmpty()) {
        return false;
    }

    // Filter if identical to the prefix (model just repeating its input).
    if (trimmed.equals(prefix.trim())) {
        return false;
    }

    // Filter visibly incomplete code (adjust marker per language).
    if (trimmed.endsWith("...")) {
        return false;
    }

    // Filter completions that contain error markers.
    if (completion.contains("ERROR") || completion.contains("FIXME")) {
        return false;
    }

    return true;
}

Response<String> response = fimModel.generate(prefix, suffix);
String completion = response.content();

if (isQualityCompletion(completion, prefix)) {
    displayCompletion(completion);
}

Language Detection

Adjust parameters based on detected language:

public class LanguageAwareFimProvider {
    // API key used to build the per-request model below. (The original
    // referenced an undeclared `apiKey` and carried an unused, uninitialized
    // `fimModel` field — fixed here.)
    private final String apiKey;

    /**
     * @param apiKey Mistral AI API key; must be non-null
     */
    public LanguageAwareFimProvider(String apiKey) {
        this.apiKey = apiKey;
    }

    /**
     * Generate a completion with stop tokens and a token budget tuned to the
     * given language.
     *
     * NOTE(review): building a model per call is wasteful; cache one model per
     * language if this runs on every keystroke.
     */
    public String complete(String prefix, String suffix, String language) {
        List<String> stopTokens = getStopTokens(language);
        int maxTokens = getMaxTokens(language);

        MistralAiFimModel languageModel = MistralAiFimModel.builder()
            .apiKey(apiKey)
            .modelName(MistralAiFimModelName.CODESTRAL_LATEST)
            .stop(stopTokens)
            .maxTokens(maxTokens)
            .temperature(0.0)
            .build();

        Response<String> response = languageModel.generate(prefix, suffix);
        return response.content();
    }

    /** Stop tokens that keep the model from generating past the current member. */
    private List<String> getStopTokens(String language) {
        return switch (language.toLowerCase()) {
            case "python" -> Arrays.asList("\n\n", "\nclass ", "\ndef ");
            case "java" -> Arrays.asList("\n\n", "\npublic ", "\nprivate ");
            case "javascript", "typescript" -> Arrays.asList("\n\n", "\nfunction ", "\nconst ");
            default -> Arrays.asList("\n\n");
        };
    }

    /** Token budget per language; verbose languages get a larger budget. */
    private int getMaxTokens(String language) {
        return switch (language.toLowerCase()) {
            case "java", "c#" -> 150;
            case "python", "javascript" -> 100;
            default -> 100;
        };
    }
}

Integration Patterns

LSP (Language Server Protocol) Integration

public class MistralCodeCompletionServer implements LanguageServer {
    private final MistralAiFimModel fimModel;

    /**
     * LSP completion entry point: produce a single snippet completion from the
     * text surrounding the cursor.
     *
     * NOTE(review): supplyAsync with no executor runs the blocking generate()
     * call on ForkJoinPool.commonPool(); pass a dedicated executor for I/O-bound work.
     */
    @Override
    public CompletableFuture<Either<List<CompletionItem>, CompletionList>>
    completion(CompletionParams params) {
        return CompletableFuture.supplyAsync(() -> {
            // Split the document at the cursor for fill-in-the-middle.
            String prefix = getTextBeforeCursor(params);
            String suffix = getTextAfterCursor(params);

            Response<String> response = fimModel.generate(prefix, suffix);
            String completion = response.content();

            // Surface the result as a single snippet-kind completion item.
            CompletionItem item = new CompletionItem(completion);
            item.setKind(CompletionItemKind.Snippet);
            item.setDetail("Mistral AI");

            return Either.forLeft(Collections.singletonList(item));
        });
    }
}

VS Code Extension Pattern

// Backend service for VS Code extension
@RestController
public class CompletionController {
    private final MistralAiStreamingFimModel streamingFim;

    /**
     * Stream completion tokens back to the extension via Server-Sent Events.
     *
     * NOTE(review): SseEmitter is created with the container's default timeout;
     * pass an explicit timeout if completions can run long.
     */
    @PostMapping("/complete")
    public SseEmitter streamCompletion(@RequestBody CompletionRequest request) {
        SseEmitter emitter = new SseEmitter();

        streamingFim.generate(
            request.getPrefix(),
            request.getSuffix(),
            new StreamingResponseHandler<String>() {
                @Override
                public void onNext(String token) {
                    try {
                        // Forward each token as its own SSE event.
                        emitter.send(token);
                    } catch (IOException e) {
                        // Client disconnected mid-stream; abort the emitter.
                        emitter.completeWithError(e);
                    }
                }

                @Override
                public void onComplete(Response<String> response) {
                    emitter.complete();
                }

                @Override
                public void onError(Throwable error) {
                    emitter.completeWithError(error);
                }
            }
        );

        return emitter;
    }
}

Install with Tessl CLI

npx tessl i tessl/maven-dev-langchain4j--langchain4j-mistral-ai

docs

chat-models.md

code-completion.md

configuration.md

embedding-model.md

index.md

model-discovery.md

moderation-model.md

spi.md

types-and-enums.md

tile.json