Python Client SDK for the Mistral AI API with chat completions, embeddings, fine-tuning, and agent capabilities.
—
Generate code completions using fill-in-the-middle (FIM) models, which are specialized for code generation and editing tasks where a prefix and a suffix supply the context around the insertion point.
def complete(
    model: str,
    prompt: str,
    suffix: Optional[str] = None,
    temperature: Optional[float] = None,
    top_p: Optional[float] = None,
    max_tokens: Optional[int] = None,
    min_tokens: Optional[int] = None,
    stream: Optional[bool] = None,
    stop: Optional[Union[str, List[str]]] = None,
    random_seed: Optional[int] = None,
    **kwargs
) -> FIMCompletionResponse:
    """Create a fill-in-the-middle completion.

    Args:
        model: Model identifier (e.g., "codestral-latest").
        prompt: The code prefix before the insertion point.
        suffix: The code suffix after the insertion point.
        temperature: Sampling temperature (0.0 to 1.0).
        top_p: Nucleus sampling parameter.
        max_tokens: Maximum number of tokens to generate.
        min_tokens: Minimum number of tokens to generate.
        stream: Enable streaming responses.
        stop: Stop sequence(s) for generation.
        random_seed: Seed for reproducible outputs.

    Returns:
        FIMCompletionResponse with the generated code completion.
    """
def stream(
    model: str,
    prompt: str,
    suffix: Optional[str] = None,
    **kwargs
) -> Iterator[CompletionChunk]:
    """Stream a fill-in-the-middle completion.

    Args:
        model: Model identifier (e.g., "codestral-latest").
        prompt: The code prefix before the insertion point.
        suffix: The code suffix after the insertion point.

    Returns:
        Iterator of CompletionChunk objects with the streaming completion.
    """

from mistralai import Mistral
client = Mistral(api_key="your-api-key")

# Example: complete a function body between a prefix and a suffix.
prefix = """
def calculate_fibonacci(n):
    if n <= 1:
        return n
    # TODO: implement fibonacci calculation
"""
suffix = """
    return result
"""

response = client.fim.complete(
    model="codestral-latest",
    prompt=prefix,
    suffix=suffix,
    max_tokens=150,
    temperature=0.1,  # Low temperature for code — keeps generation predictable
)

print("Generated code:")
print(response.choices[0].text)

# Complete a class method
# Example: complete a class method, then stitch the pieces back together.
prefix = """
class DataProcessor:
    def __init__(self, data):
        self.data = data

    def process(self):
        # TODO: implement data processing
"""
suffix = """
        return processed_data

    def save(self, filename):
        with open(filename, 'w') as f:
            json.dump(self.processed_data, f)
"""

response = client.fim.complete(
    model="codestral-latest",
    prompt=prefix,
    suffix=suffix,
    max_tokens=200,
)

# Full source = prefix + generated middle + suffix.
complete_code = prefix + response.choices[0].text + suffix
print("Complete class:")
print(complete_code)

prefix = "def merge_sort(arr):\n    if len(arr) <= 1:\n        return arr\n    "
suffix = "\n return merge(left_sorted, right_sorted)"
print("Generating code...")
print(prefix, end="")
stream = client.fim.stream(
model="codestral-latest",
prompt=prefix,
suffix=suffix,
max_tokens=100
)
for chunk in stream:
if chunk.choices[0].text:
print(chunk.choices[0].text, end="", flush=True)
print(suffix)class FIMCompletionRequest:
# FIMCompletionRequest fields — mirror the parameters of fim.complete().
model: str  # Model identifier, e.g. "codestral-latest"
prompt: str  # Code prefix before the insertion point
suffix: Optional[str]  # Code suffix after the insertion point
temperature: Optional[float]  # Sampling temperature (0.0 to 1.0)
top_p: Optional[float]  # Nucleus sampling parameter
max_tokens: Optional[int]  # Maximum tokens to generate
min_tokens: Optional[int]  # Minimum tokens to generate
stream: Optional[bool]  # Enable streaming responses
stop: Optional[Union[str, List[str]]]  # Stop sequence(s) for generation
random_seed: Optional[int]  # Seed for reproducible outputs
class FIMCompletionStreamRequest:
model: str
prompt: str
suffix: Optional[str]
temperature: Optional[float]
top_p: Optional[float]
max_tokens: Optional[int]
min_tokens: Optional[int]
stop: Optional[Union[str, List[str]]]
random_seed: Optional[int]class FIMCompletionResponse:
# FIMCompletionResponse fields.
id: str  # Unique identifier of this completion
object: str  # Object type tag
created: int  # Creation time (presumably a Unix timestamp — not shown here)
model: str  # Model that produced the completion
choices: List[FIMCompletionChoice]  # Generated completion choice(s)
usage: Optional[UsageInfo]  # Token usage accounting, when reported
class FIMCompletionChoice:
index: int
text: str
finish_reason: Optional[str]Install with Tessl CLI
npx tessl i tessl/pypi-mistralai