Python Client SDK for the Mistral AI API with chat completions, embeddings, fine-tuning, and agent capabilities.
```bash
npx @tessl/cli install tessl/pypi-mistralai@1.9.0
```

A comprehensive Python client SDK for interacting with the Mistral AI API. Provides access to chat completions, embeddings, fine-tuning, agents, file management, and advanced AI capabilities, with both synchronous and asynchronous support.

```bash
pip install mistralai
```

```python
from mistralai import Mistral
```

For specific components:
```python
from mistralai.models import (
    ChatCompletionRequest,
    ChatCompletionResponse,
    SystemMessage,
    UserMessage,
    AssistantMessage,
)
```

```python
import asyncio

from mistralai import Mistral
from mistralai.models import UserMessage

# Initialize client
client = Mistral(api_key="your-api-key")

# Simple chat completion
messages = [
    UserMessage(content="Hello, how are you?"),
]
response = client.chat.complete(
    model="mistral-small-latest",
    messages=messages,
)
print(response.choices[0].message.content)

# Async usage: every sync method has an *_async counterpart
async def main() -> None:
    async with Mistral(api_key="your-api-key") as client:
        response = await client.chat.complete_async(
            model="mistral-small-latest",
            messages=messages,
        )
        print(response.choices[0].message.content)

asyncio.run(main())
```

The SDK is built around a main `Mistral` class that provides access to specialized sub-APIs:

- `chat`: chat completions
- `agents`: agent completions
- `embeddings`: vector embeddings
- `models`: model management
- `files`: file management
- `fine_tuning`: fine-tuning jobs
- `batch`: batch processing
- `fim`: fill-in-the-middle code completion
- `ocr`: document OCR
- `classifiers`: moderation and classification
- `audio`: audio transcription
- `beta`: experimental APIs (conversations, libraries, enhanced agent management)
The design enables both simple usage patterns and advanced customization while maintaining type safety and comprehensive error handling.
Create text completions using Mistral's language models with support for conversations, function calling, streaming responses, and structured outputs.
```python
def complete(
    model: str,
    messages: List[Union[SystemMessage, UserMessage, AssistantMessage, ToolMessage]],
    temperature: Optional[float] = None,
    top_p: Optional[float] = None,
    max_tokens: Optional[int] = None,
    stream: Optional[bool] = None,
    tools: Optional[List[Tool]] = None,
    tool_choice: Optional[Union[str, ToolChoice]] = None,
    response_format: Optional[ResponseFormat] = None,
    **kwargs
) -> ChatCompletionResponse: ...

def stream(
    model: str,
    messages: List[Union[SystemMessage, UserMessage, AssistantMessage, ToolMessage]],
    **kwargs
) -> Iterator[CompletionEvent]: ...
```
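For example, a minimal streaming loop; in the 1.x SDK each streamed event wraps a `CompletionChunk` in its `data` attribute:

```python
from mistralai import Mistral
from mistralai.models import UserMessage

client = Mistral(api_key="your-api-key")

# Print tokens as they arrive; delta.content may be None on some events
for event in client.chat.stream(
    model="mistral-small-latest",
    messages=[UserMessage(content="Write a haiku about the sea.")],
):
    delta = event.data.choices[0].delta.content
    if delta:
        print(delta, end="", flush=True)
```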
Generate completions using pre-configured AI agents with specialized tools and context.

```python
def complete(
    messages: List[Union[SystemMessage, UserMessage, AssistantMessage, ToolMessage]],
    agent_id: str,
    max_tokens: Optional[int] = None,
    stream: Optional[bool] = False,
    tools: Optional[List[Tool]] = None,
    tool_choice: Optional[Union[str, ToolChoice]] = None,
    **kwargs
) -> ChatCompletionResponse: ...

def stream(
    messages: List[Union[SystemMessage, UserMessage, AssistantMessage, ToolMessage]],
    agent_id: str,
    max_tokens: Optional[int] = None,
    stream: Optional[bool] = True,
    **kwargs
) -> Iterator[CompletionEvent]: ...
```
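A short sketch of calling a pre-built agent; the agent ID is a placeholder for one created in the Mistral console:

```python
from mistralai import Mistral
from mistralai.models import UserMessage

client = Mistral(api_key="your-api-key")

# "ag-your-agent-id" is a placeholder agent ID
response = client.agents.complete(
    agent_id="ag-your-agent-id",
    messages=[UserMessage(content="Summarize today's top story.")],
)
print(response.choices[0].message.content)
```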
Generate vector embeddings for text input with support for different models and output formats.

```python
def create(
    model: str,
    inputs: Union[str, List[str]],
    output_dimension: Optional[int] = None,
    output_dtype: Optional[str] = None,
    encoding_format: Optional[str] = None,
    **kwargs
) -> EmbeddingResponse: ...
```
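For example, embedding a batch of strings in one call with the `mistral-embed` model:

```python
from mistralai import Mistral

client = Mistral(api_key="your-api-key")

# One request can embed several inputs at once
response = client.embeddings.create(
    model="mistral-embed",
    inputs=["What is machine learning?", "How do neural networks work?"],
)
for item in response.data:
    print(len(item.embedding))  # dimensionality of each vector
```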
List and manage available models, including base models and fine-tuned models.

```python
def list(**kwargs) -> ModelList: ...
def retrieve(model_id: str, **kwargs) -> Union[BaseModelCard, FTModelCard]: ...
def delete(model_id: str, **kwargs) -> DeleteModelOut: ...
```
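For example, enumerating the models visible to an API key:

```python
from mistralai import Mistral

client = Mistral(api_key="your-api-key")

# List every model available to this account
models = client.models.list()
for card in models.data:
    print(card.id)
```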
Upload, manage, and process files for use with fine-tuning, agents, and other AI capabilities.

```python
def upload(
    file: Union[File, FileTypedDict],
    purpose: Optional[FilePurpose] = None,
    **kwargs
) -> UploadFileOut: ...

def list(
    page: Optional[int] = 0,
    page_size: Optional[int] = 100,
    sample_type: Optional[List[SampleType]] = None,
    source: Optional[List[Source]] = None,
    search: Optional[str] = None,
    purpose: Optional[FilePurpose] = None,
    **kwargs
) -> ListFilesOut: ...

def retrieve(file_id: str, **kwargs) -> RetrieveFileOut: ...
def delete(file_id: str, **kwargs) -> DeleteFileOut: ...
def download(file_id: str, **kwargs) -> httpx.Response: ...

def get_signed_url(
    file_id: str,
    expiry: Optional[int] = 24,
    **kwargs
) -> FileSignedURL: ...
```
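A typical upload, tagging the file for fine-tuning (the file name is a placeholder):

```python
from mistralai import Mistral

client = Mistral(api_key="your-api-key")

# Upload a JSONL training file; `purpose` tags how the file will be used
with open("training_data.jsonl", "rb") as f:
    uploaded = client.files.upload(
        file={"file_name": "training_data.jsonl", "content": f},
        purpose="fine-tune",
    )
print(uploaded.id)
```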
Create and manage fine-tuning jobs to customize models for specific use cases.

```python
def create(
    model: str,
    training_files: List[TrainingFile],
    validation_files: Optional[List[TrainingFile]] = None,
    hyperparameters: Optional[dict] = None,
    **kwargs
) -> CompletionDetailedJobOut: ...

def list(**kwargs) -> JobsOut: ...
def get(job_id: str, **kwargs) -> CompletionDetailedJobOut: ...
def cancel(job_id: str, **kwargs) -> CompletionDetailedJobOut: ...
```
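In the installed 1.x SDK these operations are exposed under a `jobs` sub-resource; a sketch, with a placeholder file ID:

```python
from mistralai import Mistral

client = Mistral(api_key="your-api-key")

# "file-abc123" is a placeholder for an uploaded training file ID
job = client.fine_tuning.jobs.create(
    model="open-mistral-7b",
    training_files=[{"file_id": "file-abc123", "weight": 1}],
    hyperparameters={"training_steps": 10, "learning_rate": 0.0001},
)
print(job.id, job.status)
```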
Submit and manage batch processing jobs for handling large volumes of requests efficiently.

```python
def create(
    input_files: List[str],
    endpoint: str,
    completion_window: str,
    **kwargs
) -> BatchJobOut: ...

def list(**kwargs) -> BatchJobsOut: ...
def get(batch_id: str, **kwargs) -> BatchJobOut: ...
def cancel(batch_id: str, **kwargs) -> BatchJobOut: ...
```
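Likewise, in the installed 1.x SDK batch jobs are nested under `client.batch.jobs`, and creation also takes a target model; a sketch with a placeholder input file ID:

```python
from mistralai import Mistral

client = Mistral(api_key="your-api-key")

# "file-abc123" is a placeholder for an uploaded batch input file
batch_job = client.batch.jobs.create(
    input_files=["file-abc123"],
    model="mistral-small-latest",
    endpoint="/v1/chat/completions",
    metadata={"job_type": "testing"},
)
print(batch_job.id, batch_job.status)
```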
Generate code completions using fill-in-the-middle models for code editing and completion tasks.

```python
def complete(
    model: str,
    prompt: str,
    suffix: Optional[str] = None,
    temperature: Optional[float] = None,
    max_tokens: Optional[int] = None,
    **kwargs
) -> FIMCompletionResponse: ...
```
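For example, completing the body of a function between a prompt and a suffix with Codestral:

```python
from mistralai import Mistral

client = Mistral(api_key="your-api-key")

# Fill in the middle: the model generates code between prompt and suffix
response = client.fim.complete(
    model="codestral-latest",
    prompt="def fibonacci(n: int) -> int:\n",
    suffix="\nprint(fibonacci(10))",
    max_tokens=64,
)
print(response.choices[0].message.content)
```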
Process documents and images to extract text and structured data using optical character recognition.

```python
def process(
    model: str,
    document: Document,
    pages: Optional[List[int]] = None,
    **kwargs
) -> OCRResponse: ...
```
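A sketch of OCR over a hosted PDF; the URL is a placeholder, and the `page.markdown` access assumes the current OCR response layout:

```python
from mistralai import Mistral

client = Mistral(api_key="your-api-key")

# The URL is a placeholder for a publicly reachable PDF
ocr_response = client.ocr.process(
    model="mistral-ocr-latest",
    document={"type": "document_url", "document_url": "https://example.com/sample.pdf"},
)
for page in ocr_response.pages:
    print(page.markdown)
```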
Moderate content and classify text using Mistral's safety and classification models.

```python
def moderate(
    inputs: List[Union[str, Dict]],
    model: Optional[str] = None,
    **kwargs
) -> ModerationResponse: ...

def classify(
    inputs: List[str],
    model: str,
    **kwargs
) -> ClassificationResponse: ...
```
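For example, running the moderation model over a single input:

```python
from mistralai import Mistral

client = Mistral(api_key="your-api-key")

response = client.classifiers.moderate(
    model="mistral-moderation-latest",
    inputs=["You are the worst, I never want to see you again."],
)
# Each result carries per-category flags and scores
print(response.results[0].categories)
```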
Transcribe audio files to text with support for various audio formats and streaming.

```python
def transcribe(
    file: Union[str, BinaryIO],
    model: str,
    language: Optional[str] = None,
    **kwargs
) -> TranscriptionResponse: ...

def transcribe_stream(
    stream: Iterator[bytes],
    model: str,
    **kwargs
) -> Iterator[TranscriptionStreamEvents]: ...
```
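A sketch against the `transcribe` signature above; the model name, the file path, and the `.text` field on the response are assumptions:

```python
from mistralai import Mistral

client = Mistral(api_key="your-api-key")

# "meeting.mp3" is a placeholder audio file
with open("meeting.mp3", "rb") as f:
    transcript = client.audio.transcribe(
        file=f,
        model="voxtral-mini-latest",
        language="en",
    )
print(transcript.text)
```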
Experimental and preview APIs providing access to advanced features, including enhanced conversations, document libraries, and beta agent capabilities.

```python
# Conversations API
def start(
    inputs: Union[ConversationInputs, dict],
    instructions: Optional[str] = None,
    tools: Optional[List[Tool]] = None,
    **kwargs
) -> ConversationResponse: ...

def start_stream(
    inputs: Union[ConversationInputs, dict],
    **kwargs
) -> Iterator[ConversationEvents]: ...

# Libraries API
def create(
    name: str,
    description: Optional[str] = None,
    **kwargs
) -> LibraryOut: ...

def list(**kwargs) -> ListLibraryOut: ...

# Beta Agents API (enhanced agent management)
def create(
    name: str,
    model: str,
    **kwargs
) -> Agent: ...

def update_version(
    agent_id: str,
    version_data: dict,
    **kwargs
) -> Agent: ...
```
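A hedged sketch of starting a beta conversation; passing `model` as a keyword and reading `conversation_id` and `outputs` are assumptions based on the beta conversations API:

```python
from mistralai import Mistral

client = Mistral(api_key="your-api-key")

# Conversations keep state server-side between turns
conversation = client.beta.conversations.start(
    model="mistral-medium-latest",
    inputs="Plan a three-day trip to Lisbon.",
)
print(conversation.conversation_id)
for output in conversation.outputs:
    print(output)
```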
```python
class Mistral:
    def __init__(
        self,
        api_key: Optional[Union[str, Callable[[], str]]] = None,
        server_url: Optional[str] = None,
        client: Optional[HttpClient] = None,
        async_client: Optional[AsyncHttpClient] = None,
        retry_config: Optional[RetryConfig] = None,
        timeout_ms: Optional[int] = None,
        debug_logger: Optional[Logger] = None,
    ) -> None: ...

    def __enter__(self) -> "Mistral": ...
    def __exit__(self, exc_type, exc_val, exc_tb) -> None: ...
    async def __aenter__(self) -> "Mistral": ...
    async def __aexit__(self, exc_type, exc_val, exc_tb) -> None: ...
```
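Retries and timeouts can be configured at construction; this sketch assumes the Speakeasy-style `RetryConfig` and `BackoffStrategy` helpers in `mistralai.utils`, and the values are illustrative:

```python
from mistralai import Mistral
from mistralai.utils import BackoffStrategy, RetryConfig

# Exponential backoff: 500 ms initial interval, 60 s cap, 5 min total budget
client = Mistral(
    api_key="your-api-key",
    retry_config=RetryConfig(
        "backoff",
        BackoffStrategy(
            initial_interval=500,
            max_interval=60000,
            exponent=1.5,
            max_elapsed_time=300000,
        ),
        retry_connection_errors=True,
    ),
    timeout_ms=30000,
)
```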
```python
class SystemMessage:
    content: Union[str, List[SystemMessageContentChunk]]
    role: Optional[Literal["system"]] = "system"

class UserMessage:
    content: Optional[Union[str, List[ContentChunk]]]
    role: Optional[Literal["user"]] = "user"

class AssistantMessage:
    content: Optional[Union[str, List[ContentChunk]]]
    tool_calls: Optional[List[ToolCall]]
    prefix: Optional[bool] = False
    role: Optional[Literal["assistant"]] = "assistant"

class ToolMessage:
    content: Optional[Union[str, List[ContentChunk]]]
    tool_call_id: Optional[str]
    name: Optional[str]
    role: Optional[Literal["tool"]] = "tool"

# Content chunk types for multimodal support
ContentChunk = Union[
    TextChunk,
    ImageURLChunk,
    DocumentURLChunk,
    FileChunk,
    AudioChunk,
    ReferenceChunk,
    ThinkChunk,
]

class TextChunk:
    type: Literal["text"]
    text: str

class ImageURLChunk:
    type: Literal["image_url"]
    image_url: str

class DocumentURLChunk:
    type: Literal["document_url"]
    document_url: str

class FileChunk:
    type: Literal["file"]
    file: str

class AudioChunk:
    type: Literal["input_audio"]
    input_audio: dict

class ReferenceChunk:
    type: Literal["reference"]
    reference: str

class ThinkChunk:
    type: Literal["thinking"]
    thinking: str
```
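Chunks compose into multimodal messages; a sketch with a placeholder image URL and a vision-capable model:

```python
from mistralai import Mistral
from mistralai.models import ImageURLChunk, TextChunk, UserMessage

client = Mistral(api_key="your-api-key")

# Mixed text + image content in a single user message
message = UserMessage(
    content=[
        TextChunk(text="What is shown in this image?"),
        ImageURLChunk(image_url="https://example.com/photo.jpg"),
    ]
)
response = client.chat.complete(model="pixtral-large-latest", messages=[message])
print(response.choices[0].message.content)
```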
type: Literal["function"]
function: Function
class CodeInterpreterTool:
type: Literal["code_interpreter"]
class WebSearchTool:
type: Literal["web_search"]
class WebSearchPremiumTool:
type: Literal["web_search_premium"]
class DocumentLibraryTool:
type: Literal["document_library"]
document_library: dict
class ImageGenerationTool:
type: Literal["image_generation"]class ResponseFormat:
type: Literal["json_object", "text"]
schema: Optional[dict]
class ToolChoice:
type: Literal["auto", "none", "any"]
function: Optional[FunctionName]
class RetryConfig:
strategy: str
backoff: dict
retries: intclass ChatCompletionResponse:
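For example, constraining output to JSON with `response_format`:

```python
from mistralai import Mistral

client = Mistral(api_key="your-api-key")

# Ask the model to emit a valid JSON object
response = client.chat.complete(
    model="mistral-small-latest",
    messages=[{"role": "user", "content": "List three primary colors as JSON."}],
    response_format={"type": "json_object"},
)
print(response.choices[0].message.content)
```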
```python
class ChatCompletionResponse:
    id: str
    object: str
    created: int
    model: str
    choices: List[ChatCompletionChoice]
    usage: Optional[UsageInfo]

class EmbeddingResponse:
    id: str
    object: str
    data: List[EmbeddingResponseData]
    model: str
    usage: Optional[UsageInfo]

class UsageInfo:
    prompt_tokens: int
    completion_tokens: Optional[int]
    total_tokens: int
```
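Token accounting can be read off any completion response; a small sketch:

```python
from mistralai import Mistral

client = Mistral(api_key="your-api-key")

response = client.chat.complete(
    model="mistral-small-latest",
    messages=[{"role": "user", "content": "Hello!"}],
)
# usage is optional on some responses, hence the guard
if response.usage:
    print(response.usage.prompt_tokens, response.usage.total_tokens)
```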