CtrlK
Blog · Docs · Log in · Get started
Tessl Logo

tessl/pypi-mistralai

Python Client SDK for the Mistral AI API with chat completions, embeddings, fine-tuning, and agent capabilities.

Pending
Overview
Eval results
Files

docs/fine-tuning.md

Fine-tuning

Create and manage fine-tuning jobs to customize models for specific use cases. Fine-tuning allows you to create specialized models by training on your own data while building on Mistral's foundation models.

Capabilities

Create Fine-tuning Job

Start a new fine-tuning job with training data and configuration.

def create(
    model: str,
    training_files: List[TrainingFile],
    validation_files: Optional[List[TrainingFile]] = None,
    hyperparameters: Optional[dict] = None,
    suffix: Optional[str] = None,
    integrations: Optional[List[dict]] = None,
    repositories: Optional[List[dict]] = None,
    **kwargs
) -> CompletionDetailedJobOut:
    """
    Create a fine-tuning job.

    A newly created job does not begin training until start(job_id) is
    called (see the start capability and the usage examples below).

    Parameters:
    - model: Base model to fine-tune (e.g., "mistral-small-latest")
    - training_files: List of training data files, previously uploaded
      with purpose="fine-tune" (see Upload and Start Fine-tuning below)
    - validation_files: Optional validation data files
    - hyperparameters: Training hyperparameters; recognized keys mirror
      CompletionTrainingParameters (training_steps, learning_rate,
      weight_decay, warmup_fraction, epochs, fim_ratio, seq_len)
    - suffix: Optional suffix for the fine-tuned model name
    - integrations: External integrations (e.g., W&B)
    - repositories: Repository configurations

    Returns:
    CompletionDetailedJobOut with job details and status
    """

def list(
    page: Optional[int] = None,
    page_size: Optional[int] = None,
    model: Optional[str] = None,
    created_after: Optional[int] = None,
    created_by_me: Optional[bool] = None,
    status: Optional[str] = None,
    wandb_project: Optional[str] = None,
    wandb_name: Optional[str] = None,
    suffix: Optional[str] = None,
    **kwargs
) -> JobsOut:
    """
    List fine-tuning jobs.

    NOTE: this capability shares its name with the builtin list(); it is
    accessed as a client method (client.fine_tuning.list), not as a
    top-level function.

    Parameters:
    - page: Page number for pagination
    - page_size: Number of jobs per page
    - model: Filter by base model
    - created_after: Filter by creation timestamp (presumably Unix
      seconds, matching created_at on job objects -- confirm)
    - created_by_me: Filter by current user's jobs
    - status: Filter by job status (see Job Status Values: QUEUED,
      RUNNING, SUCCEEDED, FAILED, CANCELLED, CANCELLING)
    - wandb_project: Filter by W&B project
    - wandb_name: Filter by W&B run name
    - suffix: Filter by model suffix

    Returns:
    JobsOut with list of fine-tuning jobs
    """

def get(job_id: str, **kwargs) -> CompletionDetailedJobOut:
    """
    Retrieve a fine-tuning job.

    Suitable for polling job progress (see Monitor Fine-tuning Progress
    in the usage examples below).

    Parameters:
    - job_id: Unique identifier of the fine-tuning job

    Returns:
    CompletionDetailedJobOut with detailed job information
    """

def cancel(job_id: str, **kwargs) -> CompletionDetailedJobOut:
    """
    Cancel a fine-tuning job.

    Parameters:
    - job_id: Unique identifier of the job to cancel

    Returns:
    Updated job information with cancelled status. The status may read
    CANCELLING while cancellation is still in progress (see Job Status
    Values).
    """

def start(job_id: str, **kwargs) -> CompletionDetailedJobOut:
    """
    Start a created fine-tuning job.

    Jobs produced by create() do not train until explicitly started.

    Parameters:
    - job_id: Unique identifier of the job to start

    Returns:
    Updated job information with running status
    """

Model Management

Manage fine-tuned models including archiving and updating.

def archive_model(model_id: str, **kwargs) -> ArchiveFTModelOut:
    """
    Archive a fine-tuned model.

    Archiving is reversible: an archived model can be restored with
    unarchive_model(). (Best practice: archive unused models to manage
    costs.)

    Parameters:
    - model_id: ID of the fine-tuned model to archive

    Returns:
    Archive confirmation with model details
    """

def unarchive_model(model_id: str, **kwargs) -> UnarchiveFTModelOut:
    """
    Unarchive a fine-tuned model.

    Restores a model previously archived with archive_model().

    Parameters:
    - model_id: ID of the archived model to restore

    Returns:
    Unarchive confirmation with model details
    """

def update_model(
    model_id: str,
    name: Optional[str] = None,
    description: Optional[str] = None,
    **kwargs
) -> CompletionFTModelOut:
    """
    Update fine-tuned model metadata (name and/or description).

    Parameters:
    - model_id: ID of the model to update
    - name: New name for the model (presumably left unchanged when
      omitted -- confirm)
    - description: New description

    Returns:
    Updated model information
    """

Usage Examples

Prepare Training Data

import json

# Fine-tuning consumes chat-format examples in JSONL: one JSON object per
# line, each carrying a "messages" list of user/assistant turns.
training_data = [
    {
        "messages": [
            {"role": "user", "content": "What is the capital of France?"},
            {"role": "assistant", "content": "The capital of France is Paris."}
        ]
    },
    {
        "messages": [
            {"role": "user", "content": "Explain photosynthesis."},
            {"role": "assistant", "content": "Photosynthesis is the process by which plants convert sunlight, carbon dioxide, and water into glucose and oxygen."}
        ]
    }
]

# Serialize every example onto its own line and write them all at once.
with open("training_data.jsonl", "w") as f:
    f.writelines(json.dumps(example) + "\n" for example in training_data)

Upload and Start Fine-tuning

from mistralai import Mistral
from mistralai.models import TrainingFile

client = Mistral(api_key="your-api-key")

# Upload training file; purpose="fine-tune" marks it as fine-tuning data
upload_result = client.files.upload(
    file="training_data.jsonl",
    purpose="fine-tune"
)

# Create fine-tuning job.
# weight=1.0 gives this file full relative weight in training.
training_file = TrainingFile(
    file_id=upload_result.id,
    weight=1.0
)

# hyperparameters keys mirror CompletionTrainingParameters (see Types)
job = client.fine_tuning.create(
    model="mistral-small-latest",
    training_files=[training_file],
    hyperparameters={
        "training_steps": 100,
        "learning_rate": 0.0001,
        "weight_decay": 0.1
    },
    suffix="my-custom-model"
)

print(f"Created fine-tuning job: {job.id}")
print(f"Status: {job.status}")

Monitor Fine-tuning Progress

import time

# Start the job -- jobs created with create() do not train until started
client.fine_tuning.start(job.id)

# Poll until the job reaches a terminal state
while True:
    job_status = client.fine_tuning.get(job.id)
    print(f"Job status: {job_status.status}")

    # SUCCEEDED / FAILED / CANCELLED are terminal; QUEUED / RUNNING /
    # CANCELLING keep the loop polling (see Job Status Values)
    if job_status.status in ["SUCCEEDED", "FAILED", "CANCELLED"]:
        break

    time.sleep(30)  # Check every 30 seconds

if job_status.status == "SUCCEEDED":
    # fine_tuned_model holds the ID of the newly trained model
    print(f"Fine-tuned model ID: {job_status.fine_tuned_model}")
    print("Fine-tuning completed successfully!")
else:
    print(f"Fine-tuning failed with status: {job_status.status}")

Use Fine-tuned Model

from mistralai.models import UserMessage

# A fine-tuned model is used like any base model: pass its ID as `model`.
# job_status comes from the monitoring loop in the previous example.
if job_status.status == "SUCCEEDED":
    response = client.chat.complete(
        model=job_status.fine_tuned_model,
        messages=[UserMessage(content="What is machine learning?")],
        temperature=0.7
    )

    print("Fine-tuned model response:")
    print(response.choices[0].message.content)

Integration with Weights & Biases

from mistralai.models import WandbIntegration

# Create job with W&B integration so training metrics can be monitored
# through Weights & Biases (project/name identify the W&B run)
wandb_integration = WandbIntegration(
    type="wandb",
    project="my-fine-tuning-project",
    name="experiment-1",
    api_key="your-wandb-api-key"
)

# training_file comes from the Upload and Start Fine-tuning example above
job = client.fine_tuning.create(
    model="mistral-small-latest",
    training_files=[training_file],
    integrations=[wandb_integration],
    suffix="wandb-tracked-model"
)

Types

Job Types

class CompletionDetailedJobOut:
    """Detailed view of a fine-tuning job, returned by create/get/cancel/start."""
    id: str  # Unique job identifier
    object: str  # Object type tag
    model: str  # Base model being fine-tuned
    status: str  # One of the Job Status Values (QUEUED, RUNNING, ...)
    job_type: str  # Kind of fine-tuning job
    created_at: int  # Creation timestamp (presumably Unix seconds -- confirm)
    modified_at: int  # Last modification timestamp
    training_files: List[TrainingFile]  # Training data used by the job
    validation_files: Optional[List[TrainingFile]]  # Optional validation data
    hyperparameters: dict  # Training configuration (see CompletionTrainingParameters)
    fine_tuned_model: Optional[str]  # ID of the resulting model once SUCCEEDED
    integrations: Optional[List[dict]]  # External integrations (e.g. W&B)
    trained_tokens: Optional[int]  # Tokens processed during training
    epochs: Optional[int]  # Number of training epochs
    expected_duration_seconds: Optional[int]  # Estimated job duration
class JobsOut:
    """Paginated job listing returned by list()."""
    data: List[CompletionJobOut]  # Jobs matching the query (summary view)
    object: str  # Object type tag
    total: int  # Total count of matching jobs

class CompletionJobOut:
    """Summary view of a fine-tuning job, as carried in JobsOut.data."""
    id: str  # Unique job identifier
    object: str  # Object type tag
    model: str  # Base model being fine-tuned
    status: str  # One of the Job Status Values
    job_type: str  # Kind of fine-tuning job
    created_at: int  # Creation timestamp
    modified_at: int  # Last modification timestamp
    fine_tuned_model: Optional[str]  # Resulting model ID once SUCCEEDED

Training Configuration

class TrainingFile:
    """Reference to an uploaded training-data file used by create()."""
    file_id: str  # ID returned by client.files.upload(...)
    weight: Optional[float]  # Relative weight of this file in training (e.g. 1.0)

class CompletionTrainingParameters:
    """Training hyperparameters; these keys populate create()'s hyperparameters dict."""
    training_steps: Optional[int]  # Number of training steps
    learning_rate: Optional[float]  # Optimizer learning rate
    weight_decay: Optional[float]  # Weight-decay regularization factor
    warmup_fraction: Optional[float]  # Fraction of steps used for warmup
    epochs: Optional[int]  # Number of training epochs
    fim_ratio: Optional[float]  # Fill-in-the-middle ratio -- confirm semantics
    seq_len: Optional[int]  # Training sequence length

Model Types

class CompletionFTModelOut:
    """Fine-tuned model record, returned by update_model()."""
    id: str  # Model identifier (usable as `model` in chat completions)
    object: str  # Object type tag
    created: int  # Creation timestamp
    owned_by: str  # Owner of the model
    root: str  # Base model this was fine-tuned from -- confirm
    archived: bool  # Whether the model is currently archived
    name: Optional[str]  # Display name (settable via update_model)
    description: Optional[str]  # Description (settable via update_model)
    max_context_length: Optional[int]  # Maximum context window
    aliases: Optional[List[str]]  # Alternative names for the model
    capabilities: Optional[List[str]]  # Supported capabilities
    job: str  # Presumably the fine-tuning job that produced this model -- confirm

class ArchiveFTModelOut:
    """Confirmation returned by archive_model()."""
    id: str  # Model identifier
    object: str  # Object type tag
    archived: bool  # True after a successful archive

class UnarchiveFTModelOut:
    """Confirmation returned by unarchive_model()."""
    id: str  # Model identifier
    object: str  # Object type tag
    archived: bool  # False after a successful unarchive

Job Status Values

  • QUEUED: Job is waiting to start
  • RUNNING: Job is currently training
  • SUCCEEDED: Job completed successfully
  • FAILED: Job failed during training
  • CANCELLED: Job was cancelled by user
  • CANCELLING: Job is in the process of being cancelled

Best Practices

  • Use high-quality, diverse training data
  • Start with smaller datasets to test hyperparameters
  • Monitor training metrics through integrations
  • Validate model performance before production use
  • Archive unused models to manage costs
  • Use descriptive suffixes for model identification

Install with Tessl CLI

npx tessl i tessl/pypi-mistralai

docs

agents.md

audio.md

batch.md

beta.md

chat-completions.md

classification.md

embeddings.md

files.md

fim.md

fine-tuning.md

index.md

models.md

ocr.md

tile.json