CtrlK
Blog · Docs · Log in · Get started
Tessl Logo

tessl/pypi-together

Python client for Together's Cloud Platform providing comprehensive AI model APIs

Overview
Eval results
Files

docs/fine-tuning.md

Fine-tuning

Custom model training with supervised fine-tuning and direct preference optimization. Create specialized models tailored to specific domains, tasks, or behavioral requirements with comprehensive job management and model downloading capabilities.

Capabilities

Create Fine-tuning Job

Start a fine-tuning job with custom training configuration.

def create(
    training_file: str,
    model: str,
    n_epochs: Optional[int] = None,
    n_checkpoints: Optional[int] = None,
    batch_size: Optional[Union[str, int]] = None,
    learning_rate: Optional[float] = None,
    suffix: Optional[str] = None,
    wandb_api_key: Optional[str] = None,
    training_type: Optional[str] = None,
    **kwargs
) -> FinetuneResponse:
    """
    Create a fine-tuning job.

    Args:
        training_file: ID of an uploaded training file (as returned by files.upload)
        model: Base model identifier to fine-tune
        n_epochs: Number of training epochs
        n_checkpoints: Number of checkpoints to save during training
        batch_size: Batch size — either the string 'max' or an explicit integer
        learning_rate: Learning rate for training
        suffix: Custom suffix appended to the fine-tuned model's name
        wandb_api_key: Weights & Biases API key for experiment tracking
        training_type: Type of training ('FullTrainingType' or 'LoRATrainingType')
        **kwargs: Additional provider-specific options passed through to the API

    Returns:
        FinetuneResponse describing the newly created job (id, status, ...)
    """

List Fine-tuning Jobs

List all fine-tuning jobs with status and metadata.

def list() -> FinetuneList:
    # NOTE: named after the REST verb; shadows the builtin `list` only within
    # this client namespace — renaming would break the public API.
    """
    List all fine-tuning jobs.

    Returns:
        FinetuneList whose `data` attribute holds one FinetuneResponse per job
    """

Retrieve Fine-tuning Job

Get detailed information about a specific fine-tuning job.

def retrieve(id: str) -> FinetuneResponse:
    # NOTE: the parameter name `id` mirrors the API field; it shadows the
    # builtin `id` only inside this function.
    """
    Retrieve fine-tuning job details.

    Args:
        id: Fine-tuning job ID, as returned by create()

    Returns:
        FinetuneResponse with the job's current status, model, and metadata
    """

Cancel Fine-tuning Job

Cancel a running fine-tuning job.

def cancel(id: str) -> FinetuneResponse:
    """
    Cancel a fine-tuning job.

    Args:
        id: Fine-tuning job ID

    Returns:
        FinetuneResponse reflecting the job's updated (cancelled) status
    """

List Job Events

Get training progress events and logs for a fine-tuning job.

def list_events(id: str) -> FinetuneListEvents:
    """
    List events for a fine-tuning job.

    Args:
        id: Fine-tuning job ID

    Returns:
        FinetuneListEvents whose `data` attribute holds training progress/log
        events (each with a level, timestamp, and message)
    """

Download Fine-tuned Model

Download trained model weights and artifacts.

def download(
    id: str,
    checkpoint: Optional[str] = None,
    output: Optional[str] = None
) -> FinetuneDownloadResult:
    """
    Download fine-tuned model or checkpoint.

    Args:
        id: Fine-tuning job ID
        checkpoint: Specific checkpoint to download; when omitted, presumably
            the final model artifacts are fetched — TODO confirm
        output: Output directory path; default destination unspecified here

    Returns:
        FinetuneDownloadResult with the output path and list of files written
    """

Async Fine-tuning Operations

All fine-tuning operations support asynchronous execution.

# Async variants: identical parameters and return types to the synchronous
# methods above, but awaitable.
async def create(training_file: str, model: str, **kwargs) -> FinetuneResponse: ...
async def list() -> FinetuneList: ...
async def retrieve(id: str) -> FinetuneResponse: ...
async def cancel(id: str) -> FinetuneResponse: ...
async def list_events(id: str) -> FinetuneListEvents: ...
async def download(id: str, **kwargs) -> FinetuneDownloadResult: ...

Usage Examples

Basic Fine-tuning

from together import Together

# NOTE(review): Together() presumably picks up API credentials from the
# environment — confirm against the client documentation before running.
client = Together()

# First, upload training data (a JSONL file; purpose must be "fine-tune")
training_file = client.files.upload(
    file="training_data.jsonl",
    purpose="fine-tune"
)

# Create fine-tuning job; training_file expects the uploaded file's ID,
# not a local path.
finetune_job = client.fine_tuning.create(
    training_file=training_file.id,
    model="meta-llama/Llama-3.2-3B-Instruct",
    n_epochs=3,
    n_checkpoints=1,
    batch_size="max",
    learning_rate=1e-5,
    suffix="my-custom-model"
)

print(f"Fine-tuning job created: {finetune_job.id}")
print(f"Status: {finetune_job.status}")
# fine_tuned_model is Optional — it may still be None this early in the job
print(f"Model name: {finetune_job.fine_tuned_model}")

Advanced Fine-tuning Configuration

# Create fine-tuning job with advanced settings
finetune_job = client.fine_tuning.create(
    training_file=training_file.id,
    model="meta-llama/Llama-3.2-3B-Instruct",
    n_epochs=5,
    n_checkpoints=3,
    batch_size=16,  # explicit integer instead of the "max" shorthand
    learning_rate=5e-6,
    suffix="domain-specialist",
    wandb_api_key="your-wandb-api-key",  # For experiment tracking
    training_type="FullTrainingType"  # Full model training vs LoRA
)

print(f"Advanced fine-tuning job: {finetune_job.id}")
print(f"Training type: {finetune_job.training_type}")
print(f"Hyperparameters: {finetune_job.hyperparameters}")

Monitor Fine-tuning Progress

import time

def monitor_finetune_job(client: Together, job_id: str):
    """Poll a fine-tuning job until it reaches a terminal state, printing progress."""
    done = False
    while not done:
        job = client.fine_tuning.retrieve(job_id)
        print(f"Status: {job.status}")

        status = job.status
        if status == "succeeded":
            print(f"Fine-tuning completed!")
            print(f"Fine-tuned model: {job.fine_tuned_model}")
            done = True
        elif status in ("failed", "cancelled"):
            if status == "failed":
                print(f"Fine-tuning failed: {job.error}")
            else:
                print("Fine-tuning was cancelled")
            done = True
        else:
            # Surface the newest training event, if any, before the next poll
            events = client.fine_tuning.list_events(job_id)
            if events.data:
                print(f"Latest: {events.data[-1].message}")
            time.sleep(30)  # Check every 30 seconds
# Monitor the job (blocks, polling every 30 seconds, until a terminal state)
monitor_finetune_job(client, finetune_job.id)

List and Manage Jobs

# List all fine-tuning jobs
jobs = client.fine_tuning.list()

print(f"Total fine-tuning jobs: {len(jobs.data)}")

for job in jobs.data:
    print(f"ID: {job.id}")
    print(f"Model: {job.model}")
    print(f"Status: {job.status}")
    print(f"Created: {job.created_at}")
    # fine_tuned_model is Optional — only set once training has produced a model
    if job.fine_tuned_model:
        print(f"Fine-tuned model: {job.fine_tuned_model}")
    print("---")

# Filter jobs by status string ("running", "succeeded", ...)
running_jobs = [job for job in jobs.data if job.status == "running"]
completed_jobs = [job for job in jobs.data if job.status == "succeeded"]

print(f"Running jobs: {len(running_jobs)}")
print(f"Completed jobs: {len(completed_jobs)}")

Download Trained Model

# Download the fine-tuned model
job_id = "ft-c66a5c18-1d6d-43c9-94bd-32d756425b4b"  # example job ID

# Omitting `checkpoint` downloads the job's model artifacts
download_result = client.fine_tuning.download(
    id=job_id,
    output="./my-finetuned-model"
)

print(f"Model downloaded to: {download_result.output_path}")
print(f"Files downloaded: {download_result.files}")

# Download a specific named checkpoint instead of the final artifacts
checkpoint_result = client.fine_tuning.download(
    id=job_id,
    checkpoint="checkpoint-1000",
    output="./checkpoint-1000"
)

print(f"Checkpoint downloaded: {checkpoint_result.checkpoint}")

Training Data Preparation

import json

def prepare_conversation_data(
    conversations: list,
    output_file: str,
    model: str = "meta-llama/Llama-3.2-3B-Instruct",
):
    """Prepare conversation data for fine-tuning.

    Args:
        conversations: Dicts, each containing a "messages" list of chat turns.
        output_file: Path of the JSONL file to write.
        model: Model identifier recorded on each example. Parameterized
            (previously hard-coded) so the same helper works for any base
            model; the default preserves the original behavior.

    Returns:
        The output_file path, for chaining into an upload call.
    """
    # Format each conversation for instruction fine-tuning
    training_examples = [
        {"messages": conversation["messages"], "model": model}
        for conversation in conversations
    ]

    # Write JSONL format: one JSON object per line. Explicit UTF-8 keeps the
    # output portable regardless of the platform's default encoding.
    with open(output_file, 'w', encoding="utf-8") as f:
        for example in training_examples:
            f.write(json.dumps(example) + '\n')

    print(f"Prepared {len(training_examples)} training examples")
    return output_file

def prepare_completion_data(prompts_and_completions: list, output_file: str):
    """Prepare prompt-completion pairs for fine-tuning.

    Writes one JSON object per (prompt, completion) pair to output_file in
    JSONL format and returns the path.
    """
    # Build all records up front, one per pair
    training_examples = [
        {"prompt": prompt, "completion": completion}
        for prompt, completion in prompts_and_completions
    ]

    with open(output_file, 'w') as f:
        f.writelines(json.dumps(example) + '\n' for example in training_examples)

    print(f"Prepared {len(training_examples)} prompt-completion pairs")
    return output_file

# Example usage: two single-turn conversations in the "messages" format
conversation_data = [
    {
        "messages": [
            {"role": "user", "content": "Explain quantum computing"},
            {"role": "assistant", "content": "Quantum computing leverages quantum mechanics..."}
        ]
    },
    {
        "messages": [
            {"role": "user", "content": "What is machine learning?"},
            {"role": "assistant", "content": "Machine learning is a subset of AI..."}
        ]
    }
]

# Writes one JSON object per line and returns the output path
training_file = prepare_conversation_data(conversation_data, "custom_training.jsonl")

Complete Fine-tuning Workflow

def complete_finetuning_workflow(
    client: Together,
    training_data_file: str,
    base_model: str,
    job_name: str
):
    """Run the full fine-tuning pipeline: upload, train, monitor, download.

    Returns the fine-tuned model identifier on success, or None when the
    job fails or is cancelled.
    """
    # Step 1: Upload training data
    print("1. Uploading training data...")
    uploaded = client.files.upload(
        file=training_data_file,
        purpose="fine-tune"
    )
    print(f"   Uploaded: {uploaded.id}")

    # Step 2: Create fine-tuning job
    print("2. Creating fine-tuning job...")
    job = client.fine_tuning.create(
        training_file=uploaded.id,
        model=base_model,
        n_epochs=3,
        batch_size="max",
        learning_rate=1e-5,
        suffix=job_name
    )
    print(f"   Job created: {job.id}")

    # Step 3: Poll until the job reaches a terminal state
    print("3. Monitoring progress...")
    job_status = client.fine_tuning.retrieve(job.id)
    print(f"   Status: {job_status.status}")
    while job_status.status not in ("succeeded", "failed", "cancelled"):
        time.sleep(60)  # Check every minute
        job_status = client.fine_tuning.retrieve(job.id)
        print(f"   Status: {job_status.status}")

    if job_status.status != "succeeded":
        print(f"   ❌ Fine-tuning {job_status.status}")
        return None
    print(f"   ✅ Fine-tuning completed!")
    print(f"   Model: {job_status.fine_tuned_model}")

    # Step 4: Download model (optional)
    print("4. Downloading model...")
    download_result = client.fine_tuning.download(
        id=job.id,
        output=f"./models/{job_name}"
    )
    print(f"   Downloaded to: {download_result.output_path}")

    return job_status.fine_tuned_model

# Run complete workflow (uploads data, trains, monitors, downloads the model)
model_id = complete_finetuning_workflow(
    client=client,
    training_data_file="domain_specific_data.jsonl",
    base_model="meta-llama/Llama-3.2-3B-Instruct",
    job_name="domain-expert"
)

# The workflow returns None when the job fails or is cancelled
if model_id:
    print(f"Ready to use model: {model_id}")

Types

Request Types

class FinetuneRequest:
    """Request payload for creating a fine-tuning job (mirrors create())."""
    training_file: str  # ID of an uploaded training file
    model: str  # base model identifier to fine-tune
    n_epochs: Optional[int] = None
    n_checkpoints: Optional[int] = None
    batch_size: Optional[Union[str, int]] = None  # 'max' or an explicit integer
    learning_rate: Optional[float] = None
    suffix: Optional[str] = None  # appended to the fine-tuned model's name
    wandb_api_key: Optional[str] = None  # Weights & Biases tracking key
    training_type: Optional[str] = None  # 'FullTrainingType' or 'LoRATrainingType'

Response Types

class FinetuneResponse:
    """Server-side representation of a fine-tuning job."""
    id: str  # job identifier
    object: str
    model: str  # base model being fine-tuned
    created_at: int  # creation timestamp — presumably epoch seconds, TODO confirm
    finished_at: Optional[int]  # set once the job reaches a terminal state
    fine_tuned_model: Optional[str]  # resulting model name, once available
    status: str  # e.g. "running", "succeeded", "failed", "cancelled"
    trained_tokens: Optional[int]
    training_file: str
    validation_file: Optional[str]
    hyperparameters: dict
    error: Optional[str]  # populated when status is "failed"
    training_type: Optional[str]

class FinetuneList:
    """Container returned by list(): all jobs in `data`."""
    object: str
    data: List[FinetuneResponse]

class FinetuneListEvents:
    """Container returned by list_events(): training events in `data`."""
    object: str
    data: List[FinetuneEvent]

class FinetuneEvent:
    """A single training progress/log event for a fine-tuning job."""
    object: str
    created_at: int
    level: str  # log level of the event
    message: str  # human-readable progress/log text

class FinetuneDownloadResult:
    """Result of download(): where the model artifacts were written."""
    id: str  # job the download belongs to
    output_path: str  # directory the files were written to
    checkpoint: Optional[str]  # checkpoint name, when one was requested
    files: List[str]  # downloaded files — relative vs absolute paths, TODO confirm

Training Configuration Types

class TrainingType:
    """Base training type configuration; concrete subclasses carry a `type` tag."""
    pass

class FullTrainingType(TrainingType):
    """Full model fine-tuning configuration."""
    type: Literal["FullTrainingType"]  # discriminator tag for this training type

class LoRATrainingType(TrainingType):
    """LoRA (Low-Rank Adaptation) training configuration."""
    type: Literal["LoRATrainingType"]  # discriminator tag for this training type
    lora_r: Optional[int] = None  # LoRA rank (r)
    lora_alpha: Optional[int] = None  # LoRA scaling factor (alpha)
    lora_dropout: Optional[float] = None  # dropout applied during LoRA training

class TrainingMethodSFT:
    """Supervised Fine-Tuning method."""
    type: Literal["SFT"]  # discriminator tag for this training method

class TrainingMethodDPO:
    """Direct Preference Optimization method."""
    type: Literal["DPO"]  # discriminator tag for this training method
    beta: Optional[float] = None  # DPO strength/temperature parameter — TODO confirm semantics
    reference_model: Optional[str] = None  # model the trained policy is compared against

Install with Tessl CLI

npx tessl i tessl/pypi-together

docs

audio.md

batch.md

chat-completions.md

code-interpreter.md

completions.md

embeddings.md

endpoints.md

evaluation.md

files.md

fine-tuning.md

images.md

index.md

models.md

rerank.md

tile.json