CtrlK
BlogDocsLog inGet started
Tessl Logo

tessl/pypi-together

Python client for Together's Cloud Platform, providing comprehensive AI model APIs

Overview
Eval results
Files

models.mddocs/

Models

Discovery and information retrieval for available AI models across different categories including language models, code models, vision models, and specialized models for various AI tasks.

Capabilities

Model Listing

List all available models with detailed information and capabilities.

# NOTE: this method name intentionally shadows the builtin `list`; it is
# only ever accessed as `client.models.list()`, never as a bare name.
def list() -> List[ModelObject]:
    """
    List all available models.

    Returns:
        List of ModelObject instances with model information
        (id, object, created, owned_by, plus optional metadata fields).
    """

Async Model Listing

Asynchronous model listing for concurrent operations.

# Async variant of the same endpoint; accessed as `await client.models.list()`
# on an AsyncTogether client (see the async example below).
async def list() -> List[ModelObject]:
    """
    Asynchronously list all available models.

    Returns:
        List of ModelObject instances with model information
        (same shape as the synchronous variant).
    """

Usage Examples

List All Models

from together import Together

client = Together()

models = client.models.list()

print(f"Total available models: {len(models)}")

# Preview only the first ten entries to keep the output short.
preview = models[:10]
for model in preview:
    print(f"ID: {model.id}")
    print(f"Type: {model.type}")
    print(f"Created: {model.created}")
    # `description` is optional metadata; print it only when the attribute exists.
    if hasattr(model, 'description'):
        print(f"Description: {model.description}")
    print("---")

Filter Models by Type

models = client.models.list()

# Bucket models by simple keyword heuristics on the model ID.
# First matching category wins; everything unmatched is a language model.
language_models = []
code_models = []
vision_models = []
embedding_models = []

for model in models:
    lowered = model.id.lower()

    if any(kw in lowered for kw in ('code', 'codellama')):
        code_models.append(model)
    elif any(kw in lowered for kw in ('vision', 'clip')):
        vision_models.append(model)
    elif any(kw in lowered for kw in ('embed', 'bert')):
        embedding_models.append(model)
    else:
        language_models.append(model)

print(f"Language models: {len(language_models)}")
print(f"Code models: {len(code_models)}")
print(f"Vision models: {len(vision_models)}")
print(f"Embedding models: {len(embedding_models)}")

Find Specific Models

def find_models_by_keyword(models: list, keyword: str):
    """Find models containing a specific keyword.

    The match is case-insensitive and tests substring containment
    against each model's ID. Returns matches in their original order.
    """
    needle = keyword.lower()
    return [model for model in models if needle in model.id.lower()]

models = client.models.list()

# Llama-family language models (show at most five).
llama_models = find_models_by_keyword(models, "llama")
print(f"Found {len(llama_models)} Llama models:")
for model in llama_models[:5]:
    print(f"  - {model.id}")

# Stable Diffusion image-generation models (show all matches).
sd_models = find_models_by_keyword(models, "stable-diffusion")
print(f"Found {len(sd_models)} Stable Diffusion models:")
for model in sd_models:
    print(f"  - {model.id}")

Model Capabilities Analysis

def analyze_model_capabilities(models: list):
    """Analyze available model capabilities.

    Classifies each model ID into one capability bucket by keyword,
    checking the rules in order (first match wins). Models that match
    no rule are treated as plain text-completion models.
    """
    # Ordered rules replicate the original if/elif chain exactly.
    rules = (
        ('chat', ('chat', 'instruct', 'conversation')),
        ('code_generation', ('code', 'codellama', 'programming')),
        ('embedding', ('embed', 'bert', 'retrieval')),
        ('image_generation', ('stable-diffusion', 'dall-e', 'midjourney')),
        ('vision', ('vision', 'clip', 'visual')),
        ('audio', ('whisper', 'speech', 'audio')),
    )

    capabilities = {
        'chat': [],
        'completion': [],
        'embedding': [],
        'image_generation': [],
        'code_generation': [],
        'vision': [],
        'audio': []
    }

    for model in models:
        lowered = model.id.lower()
        for category, keywords in rules:
            if any(keyword in lowered for keyword in keywords):
                capabilities[category].append(model.id)
                break
        else:
            # No rule matched: default bucket.
            capabilities['completion'].append(model.id)

    return capabilities

models = client.models.list()
capabilities = analyze_model_capabilities(models)

# One summary line per capability, with up to three example IDs.
for capability, model_list in capabilities.items():
    print(f"{capability.title()}: {len(model_list)} models")
    examples = model_list[:3]
    if examples:
        print(f"  Examples: {', '.join(examples)}")
    print()

Model Information Display

def display_model_info(model):
    """Display detailed information about a model.

    Always prints the four core fields; optional metadata fields are
    printed only when present and truthy. Ends with a separator line.
    """
    print(f"Model ID: {model.id}")
    print(f"Object Type: {model.object}")
    print(f"Created: {model.created}")
    print(f"Owned By: {model.owned_by}")

    # Optional metadata: skipped when the attribute is missing or falsy.
    for attr in ('description', 'context_length', 'tokenizer', 'license'):
        value = getattr(model, attr, None)
        if value:
            print(f"{attr.title()}: {value}")

    print("---")

# Display information for a few well-known models, if available.
models = client.models.list()
popular_models = [
    "meta-llama/Llama-3.2-3B-Instruct-Turbo",
    "codellama/CodeLlama-34b-Python-hf",
    "stabilityai/stable-diffusion-xl-base-1.0"
]

for model_id in popular_models:
    # Take the first model whose ID matches exactly; skip if absent.
    match = next((m for m in models if m.id == model_id), None)
    if match is not None:
        display_model_info(match)

Model Recommendation System

def recommend_model(task_type: str, models: list):
    """Recommend models based on task type.

    Args:
        task_type: Task category; one of 'chat', 'code', 'embedding',
            or 'image'.
        models: Model objects (each exposing an ``.id``) as returned by
            ``client.models.list()``.

    Returns:
        Curated model IDs for the task, in preference order, filtered to
        those actually present in ``models``. Empty list for an unknown
        task type or when none of the curated models are available.
    """
    recommendations = {
        'chat': [
            'meta-llama/Llama-3.2-3B-Instruct-Turbo',
            'meta-llama/Llama-3.2-11B-Vision-Instruct-Turbo',
            'Qwen/Qwen2.5-VL-72B-Instruct'
        ],
        'code': [
            'codellama/CodeLlama-34b-Python-hf',
            'codellama/CodeLlama-13b-Instruct-hf',
            'WizardLM/WizardCoder-Python-34B-V1.0'
        ],
        'embedding': [
            'togethercomputer/m2-bert-80M-8k-retrieval',
            'BAAI/bge-large-en-v1.5',
            'WhereIsAI/UAE-Large-V1'
        ],
        'image': [
            'stabilityai/stable-diffusion-xl-base-1.0',
            'stabilityai/stable-diffusion-2-1',
            'prompthero/openjourney'
        ]
    }

    if task_type not in recommendations:
        return []

    # Build a set once so each membership test is O(1) instead of a
    # linear scan over the (potentially large) model list.
    available_model_ids = {m.id for m in models}
    return [
        model_id for model_id in recommendations[task_type]
        if model_id in available_model_ids
    ]

models = client.models.list()

# Print curated picks for each supported task category.
for task in ('chat', 'code', 'embedding', 'image'):
    recommended = recommend_model(task, models)
    print(f"{task.title()} task recommendations:")
    for model_id in recommended:
        print(f"  - {model_id}")
    print()

Async Model Operations

import asyncio
from together import AsyncTogether

async def analyze_models_async():
    """Fetch the model list asynchronously and tally models per
    provider (the org prefix of the model ID) and per reported type."""
    client = AsyncTogether()

    models = await client.models.list()

    model_stats = {
        'total': len(models),
        'by_provider': {},
        'by_type': {}
    }
    by_provider = model_stats['by_provider']
    by_type = model_stats['by_type']

    for model in models:
        # Provider is everything before the first '/'; IDs without a
        # slash are counted under 'unknown'.
        provider, sep, _ = model.id.partition('/')
        if not sep:
            provider = 'unknown'
        by_provider[provider] = by_provider.get(provider, 0) + 1

        # `type` is optional on model objects; default to 'unknown'.
        model_type = getattr(model, 'type', 'unknown')
        by_type[model_type] = by_type.get(model_type, 0) + 1

    return model_stats

# Run the async analysis from synchronous code.
stats = asyncio.run(analyze_models_async())
print(f"Total models: {stats['total']}")
print(f"Top providers: {dict(list(stats['by_provider'].items())[:5])}")
print(f"Model types: {stats['by_type']}")

Types

Response Types

class ModelObject:
    """Model metadata record returned by the models list endpoints.

    The first four fields are always present; the remaining fields are
    optional metadata that may be None or absent for some models (the
    usage examples above guard access with hasattr/getattr).
    """
    id: str  # unique model identifier, e.g. "meta-llama/Llama-3.2-3B-Instruct-Turbo"
    object: str  # object-type tag for the record
    created: int  # creation time as an integer (presumably a Unix timestamp — confirm)
    owned_by: str  # owning organization / provider
    type: Optional[str] = None  # coarse model category, when reported
    description: Optional[str] = None  # human-readable model description
    context_length: Optional[int] = None  # context window size (presumably in tokens — confirm)
    tokenizer: Optional[str] = None  # tokenizer identifier
    license: Optional[str] = None  # license name

Popular Model Categories

Language Models

  • meta-llama/Llama-3.2-3B-Instruct-Turbo - Fast instruction-following
  • meta-llama/Llama-3.2-11B-Vision-Instruct-Turbo - Multi-modal with vision
  • Qwen/Qwen2.5-VL-72B-Instruct - Large vision-language model

Code Models

  • codellama/CodeLlama-34b-Python-hf - Python code generation
  • codellama/CodeLlama-13b-Instruct-hf - Code instruction following
  • WizardLM/WizardCoder-Python-34B-V1.0 - Advanced Python coding

Embedding Models

  • togethercomputer/m2-bert-80M-8k-retrieval - Retrieval optimized
  • BAAI/bge-large-en-v1.5 - High-quality English embeddings
  • WhereIsAI/UAE-Large-V1 - General-purpose embeddings

Image Generation Models

  • stabilityai/stable-diffusion-xl-base-1.0 - High-quality image generation
  • stabilityai/stable-diffusion-2-1 - Versatile diffusion model
  • prompthero/openjourney - Artistic and creative styles

Install with Tessl CLI

npx tessl i tessl/pypi-together

docs

audio.md

batch.md

chat-completions.md

code-interpreter.md

completions.md

embeddings.md

endpoints.md

evaluation.md

files.md

fine-tuning.md

images.md

index.md

models.md

rerank.md

tile.json