# Models

Discovery and information retrieval for available AI models across different categories including language models, code models, vision models, and specialized models for various AI tasks.

## Capabilities

### Model Listing

List all available models with detailed information and capabilities.

```python { .api }
def list() -> List[ModelObject]:
    """
    List all available models.

    Returns:
        List of ModelObject instances with model information
    """
```

### Async Model Listing

Asynchronous model listing for concurrent operations.

```python { .api }
async def list() -> List[ModelObject]:
    """
    Asynchronously list all available models.

    Returns:
        List of ModelObject instances with model information
    """
```
## Usage Examples

### List All Models

```python
from together import Together

client = Together()

models = client.models.list()

print(f"Total available models: {len(models)}")

for model in models[:10]:  # Show first 10 models
    print(f"ID: {model.id}")
    print(f"Type: {model.type}")
    print(f"Created: {model.created}")
    if hasattr(model, 'description'):
        print(f"Description: {model.description}")
    print("---")
```
### Filter Models by Type

```python
models = client.models.list()

# Categorize models by type
language_models = []
code_models = []
vision_models = []
embedding_models = []

for model in models:
    model_id = model.id.lower()

    if 'code' in model_id or 'codellama' in model_id:
        code_models.append(model)
    elif 'vision' in model_id or 'clip' in model_id:
        vision_models.append(model)
    elif 'embed' in model_id or 'bert' in model_id:
        embedding_models.append(model)
    else:
        language_models.append(model)

print(f"Language models: {len(language_models)}")
print(f"Code models: {len(code_models)}")
print(f"Vision models: {len(vision_models)}")
print(f"Embedding models: {len(embedding_models)}")
```
### Find Specific Models

```python
def find_models_by_keyword(models: list, keyword: str):
    """Find models containing a specific keyword."""
    matching_models = []

    for model in models:
        if keyword.lower() in model.id.lower():
            matching_models.append(model)

    return matching_models

models = client.models.list()

# Find Llama models
llama_models = find_models_by_keyword(models, "llama")
print(f"Found {len(llama_models)} Llama models:")
for model in llama_models[:5]:
    print(f"  - {model.id}")

# Find Stable Diffusion models
sd_models = find_models_by_keyword(models, "stable-diffusion")
print(f"Found {len(sd_models)} Stable Diffusion models:")
for model in sd_models:
    print(f"  - {model.id}")
```
### Model Capabilities Analysis

```python
def analyze_model_capabilities(models: list):
    """Analyze available model capabilities."""
    capabilities = {
        'chat': [],
        'completion': [],
        'embedding': [],
        'image_generation': [],
        'code_generation': [],
        'vision': [],
        'audio': []
    }

    for model in models:
        model_id = model.id.lower()

        # Categorize by capabilities
        if any(keyword in model_id for keyword in ['chat', 'instruct', 'conversation']):
            capabilities['chat'].append(model.id)
        elif any(keyword in model_id for keyword in ['code', 'codellama', 'programming']):
            capabilities['code_generation'].append(model.id)
        elif any(keyword in model_id for keyword in ['embed', 'bert', 'retrieval']):
            capabilities['embedding'].append(model.id)
        elif any(keyword in model_id for keyword in ['stable-diffusion', 'dall-e', 'midjourney']):
            capabilities['image_generation'].append(model.id)
        elif any(keyword in model_id for keyword in ['vision', 'clip', 'visual']):
            capabilities['vision'].append(model.id)
        elif any(keyword in model_id for keyword in ['whisper', 'speech', 'audio']):
            capabilities['audio'].append(model.id)
        else:
            capabilities['completion'].append(model.id)

    return capabilities

models = client.models.list()
capabilities = analyze_model_capabilities(models)

for capability, model_list in capabilities.items():
    print(f"{capability.title()}: {len(model_list)} models")
    if model_list:
        print(f"  Examples: {', '.join(model_list[:3])}")
    print()
```
### Model Information Display

```python
def display_model_info(model):
    """Display detailed information about a model."""
    print(f"Model ID: {model.id}")
    print(f"Object Type: {model.object}")
    print(f"Created: {model.created}")
    print(f"Owned By: {model.owned_by}")

    # Display additional attributes if available
    attributes = ['description', 'context_length', 'tokenizer', 'license']
    for attr in attributes:
        if hasattr(model, attr):
            value = getattr(model, attr)
            if value:
                print(f"{attr.title()}: {value}")

    print("---")

# Display information for specific models
models = client.models.list()
popular_models = [
    "meta-llama/Llama-3.2-3B-Instruct-Turbo",
    "codellama/CodeLlama-34b-Python-hf",
    "stabilityai/stable-diffusion-xl-base-1.0"
]

for model_id in popular_models:
    matching_models = [m for m in models if m.id == model_id]
    if matching_models:
        display_model_info(matching_models[0])
```
### Model Recommendation System

```python
def recommend_model(task_type: str, models: list):
    """Recommend models based on task type."""
    recommendations = {
        'chat': [
            'meta-llama/Llama-3.2-3B-Instruct-Turbo',
            'meta-llama/Llama-3.2-11B-Vision-Instruct-Turbo',
            'Qwen/Qwen2.5-VL-72B-Instruct'
        ],
        'code': [
            'codellama/CodeLlama-34b-Python-hf',
            'codellama/CodeLlama-13b-Instruct-hf',
            'WizardLM/WizardCoder-Python-34B-V1.0'
        ],
        'embedding': [
            'togethercomputer/m2-bert-80M-8k-retrieval',
            'BAAI/bge-large-en-v1.5',
            'WhereIsAI/UAE-Large-V1'
        ],
        'image': [
            'stabilityai/stable-diffusion-xl-base-1.0',
            'stabilityai/stable-diffusion-2-1',
            'prompthero/openjourney'
        ]
    }

    if task_type not in recommendations:
        return []

    # Filter to only include available models
    available_model_ids = [m.id for m in models]
    available_recommendations = [
        model_id for model_id in recommendations[task_type]
        if model_id in available_model_ids
    ]

    return available_recommendations

models = client.models.list()

# Get recommendations for different tasks
for task in ['chat', 'code', 'embedding', 'image']:
    recommended = recommend_model(task, models)
    print(f"{task.title()} task recommendations:")
    for model_id in recommended:
        print(f"  - {model_id}")
    print()
```
### Async Model Operations

```python
import asyncio
from together import AsyncTogether

async def analyze_models_async():
    client = AsyncTogether()

    # Get model list asynchronously
    models = await client.models.list()

    # Analyze model distribution
    model_stats = {
        'total': len(models),
        'by_provider': {},
        'by_type': {}
    }

    for model in models:
        # Extract provider from model ID
        if '/' in model.id:
            provider = model.id.split('/')[0]
        else:
            provider = 'unknown'

        model_stats['by_provider'][provider] = model_stats['by_provider'].get(provider, 0) + 1

        # Categorize by type
        model_type = getattr(model, 'type', 'unknown')
        model_stats['by_type'][model_type] = model_stats['by_type'].get(model_type, 0) + 1

    return model_stats

# Run async analysis
stats = asyncio.run(analyze_models_async())
print(f"Total models: {stats['total']}")
print(f"Top providers: {dict(list(stats['by_provider'].items())[:5])}")
print(f"Model types: {stats['by_type']}")
```
## Types

### Response Types

```python { .api }
class ModelObject:
    id: str
    object: str
    created: int
    owned_by: str
    type: Optional[str] = None
    description: Optional[str] = None
    context_length: Optional[int] = None
    tokenizer: Optional[str] = None
    license: Optional[str] = None
```
## Popular Model Categories

### Language Models
- `meta-llama/Llama-3.2-3B-Instruct-Turbo` - Fast instruction-following
- `meta-llama/Llama-3.2-11B-Vision-Instruct-Turbo` - Multi-modal with vision
- `Qwen/Qwen2.5-VL-72B-Instruct` - Large vision-language model

### Code Models
- `codellama/CodeLlama-34b-Python-hf` - Python code generation
- `codellama/CodeLlama-13b-Instruct-hf` - Code instruction following
- `WizardLM/WizardCoder-Python-34B-V1.0` - Advanced Python coding

### Embedding Models
- `togethercomputer/m2-bert-80M-8k-retrieval` - Retrieval optimized
- `BAAI/bge-large-en-v1.5` - High-quality English embeddings
- `WhereIsAI/UAE-Large-V1` - General-purpose embeddings

### Image Generation Models
- `stabilityai/stable-diffusion-xl-base-1.0` - High-quality image generation
- `stabilityai/stable-diffusion-2-1` - Versatile diffusion model
- `prompthero/openjourney` - Artistic and creative styles