or run:

```
npx @tessl/cli init
```
Log in

Version

Tile

Overview

Evals

Files

Files

docs

clients.md · convenience-functions.md · data-types.md · index.md

docs/data-types.md

0

# Data Types and Models

1

2

Comprehensive Pydantic data models for all API interactions including requests, responses, configuration options, and type definitions for messages, tools, and images. All models inherit from `SubscriptableBaseModel` allowing both attribute and dictionary-style access.

3

4

## Capabilities

5

6

### Configuration Models

7

8

Configuration classes for customizing model behavior and performance parameters.

9

10

```python { .api }

11

class Options:

12

"""Configuration options for model parameters."""

13

14

# Load-time options

15

numa: bool = None

16

num_ctx: int = None

17

num_batch: int = None

18

num_gpu: int = None

19

main_gpu: int = None

20

low_vram: bool = None

21

f16_kv: bool = None

22

logits_all: bool = None

23

vocab_only: bool = None

24

use_mmap: bool = None

25

use_mlock: bool = None

26

embedding_only: bool = None

27

num_thread: int = None

28

29

# Runtime options

30

num_keep: int = None

31

seed: int = None

32

num_predict: int = None

33

top_k: int = None

34

top_p: float = None

35

tfs_z: float = None

36

typical_p: float = None

37

repeat_last_n: int = None

38

temperature: float = None

39

repeat_penalty: float = None

40

presence_penalty: float = None

41

frequency_penalty: float = None

42

mirostat: int = None

43

mirostat_tau: float = None

44

mirostat_eta: float = None

45

penalize_newline: bool = None

46

stop: list[str] = None

47

```

48

49

### Message and Communication Models

50

51

Data structures for chat messages, tool interactions, and multimodal content.

52

53

```python { .api }

54

class Image:

55

"""Image data for multimodal models."""

56

value: str | bytes | Path

57

58

def __init__(self, value: str | bytes | Path):

59

"""

60

Create an Image object.

61

62

Parameters:

63

- value: Image as base64 string, raw bytes, or file path

64

"""

65

66

class Message:

67

"""Chat message structure."""

68

role: str

69

content: str = None

70

thinking: str = None

71

images: list[Image] = None

72

tool_name: str = None

73

tool_calls: list[ToolCall] = None

74

75

class ToolCall:

76

"""Tool call specification within a message."""

77

function: Function

78

79

class Function:

80

"""Function call details."""

81

name: str

82

arguments: dict

83

84

class Tool:

85

"""Tool definition for function calling."""

86

type: str = 'function'

87

function: Function = None

88

89

class Function:

90

"""Function specification for tools."""

91

name: str

92

description: str = None

93

parameters: Parameters = None

94

95

class Parameters:

96

"""Function parameters schema."""

97

type: str = 'object'

98

properties: dict[str, Property] = None

99

required: list[str] = None

100

101

class Property:

102

"""Parameter property definition."""

103

type: str

104

description: str = None

105

enum: list = None

106

107

```

108

109

### Utility Functions

110

111

Functions for converting Python functions to Tool objects for function calling.

112

113

```python { .api }

114

def convert_function_to_tool(func: callable) -> Tool:

115

"""

116

Convert a Python function to a Tool object for function calling.

117

118

This function analyzes the function signature and docstring to create

119

a properly formatted Tool object that can be used with chat operations.

120

121

Parameters:

122

- func (callable): Python function to convert to tool

123

124

Returns:

125

Tool: Tool object with function schema derived from the input function

126

"""

127

```

128

129

### Response Models

130

131

Response data structures returned by Ollama API operations.

132

133

```python { .api }

134

class GenerateResponse:

135

"""Response from generate requests."""

136

model: str

137

created_at: str

138

response: str

139

thinking: str = None

140

context: list[int] = None

141

done: bool

142

done_reason: str = None

143

total_duration: int = None

144

load_duration: int = None

145

prompt_eval_count: int = None

146

prompt_eval_duration: int = None

147

eval_count: int = None

148

eval_duration: int = None

149

150

class ChatResponse:

151

"""Response from chat requests."""

152

model: str

153

created_at: str

154

message: Message

155

done: bool

156

done_reason: str = None

157

total_duration: int = None

158

load_duration: int = None

159

prompt_eval_count: int = None

160

prompt_eval_duration: int = None

161

eval_count: int = None

162

eval_duration: int = None

163

164

class EmbedResponse:

165

"""Response from embed requests."""

166

model: str

167

embeddings: list[list[float]]

168

total_duration: int = None

169

load_duration: int = None

170

prompt_eval_count: int = None

171

172

class EmbeddingsResponse:

173

"""Response from embeddings requests (deprecated)."""

174

embedding: list[float]

175

176

class StatusResponse:

177

"""Generic status response."""

178

status: str = None

179

180

class ProgressResponse(StatusResponse):

181

"""Progress response for streaming operations."""

182

completed: int = None

183

total: int = None

184

digest: str = None

185

```

186

187

### Model Information Models

188

189

Data structures for model metadata, capabilities, and system information.

190

191

```python { .api }

192

class ListResponse:

193

"""Response from list requests."""

194

models: list[Model]

195

196

class Model:

197

"""Model information in list responses."""

198

name: str

199

model: str

200

modified_at: str

201

size: int

202

digest: str

203

details: Details = None

204

expires_at: str = None

205

size_vram: int = None

206

207

class Details:

208

"""Detailed model information."""

209

parent_model: str = None

210

format: str = None

211

family: str = None

212

families: list[str] = None

213

parameter_size: str = None

214

quantization_level: str = None

215

216

class ShowResponse:

217

"""Response from show requests."""

218

modified_at: str = None

219

template: str = None

220

modelfile: str = None

221

license: str = None

222

details: Details = None

223

modelinfo: dict = None

224

parameters: str = None

225

capabilities: Capabilities = None

226

227

class Details:

228

"""Detailed model specifications."""

229

parent_model: str = None

230

format: str = None

231

family: str = None

232

families: list[str] = None

233

parameter_size: str = None

234

quantization_level: str = None

235

236

class Capabilities:

237

"""Model capability information."""

238

completion: bool = None

239

chat: bool = None

240

embedding: bool = None

241

vision: bool = None

242

tools: bool = None

243

244

class ProcessResponse:

245

"""Response from ps (process status) requests."""

246

models: list[Model]

247

248

class Model:

249

"""Running model information."""

250

name: str

251

model: str

252

size: int

253

size_vram: int

254

expires_at: str

255

digest: str

256

```

257

258

### Exception Models

259

260

Exception classes for error handling and status reporting.

261

262

```python { .api }

263

class RequestError(Exception):

264

"""Exception for request-related errors."""

265

error: str

266

267

def __init__(self, error: str):

268

"""

269

Create a RequestError.

270

271

Parameters:

272

- error: Error description

273

"""

274

self.error = error

275

super().__init__(error)

276

277

class ResponseError(Exception):

278

"""Exception for response-related errors."""

279

error: str

280

status_code: int

281

282

def __init__(self, error: str, status_code: int = -1):

283

"""

284

Create a ResponseError.

285

286

Parameters:

287

- error: Error description

288

- status_code: HTTP status code

289

"""

290

self.error = error

291

self.status_code = status_code

292

super().__init__(error)

293

```

294

295

## Usage Examples

296

297

### Working with Messages

298

299

```python

300

from ollama import Message, Image

301

302

# Text message

303

message = Message(

304

role='user',

305

content='Hello, how are you?'

306

)

307

308

# Message with images (multimodal)

309

message = Message(

310

role='user',

311

content='What do you see in this image?',

312

images=[Image(value='path/to/image.jpg')]

313

)

314

315

# Assistant message with tool calls

316

message = Message(

317

role='assistant',

318

content='I need to get the weather information.',

319

tool_calls=[

320

Message.ToolCall(

321

function=Message.ToolCall.Function(

322

name='get_weather',

323

arguments={'city': 'Paris'}

324

)

325

)

326

]

327

)

328

```

329

330

### Configuration with Options

331

332

```python

333

from ollama import Options

334

335

# Conservative generation

336

conservative_options = Options(

337

temperature=0.1,

338

top_p=0.9,

339

repeat_penalty=1.1,

340

stop=['</end>', '\n\n']

341

)

342

343

# Creative generation

344

creative_options = Options(

345

temperature=0.9,

346

top_p=0.95,

347

top_k=40,

348

repeat_penalty=1.0

349

)

350

351

# Performance optimization

352

performance_options = Options(

353

num_ctx=4096,

354

num_batch=512,

355

num_gpu=2,

356

f16_kv=True,

357

use_mmap=True

358

)

359

```

360

361

### Tool Definition

362

363

```python

364

from ollama import Tool

365

366

# Manual tool definition

367

weather_tool = Tool(

368

type='function',

369

function=Tool.Function(

370

name='get_weather',

371

description='Get current weather for a city',

372

parameters=Tool.Function.Parameters(

373

type='object',

374

properties={

375

'city': Tool.Function.Parameters.Property(

376

type='string',

377

description='City name'

378

),

379

'units': Tool.Function.Parameters.Property(

380

type='string',

381

description='Temperature units',

382

enum=['celsius', 'fahrenheit']

383

)

384

},

385

required=['city']

386

)

387

)

388

)

389

390

# Automatic tool creation from function

391

from ollama._utils import convert_function_to_tool

392

393

def calculate_area(length: float, width: float) -> float:

394

"""Calculate the area of a rectangle."""

395

return length * width

396

397

area_tool = convert_function_to_tool(calculate_area)

398

```

399

400

### Response Handling

401

402

```python

403

import ollama

404

405

# Generate with response handling

406

response = ollama.generate(

407

model='llama3.2',

408

prompt='Tell me a joke'

409

)

410

411

# Access response data

412

print(f"Model: {response['model']}")

413

print(f"Response: {response['response']}")

414

print(f"Tokens: {response.get('eval_count', 'Unknown')}")

415

416

if response['done']:

417

duration = response.get('total_duration', 0) / 1_000_000 # Convert to ms

418

print(f"Duration: {duration:.2f}ms")

419

420

# Chat response handling

421

chat_response = ollama.chat(

422

model='llama3.2',

423

messages=[{'role': 'user', 'content': 'Hello!'}]

424

)

425

426

message = chat_response['message']

427

print(f"Role: {message['role']}")

428

print(f"Content: {message['content']}")

429

430

# Handle tool calls if present

431

if message.get('tool_calls'):

432

for tool_call in message['tool_calls']:

433

func = tool_call['function']

434

print(f"Tool: {func['name']}")

435

print(f"Args: {func['arguments']}")

436

```

437

438

### Error Handling

439

440

```python

441

import ollama

442

from ollama import RequestError, ResponseError

443

444

try:

445

response = ollama.generate(

446

model='nonexistent-model',

447

prompt='Hello'

448

)

449

except RequestError as e:

450

print(f"Request error: {e.error}")

451

except ResponseError as e:

452

print(f"Response error: {e.error} (status: {e.status_code})")

453

except Exception as e:

454

print(f"Unexpected error: {e}")

455

```

456

457

### Model Information Processing

458

459

```python

460

import ollama

461

462

# List and process models

463

models_response = ollama.list()

464

for model in models_response['models']:

465

size_gb = model['size'] / (1024**3)

466

print(f"{model['name']}: {size_gb:.1f}GB")

467

468

if model.get('details'):

469

details = model['details']

470

print(f" Family: {details.get('family', 'Unknown')}")

471

print(f" Parameters: {details.get('parameter_size', 'Unknown')}")

472

473

# Show detailed model info

474

model_info = ollama.show('llama3.2')

475

if model_info.get('capabilities'):

476

caps = model_info['capabilities']

477

features = []

478

if caps.get('chat'): features.append('chat')

479

if caps.get('embedding'): features.append('embeddings')

480

if caps.get('vision'): features.append('vision')

481

if caps.get('tools'): features.append('tools')

482

print(f"Capabilities: {', '.join(features)}")

483

484

# Process running models

485

running = ollama.ps()

486

total_vram = sum(model['size_vram'] for model in running['models'])

487

print(f"Total VRAM usage: {total_vram / (1024**3):.1f}GB")

488

```

489

490

### Embedding Analysis

491

492

```python

493

import ollama

494

import numpy as np

495

496

# Generate embeddings

497

texts = ["Hello world", "Goodbye world", "Machine learning"]

498

responses = []

499

500

for text in texts:

501

response = ollama.embed(

502

model='nomic-embed-text',

503

input=text

504

)

505

responses.append(response)

506

507

# Work with embedding vectors

508

embeddings = [resp['embeddings'][0] for resp in responses]

509

510

# Calculate cosine similarities

511

def cosine_similarity(a, b):

512

return np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))

513

514

for i, text1 in enumerate(texts):

515

for j, text2 in enumerate(texts[i+1:], i+1):

516

sim = cosine_similarity(embeddings[i], embeddings[j])

517

print(f"{text1} <-> {text2}: {sim:.3f}")

518

```