or run

npx @tessl/cli init
Log in

Version

Tile

Overview

Evals

Files

Files

docs

async-operations.md, client-operations.md, container-operations.md, database-operations.md, index.md, script-operations.md, user-management.md

docs/container-operations.md

0

# Container Operations

1

2

Container-level operations for managing JSON documents, including CRUD operations, querying, batch processing, change feed monitoring, and conflict resolution. The ContainerProxy provides the interface for all document and container-scoped operations.

3

4

## Capabilities

5

6

### Container Information

7

8

Read container properties and configuration.

9

10

```python { .api }

11

def read(self, populate_query_metrics: bool = None, **kwargs):

12

"""

13

Read container properties and configuration.

14

15

Parameters:

16

- populate_query_metrics: Include query metrics in response

17

- session_token: Session token for consistency

18

19

Returns:

20

Container properties as dictionary

21

"""

22

23

@property

24

def is_system_key(self) -> bool:

25

"""

26

Check if container uses system-generated partition key.

27

28

Returns:

29

True if system partition key, False otherwise

30

"""

31

32

@property

33

def scripts(self) -> ScriptsProxy:

34

"""

35

Get scripts proxy for stored procedures, triggers, and UDFs.

36

37

Returns:

38

ScriptsProxy instance for this container

39

"""

40

```

41

42

### Item Operations

43

44

Create, read, update, and delete JSON documents within the container.

45

46

```python { .api }

47

def create_item(self, body: dict, populate_query_metrics: bool = None, pre_trigger_include: str = None, post_trigger_include: str = None, **kwargs):

48

"""

49

Create a new item in the container.

50

51

Parameters:

52

- body: Item data as dictionary (must include 'id' field)

53

- populate_query_metrics: Include query metrics

54

- pre_trigger_include: Pre-trigger to execute

55

- post_trigger_include: Post-trigger to execute

56

- enable_automatic_id_generation: Auto-generate ID if missing

57

- session_token: Session token for consistency

58

- initial_headers: Custom headers

59

60

Returns:

61

Created item as dictionary

62

63

Raises:

64

CosmosResourceExistsError: If item with same ID already exists

65

"""

66

67

def read_item(self, item: str, partition_key: str, populate_query_metrics: bool = None, **kwargs):

68

"""

69

Read a specific item by ID and partition key.

70

71

Parameters:

72

- item: Item ID

73

- partition_key: Partition key value

74

- populate_query_metrics: Include query metrics

75

- session_token: Session token for consistency

76

77

Returns:

78

Item as dictionary

79

80

Raises:

81

CosmosResourceNotFoundError: If item doesn't exist

82

"""

83

84

def upsert_item(self, body: dict, populate_query_metrics: bool = None, pre_trigger_include: str = None, post_trigger_include: str = None, **kwargs):

85

"""

86

Create or replace an item.

87

88

Parameters:

89

- body: Item data as dictionary (must include 'id' field)

90

- populate_query_metrics: Include query metrics

91

- pre_trigger_include: Pre-trigger to execute

92

- post_trigger_include: Post-trigger to execute

93

- enable_automatic_id_generation: Auto-generate ID if missing

94

- session_token: Session token for consistency

95

- initial_headers: Custom headers

96

97

Returns:

98

Upserted item as dictionary

99

"""

100

101

def replace_item(self, item: str, body: dict, populate_query_metrics: bool = None, pre_trigger_include: str = None, post_trigger_include: str = None, **kwargs):

102

"""

103

Replace an existing item.

104

105

Parameters:

106

- item: Item ID or item dictionary with 'id' field

107

- body: Updated item data

108

- populate_query_metrics: Include query metrics

109

- pre_trigger_include: Pre-trigger to execute

110

- post_trigger_include: Post-trigger to execute

111

- session_token: Session token for consistency

112

- etag: ETag for conditional operations

113

- match_condition: Match condition for conditional operations

114

115

Returns:

116

Replaced item as dictionary

117

118

Raises:

119

CosmosResourceNotFoundError: If item doesn't exist

120

CosmosAccessConditionFailedError: If conditional operation fails

121

"""

122

123

def patch_item(self, item: str, partition_key: str, patch_operations: list, filter_predicate: str = None, pre_trigger_include: str = None, post_trigger_include: str = None, **kwargs):

124

"""

125

Patch an item with specific operations.

126

127

Parameters:

128

- item: Item ID

129

- partition_key: Partition key value

130

- patch_operations: List of patch operations

131

- filter_predicate: Filter condition for patch

132

- pre_trigger_include: Pre-trigger to execute

133

- post_trigger_include: Post-trigger to execute

134

- session_token: Session token for consistency

135

- etag: ETag for conditional operations

136

- match_condition: Match condition for conditional operations

137

138

Returns:

139

Patched item as dictionary

140

"""

141

142

def delete_item(self, item: str, partition_key: str, populate_query_metrics: bool = None, pre_trigger_include: str = None, post_trigger_include: str = None, **kwargs):

143

"""

144

Delete an item.

145

146

Parameters:

147

- item: Item ID or item dictionary with 'id' field

148

- partition_key: Partition key value

149

- populate_query_metrics: Include query metrics

150

- pre_trigger_include: Pre-trigger to execute

151

- post_trigger_include: Post-trigger to execute

152

- session_token: Session token for consistency

153

- etag: ETag for conditional operations

154

- match_condition: Match condition for conditional operations

155

156

Raises:

157

CosmosResourceNotFoundError: If item doesn't exist

158

CosmosAccessConditionFailedError: If conditional operation fails

159

"""

160

161

def delete_all_items_by_partition_key(self, partition_key: str, **kwargs):

162

"""

163

Delete all items in a specific partition.

164

165

Parameters:

166

- partition_key: Partition key value

167

- session_token: Session token for consistency

168

169

Returns:

170

CosmosList with deletion results

171

"""

172

```

173

174

### Batch Operations

175

176

Execute multiple operations atomically within a single partition.

177

178

```python { .api }

179

def execute_item_batch(self, batch_operations: list, partition_key: str, **kwargs):

180

"""

181

Execute a batch of operations atomically within a partition.

182

183

Parameters:

184

- batch_operations: List of batch operation objects

185

- partition_key: Partition key value (all operations must use same partition)

186

- session_token: Session token for consistency

187

- enable_automatic_id_generation: Auto-generate IDs for create operations

188

189

Returns:

190

CosmosList with results for each operation

191

192

Raises:

193

CosmosBatchOperationError: If any operation in the batch fails

194

"""

195

```

196

197

### Querying Operations

198

199

Query items using SQL-like syntax with support for cross-partition queries.

200

201

```python { .api }

202

def query_items(self, query: str = None, parameters: list = None, partition_key: str = None, enable_cross_partition_query: bool = None, max_item_count: int = None, enable_scan_in_query: bool = None, populate_query_metrics: bool = None, **kwargs):

203

"""

204

Query items using SQL syntax.

205

206

Parameters:

207

- query: SQL query string

208

- parameters: Query parameters as [{"name": "@param", "value": value}]

209

- partition_key: Single partition key to query

210

- enable_cross_partition_query: Enable cross-partition queries

211

- max_item_count: Maximum items per page

212

- enable_scan_in_query: Enable scan operations

213

- populate_query_metrics: Include query metrics

214

- session_token: Session token for consistency

215

- initial_headers: Custom headers

216

- max_integrated_cache_staleness: Cache staleness tolerance

217

218

Returns:

219

Iterable of query results

220

"""

221

222

def read_all_items(self, max_item_count: int = None, populate_query_metrics: bool = None, **kwargs):

223

"""

224

Read all items in the container.

225

226

Parameters:

227

- max_item_count: Maximum items per page

228

- populate_query_metrics: Include query metrics

229

- session_token: Session token for consistency

230

- initial_headers: Custom headers

231

232

Returns:

233

Iterable of all items

234

"""

235

```

236

237

### Change Feed Operations

238

239

Monitor changes to items in the container using change feed.

240

241

```python { .api }

242

def query_items_change_feed(self, **kwargs):

243

"""

244

Query the change feed for item modifications.

245

246

Parameters:

247

- partition_key_range_id: Specific partition key range

248

- feed_range: Specific feed range

249

- is_start_from_beginning: Start from beginning of change feed

250

- continuation: Continuation token for pagination

251

- max_item_count: Maximum items per page

252

- start_time: Start time for change feed

253

- session_token: Session token for consistency

254

255

Returns:

256

Iterable of changed items with continuation token

257

"""

258

```

259

260

### Batch Operations

> NOTE(review): This section duplicates the earlier "Batch Operations" section, and its `execute_item_batch` docstring differs slightly (e.g. `initial_headers` vs `enable_automatic_id_generation`) — the two should be consolidated into a single authoritative entry.

261

262

Execute multiple operations atomically within a single partition.

263

264

```python { .api }

265

def execute_item_batch(self, batch_operations: list, partition_key: str, **kwargs):

266

"""

267

Execute a batch of operations atomically.

268

269

Parameters:

270

- batch_operations: List of batch operation dictionaries

271

- partition_key: Partition key value (all operations must be in same partition)

272

- session_token: Session token for consistency

273

- initial_headers: Custom headers

274

275

Returns:

276

CosmosList with batch operation results

277

278

Raises:

279

CosmosBatchOperationError: If batch operation fails

280

"""

281

```

282

283

### Feed Range Operations

284

285

Work with feed ranges for parallel processing and fine-grained control.

286

287

```python { .api }

288

def read_feed_ranges(self, **kwargs):

289

"""

290

Get feed ranges for the container.

291

292

Parameters:

293

- session_token: Session token for consistency

294

295

Returns:

296

List of feed range dictionaries

297

"""

298

299

def feed_range_from_partition_key(self, partition_key: str):

300

"""

301

Get feed range containing the specified partition key.

302

303

Parameters:

304

- partition_key: Partition key value

305

306

Returns:

307

Feed range dictionary

308

"""

309

310

def is_feed_range_subset(self, parent_feed_range: dict, child_feed_range: dict) -> bool:

311

"""

312

Check if child feed range is subset of parent feed range.

313

314

Parameters:

315

- parent_feed_range: Parent feed range

316

- child_feed_range: Child feed range

317

318

Returns:

319

True if child is subset of parent

320

"""

321

```

322

323

### Session Token Management

324

325

Manage session tokens for session consistency.

326

327

```python { .api }

328

def get_latest_session_token(self, partition_key: str, **kwargs) -> str:

329

"""

330

Get the latest session token for a partition.

331

332

Parameters:

333

- partition_key: Partition key value

334

335

Returns:

336

Session token string

337

"""

338

```

339

340

### Conflict Resolution

341

342

Handle conflicts in multi-region scenarios.

343

344

```python { .api }

345

def list_conflicts(self, max_item_count: int = None, **kwargs):

346

"""

347

List conflicts in the container.

348

349

Parameters:

350

- max_item_count: Maximum conflicts to return

351

- session_token: Session token for consistency

352

353

Returns:

354

Iterable of conflict items

355

"""

356

357

def query_conflicts(self, query: str, parameters: list = None, max_item_count: int = None, **kwargs):

358

"""

359

Query conflicts using SQL syntax.

360

361

Parameters:

362

- query: SQL query string

363

- parameters: Query parameters

364

- max_item_count: Maximum items per page

365

- session_token: Session token for consistency

366

367

Returns:

368

Iterable of query results

369

"""

370

371

def get_conflict(self, conflict: str, partition_key: str, **kwargs):

372

"""

373

Get a specific conflict.

374

375

Parameters:

376

- conflict: Conflict ID

377

- partition_key: Partition key value

378

- session_token: Session token for consistency

379

380

Returns:

381

Conflict item

382

"""

383

384

def delete_conflict(self, conflict: str, **kwargs):

385

"""

386

Delete a conflict.

387

388

Parameters:

389

- conflict: Conflict ID or conflict dictionary

390

- session_token: Session token for consistency

391

- etag: ETag for conditional operations

392

- match_condition: Match condition for conditional operations

393

"""

394

```

395

396

### Throughput Management

397

398

Manage container-level throughput and auto-scaling.

399

400

```python { .api }

401

def get_throughput(self, **kwargs) -> ThroughputProperties:

402

"""

403

Get current throughput settings for the container.

404

405

Parameters:

406

- session_token: Session token for consistency

407

408

Returns:

409

ThroughputProperties with current throughput configuration

410

411

Raises:

412

CosmosResourceNotFoundError: If throughput not configured

413

"""

414

415

def replace_throughput(self, throughput: ThroughputProperties, **kwargs):

416

"""

417

Replace throughput settings for the container.

418

419

Parameters:

420

- throughput: New throughput configuration

421

- session_token: Session token for consistency

422

423

Returns:

424

ThroughputProperties with updated configuration

425

"""

426

427

def read_offer(self, **kwargs):

428

"""

429

Read throughput offer (deprecated, use get_throughput).

430

431

Returns:

432

Offer properties

433

"""

434

```

435

436

## Usage Examples

437

438

### Basic Item Operations

439

440

```python

441

# Get container client

442

container = database.get_container_client("Products")

443

444

# Create an item

445

product = {

446

"id": "product1",

447

"name": "Laptop",

448

"category": "Electronics",

449

"price": 999.99,

450

"inStock": True

451

}

452

453

created_item = container.create_item(body=product)

454

print(f"Created item: {created_item['id']}")

455

456

# Read an item

457

item = container.read_item(item="product1", partition_key="Electronics")

458

print(f"Item name: {item['name']}")

459

460

# Update an item

461

item["price"] = 899.99

462

updated_item = container.replace_item(item=item["id"], body=item)

463

464

# Upsert (create or replace)

465

new_product = {

466

"id": "product2",

467

"name": "Mouse",

468

"category": "Electronics",

469

"price": 29.99

470

}

471

container.upsert_item(body=new_product)

472

473

# Delete an item

474

container.delete_item(item="product1", partition_key="Electronics")

475

```

476

477

### Patch Operations

478

479

```python

480

# Patch operations for partial updates

481

patch_ops = [

482

{"op": "replace", "path": "/price", "value": 799.99},

483

{"op": "add", "path": "/tags", "value": ["sale", "featured"]},

484

{"op": "remove", "path": "/oldField"}

485

]

486

487

patched_item = container.patch_item(

488

item="product2",

489

partition_key="Electronics",

490

patch_operations=patch_ops

491

)

492

```

493

494

### Querying

495

496

```python

497

# Simple query

498

items = list(container.query_items(

499

query="SELECT * FROM c WHERE c.category = 'Electronics'",

500

enable_cross_partition_query=True

501

))

502

503

# Parameterized query

504

items = list(container.query_items(

505

query="SELECT * FROM c WHERE c.price BETWEEN @min AND @max",

506

parameters=[

507

{"name": "@min", "value": 100},

508

{"name": "@max", "value": 1000}

509

],

510

enable_cross_partition_query=True

511

))

512

513

# Query specific partition

514

items = list(container.query_items(

515

query="SELECT * FROM c WHERE c.inStock = true",

516

partition_key="Electronics"

517

))

518

519

# Read all items with pagination

520

for item in container.read_all_items(max_item_count=100):

521

print(f"Item: {item['id']}")

522

```

523

524

### Batch Operations

525

526

```python

527

# Batch operations within same partition

528

batch_operations = [

529

{

530

"operation_type": "create",

531

"id": "batch1",

532

"resource_body": {"id": "batch1", "category": "Electronics", "name": "Item 1"}

533

},

534

{

535

"operation_type": "create",

536

"id": "batch2",

537

"resource_body": {"id": "batch2", "category": "Electronics", "name": "Item 2"}

538

},

539

{

540

"operation_type": "delete",

541

"id": "old_item",

542

"partition_key": "Electronics"

543

}

544

]

545

546

try:

547

results = container.execute_item_batch(

548

batch_operations=batch_operations,

549

partition_key="Electronics"

550

)

551

print(f"Batch completed successfully: {len(results)} operations")

552

except CosmosBatchOperationError as e:

553

print(f"Batch failed: {e}")

554

```

555

556

### Change Feed

557

558

```python

559

# Read change feed from beginning

560

changes = container.query_items_change_feed(is_start_from_beginning=True)

561

562

for change in changes:

563

if "_lsn" in change: # Not end of changes

564

print(f"Changed item: {change.get('id', 'N/A')}")

565

566

# Read change feed with continuation

567

continuation = None

568

while True:

569

changes = container.query_items_change_feed(

570

continuation=continuation,

571

max_item_count=10

572

)

573

574

items = list(changes)

575

if not items:

576

break

577

578

for item in items:

579

print(f"Change: {item['id']}")

580

581

# Get continuation for next batch

582

continuation = changes.get_continuation()

583

```

584

585

### Feed Ranges

586

587

```python

588

# Get feed ranges for parallel processing

589

feed_ranges = container.read_feed_ranges()

590

print(f"Container has {len(feed_ranges)} feed ranges")

591

592

# Process each feed range in parallel

593

import concurrent.futures

594

595

def process_feed_range(feed_range):

596

changes = container.query_items_change_feed(

597

feed_range=feed_range,

598

is_start_from_beginning=True

599

)

600

return list(changes)

601

602

with concurrent.futures.ThreadPoolExecutor() as executor:

603

futures = [executor.submit(process_feed_range, fr) for fr in feed_ranges]

604

605

for future in concurrent.futures.as_completed(futures):

606

changes = future.result()

607

print(f"Processed {len(changes)} changes from feed range")

608

```