# Workload Resources (apps/v1)

The Apps API group provides high-level controllers for managing application deployments, scaling, and lifecycle operations. These resources offer declarative management of containerized applications with advanced deployment strategies.

## Package Import

```typescript { .api }
import { apps } from "@pulumi/kubernetes";
import * as k8s from "@pulumi/kubernetes";

// Direct apps imports
import { Deployment, StatefulSet, DaemonSet, ReplicaSet } from "@pulumi/kubernetes/apps/v1";
```

## Deployment

Deployment provides declarative updates for Pods and ReplicaSets with advanced deployment strategies like rolling updates and rollbacks.

```typescript { .api }
class Deployment extends pulumi.CustomResource {
  constructor(name: string, args?: DeploymentArgs, opts?: pulumi.CustomResourceOptions)

  public static get(name: string, id: pulumi.Input<pulumi.ID>, opts?: pulumi.CustomResourceOptions): Deployment

  // Output properties
  public readonly apiVersion!: pulumi.Output<"apps/v1">;
  public readonly kind!: pulumi.Output<"Deployment">;
  public readonly metadata!: pulumi.Output<outputs.meta.v1.ObjectMeta>;
  public readonly spec!: pulumi.Output<outputs.apps.v1.DeploymentSpec>;
  public readonly status!: pulumi.Output<outputs.apps.v1.DeploymentStatus>;
}

interface DeploymentArgs {
  apiVersion?: pulumi.Input<"apps/v1">;
  kind?: pulumi.Input<"Deployment">;
  metadata?: pulumi.Input<inputs.meta.v1.ObjectMeta>;
  spec?: pulumi.Input<inputs.apps.v1.DeploymentSpec>;
}
```

### Deployment Lifecycle Management

The Deployment resource waits for the following conditions before marking as ready:

1. The Deployment is available (Available condition is true)
2. The desired number of replicas are updated and available
3. No old ReplicaSets are running

Default timeout is 10 minutes, configurable via `customTimeouts`.

### Deployment Strategies

```typescript { .api }
// Deployment strategy types
interface DeploymentStrategy {
  type?: "Recreate" | "RollingUpdate";
  rollingUpdate?: {
    maxSurge?: pulumi.Input<number | string>;
    maxUnavailable?: pulumi.Input<number | string>;
  };
}
```

61

62

### Deployment Usage Examples

63

64

```typescript { .api }

65

// Basic deployment with rolling updates

66

const webDeployment = new k8s.apps.v1.Deployment("web-app", {

67

spec: {

68

replicas: 3,

69

selector: {

70

matchLabels: {

71

app: "web-app",

72

},

73

},

74

template: {

75

metadata: {

76

labels: {

77

app: "web-app",

78

version: "v1.0",

79

},

80

},

81

spec: {

82

containers: [{

83

name: "web-server",

84

image: "nginx:1.21",

85

ports: [{

86

containerPort: 80,

87

}],

88

resources: {

89

requests: {

90

cpu: "100m",

91

memory: "128Mi",

92

},

93

limits: {

94

cpu: "500m",

95

memory: "512Mi",

96

},

97

},

98

}],

99

},

100

},

101

strategy: {

102

type: "RollingUpdate",

103

rollingUpdate: {

104

maxSurge: 1,

105

maxUnavailable: 1,

106

},

107

},

108

},

109

});

110

111

// Deployment with advanced configuration

112

const apiDeployment = new k8s.apps.v1.Deployment("api-server", {

113

spec: {

114

replicas: 5,

115

selector: {

116

matchLabels: {

117

app: "api-server",

118

tier: "backend",

119

},

120

},

121

template: {

122

metadata: {

123

labels: {

124

app: "api-server",

125

tier: "backend",

126

version: "v2.1",

127

},

128

annotations: {

129

"prometheus.io/scrape": "true",

130

"prometheus.io/port": "8080",

131

},

132

},

133

spec: {

134

containers: [{

135

name: "api",

136

image: "myapp/api:v2.1.0",

137

ports: [{

138

containerPort: 8080,

139

name: "http",

140

}, {

141

containerPort: 9090,

142

name: "metrics",

143

}],

144

env: [{

145

name: "DATABASE_URL",

146

valueFrom: {

147

secretKeyRef: {

148

name: "db-credentials",

149

key: "url",

150

},

151

},

152

}, {

153

name: "REDIS_HOST",

154

valueFrom: {

155

configMapKeyRef: {

156

name: "app-config",

157

key: "redis.host",

158

},

159

},

160

}],

161

livenessProbe: {

162

httpGet: {

163

path: "/health",

164

port: 8080,

165

},

166

initialDelaySeconds: 30,

167

periodSeconds: 10,

168

},

169

readinessProbe: {

170

httpGet: {

171

path: "/ready",

172

port: 8080,

173

},

174

initialDelaySeconds: 5,

175

periodSeconds: 5,

176

},

177

}],

178

serviceAccountName: "api-service-account",

179

},

180

},

181

strategy: {

182

type: "RollingUpdate",

183

rollingUpdate: {

184

maxSurge: "25%",

185

maxUnavailable: "25%",

186

},

187

},

188

},

189

});

190

191

// Blue-Green deployment using Recreate strategy

192

const blueGreenDeployment = new k8s.apps.v1.Deployment("blue-green-app", {

193

spec: {

194

replicas: 3,

195

selector: {

196

matchLabels: {

197

app: "blue-green-app",

198

},

199

},

200

template: {

201

metadata: {

202

labels: {

203

app: "blue-green-app",

204

},

205

},

206

spec: {

207

containers: [{

208

name: "app",

209

image: "myapp:blue",

210

ports: [{

211

containerPort: 8080,

212

}],

213

}],

214

},

215

},

216

strategy: {

217

type: "Recreate", // All pods terminated before new ones created

218

},

219

},

220

});

221

```

222

223

## StatefulSet

224

225

StatefulSet manages the deployment and scaling of a set of Pods with persistent identity and storage.

226

227

```typescript { .api }

228

class StatefulSet extends pulumi.CustomResource {

229

constructor(name: string, args?: StatefulSetArgs, opts?: pulumi.CustomResourceOptions)

230

231

public static get(name: string, id: pulumi.Input<pulumi.ID>, opts?: pulumi.CustomResourceOptions): StatefulSet

232

233

// Output properties

234

public readonly apiVersion!: pulumi.Output<"apps/v1">;

235

public readonly kind!: pulumi.Output<"StatefulSet">;

236

public readonly metadata!: pulumi.Output<outputs.meta.v1.ObjectMeta>;

237

public readonly spec!: pulumi.Output<outputs.apps.v1.StatefulSetSpec>;

238

public readonly status!: pulumi.Output<outputs.apps.v1.StatefulSetStatus>;

239

}

240

241

interface StatefulSetArgs {

242

apiVersion?: pulumi.Input<"apps/v1">;

243

kind?: pulumi.Input<"StatefulSet">;

244

metadata?: pulumi.Input<inputs.meta.v1.ObjectMeta>;

245

spec?: pulumi.Input<inputs.apps.v1.StatefulSetSpec>;

246

}

247

```

248

249

### StatefulSet Features

250

251

- **Ordered Deployment**: Pods are created sequentially (web-0, web-1, web-2)

252

- **Stable Network Identity**: Each pod gets a predictable hostname

253

- **Persistent Storage**: Each pod can have its own persistent volume

254

- **Ordered Termination**: Pods are terminated in reverse order

255

256

### StatefulSet Usage Examples

257

258

```typescript { .api }

259

// Database cluster StatefulSet

260

const database = new k8s.apps.v1.StatefulSet("postgres-cluster", {

261

spec: {

262

serviceName: "postgres-headless", // Headless service for stable DNS

263

replicas: 3,

264

selector: {

265

matchLabels: {

266

app: "postgres",

267

},

268

},

269

template: {

270

metadata: {

271

labels: {

272

app: "postgres",

273

},

274

},

275

spec: {

276

containers: [{

277

name: "postgres",

278

image: "postgres:13",

279

env: [{

280

name: "POSTGRES_DB",

281

value: "mydb",

282

}, {

283

name: "POSTGRES_USER",

284

valueFrom: {

285

secretKeyRef: {

286

name: "postgres-secret",

287

key: "username",

288

},

289

},

290

}, {

291

name: "POSTGRES_PASSWORD",

292

valueFrom: {

293

secretKeyRef: {

294

name: "postgres-secret",

295

key: "password",

296

},

297

},

298

}],

299

ports: [{

300

containerPort: 5432,

301

name: "postgres",

302

}],

303

volumeMounts: [{

304

name: "postgres-storage",

305

mountPath: "/var/lib/postgresql/data",

306

}],

307

}],

308

},

309

},

310

volumeClaimTemplates: [{

311

metadata: {

312

name: "postgres-storage",

313

},

314

spec: {

315

accessModes: ["ReadWriteOnce"],

316

storageClassName: "fast-ssd",

317

resources: {

318

requests: {

319

storage: "20Gi",

320

},

321

},

322

},

323

}],

324

podManagementPolicy: "OrderedReady", // or "Parallel"

325

updateStrategy: {

326

type: "RollingUpdate",

327

rollingUpdate: {

328

partition: 0, // Update all pods

329

},

330

},

331

},

332

});

333

334

// Redis cluster StatefulSet

335

const redisCluster = new k8s.apps.v1.StatefulSet("redis-cluster", {

336

spec: {

337

serviceName: "redis-cluster-headless",

338

replicas: 6, // 3 masters + 3 replicas

339

selector: {

340

matchLabels: {

341

app: "redis-cluster",

342

},

343

},

344

template: {

345

metadata: {

346

labels: {

347

app: "redis-cluster",

348

},

349

},

350

spec: {

351

containers: [{

352

name: "redis",

353

image: "redis:7-alpine",

354

command: ["redis-server"],

355

args: [

356

"/etc/redis/redis.conf",

357

"--cluster-enabled", "yes",

358

"--cluster-config-file", "/data/nodes.conf",

359

"--cluster-node-timeout", "5000",

360

"--appendonly", "yes",

361

],

362

ports: [{

363

containerPort: 6379,

364

name: "redis",

365

}, {

366

containerPort: 16379,

367

name: "cluster",

368

}],

369

volumeMounts: [{

370

name: "redis-data",

371

mountPath: "/data",

372

}, {

373

name: "redis-config",

374

mountPath: "/etc/redis",

375

}],

376

}],

377

volumes: [{

378

name: "redis-config",

379

configMap: {

380

name: "redis-config",

381

},

382

}],

383

},

384

},

385

volumeClaimTemplates: [{

386

metadata: {

387

name: "redis-data",

388

},

389

spec: {

390

accessModes: ["ReadWriteOnce"],

391

resources: {

392

requests: {

393

storage: "10Gi",

394

},

395

},

396

},

397

}],

398

},

399

});

400

```

401

402

## DaemonSet

403

404

DaemonSet ensures that all (or some) Nodes run a copy of a Pod. Typically used for cluster-level services.

405

406

```typescript { .api }

407

class DaemonSet extends pulumi.CustomResource {

408

constructor(name: string, args?: DaemonSetArgs, opts?: pulumi.CustomResourceOptions)

409

410

public static get(name: string, id: pulumi.Input<pulumi.ID>, opts?: pulumi.CustomResourceOptions): DaemonSet

411

412

// Output properties

413

public readonly apiVersion!: pulumi.Output<"apps/v1">;

414

public readonly kind!: pulumi.Output<"DaemonSet">;

415

public readonly metadata!: pulumi.Output<outputs.meta.v1.ObjectMeta>;

416

public readonly spec!: pulumi.Output<outputs.apps.v1.DaemonSetSpec>;

417

public readonly status!: pulumi.Output<outputs.apps.v1.DaemonSetStatus>;

418

}

419

420

interface DaemonSetArgs {

421

apiVersion?: pulumi.Input<"apps/v1">;

422

kind?: pulumi.Input<"DaemonSet">;

423

metadata?: pulumi.Input<inputs.meta.v1.ObjectMeta>;

424

spec?: pulumi.Input<inputs.apps.v1.DaemonSetSpec>;

425

}

426

```

427

428

### DaemonSet Usage Examples

429

430

```typescript { .api }

431

// Logging agent DaemonSet

432

const loggingAgent = new k8s.apps.v1.DaemonSet("fluentd-logger", {

433

spec: {

434

selector: {

435

matchLabels: {

436

app: "fluentd-logger",

437

},

438

},

439

template: {

440

metadata: {

441

labels: {

442

app: "fluentd-logger",

443

},

444

},

445

spec: {

446

serviceAccount: "fluentd",

447

tolerations: [

448

// Allow scheduling on master nodes

449

{

450

key: "node-role.kubernetes.io/master",

451

effect: "NoSchedule",

452

},

453

// Allow scheduling on nodes with any taint

454

{

455

operator: "Exists",

456

effect: "NoSchedule",

457

},

458

],

459

containers: [{

460

name: "fluentd",

461

image: "fluent/fluentd-kubernetes-daemonset:v1-debian-elasticsearch",

462

env: [{

463

name: "FLUENT_ELASTICSEARCH_HOST",

464

value: "elasticsearch.logging.svc.cluster.local",

465

}, {

466

name: "FLUENT_ELASTICSEARCH_PORT",

467

value: "9200",

468

}],

469

resources: {

470

requests: {

471

cpu: "100m",

472

memory: "200Mi",

473

},

474

limits: {

475

cpu: "200m",

476

memory: "400Mi",

477

},

478

},

479

volumeMounts: [{

480

name: "varlog",

481

mountPath: "/var/log",

482

readOnly: true,

483

}, {

484

name: "dockercontainers",

485

mountPath: "/var/lib/docker/containers",

486

readOnly: true,

487

}],

488

}],

489

volumes: [{

490

name: "varlog",

491

hostPath: {

492

path: "/var/log",

493

},

494

}, {

495

name: "dockercontainers",

496

hostPath: {

497

path: "/var/lib/docker/containers",

498

},

499

}],

500

hostNetwork: true, // Use host networking

501

dnsPolicy: "ClusterFirstWithHostNet",

502

},

503

},

504

updateStrategy: {

505

type: "RollingUpdate",

506

rollingUpdate: {

507

maxUnavailable: 1,

508

},

509

},

510

},

511

});

512

513

// Monitoring agent DaemonSet with node selector

514

const nodeExporter = new k8s.apps.v1.DaemonSet("node-exporter", {

515

spec: {

516

selector: {

517

matchLabels: {

518

app: "node-exporter",

519

},

520

},

521

template: {

522

metadata: {

523

labels: {

524

app: "node-exporter",

525

},

526

annotations: {

527

"prometheus.io/scrape": "true",

528

"prometheus.io/port": "9100",

529

},

530

},

531

spec: {

532

nodeSelector: {

533

"kubernetes.io/os": "linux", // Only run on Linux nodes

534

},

535

containers: [{

536

name: "node-exporter",

537

image: "prom/node-exporter:v1.3.1",

538

args: [

539

"--path.procfs=/host/proc",

540

"--path.sysfs=/host/sys",

541

"--path.rootfs=/host/root",

542

"--collector.filesystem.mount-points-exclude",

543

"^/(sys|proc|dev|host|etc|rootfs/var/lib/docker/containers|rootfs/var/lib/docker/overlay2|rootfs/run/docker/netns|rootfs/var/lib/docker/aufs)($$|/)",

544

],

545

ports: [{

546

name: "metrics",

547

containerPort: 9100,

548

hostPort: 9100,

549

}],

550

volumeMounts: [{

551

name: "proc",

552

mountPath: "/host/proc",

553

readOnly: true,

554

}, {

555

name: "sys",

556

mountPath: "/host/sys",

557

readOnly: true,

558

}, {

559

name: "root",

560

mountPath: "/host/root",

561

readOnly: true,

562

}],

563

}],

564

volumes: [{

565

name: "proc",

566

hostPath: {

567

path: "/proc",

568

},

569

}, {

570

name: "sys",

571

hostPath: {

572

path: "/sys",

573

},

574

}, {

575

name: "root",

576

hostPath: {

577

path: "/",

578

},

579

}],

580

hostNetwork: true,

581

hostPID: true,

582

},

583

},

584

},

585

});

586

```

587

588

## ReplicaSet

589

590

ReplicaSet maintains a stable set of replica Pods running at any given time. Usually managed by Deployments.

591

592

```typescript { .api }

593

class ReplicaSet extends pulumi.CustomResource {

594

constructor(name: string, args?: ReplicaSetArgs, opts?: pulumi.CustomResourceOptions)

595

596

public static get(name: string, id: pulumi.Input<pulumi.ID>, opts?: pulumi.CustomResourceOptions): ReplicaSet

597

598

// Output properties

599

public readonly apiVersion!: pulumi.Output<"apps/v1">;

600

public readonly kind!: pulumi.Output<"ReplicaSet">;

601

public readonly metadata!: pulumi.Output<outputs.meta.v1.ObjectMeta>;

602

public readonly spec!: pulumi.Output<outputs.apps.v1.ReplicaSetSpec>;

603

public readonly status!: pulumi.Output<outputs.apps.v1.ReplicaSetStatus>;

604

}

605

606

interface ReplicaSetArgs {

607

apiVersion?: pulumi.Input<"apps/v1">;

608

kind?: pulumi.Input<"ReplicaSet">;

609

metadata?: pulumi.Input<inputs.meta.v1.ObjectMeta>;

610

spec?: pulumi.Input<inputs.apps.v1.ReplicaSetSpec>;

611

}

612

```

613

614

### ReplicaSet Usage Examples

615

616

```typescript { .api }

617

// Standalone ReplicaSet (rarely used directly)

618

const webReplicaSet = new k8s.apps.v1.ReplicaSet("web-rs", {

619

spec: {

620

replicas: 3,

621

selector: {

622

matchLabels: {

623

app: "web",

624

tier: "frontend",

625

},

626

},

627

template: {

628

metadata: {

629

labels: {

630

app: "web",

631

tier: "frontend",

632

},

633

},

634

spec: {

635

containers: [{

636

name: "nginx",

637

image: "nginx:1.21",

638

ports: [{

639

containerPort: 80,

640

}],

641

}],

642

},

643

},

644

},

645

});

646

```

647

648

## Batch Workloads (batch/v1)

649

650

### Job

651

652

Job creates one or more Pods and ensures that a specified number of them successfully terminate.

653

654

```typescript { .api }

655

class Job extends pulumi.CustomResource {

656

constructor(name: string, args?: JobArgs, opts?: pulumi.CustomResourceOptions)

657

658

public static get(name: string, id: pulumi.Input<pulumi.ID>, opts?: pulumi.CustomResourceOptions): Job

659

660

// Output properties

661

public readonly apiVersion!: pulumi.Output<"batch/v1">;

662

public readonly kind!: pulumi.Output<"Job">;

663

public readonly metadata!: pulumi.Output<outputs.meta.v1.ObjectMeta>;

664

public readonly spec!: pulumi.Output<outputs.batch.v1.JobSpec>;

665

public readonly status!: pulumi.Output<outputs.batch.v1.JobStatus>;

666

}

667

668

interface JobArgs {

669

apiVersion?: pulumi.Input<"batch/v1">;

670

kind?: pulumi.Input<"Job">;

671

metadata?: pulumi.Input<inputs.meta.v1.ObjectMeta>;

672

spec?: pulumi.Input<inputs.batch.v1.JobSpec>;

673

}

674

```

675

676

### Job Usage Examples

677

678

```typescript { .api }

679

// Database migration job

680

const migrationJob = new k8s.batch.v1.Job("db-migration", {

681

spec: {

682

template: {

683

spec: {

684

containers: [{

685

name: "migrate",

686

image: "myapp/migration:v1.0",

687

command: ["./migrate"],

688

args: ["--up"],

689

env: [{

690

name: "DATABASE_URL",

691

valueFrom: {

692

secretKeyRef: {

693

name: "db-credentials",

694

key: "url",

695

},

696

},

697

}],

698

}],

699

restartPolicy: "OnFailure",

700

},

701

},

702

backoffLimit: 3, // Retry up to 3 times

703

completions: 1, // Only need 1 successful completion

704

parallelism: 1, // Run 1 pod at a time

705

},

706

});

707

708

// Parallel processing job

709

const dataProcessingJob = new k8s.batch.v1.Job("data-processing", {

710

spec: {

711

template: {

712

spec: {

713

containers: [{

714

name: "processor",

715

image: "myapp/data-processor:v2.1",

716

env: [{

717

name: "WORKER_ID",

718

valueFrom: {

719

fieldRef: {

720

fieldPath: "metadata.name",

721

},

722

},

723

}],

724

resources: {

725

requests: {

726

cpu: "500m",

727

memory: "1Gi",

728

},

729

},

730

}],

731

restartPolicy: "OnFailure",

732

},

733

},

734

completions: 10, // Need 10 successful completions

735

parallelism: 3, // Run up to 3 pods in parallel

736

backoffLimit: 6, // Allow retries

737

activeDeadlineSeconds: 3600, // Timeout after 1 hour

738

},

739

});

740

```

741

742

### CronJob

743

744

CronJob creates Jobs on a repeating schedule.

745

746

```typescript { .api }

747

class CronJob extends pulumi.CustomResource {

748

constructor(name: string, args?: CronJobArgs, opts?: pulumi.CustomResourceOptions)

749

750

public static get(name: string, id: pulumi.Input<pulumi.ID>, opts?: pulumi.CustomResourceOptions): CronJob

751

752

// Output properties

753

public readonly apiVersion!: pulumi.Output<"batch/v1">;

754

public readonly kind!: pulumi.Output<"CronJob">;

755

public readonly metadata!: pulumi.Output<outputs.meta.v1.ObjectMeta>;

756

public readonly spec!: pulumi.Output<outputs.batch.v1.CronJobSpec>;

757

public readonly status!: pulumi.Output<outputs.batch.v1.CronJobStatus>;

758

}

759

760

interface CronJobArgs {

761

apiVersion?: pulumi.Input<"batch/v1">;

762

kind?: pulumi.Input<"CronJob">;

763

metadata?: pulumi.Input<inputs.meta.v1.ObjectMeta>;

764

spec?: pulumi.Input<inputs.batch.v1.CronJobSpec>;

765

}

766

```

767

768

### CronJob Usage Examples

769

770

```typescript { .api }

771

// Daily backup job

772

const backupCronJob = new k8s.batch.v1.CronJob("daily-backup", {

773

spec: {

774

schedule: "0 2 * * *", // Daily at 2 AM

775

jobTemplate: {

776

spec: {

777

template: {

778

spec: {

779

containers: [{

780

name: "backup",

781

image: "myapp/backup:latest",

782

command: ["./backup.sh"],

783

env: [{

784

name: "BACKUP_TARGET",

785

value: "s3://my-backups/daily",

786

}, {

787

name: "AWS_ACCESS_KEY_ID",

788

valueFrom: {

789

secretKeyRef: {

790

name: "aws-credentials",

791

key: "access-key-id",

792

},

793

},

794

}],

795

volumeMounts: [{

796

name: "data",

797

mountPath: "/data",

798

readOnly: true,

799

}],

800

}],

801

volumes: [{

802

name: "data",

803

persistentVolumeClaim: {

804

claimName: "app-data",

805

},

806

}],

807

restartPolicy: "OnFailure",

808

},

809

},

810

},

811

},

812

successfulJobsHistoryLimit: 3,

813

failedJobsHistoryLimit: 1,

814

concurrencyPolicy: "Forbid", // Don't allow concurrent jobs

815

},

816

});

817

818

// Log cleanup job

819

const logCleanupJob = new k8s.batch.v1.CronJob("log-cleanup", {

820

spec: {

821

schedule: "0 0 * * 0", // Weekly on Sunday at midnight

822

jobTemplate: {

823

spec: {

824

template: {

825

spec: {

826

containers: [{

827

name: "cleanup",

828

image: "busybox",

829

command: ["sh"],

830

args: ["-c", "find /logs -name '*.log' -mtime +7 -delete"],

831

volumeMounts: [{

832

name: "logs",

833

mountPath: "/logs",

834

}],

835

}],

836

volumes: [{

837

name: "logs",

838

hostPath: {

839

path: "/var/log/app",

840

},

841

}],

842

restartPolicy: "OnFailure",

843

},

844

},

845

},

846

},

847

concurrencyPolicy: "Replace", // Replace if previous job still running

848

},

849

});

850

```

851

852

## Advanced Workload Patterns

853

854

### Multi-Container Pods

855

856

```typescript { .api }

857

// Sidecar pattern with main app and logging agent

858

const sidecarDeployment = new k8s.apps.v1.Deployment("sidecar-app", {

859

spec: {

860

replicas: 2,

861

selector: {

862

matchLabels: {

863

app: "sidecar-app",

864

},

865

},

866

template: {

867

metadata: {

868

labels: {

869

app: "sidecar-app",

870

},

871

},

872

spec: {

873

containers: [

874

// Main application container

875

{

876

name: "app",

877

image: "myapp:v1.0",

878

ports: [{

879

containerPort: 8080,

880

}],

881

volumeMounts: [{

882

name: "shared-logs",

883

mountPath: "/var/log/app",

884

}],

885

},

886

// Sidecar logging container

887

{

888

name: "log-shipper",

889

image: "fluent/fluent-bit:1.8",

890

volumeMounts: [{

891

name: "shared-logs",

892

mountPath: "/var/log/app",

893

readOnly: true,

894

}, {

895

name: "fluent-config",

896

mountPath: "/fluent-bit/etc",

897

}],

898

},

899

],

900

volumes: [{

901

name: "shared-logs",

902

emptyDir: {},

903

}, {

904

name: "fluent-config",

905

configMap: {

906

name: "fluent-config",

907

},

908

}],

909

},

910

},

911

},

912

});

913

```

## Resource Variants

All workload resources include the following variants:

### List Resources

- `DeploymentList`, `StatefulSetList`, `DaemonSetList`, `ReplicaSetList`, `JobList`, `CronJobList`

### Patch Resources

- `DeploymentPatch`, `StatefulSetPatch`, `DaemonSetPatch`, `ReplicaSetPatch`, `JobPatch`, `CronJobPatch`

```typescript { .api }
// Example patch operation for deployment
const deploymentPatch = new k8s.apps.v1.DeploymentPatch("update-deployment", {
  metadata: {
    name: "existing-deployment",
  },
  spec: {
    replicas: 5, // Scale to 5 replicas
    template: {
      spec: {
        containers: [{
          name: "app",
          image: "myapp:v2.0", // Update image
        }],
      },
    },
  },
});
```

The Apps API group provides powerful declarative workload management with intelligent lifecycle handling, making it easy to deploy, scale, and manage containerized applications with confidence.