A comprehensive Pulumi resource provider for creating and managing Kubernetes resources and workloads in a running cluster
—
Quality
Pending
Does it follow best practices?
Impact
Pending
No eval scenarios have been run
The Apps API group provides high-level controllers for managing application deployments, scaling, and lifecycle operations. These resources offer declarative management of containerized applications with advanced deployment strategies.
import { apps } from "@pulumi/kubernetes";
import * as k8s from "@pulumi/kubernetes";
// Direct apps imports
import { Deployment, StatefulSet, DaemonSet, ReplicaSet } from "@pulumi/kubernetes/apps/v1";

Deployment provides declarative updates for Pods and ReplicaSets, with advanced deployment strategies such as rolling updates and rollbacks.
// API reference: apps/v1 Deployment resource (constructor, lookup, and outputs).
class Deployment extends pulumi.CustomResource {
constructor(name: string, args?: DeploymentArgs, opts?: pulumi.CustomResourceOptions)
// Looks up an existing Deployment by name and provider-assigned ID.
public static get(name: string, id: pulumi.Input<pulumi.ID>, opts?: pulumi.CustomResourceOptions): Deployment
// Output properties
// Populated by the cluster after the resource is created.
public readonly apiVersion!: pulumi.Output<"apps/v1">;
public readonly kind!: pulumi.Output<"Deployment">;
public readonly metadata!: pulumi.Output<outputs.meta.v1.ObjectMeta>;
public readonly spec!: pulumi.Output<outputs.apps.v1.DeploymentSpec>;
public readonly status!: pulumi.Output<outputs.apps.v1.DeploymentStatus>;
}
// Constructor arguments; all fields are optional (apiVersion/kind are fixed literal types).
interface DeploymentArgs {
apiVersion?: pulumi.Input<"apps/v1">;
kind?: pulumi.Input<"Deployment">;
metadata?: pulumi.Input<inputs.meta.v1.ObjectMeta>;
spec?: pulumi.Input<inputs.apps.v1.DeploymentSpec>;
}The Deployment resource waits for the following conditions before marking as ready:
A Deployment is marked ready once its rollout completes: the controller has observed the latest generation, the updated replica count matches the desired count, and the updated replicas are available. The default timeout is 10 minutes, configurable via customTimeouts.
// Deployment strategy types
// "Recreate" terminates all existing pods before creating new ones;
// "RollingUpdate" replaces pods incrementally, bounded by maxSurge and
// maxUnavailable (either an absolute count or a percentage string).
interface DeploymentStrategy {
type?: "Recreate" | "RollingUpdate";
rollingUpdate?: {
maxSurge?: pulumi.Input<number | string>;
maxUnavailable?: pulumi.Input<number | string>;
};
}// Basic deployment with rolling updates
// Basic web-server Deployment: three nginx replicas rolled out one at a time.
const webAppLabels = { app: "web-app" };

const webDeployment = new k8s.apps.v1.Deployment("web-app", {
    spec: {
        replicas: 3,
        // The selector must match the pod template labels below.
        selector: { matchLabels: webAppLabels },
        strategy: {
            type: "RollingUpdate",
            // Allow one extra pod during the rollout and one pod down at a time.
            rollingUpdate: { maxSurge: 1, maxUnavailable: 1 },
        },
        template: {
            metadata: {
                labels: { ...webAppLabels, version: "v1.0" },
            },
            spec: {
                containers: [{
                    name: "web-server",
                    image: "nginx:1.21",
                    ports: [{ containerPort: 80 }],
                    // Requests drive scheduling; limits cap runtime usage.
                    resources: {
                        requests: { cpu: "100m", memory: "128Mi" },
                        limits: { cpu: "500m", memory: "512Mi" },
                    },
                }],
            },
        },
    },
});
// Deployment with advanced configuration
// API server: 5 replicas with Prometheus scrape annotations, Secret/ConfigMap
// driven environment, and HTTP liveness/readiness probes.
const apiDeployment = new k8s.apps.v1.Deployment("api-server", {
    spec: {
        replicas: 5,
        selector: {
            matchLabels: {
                app: "api-server",
                tier: "backend",
            },
        },
        template: {
            metadata: {
                labels: {
                    app: "api-server",
                    tier: "backend",
                    version: "v2.1",
                },
                annotations: {
                    "prometheus.io/scrape": "true",
                    // FIX: point the scraper at the dedicated metrics port
                    // (9090, named "metrics" below) rather than the application
                    // port 8080, which serves /health and /ready instead.
                    "prometheus.io/port": "9090",
                },
            },
            spec: {
                containers: [{
                    name: "api",
                    image: "myapp/api:v2.1.0",
                    ports: [{
                        containerPort: 8080,
                        name: "http",
                    }, {
                        containerPort: 9090,
                        name: "metrics",
                    }],
                    // Credentials come from a Secret; connection settings from a ConfigMap.
                    env: [{
                        name: "DATABASE_URL",
                        valueFrom: {
                            secretKeyRef: {
                                name: "db-credentials",
                                key: "url",
                            },
                        },
                    }, {
                        name: "REDIS_HOST",
                        valueFrom: {
                            configMapKeyRef: {
                                name: "app-config",
                                key: "redis.host",
                            },
                        },
                    }],
                    // Restart the container if /health stops responding.
                    livenessProbe: {
                        httpGet: {
                            path: "/health",
                            port: 8080,
                        },
                        initialDelaySeconds: 30,
                        periodSeconds: 10,
                    },
                    // Only route traffic once /ready reports success.
                    readinessProbe: {
                        httpGet: {
                            path: "/ready",
                            port: 8080,
                        },
                        initialDelaySeconds: 5,
                        periodSeconds: 5,
                    },
                }],
                serviceAccountName: "api-service-account",
            },
        },
        // Replace at most 25% of pods at a time, keeping at least 75% available.
        strategy: {
            type: "RollingUpdate",
            rollingUpdate: {
                maxSurge: "25%",
                maxUnavailable: "25%",
            },
        },
    },
});
// Recreate-strategy deployment: every old pod is terminated before any new pod
// starts, so each rollout incurs a brief outage.
// NOTE(review): despite the resource name, Recreate is not true blue-green;
// blue-green needs two parallel Deployments plus a Service selector switch.
const blueGreenDeployment = new k8s.apps.v1.Deployment("blue-green-app", {
spec: {
replicas: 3,
selector: {
matchLabels: {
app: "blue-green-app",
},
},
template: {
metadata: {
labels: {
app: "blue-green-app",
},
},
spec: {
containers: [{
name: "app",
image: "myapp:blue",
ports: [{
containerPort: 8080,
}],
}],
},
},
strategy: {
type: "Recreate", // All pods terminated before new ones created
},
},
});StatefulSet manages the deployment and scaling of a set of Pods with persistent identity and storage.
// API reference: apps/v1 StatefulSet resource (constructor, lookup, and outputs).
class StatefulSet extends pulumi.CustomResource {
constructor(name: string, args?: StatefulSetArgs, opts?: pulumi.CustomResourceOptions)
// Looks up an existing StatefulSet by name and provider-assigned ID.
public static get(name: string, id: pulumi.Input<pulumi.ID>, opts?: pulumi.CustomResourceOptions): StatefulSet
// Output properties
// Populated by the cluster after the resource is created.
public readonly apiVersion!: pulumi.Output<"apps/v1">;
public readonly kind!: pulumi.Output<"StatefulSet">;
public readonly metadata!: pulumi.Output<outputs.meta.v1.ObjectMeta>;
public readonly spec!: pulumi.Output<outputs.apps.v1.StatefulSetSpec>;
public readonly status!: pulumi.Output<outputs.apps.v1.StatefulSetStatus>;
}
// Constructor arguments; all fields are optional.
interface StatefulSetArgs {
apiVersion?: pulumi.Input<"apps/v1">;
kind?: pulumi.Input<"StatefulSet">;
metadata?: pulumi.Input<inputs.meta.v1.ObjectMeta>;
spec?: pulumi.Input<inputs.apps.v1.StatefulSetSpec>;
}// Database cluster StatefulSet
// PostgreSQL StatefulSet: three ordered replicas with stable DNS identities
// (via the headless service) and a dedicated 20Gi PVC per pod.
const database = new k8s.apps.v1.StatefulSet("postgres-cluster", {
spec: {
serviceName: "postgres-headless", // Headless service for stable DNS
replicas: 3,
selector: {
matchLabels: {
app: "postgres",
},
},
template: {
metadata: {
labels: {
app: "postgres",
},
},
spec: {
containers: [{
name: "postgres",
image: "postgres:13",
// Database name is inline; user and password are injected from a Secret.
env: [{
name: "POSTGRES_DB",
value: "mydb",
}, {
name: "POSTGRES_USER",
valueFrom: {
secretKeyRef: {
name: "postgres-secret",
key: "username",
},
},
}, {
name: "POSTGRES_PASSWORD",
valueFrom: {
secretKeyRef: {
name: "postgres-secret",
key: "password",
},
},
}],
ports: [{
containerPort: 5432,
name: "postgres",
}],
// NOTE(review): mounting the PVC root straight at the data directory can
// trip initdb on volumes containing lost+found; setting PGDATA to a
// subdirectory is the usual workaround — verify for your storage class.
volumeMounts: [{
name: "postgres-storage",
mountPath: "/var/lib/postgresql/data",
}],
}],
},
},
// Each replica gets its own PersistentVolumeClaim stamped from this template.
volumeClaimTemplates: [{
metadata: {
name: "postgres-storage",
},
spec: {
accessModes: ["ReadWriteOnce"],
storageClassName: "fast-ssd",
resources: {
requests: {
storage: "20Gi",
},
},
},
}],
podManagementPolicy: "OrderedReady", // or "Parallel"
// A non-zero partition would leave pods with ordinal < partition on the old revision.
updateStrategy: {
type: "RollingUpdate",
rollingUpdate: {
partition: 0, // Update all pods
},
},
},
});
// Redis cluster StatefulSet
// Six-node Redis cluster (intended as 3 masters + 3 replicas) with cluster
// mode enabled and per-pod persistent storage for AOF and cluster state.
const redisCluster = new k8s.apps.v1.StatefulSet("redis-cluster", {
spec: {
serviceName: "redis-cluster-headless",
replicas: 6, // 3 masters + 3 replicas
selector: {
matchLabels: {
app: "redis-cluster",
},
},
template: {
metadata: {
labels: {
app: "redis-cluster",
},
},
spec: {
containers: [{
name: "redis",
image: "redis:7-alpine",
// Base config file from the ConfigMap plus cluster-mode flags;
// nodes.conf lives on the persistent /data volume so cluster
// state survives pod restarts.
command: ["redis-server"],
args: [
"/etc/redis/redis.conf",
"--cluster-enabled", "yes",
"--cluster-config-file", "/data/nodes.conf",
"--cluster-node-timeout", "5000",
"--appendonly", "yes",
],
ports: [{
containerPort: 6379,
name: "redis",
}, {
containerPort: 16379,
name: "cluster",
}],
volumeMounts: [{
name: "redis-data",
mountPath: "/data",
}, {
name: "redis-config",
mountPath: "/etc/redis",
}],
}],
// Shared base configuration delivered from a ConfigMap.
volumes: [{
name: "redis-config",
configMap: {
name: "redis-config",
},
}],
},
},
// One PVC per pod for the append-only file and cluster metadata.
volumeClaimTemplates: [{
metadata: {
name: "redis-data",
},
spec: {
accessModes: ["ReadWriteOnce"],
resources: {
requests: {
storage: "10Gi",
},
},
},
}],
},
});DaemonSet ensures that all (or some) Nodes run a copy of a Pod. Typically used for cluster-level services.
// API reference: apps/v1 DaemonSet resource (constructor, lookup, and outputs).
class DaemonSet extends pulumi.CustomResource {
constructor(name: string, args?: DaemonSetArgs, opts?: pulumi.CustomResourceOptions)
// Looks up an existing DaemonSet by name and provider-assigned ID.
public static get(name: string, id: pulumi.Input<pulumi.ID>, opts?: pulumi.CustomResourceOptions): DaemonSet
// Output properties
// Populated by the cluster after the resource is created.
public readonly apiVersion!: pulumi.Output<"apps/v1">;
public readonly kind!: pulumi.Output<"DaemonSet">;
public readonly metadata!: pulumi.Output<outputs.meta.v1.ObjectMeta>;
public readonly spec!: pulumi.Output<outputs.apps.v1.DaemonSetSpec>;
public readonly status!: pulumi.Output<outputs.apps.v1.DaemonSetStatus>;
}
// Constructor arguments; all fields are optional.
interface DaemonSetArgs {
apiVersion?: pulumi.Input<"apps/v1">;
kind?: pulumi.Input<"DaemonSet">;
metadata?: pulumi.Input<inputs.meta.v1.ObjectMeta>;
spec?: pulumi.Input<inputs.apps.v1.DaemonSetSpec>;
}// Logging agent DaemonSet
// Fluentd logging agent DaemonSet: runs one log-shipper pod on every node and
// forwards container/host logs to the Elasticsearch endpoint configured below.
const loggingAgent = new k8s.apps.v1.DaemonSet("fluentd-logger", {
    spec: {
        selector: {
            matchLabels: {
                app: "fluentd-logger",
            },
        },
        template: {
            metadata: {
                labels: {
                    app: "fluentd-logger",
                },
            },
            spec: {
                // FIX: `serviceAccount` is the deprecated PodSpec alias;
                // `serviceAccountName` is the supported field.
                serviceAccountName: "fluentd",
                tolerations: [
                    // Allow scheduling on control-plane/master nodes.
                    // NOTE(review): this key was deprecated in favor of
                    // "node-role.kubernetes.io/control-plane"; the blanket
                    // Exists toleration below already covers both.
                    {
                        key: "node-role.kubernetes.io/master",
                        effect: "NoSchedule",
                    },
                    // Tolerate every NoSchedule taint so the agent lands on all nodes.
                    {
                        operator: "Exists",
                        effect: "NoSchedule",
                    },
                ],
                containers: [{
                    name: "fluentd",
                    image: "fluent/fluentd-kubernetes-daemonset:v1-debian-elasticsearch",
                    // Elasticsearch endpoint read by the fluentd image's bundled config.
                    env: [{
                        name: "FLUENT_ELASTICSEARCH_HOST",
                        value: "elasticsearch.logging.svc.cluster.local",
                    }, {
                        name: "FLUENT_ELASTICSEARCH_PORT",
                        value: "9200",
                    }],
                    resources: {
                        requests: {
                            cpu: "100m",
                            memory: "200Mi",
                        },
                        limits: {
                            cpu: "200m",
                            memory: "400Mi",
                        },
                    },
                    // Read-only mounts of the host's log directories.
                    volumeMounts: [{
                        name: "varlog",
                        mountPath: "/var/log",
                        readOnly: true,
                    }, {
                        name: "dockercontainers",
                        mountPath: "/var/lib/docker/containers",
                        readOnly: true,
                    }],
                }],
                volumes: [{
                    name: "varlog",
                    hostPath: {
                        path: "/var/log",
                    },
                }, {
                    name: "dockercontainers",
                    hostPath: {
                        path: "/var/lib/docker/containers",
                    },
                }],
                hostNetwork: true, // Use host networking
                dnsPolicy: "ClusterFirstWithHostNet",
            },
        },
        // Roll at most one node's pod at a time when the template changes.
        updateStrategy: {
            type: "RollingUpdate",
            rollingUpdate: {
                maxUnavailable: 1,
            },
        },
    },
});
// Monitoring agent DaemonSet with node selector
// Prometheus node-exporter: one pod per Linux node exposing host metrics on
// port 9100 over the host network.
const nodeExporter = new k8s.apps.v1.DaemonSet("node-exporter", {
spec: {
selector: {
matchLabels: {
app: "node-exporter",
},
},
template: {
metadata: {
labels: {
app: "node-exporter",
},
annotations: {
"prometheus.io/scrape": "true",
"prometheus.io/port": "9100",
},
},
spec: {
nodeSelector: {
"kubernetes.io/os": "linux", // Only run on Linux nodes
},
containers: [{
name: "node-exporter",
image: "prom/node-exporter:v1.3.1",
// Point the exporter at the host filesystems mounted read-only below.
args: [
"--path.procfs=/host/proc",
"--path.sysfs=/host/sys",
"--path.rootfs=/host/root",
"--collector.filesystem.mount-points-exclude",
// "$$" yields a literal "$" after Kubernetes' $(VAR) expansion of args.
"^/(sys|proc|dev|host|etc|rootfs/var/lib/docker/containers|rootfs/var/lib/docker/overlay2|rootfs/run/docker/netns|rootfs/var/lib/docker/aufs)($$|/)",
],
ports: [{
name: "metrics",
containerPort: 9100,
hostPort: 9100,
}],
// Read-only views of the host's /proc, /sys, and root filesystem.
volumeMounts: [{
name: "proc",
mountPath: "/host/proc",
readOnly: true,
}, {
name: "sys",
mountPath: "/host/sys",
readOnly: true,
}, {
name: "root",
mountPath: "/host/root",
readOnly: true,
}],
}],
volumes: [{
name: "proc",
hostPath: {
path: "/proc",
},
}, {
name: "sys",
hostPath: {
path: "/sys",
},
}, {
name: "root",
hostPath: {
path: "/",
},
}],
// Host network + host PID namespace expose real node-level metrics.
hostNetwork: true,
hostPID: true,
},
},
},
});ReplicaSet maintains a stable set of replica Pods running at any given time. Usually managed by Deployments.
// API reference: apps/v1 ReplicaSet resource (constructor, lookup, and outputs).
class ReplicaSet extends pulumi.CustomResource {
constructor(name: string, args?: ReplicaSetArgs, opts?: pulumi.CustomResourceOptions)
// Looks up an existing ReplicaSet by name and provider-assigned ID.
public static get(name: string, id: pulumi.Input<pulumi.ID>, opts?: pulumi.CustomResourceOptions): ReplicaSet
// Output properties
// Populated by the cluster after the resource is created.
public readonly apiVersion!: pulumi.Output<"apps/v1">;
public readonly kind!: pulumi.Output<"ReplicaSet">;
public readonly metadata!: pulumi.Output<outputs.meta.v1.ObjectMeta>;
public readonly spec!: pulumi.Output<outputs.apps.v1.ReplicaSetSpec>;
public readonly status!: pulumi.Output<outputs.apps.v1.ReplicaSetStatus>;
}
// Constructor arguments; all fields are optional.
interface ReplicaSetArgs {
apiVersion?: pulumi.Input<"apps/v1">;
kind?: pulumi.Input<"ReplicaSet">;
metadata?: pulumi.Input<inputs.meta.v1.ObjectMeta>;
spec?: pulumi.Input<inputs.apps.v1.ReplicaSetSpec>;
}// Standalone ReplicaSet (rarely used directly)
// Three-replica nginx ReplicaSet. In practice prefer a Deployment, which
// manages ReplicaSets and adds rollout/rollback semantics on top.
const webReplicaSet = new k8s.apps.v1.ReplicaSet("web-rs", {
spec: {
replicas: 3,
selector: {
matchLabels: {
app: "web",
tier: "frontend",
},
},
template: {
metadata: {
labels: {
app: "web",
tier: "frontend",
},
},
spec: {
containers: [{
name: "nginx",
image: "nginx:1.21",
ports: [{
containerPort: 80,
}],
}],
},
},
},
});Job creates one or more Pods and ensures that a specified number of them successfully terminate.
// API reference: batch/v1 Job resource (constructor, lookup, and outputs).
class Job extends pulumi.CustomResource {
constructor(name: string, args?: JobArgs, opts?: pulumi.CustomResourceOptions)
// Looks up an existing Job by name and provider-assigned ID.
public static get(name: string, id: pulumi.Input<pulumi.ID>, opts?: pulumi.CustomResourceOptions): Job
// Output properties
// Populated by the cluster after the resource is created.
public readonly apiVersion!: pulumi.Output<"batch/v1">;
public readonly kind!: pulumi.Output<"Job">;
public readonly metadata!: pulumi.Output<outputs.meta.v1.ObjectMeta>;
public readonly spec!: pulumi.Output<outputs.batch.v1.JobSpec>;
public readonly status!: pulumi.Output<outputs.batch.v1.JobStatus>;
}
// Constructor arguments; all fields are optional.
interface JobArgs {
apiVersion?: pulumi.Input<"batch/v1">;
kind?: pulumi.Input<"Job">;
metadata?: pulumi.Input<inputs.meta.v1.ObjectMeta>;
spec?: pulumi.Input<inputs.batch.v1.JobSpec>;
}// Database migration job
// One-shot database migration Job: runs "./migrate --up" until one pod
// succeeds, retrying on failure up to backoffLimit times.
const migrationJob = new k8s.batch.v1.Job("db-migration", {
spec: {
template: {
spec: {
containers: [{
name: "migrate",
image: "myapp/migration:v1.0",
command: ["./migrate"],
args: ["--up"],
// Connection string is injected from a Secret, not hard-coded.
env: [{
name: "DATABASE_URL",
valueFrom: {
secretKeyRef: {
name: "db-credentials",
key: "url",
},
},
}],
}],
// OnFailure restarts the container in place instead of leaving failed pods.
restartPolicy: "OnFailure",
},
},
backoffLimit: 3, // Retry up to 3 times
completions: 1, // Only need 1 successful completion
parallelism: 1, // Run 1 pod at a time
},
});
// Parallel processing job
// Runs until 10 pods succeed, at most 3 at a time, with a 1-hour overall deadline.
const dataProcessingJob = new k8s.batch.v1.Job("data-processing", {
spec: {
template: {
spec: {
containers: [{
name: "processor",
image: "myapp/data-processor:v2.1",
// Each pod derives a unique WORKER_ID from its own pod name (downward API).
// NOTE(review): consider completionMode: "Indexed" for stable worker
// indices (JOB_COMPLETION_INDEX) instead of pod-name-derived IDs.
env: [{
name: "WORKER_ID",
valueFrom: {
fieldRef: {
fieldPath: "metadata.name",
},
},
}],
resources: {
requests: {
cpu: "500m",
memory: "1Gi",
},
},
}],
restartPolicy: "OnFailure",
},
},
completions: 10, // Need 10 successful completions
parallelism: 3, // Run up to 3 pods in parallel
backoffLimit: 6, // Allow retries
activeDeadlineSeconds: 3600, // Timeout after 1 hour
},
});CronJob creates Jobs on a repeating schedule.
// API reference: batch/v1 CronJob resource (constructor, lookup, and outputs).
class CronJob extends pulumi.CustomResource {
constructor(name: string, args?: CronJobArgs, opts?: pulumi.CustomResourceOptions)
// Looks up an existing CronJob by name and provider-assigned ID.
public static get(name: string, id: pulumi.Input<pulumi.ID>, opts?: pulumi.CustomResourceOptions): CronJob
// Output properties
// Populated by the cluster after the resource is created.
public readonly apiVersion!: pulumi.Output<"batch/v1">;
public readonly kind!: pulumi.Output<"CronJob">;
public readonly metadata!: pulumi.Output<outputs.meta.v1.ObjectMeta>;
public readonly spec!: pulumi.Output<outputs.batch.v1.CronJobSpec>;
public readonly status!: pulumi.Output<outputs.batch.v1.CronJobStatus>;
}
// Constructor arguments; all fields are optional.
interface CronJobArgs {
apiVersion?: pulumi.Input<"batch/v1">;
kind?: pulumi.Input<"CronJob">;
metadata?: pulumi.Input<inputs.meta.v1.ObjectMeta>;
spec?: pulumi.Input<inputs.batch.v1.CronJobSpec>;
}// Daily backup job
// Nightly backup CronJob: copies the app-data PVC contents to S3 at 02:00.
// NOTE(review): the ":latest" tag makes runs non-reproducible — pin a version tag.
const backupCronJob = new k8s.batch.v1.CronJob("daily-backup", {
spec: {
schedule: "0 2 * * *", // Daily at 2 AM
jobTemplate: {
spec: {
template: {
spec: {
containers: [{
name: "backup",
image: "myapp/backup:latest",
command: ["./backup.sh"],
// Destination is inline; AWS credentials come from a Secret.
env: [{
name: "BACKUP_TARGET",
value: "s3://my-backups/daily",
}, {
name: "AWS_ACCESS_KEY_ID",
valueFrom: {
secretKeyRef: {
name: "aws-credentials",
key: "access-key-id",
},
},
}],
// Source data is mounted read-only so the backup cannot mutate it.
volumeMounts: [{
name: "data",
mountPath: "/data",
readOnly: true,
}],
}],
volumes: [{
name: "data",
persistentVolumeClaim: {
claimName: "app-data",
},
}],
restartPolicy: "OnFailure",
},
},
},
},
// Keep a short history of finished jobs for debugging.
successfulJobsHistoryLimit: 3,
failedJobsHistoryLimit: 1,
concurrencyPolicy: "Forbid", // Don't allow concurrent jobs
},
});
// Log cleanup job
// Weekly job that deletes *.log files older than 7 days under /var/log/app.
// NOTE(review): the hostPath volume only cleans whichever node the pod is
// scheduled onto; a per-node (DaemonSet-style) approach is needed to cover
// every node.
const logCleanupJob = new k8s.batch.v1.CronJob("log-cleanup", {
spec: {
schedule: "0 0 * * 0", // Weekly on Sunday at midnight
jobTemplate: {
spec: {
template: {
spec: {
containers: [{
name: "cleanup",
image: "busybox",
command: ["sh"],
args: ["-c", "find /logs -name '*.log' -mtime +7 -delete"],
volumeMounts: [{
name: "logs",
mountPath: "/logs",
}],
}],
volumes: [{
name: "logs",
hostPath: {
path: "/var/log/app",
},
}],
restartPolicy: "OnFailure",
},
},
},
},
concurrencyPolicy: "Replace", // Replace if previous job still running
},
});// Sidecar pattern with main app and logging agent
// Sidecar pattern: the app writes logs into a shared emptyDir volume and a
// Fluent Bit sidecar ships them, configured from the fluent-config ConfigMap.
const sidecarDeployment = new k8s.apps.v1.Deployment("sidecar-app", {
spec: {
replicas: 2,
selector: {
matchLabels: {
app: "sidecar-app",
},
},
template: {
metadata: {
labels: {
app: "sidecar-app",
},
},
spec: {
containers: [
// Main application container
{
name: "app",
image: "myapp:v1.0",
ports: [{
containerPort: 8080,
}],
volumeMounts: [{
name: "shared-logs",
mountPath: "/var/log/app",
}],
},
// Sidecar logging container
// Mounts the same volume read-only; the app stays the single writer.
{
name: "log-shipper",
image: "fluent/fluent-bit:1.8",
volumeMounts: [{
name: "shared-logs",
mountPath: "/var/log/app",
readOnly: true,
}, {
name: "fluent-config",
mountPath: "/fluent-bit/etc",
}],
},
],
// emptyDir lives for the pod's lifetime and is shared by both containers.
volumes: [{
name: "shared-logs",
emptyDir: {},
}, {
name: "fluent-config",
configMap: {
name: "fluent-config",
},
}],
},
},
},
});All workload resources include the following variants:
List types: DeploymentList, StatefulSetList, DaemonSetList, ReplicaSetList, JobList, CronJobList

Patch types: DeploymentPatch, StatefulSetPatch, DaemonSetPatch, ReplicaSetPatch, JobPatch, CronJobPatch

// Example patch operation for a deployment
// Patches an existing Deployment in place: only the fields listed here are
// changed; everything else on "existing-deployment" is left as-is.
const deploymentPatch = new k8s.apps.v1.DeploymentPatch("update-deployment", {
// metadata.name identifies which live object to patch.
metadata: {
name: "existing-deployment",
},
spec: {
replicas: 5, // Scale to 5 replicas
template: {
spec: {
containers: [{
name: "app",
image: "myapp:v2.0", // Update image
}],
},
},
},
});The Apps API group provides powerful declarative workload management with intelligent lifecycle handling, making it easy to deploy, scale, and manage containerized applications with confidence.
Install with Tessl CLI
npx tessl i tessl/npm-pulumi--kubernetes