Generate Kubernetes manifests for Deployments, Services, Ingress, ConfigMaps, Secrets, HPA, PVCs with kustomize overlay structure, health probes, and resource limits.
84
81%
Does it follow best practices?
Impact
Pending
No eval scenarios have been run
Passed
No known issues
Generate Kubernetes manifests for Deployments, Services, Ingress, ConfigMaps, Secrets, HPA, PVCs with kustomize overlay structure, health probes, and resource limits.
# Scaffold the kustomize layout: one base plus dev/staging/production overlays.
mkdir -p k8s/base k8s/overlays/dev k8s/overlays/staging k8s/overlays/production
# Base manifests
touch k8s/base/kustomization.yaml
touch k8s/base/namespace.yaml
touch k8s/base/deployment.yaml
touch k8s/base/service.yaml
touch k8s/base/configmap.yaml
touch k8s/base/ingress.yaml
touch k8s/base/hpa.yaml
touch k8s/base/pvc.yaml
# Overlay kustomizations and patches
touch k8s/overlays/dev/kustomization.yaml
touch k8s/overlays/dev/replica-patch.yaml
touch k8s/overlays/staging/kustomization.yaml
touch k8s/overlays/staging/replica-patch.yaml
touch k8s/overlays/production/kustomization.yaml
touch k8s/overlays/production/replica-patch.yaml
touch k8s/overlays/production/hpa-patch.yaml
touch k8s/overlays/production/resource-patch.yaml
k8s/
  base/
    kustomization.yaml     # Lists all base resources
    namespace.yaml         # Namespace definition
    deployment.yaml        # Application deployment
    service.yaml           # ClusterIP service
    configmap.yaml         # Non-secret configuration
    ingress.yaml           # Ingress rules
    hpa.yaml               # Horizontal Pod Autoscaler
    pvc.yaml               # Persistent Volume Claims (if needed)
  overlays/
    dev/
      kustomization.yaml   # Dev-specific patches and images
      replica-patch.yaml
    staging/
      kustomization.yaml
      replica-patch.yaml
    production/
      kustomization.yaml
      replica-patch.yaml
      hpa-patch.yaml
      resource-patch.yaml

Best practices applied here:
- Set the namespace in kustomization, not in individual manifests (kustomize applies it).
- Use SealedSecrets, ExternalSecrets, or cloud-native secret managers — never plain Secret manifests committed to git.
- Use the standard labels app.kubernetes.io/name, app.kubernetes.io/version, app.kubernetes.io/component.
- Never deploy the :latest tag.

# k8s/base/kustomization.yaml
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
# NOTE: commonLabels is deprecated in kustomize >= 5 in favor of `labels`;
# kept here for compatibility with older kubectl/kustomize builds.
commonLabels:
  app.kubernetes.io/name: myapp
  app.kubernetes.io/managed-by: kustomize
resources:
  - namespace.yaml
  - deployment.yaml
  - service.yaml
  - configmap.yaml
  - ingress.yaml
  - hpa.yaml
  # - pvc.yaml  # uncomment when persistent storage is needed
---
# k8s/base/namespace.yaml
apiVersion: v1
kind: Namespace
metadata:
  # Overlays replace this via the kustomize `namespace:` field
  # (myapp-dev, myapp-prod) — presumably the transformer also renames
  # this Namespace object; verify with your kustomize version.
  name: myapp
---
# k8s/base/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: myapp
spec:
  replicas: 2
  selector:
    matchLabels:
      app.kubernetes.io/name: myapp
  template:
    metadata:
      labels:
        app.kubernetes.io/name: myapp
    spec:
      serviceAccountName: myapp
      # Run as an unprivileged user; fsGroup makes mounted volumes group-accessible.
      securityContext:
        runAsNonRoot: true
        runAsUser: 1001
        fsGroup: 1001
      containers:
        - name: myapp
          # Replace with your registry/image. Use kustomize to override per environment:
          # kustomization.yaml → images: [{name: ghcr.io/org/myapp, newName: your-registry/your-app, newTag: v1.0.0}]
          # Pinned tag instead of :latest (per the best practices above) so pulls
          # are deterministic; overlays replace the tag via the `images:` field.
          image: ghcr.io/org/myapp:v1.0.0
          securityContext:
            allowPrivilegeEscalation: false
            capabilities:
              drop: ["ALL"]
          ports:
            - name: http
              containerPort: 3000
              protocol: TCP
          envFrom:
            - configMapRef:
                name: myapp-config
          env:
            # Secret must be created out-of-band (SealedSecrets / ExternalSecrets) —
            # never committed as a plain manifest.
            - name: DATABASE_URL
              valueFrom:
                secretKeyRef:
                  name: myapp-secrets
                  key: database-url
          resources:
            requests:
              cpu: 100m
              memory: 128Mi
            limits:
              cpu: 500m
              memory: 512Mi
          # Liveness restarts a wedged container; the startupProbe below gates it
          # during boot (30 × 5s = up to 150s allowed to start).
          livenessProbe:
            httpGet:
              path: /health
              port: http
            initialDelaySeconds: 10
            periodSeconds: 15
            timeoutSeconds: 3
            failureThreshold: 3
          # Readiness removes the pod from Service endpoints while not ready.
          readinessProbe:
            httpGet:
              path: /health/ready
              port: http
            initialDelaySeconds: 5
            periodSeconds: 10
            timeoutSeconds: 3
            failureThreshold: 3
          startupProbe:
            httpGet:
              path: /health
              port: http
            initialDelaySeconds: 0
            periodSeconds: 5
            failureThreshold: 30
      terminationGracePeriodSeconds: 30
---
# k8s/base/service.yaml
apiVersion: v1
kind: Service
metadata:
  name: myapp
spec:
  type: ClusterIP
  selector:
    app.kubernetes.io/name: myapp
  ports:
    # Expose 80 in-cluster; traffic is forwarded to the container's port 3000.
    - port: 80
      targetPort: 3000
      protocol: TCP
---
# k8s/base/ingress.yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: myapp
  annotations:
    # cert-manager provisions the TLS cert referenced by spec.tls.secretName.
    cert-manager.io/cluster-issuer: letsencrypt-prod
    # NOTE(review): stock ingress-nginx uses limit-rps / limit-rpm annotations,
    # not rate-limit / rate-limit-window — confirm against your controller.
    nginx.ingress.kubernetes.io/rate-limit: "100"
    nginx.ingress.kubernetes.io/rate-limit-window: "1m"
spec:
  ingressClassName: nginx
  tls:
    - hosts:
        - myapp.example.com
      secretName: myapp-tls
  rules:
    - host: myapp.example.com
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: myapp
                port:
                  number: 80
---
# k8s/base/configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: myapp-config
# All values are strings; PORT is quoted so YAML does not type it as an integer.
data:
  NODE_ENV: production
  LOG_LEVEL: info
  PORT: "3000"
---
# k8s/base/hpa.yaml
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
  name: myapp
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: myapp
  minReplicas: 2
  maxReplicas: 10
  metrics:
    - type: Resource
      resource:
        name: cpu
        target:
          type: Utilization
          averageUtilization: 70
    - type: Resource
      resource:
        name: memory
        target:
          type: Utilization
          averageUtilization: 80
  behavior:
    scaleDown:
      # Require 5 minutes of sustained low load before scaling in (avoids flapping).
      stabilizationWindowSeconds: 300
---
# k8s/base/pvc.yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: myapp-data
spec:
  accessModes:
    - ReadWriteOnce
  # gp3 is an AWS EBS storage class — adjust for your cluster/provider.
  storageClassName: gp3
  resources:
    requests:
      storage: 10Gi
---
# k8s/overlays/dev/kustomization.yaml
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
# kustomize injects this namespace into every base resource.
namespace: myapp-dev
resources:
  - ../../base
patches:
  - path: replica-patch.yaml
images:
  - name: ghcr.io/org/myapp
    newTag: dev-latest
---
# k8s/overlays/dev/replica-patch.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: myapp
spec:
  # Dev runs a single replica to save resources.
  replicas: 1
---
# k8s/overlays/production/kustomization.yaml
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: myapp-prod
resources:
  - ../../base
patches:
  - path: replica-patch.yaml
  - path: resource-patch.yaml
  # - path: hpa-patch.yaml  # listed in the layout above; enable once the file exists
images:
  - name: ghcr.io/org/myapp
    # Production pins an exact release tag.
    newTag: v1.2.3
---
# k8s/overlays/production/resource-patch.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: myapp
spec:
  template:
    spec:
      containers:
        - name: myapp
          # Production gets larger requests/limits than the base manifest.
          resources:
            requests:
              cpu: 250m
              memory: 256Mi
            limits:
              # Quoted so "1" (one full core) stays a string for the k8s API.
              cpu: "1"
              memory: 1Gi

# Preview rendered manifests
kubectl kustomize k8s/overlays/dev
# Apply the dev overlay to the cluster
kubectl apply -k k8s/overlays/dev
# Apply production overlay
kubectl apply -k k8s/overlays/production
# Check deployment rollout status (blocks until complete or failed)
kubectl -n myapp-prod rollout status deployment/myapp
# View pods by the standard app label
kubectl -n myapp-prod get pods -l app.kubernetes.io/name=myapp
# Stream pod logs
kubectl -n myapp-prod logs -l app.kubernetes.io/name=myapp -f
# Describe a failing pod (events, probe failures, image pull errors)
kubectl -n myapp-prod describe pod <pod-name>
# Port-forward the Service for local access (local 3000 → service port 80)
kubectl -n myapp-dev port-forward svc/myapp 3000:80
# Scale manually (the HPA will reconcile the count back on its next sync)
kubectl -n myapp-prod scale deployment/myapp --replicas=5
# Restart deployment (rolling, zero-downtime with 2+ replicas)
kubectl -n myapp-prod rollout restart deployment/myapp
# View HPA status (current/target utilization and replica count)
kubectl -n myapp-prod get hpa
# Diff rendered manifests against live cluster state before apply
kubectl diff -k k8s/overlays/production

Related skills: pairs with the dockerfile-generator and github-actions-ci skills. Use the github-actions-ci skill to run `kubectl apply -k`, or use ArgoCD for GitOps-style continuous delivery. Use the terraform-starter skill to provision the Kubernetes cluster (EKS, GKE, AKS) and supporting infrastructure. For custom HPA metrics, ensure a /metrics endpoint is exposed.

Version: 181fcbc
If you maintain this skill, you can claim it as your own. Once claimed, you can manage eval scenarios, bundle related skills, attach documentation or rules, and ensure cross-agent compatibility.