Official Go client library for Kubernetes API - typed clients, controllers, and cluster interaction tools
This document covers two specialized client types: metadata-only operations and scale subresource access.
The metadata client provides a lightweight way to interact with Kubernetes resources using only their metadata (name, namespace, labels, annotations, owner references, and so on), without fetching each object's full spec and status. The API server returns objects as `PartialObjectMetadata`, which reduces memory use and server load in controllers and tools that never read the spec.
Package: `k8s.io/client-go/metadata` (metadata types come from `k8s.io/apimachinery/pkg/apis/meta/v1`)

```go
import (
	"k8s.io/client-go/metadata"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
)
```

Creating the Client:

```go
import (
	"net/http"

	"k8s.io/client-go/metadata"
	"k8s.io/client-go/rest"
)
```
```go
// From in-cluster config
config, err := rest.InClusterConfig()
if err != nil {
	panic(err)
}
metadataClient, err := metadata.NewForConfig(config)

// With a custom HTTP client
httpClient := &http.Client{}
metadataClient, err = metadata.NewForConfigAndClient(config, httpClient)
```
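Outside a cluster, the same client can be built from a kubeconfig. A minimal sketch, assuming a standard `~/.kube/config` (the helpers are from `k8s.io/client-go/tools/clientcmd`):

```go
import "k8s.io/client-go/tools/clientcmd"

// Build a rest.Config from the default kubeconfig location, then create
// the metadata client exactly as in the in-cluster case.
config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
if err != nil {
	panic(err)
}
metadataClient, err := metadata.NewForConfig(config)
```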
Client Interface:

```go
type Interface interface {
	Resource(resource schema.GroupVersionResource) Getter
}

type Getter interface {
	Namespace(string) ResourceInterface
	ResourceInterface
}

type ResourceInterface interface {
	Get(ctx context.Context, name string, opts metav1.GetOptions, subresources ...string) (*metav1.PartialObjectMetadata, error)
	List(ctx context.Context, opts metav1.ListOptions) (*metav1.PartialObjectMetadataList, error)
	Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
	Delete(ctx context.Context, name string, opts metav1.DeleteOptions, subresources ...string) error
	DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (*metav1.PartialObjectMetadata, error)
}
```
```go
// PartialObjectMetadata contains only metadata; no spec or status.
type PartialObjectMetadata struct {
	metav1.TypeMeta
	metav1.ObjectMeta
}

type PartialObjectMetadataList struct {
	metav1.TypeMeta
	metav1.ListMeta
	Items []PartialObjectMetadata
}
```
"context"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// Define resource GVR
podsGVR := schema.GroupVersionResource{
Group: "",
Version: "v1",
Resource: "pods",
}
// Get pod metadata
podMeta, err := metadataClient.Resource(podsGVR).
Namespace("default").
Get(context.TODO(), "my-pod", metav1.GetOptions{})
if err != nil {
panic(err)
}
// Access metadata
fmt.Printf("Pod: %s/%s\\n", podMeta.Namespace, podMeta.Name)
fmt.Printf("Labels: %v\\n", podMeta.Labels)
fmt.Printf("Annotations: %v\\n", podMeta.Annotations)
fmt.Printf("Creation: %v\\n", podMeta.CreationTimestamp)
fmt.Printf("UID: %s\\n", podMeta.UID)
fmt.Printf("ResourceVersion: %s\\n", podMeta.ResourceVersion)// List all pod metadata in namespace
podMetaList, err := metadataClient.Resource(podsGVR).
	Namespace("default").
	List(context.TODO(), metav1.ListOptions{
		LabelSelector: "app=myapp",
	})
if err != nil {
	panic(err)
}

fmt.Printf("Found %d pods\n", len(podMetaList.Items))
for _, podMeta := range podMetaList.Items {
	fmt.Printf(" - %s (created: %v)\n",
		podMeta.Name,
		podMeta.CreationTimestamp)
}
```

Watching Metadata:

```go
import "k8s.io/apimachinery/pkg/watch"

// Watch for metadata changes
watcher, err := metadataClient.Resource(podsGVR).
	Namespace("default").
	Watch(context.TODO(), metav1.ListOptions{})
if err != nil {
	panic(err)
}
defer watcher.Stop()

for event := range watcher.ResultChan() {
	meta, ok := event.Object.(*metav1.PartialObjectMetadata)
	if !ok {
		continue // e.g. a watch.Error event carries a *metav1.Status instead
	}
	fmt.Printf("Event %s: %s (labels: %v)\n",
		event.Type, meta.Name, meta.Labels)
}
```
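For long-running watches, the informer machinery has a metadata-only variant in `k8s.io/client-go/metadata/metadatainformer`. A sketch, reusing the `metadataClient` and `podsGVR` from above:

```go
import (
	"time"

	"k8s.io/client-go/metadata/metadatainformer"
	"k8s.io/client-go/tools/cache"
)

// Shared informer factory whose caches hold only PartialObjectMetadata.
factory := metadatainformer.NewSharedInformerFactory(metadataClient, 10*time.Minute)
informer := factory.ForResource(podsGVR).Informer()

informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
	AddFunc: func(obj interface{}) {
		meta := obj.(*metav1.PartialObjectMetadata)
		fmt.Printf("Added: %s/%s\n", meta.Namespace, meta.Name)
	},
})

stopCh := make(chan struct{})
defer close(stopCh)
factory.Start(stopCh)
cache.WaitForCacheSync(stopCh, informer.HasSynced)
```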
Deleting Resources:

```go
// Delete a resource by name
err := metadataClient.Resource(podsGVR).
	Namespace("default").
	Delete(context.TODO(), "my-pod", metav1.DeleteOptions{})

// Delete a collection matching a label selector
err = metadataClient.Resource(podsGVR).
	Namespace("default").
	DeleteCollection(
		context.TODO(),
		metav1.DeleteOptions{},
		metav1.ListOptions{
			LabelSelector: "app=myapp",
		})
```
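`DeleteOptions` also accepts a propagation policy. A sketch, not in the original example, showing foreground deletion so dependents are removed before the owner:

```go
// Foreground deletion: the owner remains until its dependents are gone.
policy := metav1.DeletePropagationForeground
err := metadataClient.Resource(podsGVR).
	Namespace("default").
	Delete(context.TODO(), "my-pod", metav1.DeleteOptions{
		PropagationPolicy: &policy,
	})
```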
Patching Metadata:

```go
import "k8s.io/apimachinery/pkg/types"

// Patch labels and annotations using JSON Patch
patch := []byte(`[
	{"op": "add", "path": "/metadata/labels/env", "value": "production"},
	{"op": "replace", "path": "/metadata/annotations/updated", "value": "true"}
]`)

patchedMeta, err := metadataClient.Resource(podsGVR).
	Namespace("default").
	Patch(
		context.TODO(),
		"my-pod",
		types.JSONPatchType,
		patch,
		metav1.PatchOptions{})
```
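Note that a JSON Patch `add` on `/metadata/labels/env` fails if the object has no labels map yet; a merge patch creates intermediate maps as needed. A sketch (the label key is illustrative):

```go
// Merge patch: creates the labels map if it does not exist yet.
mergePatch := []byte(`{"metadata":{"labels":{"env":"production"}}}`)
_, err := metadataClient.Resource(podsGVR).
	Namespace("default").
	Patch(context.TODO(), "my-pod", types.MergePatchType, mergePatch, metav1.PatchOptions{})
```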
1. Label Management

```go
import "encoding/json"

// Update labels on multiple resources without fetching full objects
gvr := schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployments"}
deployments, err := metadataClient.Resource(gvr).
	Namespace("default").
	List(context.TODO(), metav1.ListOptions{})
if err != nil {
	panic(err)
}

for _, deploy := range deployments.Items {
	if deploy.Labels == nil {
		deploy.Labels = make(map[string]string)
	}
	deploy.Labels["managed-by"] = "my-controller"

	// Merge-patch just the labels
	patchData, _ := json.Marshal(map[string]interface{}{
		"metadata": map[string]interface{}{
			"labels": deploy.Labels,
		},
	})
	_, err := metadataClient.Resource(gvr).
		Namespace(deploy.Namespace).
		Patch(context.TODO(), deploy.Name, types.MergePatchType, patchData, metav1.PatchOptions{})
	if err != nil {
		panic(err)
	}
}
```
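For large collections, the list above can be paginated instead of loaded in one response. A sketch using the standard `Limit`/`Continue` list options (the page size of 500 is an arbitrary choice):

```go
// Walk the list in pages, following the continue token.
opts := metav1.ListOptions{Limit: 500}
for {
	page, err := metadataClient.Resource(gvr).
		Namespace("default").
		List(context.TODO(), opts)
	if err != nil {
		panic(err)
	}
	for _, item := range page.Items {
		fmt.Println(item.Name)
	}
	if page.Continue == "" {
		break
	}
	opts.Continue = page.Continue
}
```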
2. Owner Reference Management

```go
// Check owner references without loading full objects
podMeta, err := metadataClient.Resource(podsGVR).
	Namespace("default").
	Get(context.TODO(), "my-pod", metav1.GetOptions{})
if err != nil {
	panic(err)
}

for _, ownerRef := range podMeta.OwnerReferences {
	fmt.Printf("Owned by: %s/%s (UID: %s)\n",
		ownerRef.Kind, ownerRef.Name, ownerRef.UID)
	// Controller is a *bool; guard against nil before dereferencing
	if ownerRef.Controller != nil && *ownerRef.Controller {
		fmt.Printf("  Controller: true\n")
	}
}
```
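Since `PartialObjectMetadata` embeds `ObjectMeta`, it satisfies `metav1.Object`, so apimachinery helpers such as `metav1.GetControllerOf` work on it directly:

```go
// Returns the owner reference marked Controller=true, or nil if none.
if controller := metav1.GetControllerOf(podMeta); controller != nil {
	fmt.Printf("Managed by %s %s\n", controller.Kind, controller.Name)
}
```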
3. Garbage Collection Analysis

```go
// Find orphaned resources (no owner references)
deployMeta, err := metadataClient.Resource(
	schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployments"}).
	Namespace("default").
	List(context.TODO(), metav1.ListOptions{})
if err != nil {
	panic(err)
}

for _, meta := range deployMeta.Items {
	if len(meta.OwnerReferences) == 0 {
		fmt.Printf("Orphaned deployment: %s\n", meta.Name)
	}
}
```

The scale client provides access to the scale subresource, which allows getting and updating the replica count of scalable resources (Deployments, ReplicaSets, StatefulSets, etc.) without modifying other fields.
Package: `k8s.io/client-go/scale` (the Scale type comes from `k8s.io/api/autoscaling/v1`)

```go
import (
	"k8s.io/client-go/scale"
	autoscalingv1 "k8s.io/api/autoscaling/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
)
```

Creating the Client:

```go
import (
	"k8s.io/client-go/discovery"
	"k8s.io/client-go/dynamic"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/restmapper"
	"k8s.io/client-go/scale"
)
```
```go
// Discovery client, used both by the REST mapper and to resolve which
// group/version serves each resource's scale subresource
discoveryClient, err := discovery.NewDiscoveryClientForConfig(config)
if err != nil {
	panic(err)
}

// REST mapper for resource resolution
groupResources, err := restmapper.GetAPIGroupResources(discoveryClient)
if err != nil {
	panic(err)
}
mapper := restmapper.NewDiscoveryRESTMapper(groupResources)

// Scale client; the resolver discovers the scale subresource version per resource
resolver := scale.NewDiscoveryScaleKindResolver(discoveryClient)
scaleClient, err := scale.NewForConfig(config, mapper, dynamic.LegacyAPIPathResolverFunc, resolver)
if err != nil {
	panic(err)
}
```
Client Interface:

```go
type ScalesGetter interface {
	Scales(namespace string) ScaleInterface
}

type ScaleInterface interface {
	Get(ctx context.Context, resource schema.GroupResource, name string, opts metav1.GetOptions) (*autoscalingv1.Scale, error)
	Update(ctx context.Context, resource schema.GroupResource, scale *autoscalingv1.Scale, opts metav1.UpdateOptions) (*autoscalingv1.Scale, error)
	Patch(ctx context.Context, resource schema.GroupResource, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions) (*autoscalingv1.Scale, error)
}
```
```go
// autoscalingv1.Scale represents the scale subresource
type Scale struct {
	metav1.TypeMeta
	metav1.ObjectMeta

	// Spec defines the desired replica count
	Spec ScaleSpec

	// Status reports the current replica count
	Status ScaleStatus
}

type ScaleSpec struct {
	// Desired number of replicas
	Replicas int32
}

type ScaleStatus struct {
	// Actual number of replicas
	Replicas int32
	// Label selector for pods
	Selector string
}
```
"context"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// Define resource
deploymentGR := schema.GroupResource{
Group: "apps",
Resource: "deployments",
}
// Get current scale
scale, err := scaleClient.Scales("default").Get(
context.TODO(),
deploymentGR,
"my-deployment",
metav1.GetOptions{})
if err != nil {
panic(err)
}
fmt.Printf("Desired replicas: %d\\n", scale.Spec.Replicas)
fmt.Printf("Current replicas: %d\\n", scale.Status.Replicas)
fmt.Printf("Selector: %s\\n", scale.Status.Selector)// Get current scale
scale, err := scaleClient.Scales("default").Get(
context.TODO(),
deploymentGR,
"my-deployment",
metav1.GetOptions{})
if err != nil {
panic(err)
}
// Update replica count
scale.Spec.Replicas = 5
// Apply update
updatedScale, err := scaleClient.Scales("default").Update(
context.TODO(),
deploymentGR,
scale,
metav1.UpdateOptions{})
if err != nil {
panic(err)
}
fmt.Printf("Scaled to %d replicas\\n", updatedScale.Spec.Replicas)import "k8s.io/apimachinery/pkg/types"
Patching the Scale:

```go
import "k8s.io/apimachinery/pkg/types"

// Patch the replica count directly, without a prior Get
patch := []byte(`{"spec":{"replicas":3}}`)
patchedScale, err := scaleClient.Scales("default").Patch(
	context.TODO(),
	deploymentGR,
	"my-deployment",
	types.MergePatchType,
	patch,
	metav1.PatchOptions{})
```

Scaling Deployments:

```go
deploymentGR := schema.GroupResource{
Group: "apps",
Resource: "deployments",
}
scale, err := scaleClient.Scales("default").Get(
context.TODO(), deploymentGR, "nginx-deployment", metav1.GetOptions{})
scale.Spec.Replicas = 10
scaleClient.Scales("default").Update(context.TODO(), deploymentGR, scale, metav1.UpdateOptions{})statefulSetGR := schema.GroupResource{
Group: "apps",
Resource: "statefulsets",
}
scale, err := scaleClient.Scales("default").Get(
context.TODO(), statefulSetGR, "mysql-cluster", metav1.GetOptions{})
scale.Spec.Replicas = 3
scaleClient.Scales("default").Update(context.TODO(), statefulSetGR, scale, metav1.UpdateOptions{})replicaSetGR := schema.GroupResource{
Group: "apps",
Resource: "replicasets",
}
scale, err := scaleClient.Scales("default").Get(
context.TODO(), replicaSetGR, "my-replicaset", metav1.GetOptions{})
scale.Spec.Replicas = 8
scaleClient.Scales("default").Update(context.TODO(), replicaSetGR, scale, metav1.UpdateOptions{})// For custom resources that support scale subresource
customResourceGR := schema.GroupResource{
	Group:    "stable.example.com",
	Resource: "customapps",
}

scale, err := scaleClient.Scales("default").Get(
	context.TODO(), customResourceGR, "my-custom-app", metav1.GetOptions{})
```

This only works if the CRD declares the scale subresource (`subresources.scale` with `specReplicasPath` and `statusReplicasPath` in the CRD spec); otherwise the API server rejects requests to the scale endpoint.
1. Manual Scaling

```go
func scaleDeployment(name, namespace string, replicas int32) error {
	gr := schema.GroupResource{Group: "apps", Resource: "deployments"}

	scale, err := scaleClient.Scales(namespace).Get(
		context.TODO(), gr, name, metav1.GetOptions{})
	if err != nil {
		return err
	}

	scale.Spec.Replicas = replicas
	_, err = scaleClient.Scales(namespace).Update(
		context.TODO(), gr, scale, metav1.UpdateOptions{})
	return err
}
```
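A call site might look like this (the deployment name is illustrative):

```go
// Scale the named Deployment to five replicas.
if err := scaleDeployment("nginx-deployment", "default", 5); err != nil {
	fmt.Printf("scale failed: %v\n", err)
}
```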
2. Custom Autoscaler

```go
import "math"

func autoScale(name, namespace string, currentLoad float64) error {
	gr := schema.GroupResource{Group: "apps", Resource: "deployments"}

	scale, err := scaleClient.Scales(namespace).Get(
		context.TODO(), gr, name, metav1.GetOptions{})
	if err != nil {
		return err
	}

	// Calculate desired replicas from load relative to current capacity
	desiredReplicas := int32(math.Ceil(currentLoad * float64(scale.Status.Replicas)))
	if desiredReplicas != scale.Spec.Replicas {
		scale.Spec.Replicas = desiredReplicas
		_, err = scaleClient.Scales(namespace).Update(
			context.TODO(), gr, scale, metav1.UpdateOptions{})
	}
	return err
}
```
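A production autoscaler would also clamp the computed count to configured bounds (and typically rate-limit changes). A minimal sketch of the clamping step, with illustrative bounds:

```go
// Keep the desired replica count within [min, max].
func clampReplicas(n, min, max int32) int32 {
	if n < min {
		return min
	}
	if n > max {
		return max
	}
	return n
}

// e.g. desiredReplicas = clampReplicas(desiredReplicas, 1, 20)
```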
3. Scale to Zero

```go
func scaleToZero(resourceType, name, namespace string) error {
	gr := schema.GroupResource{
		Group:    "apps",
		Resource: resourceType, // "deployments", "statefulsets", etc.
	}

	patch := []byte(`{"spec":{"replicas":0}}`)
	_, err := scaleClient.Scales(namespace).Patch(
		context.TODO(),
		gr,
		name,
		types.MergePatchType,
		patch,
		metav1.PatchOptions{})
	return err
}
```
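Scaling back up later requires knowing the previous count; one pattern is to record it in an annotation first, which combines both clients from this document. A sketch; the annotation key and the reuse of the helpers above are illustrative:

```go
// Record the current replica count in an annotation (via the metadata
// client), then scale to zero (via the scale client).
func rememberAndScaleToZero(name, namespace string) error {
	gr := schema.GroupResource{Group: "apps", Resource: "deployments"}

	s, err := scaleClient.Scales(namespace).Get(
		context.TODO(), gr, name, metav1.GetOptions{})
	if err != nil {
		return err
	}

	ann := []byte(fmt.Sprintf(
		`{"metadata":{"annotations":{"example.com/previous-replicas":"%d"}}}`,
		s.Spec.Replicas))
	if _, err := metadataClient.Resource(gr.WithVersion("v1")).
		Namespace(namespace).
		Patch(context.TODO(), name, types.MergePatchType, ann, metav1.PatchOptions{}); err != nil {
		return err
	}

	return scaleToZero("deployments", name, namespace)
}
```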
Using the Scale Client (Recommended):

```go
// Only modifies the replica count
scale, _ := scaleClient.Scales("default").Get(ctx, gr, "nginx", metav1.GetOptions{})
scale.Spec.Replicas = 5
scaleClient.Scales("default").Update(ctx, gr, scale, metav1.UpdateOptions{})
```

Using a Full Update:

```go
// Reads and writes the entire Deployment, so concurrent changes to any
// field can cause a conflict (clientset is a standard *kubernetes.Clientset)
deployment, _ := clientset.AppsV1().Deployments("default").Get(ctx, "nginx", metav1.GetOptions{})
*deployment.Spec.Replicas = 5
clientset.AppsV1().Deployments("default").Update(ctx, deployment, metav1.UpdateOptions{})
```

Why use the scale client: it touches only the replica count, so it cannot clobber concurrent changes to other fields, and the same code path works for any scalable resource type, including custom resources.
When working with multiple API versions:

```go
import "k8s.io/client-go/discovery"

// Find the server's preferred version for a resource
discoveryClient, _ := discovery.NewDiscoveryClientForConfig(config)
resourceList, err := discoveryClient.ServerPreferredResources()
if err != nil {
	panic(err)
}

for _, list := range resourceList {
	for _, resource := range list.APIResources {
		if resource.Name == "deployments" {
			fmt.Printf("Preferred version: %s\n", list.GroupVersion)
		}
	}
}
```
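The REST mapper built during scale-client setup can answer the same question directly. A sketch reusing the `mapper` variable from earlier:

```go
// Resolve the preferred resource mapping for a group/kind.
mapping, err := mapper.RESTMapping(schema.GroupKind{Group: "apps", Kind: "Deployment"})
if err != nil {
	panic(err)
}
fmt.Printf("Preferred GVR: %s\n", mapping.Resource)
```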
Both metadata and scale clients support cross-namespace operations:

```go
// List metadata across all namespaces by omitting Namespace()
allPods, err := metadataClient.Resource(podsGVR).
	List(context.TODO(), metav1.ListOptions{})

// Cluster-scoped resources are likewise listed without Namespace()
nodesGVR := schema.GroupVersionResource{Group: "", Version: "v1", Resource: "nodes"}
nodes, err := metadataClient.Resource(nodesGVR).
	List(context.TODO(), metav1.ListOptions{})
```
Both clients support optimistic concurrency control through resource versions:

```go
// Get at a specific resource version
meta, err := metadataClient.Resource(gvr).Namespace(ns).Get(
	context.TODO(),
	name,
	metav1.GetOptions{ResourceVersion: "12345"})

// List from a specific resource version (e.g. to start a watch from it)
list, err := metadataClient.Resource(gvr).Namespace(ns).List(
	context.TODO(),
	metav1.ListOptions{ResourceVersion: "12345"})
```

Install with Tessl CLI

```
npx tessl i tessl/golang-k8s-io--client-go@0.35.0
```