Java client for Kubernetes and OpenShift providing access to full REST APIs via a fluent DSL
—
This document covers OpenShift specialized API group operations, including config, operators, monitoring, machine configuration, and other advanced API groups.
import io.fabric8.openshift.client.OpenShiftClient;
import io.fabric8.openshift.client.dsl.OpenShiftConfigAPIGroupDSL;
import io.fabric8.openshift.client.dsl.OpenShiftOperatorAPIGroupDSL;
import io.fabric8.openshift.client.dsl.OpenShiftMonitoringAPIGroupDSL;
import io.fabric8.openshift.client.dsl.OpenShiftConsoleAPIGroupDSL;
import io.fabric8.openshift.client.dsl.MachineConfigurationAPIGroupDSL;
import io.fabric8.openshift.client.dsl.OpenShiftMachineAPIGroupDSL;
import io.fabric8.openshift.client.dsl.OpenShiftClusterAutoscalingAPIGroupDSL;
import io.fabric8.openshift.client.dsl.OpenShiftHiveAPIGroupDSL;
import io.fabric8.openshift.client.dsl.OpenShiftOperatorHubAPIGroupDSL;
import io.fabric8.openshift.client.dsl.OpenShiftQuotaAPIGroupDSL;
import io.fabric8.openshift.client.dsl.OpenShiftTunedAPIGroupDSL;
import io.fabric8.openshift.client.dsl.OpenShiftWhereaboutsAPIGroupDSL;
import io.fabric8.openshift.client.dsl.OpenShiftStorageVersionMigratorApiGroupDSL;

The Config API group provides access to cluster-wide configuration resources.
// Access the config API group (cluster-scoped config.openshift.io resources)
OpenShiftConfigAPIGroupDSL config = client.config();
// Get cluster version information; the singleton resource is always named "version"
ClusterVersion clusterVersion = config.clusterVersions()
.withName("version")
.get();
// NOTE(review): status/desired may be null while a cluster is still initializing — confirm before chaining
String currentVersion = clusterVersion.getStatus().getDesired().getVersion();
List<ClusterOperatorStatusCondition> conditions = clusterVersion.getStatus().getConditions();
// Get cluster operator status: report the version entry labelled "operator" for each one
ClusterOperatorList operators = config.clusterOperators().list();
for (ClusterOperator operator : operators.getItems()) {
String operatorName = operator.getMetadata().getName();
String version = operator.getStatus().getVersions().stream()
.filter(v -> "operator".equals(v.getName()))
.map(OperandVersion::getVersion)
.findFirst()
.orElse("unknown");
System.out.println("Operator: " + operatorName + " v" + version);
}
// DNS configuration — the cluster-wide singleton is named "cluster"
DNS dnsConfig = config.dnses()
.withName("cluster")
.get();
String baseDomain = dnsConfig.getSpec().getBaseDomain();
DNSSpec.DNSType dnsType = dnsConfig.getSpec().getDnsType();
// Ingress configuration (default routing domain for the cluster)
Ingress ingressConfig = config.ingresses()
.withName("cluster")
.get();
String defaultDomain = ingressConfig.getSpec().getDomain();
// Network configuration (service and pod network CIDRs)
Network networkConfig = config.networks()
.withName("cluster")
.get();
List<String> serviceCIDR = networkConfig.getSpec().getServiceNetwork();
List<ClusterNetwork> clusterNetworks = networkConfig.getSpec().getClusterNetwork();
// OAuth configuration (configured identity providers)
OAuth oauthConfig = config.oauths()
.withName("cluster")
.get();
List<IdentityProvider> identityProviders = oauthConfig.getSpec().getIdentityProviders();
// Infrastructure configuration (platform, API endpoint, infra name)
Infrastructure infrastructure = config.infrastructures()
.withName("cluster")
.get();
String platformType = infrastructure.getStatus().getPlatform();
String apiServerURL = infrastructure.getStatus().getApiServerURL();
String infrastructureName = infrastructure.getStatus().getInfrastructureName();
// Feature gates configuration
FeatureGate featureGates = config.featureGates()
.withName("cluster")
.get();
FeatureGateSelection featureSet = featureGates.getSpec().getFeatureSet();
// Image configuration
Image imageConfig = config.images()
.withName("cluster")
.get();
// NOTE(review): get(0) throws IndexOutOfBoundsException when no external registry hostname is configured — guard in real code
String externalRegistryHostname = imageConfig.getStatus().getExternalRegistryHostnames().get(0);
String internalRegistryHostname = imageConfig.getStatus().getInternalRegistryHostname();

The Operator API group manages OpenShift operators and their configurations.
// Access the operator API group (operator.openshift.io resources)
OpenShiftOperatorAPIGroupDSL operator = client.operator();
// DNS operator configuration — the operand singleton is named "default"
DNS dnsOperator = operator.dnses()
.withName("default")
.get();
DNSSpec dnsSpec = dnsOperator.getSpec();
DNSStatus dnsStatus = dnsOperator.getStatus();
// Ingress controller configuration — IngressControllers are namespaced under the ingress operator
IngressControllerList ingressControllers = operator.ingressControllers()
.inNamespace("openshift-ingress-operator")
.list();
for (IngressController controller : ingressControllers.getItems()) {
String controllerName = controller.getMetadata().getName();
String domain = controller.getSpec().getDomain();
Integer replicas = controller.getSpec().getReplicas();
System.out.println("Ingress Controller: " + controllerName +
" (domain: " + domain + ", replicas: " + replicas + ")");
}
// Network operator
Network networkOperator = operator.networks()
.withName("cluster")
.get();
// Console operator — status carries the public console URL
Console consoleOperator = operator.consoles()
.withName("cluster")
.get();
String publicURL = consoleOperator.getStatus().getConsoleURL();
// Storage operator configuration
Storage storageOperator = operator.storages()
.withName("cluster")
.get();
String managementState = storageOperator.getSpec().getManagementState();
// Authentication operator
Authentication authOperator = operator.authentications()
.withName("cluster")
.get();
// Cloud credential operator
CloudCredential cloudCredOperator = operator.cloudCredentials()
.withName("cluster")
.get();
String credentialsMode = cloudCredOperator.getSpec().getCredentialsMode();
// Etcd operator
Etcd etcdOperator = operator.etcds()
.withName("cluster")
.get();
String etcdManagementState = etcdOperator.getSpec().getManagementState();
// Kubernetes API server operator
KubeAPIServer kubeAPIServer = operator.kubeAPIServers()
.withName("cluster")
.get();
// Kubernetes controller manager operator
KubeControllerManager kubeControllerManager = operator.kubeControllerManagers()
.withName("cluster")
.get();
// Kubernetes scheduler operator
KubeScheduler kubeScheduler = operator.kubeSchedulers()
.withName("cluster")
.get();
// OpenShift API server operator
OpenShiftAPIServer openshiftAPIServer = operator.openShiftAPIServers()
.withName("cluster")
.get();
// OpenShift controller manager operator
OpenShiftControllerManager openshiftControllerManager = operator.openShiftControllerManagers()
.withName("cluster")
.get();

The Monitoring API group provides access to Prometheus, Alertmanager, and related monitoring resources.
// Access the monitoring API group (Prometheus Operator CRDs: monitoring.coreos.com)
OpenShiftMonitoringAPIGroupDSL monitoring = client.monitoring();
// List Prometheus instances in the platform monitoring namespace
PrometheusList prometheusList = monitoring.prometheuses()
.inNamespace("openshift-monitoring")
.list();
// Get a specific Prometheus instance (the platform one is named "k8s")
Prometheus prometheus = monitoring.prometheuses()
.inNamespace("openshift-monitoring")
.withName("k8s")
.get();
PrometheusSpec prometheusSpec = prometheus.getSpec();
String retention = prometheusSpec.getRetention();
String replicas = prometheusSpec.getReplicas().toString();
// Create a custom Prometheus instance via the fluent builder
Prometheus customPrometheus = new PrometheusBuilder()
.withNewMetadata()
.withName("custom-prometheus")
.withNamespace("monitoring-namespace")
.endMetadata()
.withNewSpec()
.withReplicas(2)
.withRetention("30d")
// NOTE(review): for a plain String field the builder method is usually withServiceAccountName — verify this variant exists
.withNewServiceAccountName("prometheus")
.withServiceMonitorSelector(new LabelSelectorBuilder()
.addToMatchLabels("team", "backend")
.build())
.endSpec()
.build();
monitoring.prometheuses()
.inNamespace("monitoring-namespace")
.create(customPrometheus);
// List service monitors
ServiceMonitorList serviceMonitors = monitoring.serviceMonitors()
.inNamespace("my-project")
.list();
// Create a service monitor: scrape the "metrics" port of pods whose service matches app=my-app
ServiceMonitor serviceMonitor = new ServiceMonitorBuilder()
.withNewMetadata()
.withName("my-app-monitor")
.withNamespace("my-project")
.addToLabels("app", "my-app")
.endMetadata()
.withNewSpec()
.withSelector(new LabelSelectorBuilder()
.addToMatchLabels("app", "my-app")
.build())
.addNewEndpoint()
.withPort("metrics")
.withPath("/metrics")
.withInterval("30s")
.endEndpoint()
.endSpec()
.build();
monitoring.serviceMonitors()
.inNamespace("my-project")
.create(serviceMonitor);
// Get Alertmanager configuration
AlertmanagerList alertmanagers = monitoring.alertmanagers()
.inNamespace("openshift-monitoring")
.list();
// Create a Prometheus alerting rule; labels select which Prometheus picks it up
PrometheusRule rule = new PrometheusRuleBuilder()
.withNewMetadata()
.withName("my-app-rules")
.withNamespace("my-project")
.addToLabels("prometheus", "kube-prometheus")
.addToLabels("role", "alert-rules")
.endMetadata()
.withNewSpec()
.addNewGroup()
.withName("my-app.rules")
.addNewRule()
.withAlert("HighErrorRate")
.withExpr("rate(http_requests_total{status=~\"5..\"}[5m]) > 0.1")
.withFor("5m")
.addToLabels("severity", "warning")
.addToAnnotations("summary", "High error rate detected")
.addToAnnotations("description", "Error rate is {{ $value }} errors per second")
.endRule()
.endGroup()
.endSpec()
.build();
monitoring.prometheusRules()
.inNamespace("my-project")
.create(rule);
// Create a pod monitor (scrapes pods directly, without a Service)
PodMonitor podMonitor = new PodMonitorBuilder()
.withNewMetadata()
.withName("my-pod-monitor")
.withNamespace("my-project")
.endMetadata()
.withNewSpec()
.withSelector(new LabelSelectorBuilder()
.addToMatchLabels("app", "my-app")
.build())
.addNewPodMetricsEndpoint()
.withPort("metrics")
.withPath("/metrics")
.endPodMetricsEndpoint()
.endSpec()
.build();
monitoring.podMonitors()
.inNamespace("my-project")
.create(podMonitor);
// Create a probe for blackbox monitoring of an external endpoint
Probe probe = new ProbeBuilder()
.withNewMetadata()
.withName("external-service-probe")
.withNamespace("my-project")
.endMetadata()
.withNewSpec()
.withJobName("external-probe")
.withNewProberSpec()
.withUrl("https://external-service.example.com/health")
.endProberSpec()
.addNewTarget()
// NOTE(review): the static-target nesting is usually withNewStaticConfig()/endStaticConfig — verify addToStaticConfig against the builder API
.addToStaticConfig()
.withStatic(Arrays.asList("https://api.example.com"))
.endStaticConfig()
.endTarget()
.endSpec()
.build();
monitoring.probes()
.inNamespace("my-project")
.create(probe);

The Machine Configuration API group manages node-level configurations.
// Access the machine configuration API group (machineconfiguration.openshift.io)
MachineConfigurationAPIGroupDSL machineConfig = client.machineConfigurations();
// List machine configs (cluster-scoped)
MachineConfigList configs = machineConfig.machineConfigs().list();
// Get a specific rendered machine config
MachineConfig config = machineConfig.machineConfigs()
.withName("00-worker")
.get();
// Create a custom machine config; the role label targets the worker pool,
// and the high "99-" prefix makes it apply after the platform configs
MachineConfig customConfig = new MachineConfigBuilder()
.withNewMetadata()
.withName("99-custom-config")
.addToLabels("machineconfiguration.openshift.io/role", "worker")
.endMetadata()
.withNewSpec()
.withNewConfig()
.withIgnition(new IgnitionBuilder()
.withVersion("3.2.0")
.build())
.withStorage(new StorageBuilder()
.addNewFile()
.withPath("/etc/custom-config")
.withNewContents()
// Ignition file contents are delivered as a base64 data URL
// NOTE(review): getBytes() uses the platform charset — prefer getBytes(StandardCharsets.UTF_8) in real code
.withSource("data:text/plain;base64," +
Base64.getEncoder().encodeToString("custom config content".getBytes()))
.endContents()
// 0644 is an octal literal: rw-r--r--
.withMode(0644)
.endFile()
.build())
.endConfig()
.endSpec()
.build();
machineConfig.machineConfigs().create(customConfig);
// List machine config pools
MachineConfigPoolList pools = machineConfig.machineConfigPools().list();
// Get worker pool status (rollout progress of the current rendered config)
MachineConfigPool workerPool = machineConfig.machineConfigPools()
.withName("worker")
.get();
Integer readyMachineCount = workerPool.getStatus().getReadyMachineCount();
Integer machineCount = workerPool.getStatus().getMachineCount();
String currentConfiguration = workerPool.getStatus().getConfiguration().getName();
// Pause machine config pool updates; edit() applies the lambda's result server-side
MachineConfigPool pausedPool = machineConfig.machineConfigPools()
.withName("worker")
.edit(pool -> new MachineConfigPoolBuilder(pool)
.editSpec()
.withPaused(true)
.endSpec()
.build());

The Console API group manages OpenShift web console configurations.
// Access the console API group (console.openshift.io customizations)
OpenShiftConsoleAPIGroupDSL console = client.console();
// Console link: adds a custom entry to the web console navigation
ConsoleLink customLink = new ConsoleLinkBuilder()
.withNewMetadata()
.withName("custom-documentation")
.endMetadata()
.withNewSpec()
// NOTE(review): verify whether location is an enum constant or a plain String ("HelpMenu") in the client version used
.withLocation(ConsoleLinkLocation.HelpMenu)
.withText("Custom Documentation")
.withHref("https://docs.example.com")
.endSpec()
.build();
console.consoleLinks().create(customLink);
// Console notification: a banner shown to all console users
ConsoleNotification notification = new ConsoleNotificationBuilder()
.withNewMetadata()
.withName("maintenance-notice")
.endMetadata()
.withNewSpec()
.withText("Scheduled maintenance window: Saturday 2AM-4AM UTC")
.withLocation(ConsoleNotificationLocation.BannerTop)
.withColor("#ff0000")
.endSpec()
.build();
console.consoleNotifications().create(notification);

The Cluster Autoscaling API group manages automatic cluster scaling.
// Access the cluster autoscaling API group (autoscaling.openshift.io)
OpenShiftClusterAutoscalingAPIGroupDSL clusterAutoscaling = client.clusterAutoscaling();
// Create the cluster autoscaler; the singleton must be named "default"
ClusterAutoscaler autoscaler = new ClusterAutoscalerBuilder()
.withNewMetadata()
.withName("default")
.endMetadata()
.withNewSpec()
.withScaleDownDelayAfterAdd("10m")
.withScaleDownDelayAfterDelete("10s")
.withScaleDownDelayAfterFailure("3m")
.withScaleDownUnneededTime("10m")
.withSkipNodesWithLocalStorage(true)
.withSkipNodesWithSystemPods(true)
.endSpec()
.build();
clusterAutoscaling.clusterAutoscalers().create(autoscaler);
// Create a machine autoscaler bound to a MachineSet; replica bounds drive scaling
MachineAutoscaler machineAutoscaler = new MachineAutoscalerBuilder()
.withNewMetadata()
.withName("worker-autoscaler")
.withNamespace("openshift-machine-api")
.endMetadata()
.withNewSpec()
.withMinReplicas(1)
.withMaxReplicas(12)
.withNewScaleTargetRef()
.withApiVersion("machine.openshift.io/v1beta1")
.withKind("MachineSet")
.withName("worker-machineset")
.endScaleTargetRef()
.endSpec()
.build();
clusterAutoscaling.machineAutoscalers()
.inNamespace("openshift-machine-api")
.create(machineAutoscaler);

import io.fabric8.openshift.client.OpenShiftClient;
import io.fabric8.openshift.client.dsl.*;
/**
 * Audits cluster-level OpenShift configuration and installs per-application
 * monitoring resources through the specialized API groups (config, monitoring)
 * of an {@code OpenShiftClient}. All audit output goes to stdout.
 */
public class ClusterConfigurationManager {
    private final OpenShiftClient client;

    public ClusterConfigurationManager(OpenShiftClient client) {
        this.client = client;
    }

    /** Runs every audit section in order and prints the results. */
    public void auditClusterConfiguration() {
        System.out.println("=== Cluster Configuration Audit ===\n");
        auditClusterVersion();
        auditOperatorStatus();
        auditInfrastructure();
        auditNetworkConfiguration();
        auditMonitoringSetup();
    }

    /** Prints the current cluster version, update channel, and available updates. */
    private void auditClusterVersion() {
        System.out.println("Cluster Version:");
        ClusterVersion version = client.config().clusterVersions()
            .withName("version")
            .get();
        if (version != null) {
            System.out.println(" Current: " + version.getStatus().getDesired().getVersion());
            System.out.println(" Channel: " + version.getSpec().getChannel());
            List<Update> availableUpdates = version.getStatus().getAvailableUpdates();
            if (availableUpdates != null && !availableUpdates.isEmpty()) {
                System.out.println(" Available Updates:");
                availableUpdates.forEach(update ->
                    System.out.println(" " + update.getVersion()));
            }
        }
        System.out.println();
    }

    /** True when {@code operator} reports a condition of {@code type} with status "True". */
    private static boolean hasCondition(ClusterOperator operator, String type) {
        return operator.getStatus().getConditions().stream()
            .anyMatch(condition ->
                type.equals(condition.getType()) &&
                "True".equals(condition.getStatus()));
    }

    /** Prints availability and progressing state for every cluster operator. */
    private void auditOperatorStatus() {
        System.out.println("Operator Status:");
        ClusterOperatorList operators = client.config().clusterOperators().list();
        for (ClusterOperator operator : operators.getItems()) {
            String name = operator.getMetadata().getName();
            // The two condition checks shared identical stream logic; extracted to hasCondition().
            boolean available = hasCondition(operator, "Available");
            boolean progressing = hasCondition(operator, "Progressing");
            System.out.println(" " + name + ": " +
                (available ? "Available" : "Not Available") +
                (progressing ? " (Progressing)" : ""));
        }
        System.out.println();
    }

    /** Prints the platform type, cloud region (AWS clusters only), and API server URL. */
    private void auditInfrastructure() {
        System.out.println("Infrastructure:");
        Infrastructure infra = client.config().infrastructures()
            .withName("cluster")
            .get();
        if (infra != null) {
            System.out.println(" Platform: " + infra.getStatus().getPlatform());
            // Bug fix: the original chained getPlatformStatus().getAws().getRegion()
            // unconditionally, which throws NullPointerException on any non-AWS
            // platform (GCP, Azure, bare metal, ...). Only print the region when
            // the AWS platform status is actually present.
            if (infra.getStatus().getPlatformStatus() != null
                    && infra.getStatus().getPlatformStatus().getAws() != null) {
                System.out.println(" Region: "
                    + infra.getStatus().getPlatformStatus().getAws().getRegion());
            }
            System.out.println(" API Server: " + infra.getStatus().getApiServerURL());
        }
        System.out.println();
    }

    /** Prints the service network CIDRs and each cluster network with its host prefix. */
    private void auditNetworkConfiguration() {
        System.out.println("Network Configuration:");
        Network network = client.config().networks()
            .withName("cluster")
            .get();
        if (network != null) {
            System.out.println(" Service Network: " +
                String.join(", ", network.getSpec().getServiceNetwork()));
            System.out.println(" Cluster Networks:");
            network.getSpec().getClusterNetwork().forEach(cn ->
                System.out.println(" " + cn.getCidr() + " (host prefix: " +
                    cn.getHostPrefix() + ")"));
        }
        System.out.println();
    }

    /** Counts Prometheus and Alertmanager instances in openshift-monitoring. */
    private void auditMonitoringSetup() {
        System.out.println("Monitoring Setup:");
        try {
            PrometheusList prometheuses = client.monitoring().prometheuses()
                .inNamespace("openshift-monitoring")
                .list();
            System.out.println(" Prometheus Instances: " + prometheuses.getItems().size());
            AlertmanagerList alertmanagers = client.monitoring().alertmanagers()
                .inNamespace("openshift-monitoring")
                .list();
            System.out.println(" Alertmanager Instances: " + alertmanagers.getItems().size());
        } catch (Exception e) {
            // Best-effort audit: the monitoring API group may be absent or access forbidden.
            System.out.println(" Monitoring API not accessible: " + e.getMessage());
        }
        System.out.println();
    }

    /**
     * Installs a ServiceMonitor (30s scrape of the "metrics" port) and a
     * PrometheusRule with down/latency alerts for the given application.
     * Uses createOrReplace so repeated calls are idempotent.
     *
     * @param namespace namespace to create the resources in
     * @param appName   application name; used for resource names, label
     *                  selectors, and the Prometheus job label in alert exprs
     */
    public void setupApplicationMonitoring(String namespace, String appName) {
        // Create service monitor for application
        ServiceMonitor serviceMonitor = new ServiceMonitorBuilder()
            .withNewMetadata()
                .withName(appName + "-monitor")
                .withNamespace(namespace)
                .addToLabels("app", appName)
            .endMetadata()
            .withNewSpec()
                .withSelector(new LabelSelectorBuilder()
                    .addToMatchLabels("app", appName)
                    .build())
                .addNewEndpoint()
                    .withPort("metrics")
                    .withPath("/metrics")
                    .withInterval("30s")
                .endEndpoint()
            .endSpec()
            .build();
        client.monitoring().serviceMonitors()
            .inNamespace(namespace)
            .createOrReplace(serviceMonitor);
        // Create alerting rules: app down for 5m (critical), p95 latency > 1s for 10m (warning)
        PrometheusRule rules = new PrometheusRuleBuilder()
            .withNewMetadata()
                .withName(appName + "-rules")
                .withNamespace(namespace)
                .addToLabels("prometheus", "kube-prometheus")
                .addToLabels("role", "alert-rules")
            .endMetadata()
            .withNewSpec()
                .addNewGroup()
                    .withName(appName + ".rules")
                    .addNewRule()
                        .withAlert("ApplicationDown")
                        .withExpr("up{job=\"" + appName + "\"} == 0")
                        .withFor("5m")
                        .addToLabels("severity", "critical")
                        .addToAnnotations("summary", appName + " is down")
                        .addToAnnotations("description",
                            "Application " + appName + " has been down for more than 5 minutes")
                    .endRule()
                    .addNewRule()
                        .withAlert("HighResponseTime")
                        .withExpr("histogram_quantile(0.95, rate(http_request_duration_seconds_bucket{job=\"" +
                            appName + "\"}[5m])) > 1")
                        .withFor("10m")
                        .addToLabels("severity", "warning")
                        .addToAnnotations("summary", "High response time for " + appName)
                        .addToAnnotations("description",
                            "95th percentile response time is {{ $value }} seconds")
                    .endRule()
                .endGroup()
            .endSpec()
            .build();
        client.monitoring().prometheusRules()
            .inNamespace(namespace)
            .createOrReplace(rules);
        System.out.println("Monitoring setup completed for: " + appName);
    }
}

The API group operations work with many specialized types. Here are key examples:
// API sketch (signatures only, not compilable): ClusterVersion for cluster version information
public class ClusterVersion implements HasMetadata {
public ObjectMeta getMetadata();
public ClusterVersionSpec getSpec();
public ClusterVersionStatus getStatus();
}
// API sketch: ClusterOperator for operator status
public class ClusterOperator implements HasMetadata {
public ObjectMeta getMetadata();
public ClusterOperatorSpec getSpec();
public ClusterOperatorStatus getStatus();
}
// Prometheus instance configuration
// API sketch (signatures only, not compilable): Prometheus instance configuration
public class Prometheus implements HasMetadata {
public ObjectMeta getMetadata();
public PrometheusSpec getSpec();
public PrometheusStatus getStatus();
}
// API sketch: ServiceMonitor for service monitoring (spec only; no status accessor shown)
public class ServiceMonitor implements HasMetadata {
public ObjectMeta getMetadata();
public ServiceMonitorSpec getSpec();
}
// MachineConfig for node configuration
// API sketch (signatures only, not compilable): MachineConfig for node configuration
public class MachineConfig implements HasMetadata {
public ObjectMeta getMetadata();
public MachineConfigSpec getSpec();
}
// API sketch: MachineConfigPool for managing machine groups
public class MachineConfigPool implements HasMetadata {
public ObjectMeta getMetadata();
public MachineConfigPoolSpec getSpec();
public MachineConfigPoolStatus getStatus();
}

Install with Tessl CLI
npx tessl i tessl/maven-io-fabric8--kubernetes-client-project