diff --git a/.azure/templates/jobs/system-tests/feature_gates_regression_jobs.yaml b/.azure/templates/jobs/system-tests/feature_gates_regression_jobs.yaml index bb323b25be0..940a39ce1d4 100644 --- a/.azure/templates/jobs/system-tests/feature_gates_regression_jobs.yaml +++ b/.azure/templates/jobs/system-tests/feature_gates_regression_jobs.yaml @@ -5,7 +5,7 @@ jobs: display_name: 'feature-gates-regression-bundle I. - kafka + oauth' profile: 'azp_kafka_oauth' cluster_operator_install_type: 'bundle' - strimzi_feature_gates: '-UnidirectionalTopicOperator,-UseKRaft' + strimzi_feature_gates: '-UnidirectionalTopicOperator,-UseKRaft,+UseServerSideApply' strimzi_use_node_pools_in_tests: "false" timeout: 360 releaseVersion: '${{ parameters.releaseVersion }}' @@ -17,7 +17,7 @@ jobs: display_name: 'feature-gates-regression-bundle II. - security' profile: 'azp_security' cluster_operator_install_type: 'bundle' - strimzi_feature_gates: '-UnidirectionalTopicOperator,-UseKRaft' + strimzi_feature_gates: '-UnidirectionalTopicOperator,-UseKRaft,+UseServerSideApply' strimzi_use_node_pools_in_tests: "false" timeout: 360 releaseVersion: '${{ parameters.releaseVersion }}' @@ -29,7 +29,7 @@ jobs: display_name: 'feature-gates-regression-bundle III. - dynconfig + tracing + watcher' profile: 'azp_dynconfig_listeners_tracing_watcher' cluster_operator_install_type: 'bundle' - strimzi_feature_gates: '-UnidirectionalTopicOperator,-UseKRaft' + strimzi_feature_gates: '-UnidirectionalTopicOperator,-UseKRaft,+UseServerSideApply' strimzi_use_node_pools_in_tests: "false" timeout: 360 releaseVersion: '${{ parameters.releaseVersion }}' @@ -41,7 +41,7 @@ jobs: display_name: 'feature-gates-regression-bundle IV. 
- operators' profile: 'azp_operators' cluster_operator_install_type: 'bundle' - strimzi_feature_gates: '-UnidirectionalTopicOperator,-UseKRaft' + strimzi_feature_gates: '-UnidirectionalTopicOperator,-UseKRaft,+UseServerSideApply' strimzi_use_node_pools_in_tests: "false" timeout: 360 releaseVersion: '${{ parameters.releaseVersion }}' @@ -53,7 +53,7 @@ jobs: display_name: 'feature-gates-regression-bundle V. - rollingupdate' profile: 'azp_rolling_update_bridge' cluster_operator_install_type: 'bundle' - strimzi_feature_gates: '-UnidirectionalTopicOperator,-UseKRaft' + strimzi_feature_gates: '-UnidirectionalTopicOperator,-UseKRaft,+UseServerSideApply' strimzi_use_node_pools_in_tests: "false" timeout: 360 releaseVersion: '${{ parameters.releaseVersion }}' @@ -65,7 +65,7 @@ jobs: display_name: 'feature-gates-regression-bundle VI. - connect + mirrormaker' profile: 'azp_connect_mirrormaker' cluster_operator_install_type: 'bundle' - strimzi_feature_gates: '-UnidirectionalTopicOperator,-UseKRaft' + strimzi_feature_gates: '-UnidirectionalTopicOperator,-UseKRaft,+UseServerSideApply' strimzi_use_node_pools_in_tests: "false" timeout: 360 releaseVersion: '${{ parameters.releaseVersion }}' @@ -77,7 +77,7 @@ jobs: display_name: 'feature-gates-regression-bundle VII. 
- remaining system tests' profile: 'azp_remaining' cluster_operator_install_type: 'bundle' - strimzi_feature_gates: '-UnidirectionalTopicOperator,-UseKRaft' + strimzi_feature_gates: '-UnidirectionalTopicOperator,-UseKRaft,+UseServerSideApply' strimzi_use_node_pools_in_tests: "false" timeout: 360 releaseVersion: '${{ parameters.releaseVersion }}' diff --git a/.azure/templates/jobs/system-tests/feature_gates_regression_namespace_rbac_jobs.yaml b/.azure/templates/jobs/system-tests/feature_gates_regression_namespace_rbac_jobs.yaml index 2c8f9228338..34b4ec1d77d 100644 --- a/.azure/templates/jobs/system-tests/feature_gates_regression_namespace_rbac_jobs.yaml +++ b/.azure/templates/jobs/system-tests/feature_gates_regression_namespace_rbac_jobs.yaml @@ -8,7 +8,7 @@ jobs: cluster_operator_install_type: 'bundle' timeout: 360 strimzi_rbac_scope: NAMESPACE - strimzi_feature_gates: '-UnidirectionalTopicOperator,-UseKRaft' + strimzi_feature_gates: '-UnidirectionalTopicOperator,-UseKRaft,+UseServerSideApply' strimzi_use_node_pools_in_tests: "false" releaseVersion: '${{ parameters.releaseVersion }}' kafkaVersion: '${{ parameters.kafkaVersion }}' @@ -22,7 +22,7 @@ jobs: cluster_operator_install_type: 'bundle' timeout: 360 strimzi_rbac_scope: NAMESPACE - strimzi_feature_gates: '-UnidirectionalTopicOperator,-UseKRaft' + strimzi_feature_gates: '-UnidirectionalTopicOperator,-UseKRaft,+UseServerSideApply' strimzi_use_node_pools_in_tests: "false" releaseVersion: '${{ parameters.releaseVersion }}' kafkaVersion: '${{ parameters.kafkaVersion }}' @@ -36,7 +36,7 @@ jobs: cluster_operator_install_type: 'bundle' timeout: 360 strimzi_rbac_scope: NAMESPACE - strimzi_feature_gates: '-UnidirectionalTopicOperator,-UseKRaft' + strimzi_feature_gates: '-UnidirectionalTopicOperator,-UseKRaft,+UseServerSideApply' strimzi_use_node_pools_in_tests: "false" releaseVersion: '${{ parameters.releaseVersion }}' kafkaVersion: '${{ parameters.kafkaVersion }}' @@ -50,7 +50,7 @@ jobs: cluster_operator_install_type: 
'bundle' timeout: 360 strimzi_rbac_scope: NAMESPACE - strimzi_feature_gates: '-UnidirectionalTopicOperator,-UseKRaft' + strimzi_feature_gates: '-UnidirectionalTopicOperator,-UseKRaft,+UseServerSideApply' strimzi_use_node_pools_in_tests: "false" releaseVersion: '${{ parameters.releaseVersion }}' kafkaVersion: '${{ parameters.kafkaVersion }}' @@ -64,7 +64,7 @@ jobs: cluster_operator_install_type: 'bundle' timeout: 360 strimzi_rbac_scope: NAMESPACE - strimzi_feature_gates: '-UnidirectionalTopicOperator,-UseKRaft' + strimzi_feature_gates: '-UnidirectionalTopicOperator,-UseKRaft,+UseServerSideApply' strimzi_use_node_pools_in_tests: "false" releaseVersion: '${{ parameters.releaseVersion }}' kafkaVersion: '${{ parameters.kafkaVersion }}' @@ -78,7 +78,7 @@ jobs: cluster_operator_install_type: 'bundle' timeout: 360 strimzi_rbac_scope: NAMESPACE - strimzi_feature_gates: '-UnidirectionalTopicOperator,-UseKRaft' + strimzi_feature_gates: '-UnidirectionalTopicOperator,-UseKRaft,+UseServerSideApply' strimzi_use_node_pools_in_tests: "false" releaseVersion: '${{ parameters.releaseVersion }}' kafkaVersion: '${{ parameters.kafkaVersion }}' @@ -92,7 +92,7 @@ jobs: cluster_operator_install_type: 'bundle' timeout: 360 strimzi_rbac_scope: NAMESPACE - strimzi_feature_gates: '-UnidirectionalTopicOperator,-UseKRaft' + strimzi_feature_gates: '-UnidirectionalTopicOperator,-UseKRaft,+UseServerSideApply' strimzi_use_node_pools_in_tests: "false" releaseVersion: '${{ parameters.releaseVersion }}' kafkaVersion: '${{ parameters.kafkaVersion }}' diff --git a/CHANGELOG.md b/CHANGELOG.md index 95b04b818cc..1cc44643b74 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,6 +6,8 @@ * The `KafkaNodePools` feature gate moves to GA stage and is permanently enabled without the possibility to disable it. To use the Kafka Node Pool resources, you still need to use the `strimzi.io/node-pools: enabled` annotation on the `Kafka` custom resources. 
* Added support for configuring the `externalIPs` field in node port type services. +* Added support for [Server-Side Apply](https://kubernetes.io/docs/reference/using-api/server-side-apply/) via `UseServerSideApply` feature gate. The feature is disabled by default. + If needed, `UseServerSideApply` can be enabled in the feature gates configuration in the Cluster Operator. ## 0.40.0 diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/FeatureGates.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/FeatureGates.java index 693c70eb6cf..ac0c100b537 100644 --- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/FeatureGates.java +++ b/cluster-operator/src/main/java/io/strimzi/operator/cluster/FeatureGates.java @@ -4,11 +4,12 @@ */ package io.strimzi.operator.cluster; -import io.strimzi.operator.common.InvalidConfigurationException; +import io.strimzi.operator.common.config.FeatureGate; +import io.strimzi.operator.common.config.FeatureGatesParser; import java.util.List; - -import static java.util.Arrays.asList; +import java.util.Map; +import java.util.stream.Collectors; /** * Class for handling the configuration of feature gates @@ -18,10 +19,13 @@ public class FeatureGates { private static final String USE_KRAFT = "UseKRaft"; private static final String UNIDIRECTIONAL_TOPIC_OPERATOR = "UnidirectionalTopicOperator"; + private static final String USE_SERVER_SIDE_APPLY = "UseServerSideApply"; - // When adding new feature gates, do not forget to add them to allFeatureGates() and toString() methods - private final FeatureGate useKRaft = new FeatureGate(USE_KRAFT, true); - private final FeatureGate unidirectionalTopicOperator = new FeatureGate(UNIDIRECTIONAL_TOPIC_OPERATOR, true); + private final Map featureGates = Map.ofEntries( + Map.entry(USE_KRAFT, new FeatureGate(USE_KRAFT, true)), + Map.entry(UNIDIRECTIONAL_TOPIC_OPERATOR, new FeatureGate(UNIDIRECTIONAL_TOPIC_OPERATOR, true)), + Map.entry(USE_SERVER_SIDE_APPLY, new 
FeatureGate(USE_SERVER_SIDE_APPLY, false)) + ); /** * Constructs the feature gates configuration. @@ -29,71 +33,28 @@ public class FeatureGates { * @param featureGateConfig String with comma separated list of enabled or disabled feature gates */ public FeatureGates(String featureGateConfig) { - if (featureGateConfig != null && !featureGateConfig.trim().isEmpty()) { - List featureGates; - - if (featureGateConfig.matches("(\\s*[+-][a-zA-Z0-9]+\\s*,)*\\s*[+-][a-zA-Z0-9]+\\s*")) { - featureGates = asList(featureGateConfig.trim().split("\\s*,+\\s*")); - } else { - throw new InvalidConfigurationException(featureGateConfig + " is not a valid feature gate configuration"); - } - - for (String featureGate : featureGates) { - boolean value = '+' == featureGate.charAt(0); - featureGate = featureGate.substring(1); - - switch (featureGate) { - case USE_KRAFT: - setValueOnlyOnce(useKRaft, value); - break; - case UNIDIRECTIONAL_TOPIC_OPERATOR: - setValueOnlyOnce(unidirectionalTopicOperator, value); - break; - default: - throw new InvalidConfigurationException("Unknown feature gate " + featureGate + " found in the configuration"); - } - } - - validateInterDependencies(); - } - } - - /** - * Validates any dependencies between various feature gates. When the dependencies are not satisfied, - * InvalidConfigurationException is thrown. - */ - private void validateInterDependencies() { - // There are currently no interdependencies between different feature gates. - // But we keep this method as these might happen again in the future. - } - - /** - * Sets the feature gate value if it was not set yet. But if it is already set, then it throws an exception. This - * helps to ensure that each feature gate is configured always only once. 
- * - * @param gate Feature gate which is being configured - * @param value Value which should be set - */ - private void setValueOnlyOnce(FeatureGate gate, boolean value) { - if (gate.isSet()) { - throw new InvalidConfigurationException("Feature gate " + gate.getName() + " is configured multiple times"); - } - - gate.setValue(value); + new FeatureGatesParser(featureGateConfig).applyFor(featureGates); } /** * @return Returns true when the UseKRaft feature gate is enabled */ public boolean useKRaftEnabled() { - return useKRaft.isEnabled(); + return featureGates.get(USE_KRAFT).isEnabled(); } /** * @return Returns true when the UnidirectionalTopicOperator feature gate is enabled */ public boolean unidirectionalTopicOperatorEnabled() { - return unidirectionalTopicOperator.isEnabled(); + return featureGates.get(UNIDIRECTIONAL_TOPIC_OPERATOR).isEnabled(); + } + + /** + * @return Returns true when the UseServerSideApply feature gate is enabled + */ + public boolean useServerSideApply() { + return featureGates.get(USE_SERVER_SIDE_APPLY).isEnabled(); } /** @@ -102,74 +63,15 @@ public boolean unidirectionalTopicOperatorEnabled() { * @return List of all Feature Gates */ /*test*/ List allFeatureGates() { - return List.of( - useKRaft, - unidirectionalTopicOperator - ); + return featureGates.values().stream().toList(); } @Override public String toString() { - return "FeatureGates(" + - "UseKRaft=" + useKRaft.isEnabled() + "," + - "UnidirectionalTopicOperator=" + unidirectionalTopicOperator.isEnabled() + - ")"; - } - - /** - * Feature gate class represents individual feature fate - */ - static class FeatureGate { - private final String name; - private final boolean defaultValue; - private Boolean value = null; - - /** - * Feature fate constructor - * - * @param name Name of the feature gate - * @param defaultValue Default value of the feature gate - */ - FeatureGate(String name, boolean defaultValue) { - this.name = name; - this.defaultValue = defaultValue; - } - - /** - * 
@return The name of the feature gate - */ - public String getName() { - return name; - } - - /** - * @return Returns true if the value for this feature gate is already set or false if it is still null - */ - public boolean isSet() { - return value != null; - } - - /** - * Sets the value of the feature gate - * - * @param value Value of the feature gate - */ - public void setValue(boolean value) { - this.value = value; - } - - /** - * @return True if the feature gate is enabled. False otherwise. - */ - public boolean isEnabled() { - return value == null ? defaultValue : value; - } - - /** - * @return Returns True if this feature gate is enabled by default. False otherwise. - */ - public boolean isEnabledByDefault() { - return defaultValue; - } + String featureGatesValues = featureGates.entrySet() + .stream() + .map(featureGate -> featureGate.getKey() + "=" + featureGate.getValue().isEnabled()) + .collect(Collectors.joining(",")); + return "FeatureGates(%s)".formatted(featureGatesValues); } } diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/Main.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/Main.java index 511acc56852..83157a3dd2c 100644 --- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/Main.java +++ b/cluster-operator/src/main/java/io/strimzi/operator/cluster/Main.java @@ -152,7 +152,8 @@ static CompositeFuture deployClusterOperatorVerticles(Vertx vertx, KubernetesCli metricsProvider, pfa, config.getOperationTimeoutMs(), - config.getOperatorName() + config.getOperatorName(), + config.featureGates().useServerSideApply() ); // Initialize the PodSecurityProvider factory to provide the user configured provider diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/CertUtils.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/CertUtils.java index 25f07a50f3d..b9137c2a4d7 100644 --- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/CertUtils.java +++ 
b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/CertUtils.java @@ -176,6 +176,8 @@ public static CertAndKey keyStoreCertAndKey(Secret secret, String keyCertName) { * changed. This method is used to evaluate whether rolling update of existing brokers is needed when secrets with * certificates change. It separates changes for existing certificates with other changes to the Secret such as * added or removed certificates (scale-up or scale-down). + *

+ * Note: this method checks if secrets differ, not whether the secret is being created for the first time. * * @param current Existing secret * @param desired Desired secret * @@ -183,6 +185,10 @@ public static CertAndKey keyStoreCertAndKey(Secret secret, String keyCertName) { * @return True if there is a key which exists in the data sections of both secrets and which changed. */ public static boolean doExistingCertificatesDiffer(Secret current, Secret desired) { + if (current == null || desired == null) { + return false; + } + Map currentData = current.getData(); Map desiredData = desired.getData(); diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaCluster.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaCluster.java index c103e25e05a..91abe3ae64e 100644 --- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaCluster.java +++ b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/KafkaCluster.java @@ -1088,6 +1088,10 @@ private Map preparePodSetAnnotations(Storage storage) { controllerAnnotations.put(ANNO_STRIMZI_IO_KAFKA_VERSION, kafkaVersion.version()); controllerAnnotations.put(Annotations.ANNO_STRIMZI_IO_STORAGE, ModelUtils.encodeStorageToJson(storage)); + //Take ownership of the rolling update annotation (for SSA) and reset it + //as (potential) one-time roll was already run by reconcile loop run if it was needed. 
+ controllerAnnotations.put(Annotations.ANNO_STRIMZI_IO_MANUAL_ROLLING_UPDATE, "false"); + return controllerAnnotations; } diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/ZookeeperCluster.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/ZookeeperCluster.java index 92c45c28141..a5cef97ec03 100644 --- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/ZookeeperCluster.java +++ b/cluster-operator/src/main/java/io/strimzi/operator/cluster/model/ZookeeperCluster.java @@ -409,7 +409,12 @@ public StrimziPodSet generatePodSet(int replicas, ownerReference, templatePodSet, replicas, - Map.of(Annotations.ANNO_STRIMZI_IO_STORAGE, ModelUtils.encodeStorageToJson(storage)), + Map.of( + Annotations.ANNO_STRIMZI_IO_STORAGE, ModelUtils.encodeStorageToJson(storage), + //Take ownership of the rolling update annotation (for SSA) and reset it + //as (potential) one-time roll was already run by reconcile loop run if it was needed. + Annotations.ANNO_STRIMZI_IO_MANUAL_ROLLING_UPDATE, "false" + ), labels.strimziSelectorLabels(), podNum -> WorkloadUtils.createStatefulPod( reconciliation, diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/CruiseControlReconciler.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/CruiseControlReconciler.java index 1c8ba3b8eb1..28baa969650 100644 --- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/CruiseControlReconciler.java +++ b/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/CruiseControlReconciler.java @@ -231,13 +231,7 @@ protected Future certificatesSecret(Clock clock) { .reconcile(reconciliation, reconciliation.namespace(), CruiseControlResources.secretName(reconciliation.name()), cruiseControl.generateCertificatesSecret(reconciliation.namespace(), reconciliation.name(), clusterCa, Util.isMaintenanceTimeWindowsSatisfied(reconciliation, maintenanceWindows, 
clock.instant()))) .compose(patchResult -> { - if (patchResult instanceof ReconcileResult.Patched) { - // The secret is patched and some changes to the existing certificates actually occurred - existingCertsChanged = CertUtils.doExistingCertificatesDiffer(oldSecret, patchResult.resource()); - } else { - existingCertsChanged = false; - } - + existingCertsChanged = patchResult != null && CertUtils.doExistingCertificatesDiffer(oldSecret, patchResult.resourceOpt().orElse(null)); return Future.succeededFuture(); }); }); @@ -334,7 +328,11 @@ protected Future deployment(boolean isOpenShift, ImagePullPolicy imagePull return deploymentOperator .reconcile(reconciliation, reconciliation.namespace(), CruiseControlResources.componentName(reconciliation.name()), deployment) .compose(patchResult -> { - if (patchResult instanceof ReconcileResult.Noop) { + boolean isNoop = patchResult instanceof ReconcileResult.Noop; + boolean patchedUsingServerSideApply = patchResult instanceof ReconcileResult.Patched + && ((ReconcileResult.Patched) patchResult).isUsingServerSideApply(); + + if (isNoop || patchedUsingServerSideApply) { // Deployment needs ot be rolled because the certificate secret changed or older/expired cluster CA removed if (existingCertsChanged || clusterCa.certsRemoved()) { LOGGER.infoCr(reconciliation, "Rolling Cruise Control to update or remove certificates"); diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/EntityOperatorReconciler.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/EntityOperatorReconciler.java index fed391611a7..f1ee9d5d871 100644 --- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/EntityOperatorReconciler.java +++ b/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/EntityOperatorReconciler.java @@ -378,13 +378,7 @@ protected Future topicOperatorSecret(Clock clock) { .reconcile(reconciliation, reconciliation.namespace(), 
KafkaResources.entityTopicOperatorSecretName(reconciliation.name()), entityOperator.topicOperator().generateSecret(clusterCa, Util.isMaintenanceTimeWindowsSatisfied(reconciliation, maintenanceWindows, clock.instant()))) .compose(patchResult -> { - if (patchResult instanceof ReconcileResult.Patched) { - // The secret is patched and some changes to the existing certificates actually occurred - existingEntityTopicOperatorCertsChanged = CertUtils.doExistingCertificatesDiffer(oldSecret, patchResult.resource()); - } else { - existingEntityTopicOperatorCertsChanged = false; - } - + existingEntityTopicOperatorCertsChanged = patchResult != null && CertUtils.doExistingCertificatesDiffer(oldSecret, patchResult.resourceOpt().orElse(null)); return Future.succeededFuture(); }); }); @@ -411,13 +405,7 @@ protected Future userOperatorSecret(Clock clock) { .reconcile(reconciliation, reconciliation.namespace(), KafkaResources.entityUserOperatorSecretName(reconciliation.name()), entityOperator.userOperator().generateSecret(clusterCa, Util.isMaintenanceTimeWindowsSatisfied(reconciliation, maintenanceWindows, clock.instant()))) .compose(patchResult -> { - if (patchResult instanceof ReconcileResult.Patched) { - // The secret is patched and some changes to the existing certificates actually occurred - existingEntityUserOperatorCertsChanged = CertUtils.doExistingCertificatesDiffer(oldSecret, patchResult.resource()); - } else { - existingEntityUserOperatorCertsChanged = false; - } - + existingEntityUserOperatorCertsChanged = patchResult != null && CertUtils.doExistingCertificatesDiffer(oldSecret, patchResult.resourceOpt().orElse(null)); return Future.succeededFuture(); }); }); @@ -466,7 +454,11 @@ protected Future deployment(boolean isOpenShift, ImagePullPolicy imagePull return deploymentOperator .reconcile(reconciliation, reconciliation.namespace(), KafkaResources.entityOperatorDeploymentName(reconciliation.name()), deployment) .compose(patchResult -> { - if (patchResult instanceof 
ReconcileResult.Noop) { + boolean isNoop = patchResult instanceof ReconcileResult.Noop; + boolean patchedUsingServerSideApply = patchResult instanceof ReconcileResult.Patched + && ((ReconcileResult.Patched) patchResult).isUsingServerSideApply(); + + if (isNoop || patchedUsingServerSideApply) { // Deployment needs ot be rolled because the certificate secret changed or older/expired cluster CA removed if (existingEntityTopicOperatorCertsChanged || existingEntityUserOperatorCertsChanged || clusterCa.certsRemoved()) { LOGGER.infoCr(reconciliation, "Rolling Entity Operator to update or remove certificates"); diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/KafkaExporterReconciler.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/KafkaExporterReconciler.java index 42b3f88be26..8a8493fba66 100644 --- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/KafkaExporterReconciler.java +++ b/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/KafkaExporterReconciler.java @@ -132,13 +132,7 @@ private Future certificatesSecret(Clock clock) { .reconcile(reconciliation, reconciliation.namespace(), KafkaExporterResources.secretName(reconciliation.name()), kafkaExporter.generateSecret(clusterCa, Util.isMaintenanceTimeWindowsSatisfied(reconciliation, maintenanceWindows, clock.instant()))) .compose(patchResult -> { - if (patchResult instanceof ReconcileResult.Patched) { - // The secret is patched and some changes to the existing certificates actually occurred - existingKafkaExporterCertsChanged = CertUtils.doExistingCertificatesDiffer(oldSecret, patchResult.resource()); - } else { - existingKafkaExporterCertsChanged = false; - } - + existingKafkaExporterCertsChanged = patchResult != null && CertUtils.doExistingCertificatesDiffer(oldSecret, patchResult.resourceOpt().orElse(null)); return Future.succeededFuture(); }); }); @@ -190,7 +184,11 @@ private Future 
deployment(boolean isOpenShift, ImagePullPolicy imagePullPo return deploymentOperator .reconcile(reconciliation, reconciliation.namespace(), KafkaExporterResources.componentName(reconciliation.name()), deployment) .compose(patchResult -> { - if (patchResult instanceof ReconcileResult.Noop) { + boolean isNoop = patchResult instanceof ReconcileResult.Noop; + boolean patchedUsingServerSideApply = patchResult instanceof ReconcileResult.Patched + && ((ReconcileResult.Patched) patchResult).isUsingServerSideApply(); + + if (isNoop || patchedUsingServerSideApply) { // Deployment needs ot be rolled because the certificate secret changed or older/expired cluster CA removed if (existingKafkaExporterCertsChanged || clusterCa.certsRemoved()) { LOGGER.infoCr(reconciliation, "Rolling Kafka Exporter to update or remove certificates"); diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/KafkaReconciler.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/KafkaReconciler.java index 658a7d5039b..74a25385b4a 100644 --- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/KafkaReconciler.java +++ b/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/assembly/KafkaReconciler.java @@ -120,7 +120,7 @@ public class KafkaReconciler { private final List imagePullSecrets; // Objects used during the reconciliation - /* test */ final Reconciliation reconciliation; + /* test */ protected Reconciliation reconciliation; private final KafkaCluster kafka; private final List kafkaNodePoolCrs; private final ClusterCa clusterCa; @@ -157,7 +157,7 @@ public class KafkaReconciler { private final Map kafkaServerCertificateHash = new HashMap<>(); /* test */ KafkaListenersReconciler.ReconciliationResult listenerReconciliationResults; // Result of the listener reconciliation with the listener details - private final KafkaMetadataStateManager kafkaMetadataStateManager; + protected 
KafkaMetadataStateManager kafkaMetadataStateManager; /** * Constructs the Kafka reconciler diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/resource/ResourceOperatorSupplier.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/resource/ResourceOperatorSupplier.java index ecbb80813c4..7738dff74d1 100644 --- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/resource/ResourceOperatorSupplier.java +++ b/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/resource/ResourceOperatorSupplier.java @@ -255,8 +255,15 @@ public class ResourceOperatorSupplier { * @param pfa Platform Availability Features * @param operationTimeoutMs Operation timeout in milliseconds * @param operatorName Name of this operator instance + * @param useServerSideApply whether operators should use Server Side Apply */ - public ResourceOperatorSupplier(Vertx vertx, KubernetesClient client, MetricsProvider metricsProvider, PlatformFeaturesAvailability pfa, long operationTimeoutMs, String operatorName) { + public ResourceOperatorSupplier(Vertx vertx, + KubernetesClient client, + MetricsProvider metricsProvider, + PlatformFeaturesAvailability pfa, + long operationTimeoutMs, + String operatorName, + boolean useServerSideApply) { this(vertx, client, new ZookeeperLeaderFinder(vertx, @@ -268,7 +275,46 @@ public ResourceOperatorSupplier(Vertx vertx, KubernetesClient client, MetricsPro metricsProvider, pfa, operationTimeoutMs, - new KubernetesRestartEventPublisher(client, operatorName) + new KubernetesRestartEventPublisher(client, operatorName), + useServerSideApply + ); + } + + /** + * Constructor used for tests + * + * @param vertx Vert.x instance + * @param client Kubernetes Client + * @param zlf ZooKeeper Leader Finder + * @param adminClientProvider Kafka Admin client provider + * @param zkScalerProvider ZooKeeper Scaler provider + * @param kafkaAgentClientProvider Kafka Agent client provider + * @param 
metricsProvider Metrics provider + * @param pfa Platform Availability Features + * @param operationTimeoutMs Operation timeout in milliseconds + * @param useServerSideApply whether operators should use Server Side Apply + */ + public ResourceOperatorSupplier(Vertx vertx, + KubernetesClient client, + ZookeeperLeaderFinder zlf, + AdminClientProvider adminClientProvider, + ZookeeperScalerProvider zkScalerProvider, + KafkaAgentClientProvider kafkaAgentClientProvider, + MetricsProvider metricsProvider, + PlatformFeaturesAvailability pfa, + long operationTimeoutMs, + boolean useServerSideApply) { + this(vertx, + client, + zlf, + adminClientProvider, + zkScalerProvider, + kafkaAgentClientProvider, + metricsProvider, + pfa, + operationTimeoutMs, + new KubernetesRestartEventPublisher(client, "operatorName"), + useServerSideApply ); } @@ -303,49 +349,51 @@ public ResourceOperatorSupplier(Vertx vertx, metricsProvider, pfa, operationTimeoutMs, - new KubernetesRestartEventPublisher(client, "operatorName") + new KubernetesRestartEventPublisher(client, "operatorName"), + false ); } private ResourceOperatorSupplier(Vertx vertx, - KubernetesClient client, - ZookeeperLeaderFinder zlf, - AdminClientProvider adminClientProvider, - ZookeeperScalerProvider zkScalerProvider, - KafkaAgentClientProvider kafkaAgentClientProvider, - MetricsProvider metricsProvider, - PlatformFeaturesAvailability pfa, - long operationTimeoutMs, - KubernetesRestartEventPublisher restartEventPublisher) { - this(new ServiceOperator(vertx, client), - pfa.hasRoutes() ? new RouteOperator(vertx, client.adapt(OpenShiftClient.class)) : null, - pfa.hasImages() ? 
new ImageStreamOperator(vertx, client.adapt(OpenShiftClient.class)) : null, - new StatefulSetOperator(vertx, client, operationTimeoutMs), - new ConfigMapOperator(vertx, client), - new SecretOperator(vertx, client), - new PvcOperator(vertx, client), - new DeploymentOperator(vertx, client), - new ServiceAccountOperator(vertx, client), - new RoleBindingOperator(vertx, client), - new RoleOperator(vertx, client), - new ClusterRoleBindingOperator(vertx, client), - new NetworkPolicyOperator(vertx, client), - new PodDisruptionBudgetOperator(vertx, client), - new PodOperator(vertx, client), - new IngressOperator(vertx, client), - pfa.hasBuilds() ? new BuildConfigOperator(vertx, client.adapt(OpenShiftClient.class)) : null, - pfa.hasBuilds() ? new BuildOperator(vertx, client.adapt(OpenShiftClient.class)) : null, - new CrdOperator<>(vertx, client, Kafka.class, KafkaList.class, Kafka.RESOURCE_KIND), - new CrdOperator<>(vertx, client, KafkaConnect.class, KafkaConnectList.class, KafkaConnect.RESOURCE_KIND), - new CrdOperator<>(vertx, client, KafkaMirrorMaker.class, KafkaMirrorMakerList.class, KafkaMirrorMaker.RESOURCE_KIND), - new CrdOperator<>(vertx, client, KafkaBridge.class, KafkaBridgeList.class, KafkaBridge.RESOURCE_KIND), - new CrdOperator<>(vertx, client, KafkaConnector.class, KafkaConnectorList.class, KafkaConnector.RESOURCE_KIND), - new CrdOperator<>(vertx, client, KafkaMirrorMaker2.class, KafkaMirrorMaker2List.class, KafkaMirrorMaker2.RESOURCE_KIND), - new CrdOperator<>(vertx, client, KafkaRebalance.class, KafkaRebalanceList.class, KafkaRebalance.RESOURCE_KIND), - new CrdOperator<>(vertx, client, KafkaNodePool.class, KafkaNodePoolList.class, KafkaNodePool.RESOURCE_KIND), - new StrimziPodSetOperator(vertx, client), - new StorageClassOperator(vertx, client), - new NodeOperator(vertx, client), + KubernetesClient client, + ZookeeperLeaderFinder zlf, + AdminClientProvider adminClientProvider, + ZookeeperScalerProvider zkScalerProvider, + KafkaAgentClientProvider 
kafkaAgentClientProvider, + MetricsProvider metricsProvider, + PlatformFeaturesAvailability pfa, + long operationTimeoutMs, + KubernetesRestartEventPublisher restartEventPublisher, + boolean useServerSideApply) { + this(new ServiceOperator(vertx, client, useServerSideApply), + pfa.hasRoutes() ? new RouteOperator(vertx, client.adapt(OpenShiftClient.class), useServerSideApply) : null, + pfa.hasImages() ? new ImageStreamOperator(vertx, client.adapt(OpenShiftClient.class), useServerSideApply) : null, + new StatefulSetOperator(vertx, client, operationTimeoutMs, useServerSideApply), + new ConfigMapOperator(vertx, client, useServerSideApply), + new SecretOperator(vertx, client, useServerSideApply), + new PvcOperator(vertx, client, useServerSideApply), + new DeploymentOperator(vertx, client, useServerSideApply), + new ServiceAccountOperator(vertx, client, useServerSideApply), + new RoleBindingOperator(vertx, client, useServerSideApply), + new RoleOperator(vertx, client, useServerSideApply), + new ClusterRoleBindingOperator(vertx, client, useServerSideApply), + new NetworkPolicyOperator(vertx, client, useServerSideApply), + new PodDisruptionBudgetOperator(vertx, client, useServerSideApply), + new PodOperator(vertx, client, useServerSideApply), + new IngressOperator(vertx, client, useServerSideApply), + pfa.hasBuilds() ? new BuildConfigOperator(vertx, client.adapt(OpenShiftClient.class), useServerSideApply) : null, + pfa.hasBuilds() ? 
new BuildOperator(vertx, client.adapt(OpenShiftClient.class), useServerSideApply) : null, + new CrdOperator<>(vertx, client, Kafka.class, KafkaList.class, Kafka.RESOURCE_KIND, useServerSideApply), + new CrdOperator<>(vertx, client, KafkaConnect.class, KafkaConnectList.class, KafkaConnect.RESOURCE_KIND, useServerSideApply), + new CrdOperator<>(vertx, client, KafkaMirrorMaker.class, KafkaMirrorMakerList.class, KafkaMirrorMaker.RESOURCE_KIND, useServerSideApply), + new CrdOperator<>(vertx, client, KafkaBridge.class, KafkaBridgeList.class, KafkaBridge.RESOURCE_KIND, useServerSideApply), + new CrdOperator<>(vertx, client, KafkaConnector.class, KafkaConnectorList.class, KafkaConnector.RESOURCE_KIND, useServerSideApply), + new CrdOperator<>(vertx, client, KafkaMirrorMaker2.class, KafkaMirrorMaker2List.class, KafkaMirrorMaker2.RESOURCE_KIND, useServerSideApply), + new CrdOperator<>(vertx, client, KafkaRebalance.class, KafkaRebalanceList.class, KafkaRebalance.RESOURCE_KIND, useServerSideApply), + new CrdOperator<>(vertx, client, KafkaNodePool.class, KafkaNodePoolList.class, KafkaNodePool.RESOURCE_KIND, useServerSideApply), + new StrimziPodSetOperator(vertx, client, useServerSideApply), + new StorageClassOperator(vertx, client, useServerSideApply), + new NodeOperator(vertx, client, useServerSideApply), zkScalerProvider, kafkaAgentClientProvider, metricsProvider, diff --git a/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/resource/StatefulSetOperator.java b/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/resource/StatefulSetOperator.java index ced0dcb76f7..35fb974a4e8 100644 --- a/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/resource/StatefulSetOperator.java +++ b/cluster-operator/src/main/java/io/strimzi/operator/cluster/operator/resource/StatefulSetOperator.java @@ -46,6 +46,30 @@ public StatefulSetOperator(Vertx vertx, KubernetesClient client, long operationT this(vertx, client, operationTimeoutMs, new 
PodOperator(vertx, client)); } + /** + * Constructor + * @param vertx The Vertx instance. + * @param client The Kubernetes client. + * @param operationTimeoutMs The timeout. + * @param useServerSideApply Whether to use server side apply + */ + public StatefulSetOperator(Vertx vertx, KubernetesClient client, long operationTimeoutMs, boolean useServerSideApply) { + this(vertx, client, operationTimeoutMs, new PodOperator(vertx, client), useServerSideApply); + } + + /** + * @param vertx The Vertx instance. + * @param client The Kubernetes client. + * @param operationTimeoutMs The timeout. + * @param podOperator The pod operator. + * @param useServerSideApply Whether to use server side apply + */ + public StatefulSetOperator(Vertx vertx, KubernetesClient client, long operationTimeoutMs, PodOperator podOperator, boolean useServerSideApply) { + super(vertx, client, "StatefulSet", useServerSideApply); + this.podOperations = podOperator; + this.operationTimeoutMs = operationTimeoutMs; + } + /** * @param vertx The Vertx instance. * @param client The Kubernetes client. 
diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/FeatureGatesTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/FeatureGatesTest.java index 9aaea2e8b9c..adf1bde5216 100644 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/FeatureGatesTest.java +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/FeatureGatesTest.java @@ -5,6 +5,7 @@ package io.strimzi.operator.cluster; import io.strimzi.operator.common.InvalidConfigurationException; +import io.strimzi.operator.common.config.FeatureGate; import io.strimzi.test.annotations.ParallelSuite; import io.strimzi.test.annotations.ParallelTest; @@ -20,7 +21,7 @@ public class FeatureGatesTest { @ParallelTest public void testIndividualFeatureGates() { - for (FeatureGates.FeatureGate gate : FeatureGates.NONE.allFeatureGates()) { + for (FeatureGate gate : FeatureGates.NONE.allFeatureGates()) { FeatureGates enabled = new FeatureGates("+" + gate.getName()); FeatureGates disabled; @@ -41,18 +42,18 @@ public void testAllFeatureGates() { List allEnabled = new ArrayList<>(); List allDisabled = new ArrayList<>(); - for (FeatureGates.FeatureGate gate : FeatureGates.NONE.allFeatureGates()) { + for (FeatureGate gate : FeatureGates.NONE.allFeatureGates()) { allEnabled.add("+" + gate.getName()); allDisabled.add("-" + gate.getName()); } FeatureGates enabled = new FeatureGates(String.join(",", allEnabled)); - for (FeatureGates.FeatureGate gate : enabled.allFeatureGates()) { + for (FeatureGate gate : enabled.allFeatureGates()) { assertThat(gate.isEnabled(), is(true)); } FeatureGates disabled = new FeatureGates(String.join(",", allDisabled)); - for (FeatureGates.FeatureGate gate : disabled.allFeatureGates()) { + for (FeatureGate gate : disabled.allFeatureGates()) { assertThat(gate.isEnabled(), is(false)); } } @@ -62,14 +63,10 @@ public void testFeatureGatesParsing() { assertThat(new FeatureGates("+UseKRaft").useKRaftEnabled(), is(true)); assertThat(new 
FeatureGates("-UseKRaft").useKRaftEnabled(), is(false)); assertThat(new FeatureGates(" -UseKRaft ").useKRaftEnabled(), is(false)); - // TODO: Add more tests with various feature gate combinations once we have multiple feature gates again. - // The commented out code below shows the tests we used to have with multiple feature gates. - //assertThat(new FeatureGates("-UseKRaft,-KafkaNodePools").useKRaftEnabled(), is(false)); - //assertThat(new FeatureGates("-UseKRaft,-KafkaNodePools").kafkaNodePoolsEnabled(), is(false)); - //assertThat(new FeatureGates(" +UseKRaft , +KafkaNodePools").useKRaftEnabled(), is(true)); - //assertThat(new FeatureGates(" -UseKRaft , -KafkaNodePools").kafkaNodePoolsEnabled(), is(false)); - //assertThat(new FeatureGates("+KafkaNodePools,-UseKRaft").useKRaftEnabled(), is(false)); - //assertThat(new FeatureGates("+KafkaNodePools,-UseKRaft").kafkaNodePoolsEnabled(), is(true)); + assertThat(new FeatureGates("-UseKRaft,-UseServerSideApply").useKRaftEnabled(), is(false)); + assertThat(new FeatureGates("-UseKRaft,-UseServerSideApply").useServerSideApply(), is(false)); + assertThat(new FeatureGates("-UseKRaft,+UseServerSideApply").useKRaftEnabled(), is(false)); + assertThat(new FeatureGates("-UseKRaft,+UseServerSideApply").useServerSideApply(), is(true)); } @ParallelTest @@ -82,7 +79,7 @@ public void testEmptyFeatureGates() { FeatureGates.NONE); for (FeatureGates fgs : emptyFeatureGates) { - for (FeatureGates.FeatureGate fg : fgs.allFeatureGates()) { + for (FeatureGate fg : fgs.allFeatureGates()) { assertThat(fg.isEnabled(), is(fg.isEnabledByDefault())); } } diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/ResourceUtils.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/ResourceUtils.java index a8affb87e9f..b8dbdec6bce 100644 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/ResourceUtils.java +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/ResourceUtils.java @@ -721,6 +721,12 @@ 
public static ClusterOperatorConfig dummyClusterOperatorConfig(KafkaVersion.Look .build(); } + public static ClusterOperatorConfig dummyClusterOperatorConfig(KafkaVersion.Lookup versions, String featureGates) { + return new ClusterOperatorConfigBuilder(dummyClusterOperatorConfig(), versions) + .with(ClusterOperatorConfig.FEATURE_GATES.key(), featureGates) + .build(); + } + public static ClusterOperatorConfig dummyClusterOperatorConfig(String featureGates) { return new ClusterOperatorConfigBuilder(dummyClusterOperatorConfig(), KafkaVersionTestUtils.getKafkaVersionLookup()) .with(ClusterOperatorConfig.FEATURE_GATES.key(), featureGates) diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaAssemblyOperatorMockTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaAssemblyOperatorMockTest.java index 3982e863651..9a241c42d0d 100644 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaAssemblyOperatorMockTest.java +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaAssemblyOperatorMockTest.java @@ -77,8 +77,8 @@ @ExtendWith(VertxExtension.class) @SuppressWarnings("checkstyle:ClassFanOutComplexity") -public class KafkaAssemblyOperatorMockTest { - private static final Logger LOGGER = LogManager.getLogger(KafkaAssemblyOperatorMockTest.class); +public abstract class KafkaAssemblyOperatorMockTest { + private static final Logger LOGGER = LogManager.getLogger(KafkaAssemblyOperatorWithoutSSAMockTest.class); private static final String CLUSTER_NAME = "my-cluster"; private static final KafkaVersion.Lookup VERSIONS = KafkaVersionTestUtils.getKafkaVersionLookup(); @@ -137,36 +137,36 @@ public void beforeEach(TestInfo testInfo) { mockKube.prepareNamespace(namespace); cluster = new KafkaBuilder() - .withNewMetadata() - .withName(CLUSTER_NAME) - .withNamespace(namespace) - .endMetadata() - .withNewSpec() - .withNewKafka() - 
.withConfig(new HashMap<>()) - .withReplicas(KAFKA_REPLICAS) - .withListeners(new GenericKafkaListenerBuilder() - .withName("tls") - .withPort(9092) - .withType(KafkaListenerType.INTERNAL) - .withTls(true) - .build()) - .withNewPersistentClaimStorage() - .withSize("123") - .withStorageClass("foo") - .withDeleteClaim(true) - .endPersistentClaimStorage() - .endKafka() - .withNewZookeeper() - .withReplicas(3) - .withNewPersistentClaimStorage() - .withSize("123") - .withStorageClass("foo") - .withDeleteClaim(true) - .endPersistentClaimStorage() - .endZookeeper() - .endSpec() - .build(); + .withNewMetadata() + .withName(CLUSTER_NAME) + .withNamespace(namespace) + .endMetadata() + .withNewSpec() + .withNewKafka() + .withConfig(new HashMap<>()) + .withReplicas(KAFKA_REPLICAS) + .withListeners(new GenericKafkaListenerBuilder() + .withName("tls") + .withPort(9092) + .withType(KafkaListenerType.INTERNAL) + .withTls(true) + .build()) + .withNewPersistentClaimStorage() + .withSize("123") + .withStorageClass("foo") + .withDeleteClaim(true) + .endPersistentClaimStorage() + .endKafka() + .withNewZookeeper() + .withReplicas(3) + .withNewPersistentClaimStorage() + .withSize("123") + .withStorageClass("foo") + .withDeleteClaim(true) + .endPersistentClaimStorage() + .endZookeeper() + .endSpec() + .build(); this.kafkaStorage = cluster.getSpec().getKafka().getStorage(); // Create the initial resources @@ -188,46 +188,48 @@ public void afterEach() { client.namespaces().withName(namespace).delete(); } + protected abstract boolean getSSA(); + private ResourceOperatorSupplier supplierWithMocks() { return new ResourceOperatorSupplier(vertx, client, ResourceUtils.zookeeperLeaderFinder(vertx, client), ResourceUtils.adminClientProvider(), ResourceUtils.zookeeperScalerProvider(), ResourceUtils.kafkaAgentClientProvider(), - ResourceUtils.metricsProvider(), new PlatformFeaturesAvailability(false, KubernetesVersion.MINIMAL_SUPPORTED_VERSION), 2_000); + ResourceUtils.metricsProvider(), new 
PlatformFeaturesAvailability(false, KubernetesVersion.MINIMAL_SUPPORTED_VERSION), 2_000, getSSA()); } private Future initialReconcile(VertxTestContext context) { LOGGER.info("Reconciling initially -> create"); return operator.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, namespace, CLUSTER_NAME)) - .onComplete(context.succeeding(v -> context.verify(() -> { - StrimziPodSet sps = supplier.strimziPodSetOperator.client().inNamespace(namespace).withName(KafkaResources.kafkaComponentName(CLUSTER_NAME)).get(); - assertThat(sps, is(notNullValue())); - - sps.getSpec().getPods().stream().map(PodSetUtils::mapToPod).forEach(pod -> { - assertThat(pod.getMetadata().getAnnotations(), hasEntry(Ca.ANNO_STRIMZI_IO_CLIENTS_CA_CERT_GENERATION, "0")); - assertThat(pod.getMetadata().getAnnotations(), hasEntry(Ca.ANNO_STRIMZI_IO_CLUSTER_CA_CERT_GENERATION, "0")); - assertThat(pod.getMetadata().getAnnotations(), hasEntry(Ca.ANNO_STRIMZI_IO_CLUSTER_CA_KEY_GENERATION, "0")); - var brokersSecret = client.secrets().inNamespace(namespace).withName(KafkaResources.kafkaSecretName(CLUSTER_NAME)).get(); - assertThat(pod.getMetadata().getAnnotations(), hasEntry(Annotations.ANNO_STRIMZI_SERVER_CERT_HASH, - CertUtils.getCertificateThumbprint(brokersSecret, Ca.SecretEntry.CRT.asKey(pod.getMetadata().getName())) - )); - }); - - StrimziPodSet zkSps = supplier.strimziPodSetOperator.client().inNamespace(namespace).withName(KafkaResources.zookeeperComponentName(CLUSTER_NAME)).get(); - zkSps.getSpec().getPods().stream().map(PodSetUtils::mapToPod).forEach(pod -> { - assertThat(pod.getMetadata().getAnnotations(), hasEntry(Ca.ANNO_STRIMZI_IO_CLUSTER_CA_CERT_GENERATION, "0")); - assertThat(pod.getMetadata().getAnnotations(), hasEntry(Ca.ANNO_STRIMZI_IO_CLUSTER_CA_KEY_GENERATION, "0")); - var zooKeeperSecret = client.secrets().inNamespace(namespace).withName(KafkaResources.zookeeperSecretName(CLUSTER_NAME)).get(); - assertThat(pod.getMetadata().getAnnotations(), 
hasEntry(Annotations.ANNO_STRIMZI_SERVER_CERT_HASH, - CertUtils.getCertificateThumbprint(zooKeeperSecret, Ca.SecretEntry.CRT.asKey(pod.getMetadata().getName())) - )); - }); - assertThat(client.configMaps().inNamespace(namespace).withName(KafkaResources.zookeeperMetricsAndLogConfigMapName(CLUSTER_NAME)).get(), is(notNullValue())); - assertThat(client.secrets().inNamespace(namespace).withName(KafkaResources.clientsCaKeySecretName(CLUSTER_NAME)).get(), is(notNullValue())); - assertThat(client.secrets().inNamespace(namespace).withName(KafkaResources.clientsCaCertificateSecretName(CLUSTER_NAME)).get(), is(notNullValue())); - assertThat(client.secrets().inNamespace(namespace).withName(KafkaResources.clusterCaCertificateSecretName(CLUSTER_NAME)).get(), is(notNullValue())); - assertThat(client.secrets().inNamespace(namespace).withName(KafkaResources.kafkaSecretName(CLUSTER_NAME)).get(), is(notNullValue())); - assertThat(client.secrets().inNamespace(namespace).withName(KafkaResources.zookeeperSecretName(CLUSTER_NAME)).get(), is(notNullValue())); - }))); + .onComplete(context.succeeding(v -> context.verify(() -> { + StrimziPodSet sps = supplier.strimziPodSetOperator.client().inNamespace(namespace).withName(KafkaResources.kafkaComponentName(CLUSTER_NAME)).get(); + assertThat(sps, is(notNullValue())); + + sps.getSpec().getPods().stream().map(PodSetUtils::mapToPod).forEach(pod -> { + assertThat(pod.getMetadata().getAnnotations(), hasEntry(Ca.ANNO_STRIMZI_IO_CLIENTS_CA_CERT_GENERATION, "0")); + assertThat(pod.getMetadata().getAnnotations(), hasEntry(Ca.ANNO_STRIMZI_IO_CLUSTER_CA_CERT_GENERATION, "0")); + assertThat(pod.getMetadata().getAnnotations(), hasEntry(Ca.ANNO_STRIMZI_IO_CLUSTER_CA_KEY_GENERATION, "0")); + var brokersSecret = client.secrets().inNamespace(namespace).withName(KafkaResources.kafkaSecretName(CLUSTER_NAME)).get(); + assertThat(pod.getMetadata().getAnnotations(), hasEntry(Annotations.ANNO_STRIMZI_SERVER_CERT_HASH, + 
CertUtils.getCertificateThumbprint(brokersSecret, Ca.SecretEntry.CRT.asKey(pod.getMetadata().getName())) + )); + }); + + StrimziPodSet zkSps = supplier.strimziPodSetOperator.client().inNamespace(namespace).withName(KafkaResources.zookeeperComponentName(CLUSTER_NAME)).get(); + zkSps.getSpec().getPods().stream().map(PodSetUtils::mapToPod).forEach(pod -> { + assertThat(pod.getMetadata().getAnnotations(), hasEntry(Ca.ANNO_STRIMZI_IO_CLUSTER_CA_CERT_GENERATION, "0")); + assertThat(pod.getMetadata().getAnnotations(), hasEntry(Ca.ANNO_STRIMZI_IO_CLUSTER_CA_KEY_GENERATION, "0")); + var zooKeeperSecret = client.secrets().inNamespace(namespace).withName(KafkaResources.zookeeperSecretName(CLUSTER_NAME)).get(); + assertThat(pod.getMetadata().getAnnotations(), hasEntry(Annotations.ANNO_STRIMZI_SERVER_CERT_HASH, + CertUtils.getCertificateThumbprint(zooKeeperSecret, Ca.SecretEntry.CRT.asKey(pod.getMetadata().getName())) + )); + }); + assertThat(client.configMaps().inNamespace(namespace).withName(KafkaResources.zookeeperMetricsAndLogConfigMapName(CLUSTER_NAME)).get(), is(notNullValue())); + assertThat(client.secrets().inNamespace(namespace).withName(KafkaResources.clientsCaKeySecretName(CLUSTER_NAME)).get(), is(notNullValue())); + assertThat(client.secrets().inNamespace(namespace).withName(KafkaResources.clientsCaCertificateSecretName(CLUSTER_NAME)).get(), is(notNullValue())); + assertThat(client.secrets().inNamespace(namespace).withName(KafkaResources.clusterCaCertificateSecretName(CLUSTER_NAME)).get(), is(notNullValue())); + assertThat(client.secrets().inNamespace(namespace).withName(KafkaResources.kafkaSecretName(CLUSTER_NAME)).get(), is(notNullValue())); + assertThat(client.secrets().inNamespace(namespace).withName(KafkaResources.zookeeperSecretName(CLUSTER_NAME)).get(), is(notNullValue())); + }))); } /** Create a cluster from a Kafka */ @@ -235,9 +237,9 @@ private Future initialReconcile(VertxTestContext context) { public void testReconcile(VertxTestContext context) { 
Checkpoint async = context.checkpoint(); initialReconcile(context) - .onComplete(context.succeeding(i -> { })) - .compose(v -> operator.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, namespace, CLUSTER_NAME))) - .onComplete(context.succeeding(v -> async.flag())); + .onComplete(context.succeeding(i -> { })) + .compose(v -> operator.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, namespace, CLUSTER_NAME))) + .onComplete(context.succeeding(v -> async.flag())); } @Test @@ -258,23 +260,23 @@ private void initialReconcileThenDeleteSecretsThenReconcile(VertxTestContext con Checkpoint async = context.checkpoint(); initialReconcile(context) - .onComplete(context.succeeding(v -> context.verify(() -> { - for (String secret: secrets) { - client.secrets().inNamespace(namespace).withName(secret).withPropagationPolicy(DeletionPropagation.FOREGROUND).delete(); - client.secrets().inNamespace(namespace).withName(secret).waitUntilCondition(Objects::isNull, 10_000, TimeUnit.MILLISECONDS); - assertThat("Expected secret " + secret + " to not exist", - client.secrets().inNamespace(namespace).withName(secret).get(), is(nullValue())); - } - LOGGER.info("Reconciling again -> update"); - }))) - .compose(v -> operator.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, namespace, CLUSTER_NAME))) - .onComplete(context.succeeding(v -> context.verify(() -> { - for (String secret: secrets) { - assertThat("Expected secret " + secret + " to have been recreated", - client.secrets().inNamespace(namespace).withName(secret).get(), is(notNullValue())); - } - async.flag(); - }))); + .onComplete(context.succeeding(v -> context.verify(() -> { + for (String secret: secrets) { + client.secrets().inNamespace(namespace).withName(secret).withPropagationPolicy(DeletionPropagation.FOREGROUND).delete(); + client.secrets().inNamespace(namespace).withName(secret).waitUntilCondition(Objects::isNull, 10_000, TimeUnit.MILLISECONDS); + assertThat("Expected secret " + 
secret + " to not exist", + client.secrets().inNamespace(namespace).withName(secret).get(), is(nullValue())); + } + LOGGER.info("Reconciling again -> update"); + }))) + .compose(v -> operator.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, namespace, CLUSTER_NAME))) + .onComplete(context.succeeding(v -> context.verify(() -> { + for (String secret: secrets) { + assertThat("Expected secret " + secret + " to have been recreated", + client.secrets().inNamespace(namespace).withName(secret).get(), is(notNullValue())); + } + async.flag(); + }))); } /** @@ -284,23 +286,23 @@ private void initialReconcileThenDeleteServicesThenReconcile(VertxTestContext co Checkpoint async = context.checkpoint(); initialReconcile(context) - .onComplete(context.succeeding(v -> context.verify(() -> { - for (String service : services) { - client.services().inNamespace(namespace).withName(service).withPropagationPolicy(DeletionPropagation.FOREGROUND).delete(); - client.services().inNamespace(namespace).withName(service).waitUntilCondition(Objects::isNull, 10_000, TimeUnit.MILLISECONDS); - assertThat("Expected service " + service + " to be not exist", - client.services().inNamespace(namespace).withName(service).get(), is(nullValue())); - } - LOGGER.info("Reconciling again -> update"); - }))) - .compose(v -> operator.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, namespace, CLUSTER_NAME))) - .onComplete(context.succeeding(v -> context.verify(() -> { - for (String service: services) { - assertThat("Expected service " + service + " to have been recreated", - client.services().inNamespace(namespace).withName(service).get(), is(notNullValue())); - } - async.flag(); - }))); + .onComplete(context.succeeding(v -> context.verify(() -> { + for (String service : services) { + client.services().inNamespace(namespace).withName(service).withPropagationPolicy(DeletionPropagation.FOREGROUND).delete(); + 
client.services().inNamespace(namespace).withName(service).waitUntilCondition(Objects::isNull, 10_000, TimeUnit.MILLISECONDS); + assertThat("Expected service " + service + " to be not exist", + client.services().inNamespace(namespace).withName(service).get(), is(nullValue())); + } + LOGGER.info("Reconciling again -> update"); + }))) + .compose(v -> operator.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, namespace, CLUSTER_NAME))) + .onComplete(context.succeeding(v -> context.verify(() -> { + for (String service: services) { + assertThat("Expected service " + service + " to have been recreated", + client.services().inNamespace(namespace).withName(service).get(), is(notNullValue())); + } + async.flag(); + }))); } @Test @@ -332,20 +334,20 @@ public void testReconcileReplacesDeletedKafkaPodSet(VertxTestContext context) { private void initialReconcileThenDeletePodSetsThenReconcile(VertxTestContext context, String podSetName) { Checkpoint async = context.checkpoint(); initialReconcile(context) - .onComplete(context.succeeding(v -> context.verify(() -> { - supplier.strimziPodSetOperator.client().inNamespace(namespace).withName(podSetName).withPropagationPolicy(DeletionPropagation.FOREGROUND).delete(); - supplier.strimziPodSetOperator.client().inNamespace(namespace).withName(podSetName).waitUntilCondition(Objects::isNull, 10_000, TimeUnit.MILLISECONDS); - assertThat("Expected sts " + podSetName + " should not exist", - supplier.strimziPodSetOperator.client().inNamespace(namespace).withName(podSetName).get(), is(nullValue())); - - LOGGER.info("Reconciling again -> update"); - }))) - .compose(v -> operator.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, namespace, CLUSTER_NAME))) - .onComplete(context.succeeding(v -> context.verify(() -> { - assertThat("Expected sts " + podSetName + " should have been re-created", - supplier.strimziPodSetOperator.client().inNamespace(namespace).withName(CLUSTER_NAME + "-kafka").get(), is(notNullValue())); 
- async.flag(); - }))); + .onComplete(context.succeeding(v -> context.verify(() -> { + supplier.strimziPodSetOperator.client().inNamespace(namespace).withName(podSetName).withPropagationPolicy(DeletionPropagation.FOREGROUND).delete(); + supplier.strimziPodSetOperator.client().inNamespace(namespace).withName(podSetName).waitUntilCondition(Objects::isNull, 10_000, TimeUnit.MILLISECONDS); + assertThat("Expected sts " + podSetName + " should not exist", + supplier.strimziPodSetOperator.client().inNamespace(namespace).withName(podSetName).get(), is(nullValue())); + + LOGGER.info("Reconciling again -> update"); + }))) + .compose(v -> operator.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, namespace, CLUSTER_NAME))) + .onComplete(context.succeeding(v -> context.verify(() -> { + assertThat("Expected sts " + podSetName + " should have been re-created", + supplier.strimziPodSetOperator.client().inNamespace(namespace).withName(CLUSTER_NAME + "-kafka").get(), is(notNullValue())); + async.flag(); + }))); } @Test @@ -356,32 +358,32 @@ public void testReconcileUpdatesKafkaPersistentVolumes(VertxTestContext context) Checkpoint async = context.checkpoint(); initialReconcile(context) - .onComplete(context.succeeding(v -> context.verify(() -> { - assertThat(originalStorageClass, is("foo")); + .onComplete(context.succeeding(v -> context.verify(() -> { + assertThat(originalStorageClass, is("foo")); - // Try to update the storage class - String changedClass = originalStorageClass + "2"; + // Try to update the storage class + String changedClass = originalStorageClass + "2"; - Kafka patchedPersistenceKafka = new KafkaBuilder(cluster) - .editSpec() + Kafka patchedPersistenceKafka = new KafkaBuilder(cluster) + .editSpec() .editKafka() - .withNewPersistentClaimStorage() - .withStorageClass(changedClass) - .withSize("123") - .endPersistentClaimStorage() + .withNewPersistentClaimStorage() + .withStorageClass(changedClass) + .withSize("123") + .endPersistentClaimStorage() 
.endKafka() - .endSpec() - .build(); - kafkaAssembly(namespace, CLUSTER_NAME).patch(PatchContext.of(PatchType.JSON), patchedPersistenceKafka); - - LOGGER.info("Updating with changed storage class"); - }))) - .compose(v -> operator.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, namespace, CLUSTER_NAME))) - .onComplete(context.succeeding(v -> context.verify(() -> { - // Check the storage class was not changed - assertThat(((PersistentClaimStorage) kafkaStorage).getStorageClass(), is(originalStorageClass)); - async.flag(); - }))); + .endSpec() + .build(); + kafkaAssembly(namespace, CLUSTER_NAME).patch(PatchContext.of(PatchType.JSON), patchedPersistenceKafka); + + LOGGER.info("Updating with changed storage class"); + }))) + .compose(v -> operator.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, namespace, CLUSTER_NAME))) + .onComplete(context.succeeding(v -> context.verify(() -> { + // Check the storage class was not changed + assertThat(((PersistentClaimStorage) kafkaStorage).getStorageClass(), is(originalStorageClass)); + async.flag(); + }))); } private Resource kafkaAssembly(String namespace, String name) { @@ -393,31 +395,31 @@ private Resource kafkaAssembly(String namespace, String name) { public void testReconcileUpdatesKafkaStorageType(VertxTestContext context) { Checkpoint async = context.checkpoint(); initialReconcile(context) - .onComplete(context.succeeding(v -> context.verify(() -> { - Kafka updatedStorageKafka = null; - if (cluster.getSpec().getKafka().getStorage() instanceof PersistentClaimStorage) { - updatedStorageKafka = new KafkaBuilder(cluster) - .editSpec() + .onComplete(context.succeeding(v -> context.verify(() -> { + Kafka updatedStorageKafka = null; + if (cluster.getSpec().getKafka().getStorage() instanceof PersistentClaimStorage) { + updatedStorageKafka = new KafkaBuilder(cluster) + .editSpec() .editKafka() - .withNewEphemeralStorage() - .endEphemeralStorage() + .withNewEphemeralStorage() + 
.endEphemeralStorage() .endKafka() - .endSpec() - .build(); - } else { - context.failNow(new Exception("If storage is not persistent, something has gone wrong")); - } - kafkaAssembly(namespace, CLUSTER_NAME).patch(updatedStorageKafka); - - LOGGER.info("Updating with changed storage type"); - }))) - .compose(v -> operator.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, namespace, CLUSTER_NAME))) - .onComplete(context.succeeding(v -> context.verify(() -> { - // Check the Volumes and PVCs were not changed - assertPVCs(context, CLUSTER_NAME + "-kafka"); - assertVolumes(context, CLUSTER_NAME + "-kafka"); - async.flag(); - }))); + .endSpec() + .build(); + } else { + context.failNow(new Exception("If storage is not persistent, something has gone wrong")); + } + kafkaAssembly(namespace, CLUSTER_NAME).patch(updatedStorageKafka); + + LOGGER.info("Updating with changed storage type"); + }))) + .compose(v -> operator.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, namespace, CLUSTER_NAME))) + .onComplete(context.succeeding(v -> context.verify(() -> { + // Check the Volumes and PVCs were not changed + assertPVCs(context, CLUSTER_NAME + "-kafka"); + assertVolumes(context, CLUSTER_NAME + "-kafka"); + async.flag(); + }))); } @@ -474,42 +476,42 @@ public void testReconcileUpdatesKafkaWithChangedDeleteClaim(VertxTestContext con Checkpoint async = context.checkpoint(); initialReconcile(context) - .onComplete(context.succeeding(v -> context.verify(() -> { - kafkaPvcs.set(client.persistentVolumeClaims().inNamespace(namespace).withLabels(kafkaLabels).list().getItems() - .stream() - .map(pvc -> pvc.getMetadata().getName()) - .collect(Collectors.toSet())); - - zkPvcs.set(client.persistentVolumeClaims().inNamespace(namespace).withLabels(zkLabels).list().getItems() - .stream() - .map(pvc -> pvc.getMetadata().getName()) - .collect(Collectors.toSet())); - - originalKafkaDeleteClaim.set(deleteClaim(kafkaStorage)); - - // Try to update the storage class - 
Kafka updatedStorageKafka = new KafkaBuilder(cluster).editSpec().editKafka() - .withNewPersistentClaimStorage() - .withSize("123") - .withStorageClass("foo") - .withDeleteClaim(!originalKafkaDeleteClaim.get()) - .endPersistentClaimStorage().endKafka().endSpec().build(); - kafkaAssembly(namespace, CLUSTER_NAME).patch(PatchContext.of(PatchType.JSON), updatedStorageKafka); - LOGGER.info("Updating with changed delete claim"); - }))) - .compose(v -> operator.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, namespace, CLUSTER_NAME))) - .onComplete(context.succeeding(v -> context.verify(() -> { - // check that the new delete-claim annotation is on the PVCs - for (String pvcName: kafkaPvcs.get()) { - assertThat(client.persistentVolumeClaims().inNamespace(namespace).withName(pvcName).get() - .getMetadata().getAnnotations(), - hasEntry(Annotations.ANNO_STRIMZI_IO_DELETE_CLAIM, String.valueOf(!originalKafkaDeleteClaim.get()))); - } - kafkaAssembly(namespace, CLUSTER_NAME).withPropagationPolicy(DeletionPropagation.FOREGROUND).delete(); - LOGGER.info("Reconciling again -> delete"); - }))) - .compose(v -> operator.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, namespace, CLUSTER_NAME))) - .onComplete(context.succeeding(v -> async.flag())); + .onComplete(context.succeeding(v -> context.verify(() -> { + kafkaPvcs.set(client.persistentVolumeClaims().inNamespace(namespace).withLabels(kafkaLabels).list().getItems() + .stream() + .map(pvc -> pvc.getMetadata().getName()) + .collect(Collectors.toSet())); + + zkPvcs.set(client.persistentVolumeClaims().inNamespace(namespace).withLabels(zkLabels).list().getItems() + .stream() + .map(pvc -> pvc.getMetadata().getName()) + .collect(Collectors.toSet())); + + originalKafkaDeleteClaim.set(deleteClaim(kafkaStorage)); + + // Try to update the storage class + Kafka updatedStorageKafka = new KafkaBuilder(cluster).editSpec().editKafka() + .withNewPersistentClaimStorage() + .withSize("123") + 
.withStorageClass("foo") + .withDeleteClaim(!originalKafkaDeleteClaim.get()) + .endPersistentClaimStorage().endKafka().endSpec().build(); + kafkaAssembly(namespace, CLUSTER_NAME).patch(PatchContext.of(PatchType.JSON), updatedStorageKafka); + LOGGER.info("Updating with changed delete claim"); + }))) + .compose(v -> operator.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, namespace, CLUSTER_NAME))) + .onComplete(context.succeeding(v -> context.verify(() -> { + // check that the new delete-claim annotation is on the PVCs + for (String pvcName: kafkaPvcs.get()) { + assertThat(client.persistentVolumeClaims().inNamespace(namespace).withName(pvcName).get() + .getMetadata().getAnnotations(), + hasEntry(Annotations.ANNO_STRIMZI_IO_DELETE_CLAIM, String.valueOf(!originalKafkaDeleteClaim.get()))); + } + kafkaAssembly(namespace, CLUSTER_NAME).withPropagationPolicy(DeletionPropagation.FOREGROUND).delete(); + LOGGER.info("Reconciling again -> delete"); + }))) + .compose(v -> operator.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, namespace, CLUSTER_NAME))) + .onComplete(context.succeeding(v -> async.flag())); } /** Create a cluster from a Kafka Cluster CM */ @@ -523,40 +525,40 @@ public void testReconcileKafkaScaleDown(VertxTestContext context) { Checkpoint async = context.checkpoint(); initialReconcile(context) - .onComplete(context.succeeding(v -> context.verify(() -> { - brokersInternalCertsCount.set(client.secrets().inNamespace(namespace).withName(KafkaResources.kafkaSecretName(CLUSTER_NAME)).get() - .getData() - .size()); + .onComplete(context.succeeding(v -> context.verify(() -> { + brokersInternalCertsCount.set(client.secrets().inNamespace(namespace).withName(KafkaResources.kafkaSecretName(CLUSTER_NAME)).get() + .getData() + .size()); - assertThat(client.pods().inNamespace(namespace).withName(deletedPod).get(), is(notNullValue())); + assertThat(client.pods().inNamespace(namespace).withName(deletedPod).get(), is(notNullValue())); - Kafka 
scaledDownCluster = new KafkaBuilder(cluster) - .editSpec() + Kafka scaledDownCluster = new KafkaBuilder(cluster) + .editSpec() .editKafka() - .withReplicas(scaleDownTo) + .withReplicas(scaleDownTo) .endKafka() - .endSpec() - .build(); - kafkaAssembly(namespace, CLUSTER_NAME).patch(PatchContext.of(PatchType.JSON), scaledDownCluster); - - LOGGER.info("Scaling down to {} Kafka pods", scaleDownTo); - }))) - .compose(v -> operator.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, namespace, CLUSTER_NAME))) - .onComplete(context.succeeding(v -> context.verify(() -> { - assertThat(Crds.strimziPodSetOperation(client).inNamespace(namespace).withName(CLUSTER_NAME + "-kafka").get().getSpec().getPods().size(), - is(scaleDownTo)); - assertThat("Expected pod " + deletedPod + " to have been deleted", - client.pods().inNamespace(namespace).withName(deletedPod).get(), - is(nullValue())); - - // removing one pod, the related private and public keys, keystore and password (4 entries) should not be in the Secrets - assertThat(client.secrets().inNamespace(namespace).withName(KafkaResources.kafkaSecretName(CLUSTER_NAME)).get() - .getData(), - aMapWithSize(brokersInternalCertsCount.get() - 4)); - - // TODO assert no rolling update - async.flag(); - }))); + .endSpec() + .build(); + kafkaAssembly(namespace, CLUSTER_NAME).patch(PatchContext.of(PatchType.JSON), scaledDownCluster); + + LOGGER.info("Scaling down to {} Kafka pods", scaleDownTo); + }))) + .compose(v -> operator.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, namespace, CLUSTER_NAME))) + .onComplete(context.succeeding(v -> context.verify(() -> { + assertThat(Crds.strimziPodSetOperation(client).inNamespace(namespace).withName(CLUSTER_NAME + "-kafka").get().getSpec().getPods().size(), + is(scaleDownTo)); + assertThat("Expected pod " + deletedPod + " to have been deleted", + client.pods().inNamespace(namespace).withName(deletedPod).get(), + is(nullValue())); + + // removing one pod, the related 
private and public keys, keystore and password (4 entries) should not be in the Secrets + assertThat(client.secrets().inNamespace(namespace).withName(KafkaResources.kafkaSecretName(CLUSTER_NAME)).get() + .getData(), + aMapWithSize(brokersInternalCertsCount.get() - 4)); + + // TODO assert no rolling update + async.flag(); + }))); } /** Create a cluster from a Kafka Cluster CM */ @@ -569,38 +571,38 @@ public void testReconcileKafkaScaleUp(VertxTestContext context) { String newPod = KafkaResources.kafkaPodName(CLUSTER_NAME, KAFKA_REPLICAS); initialReconcile(context) - .onComplete(context.succeeding(v -> context.verify(() -> { - brokersInternalCertsCount.set(client.secrets().inNamespace(namespace).withName(KafkaResources.kafkaSecretName(CLUSTER_NAME)).get() - .getData() - .size()); + .onComplete(context.succeeding(v -> context.verify(() -> { + brokersInternalCertsCount.set(client.secrets().inNamespace(namespace).withName(KafkaResources.kafkaSecretName(CLUSTER_NAME)).get() + .getData() + .size()); - assertThat(client.pods().inNamespace(namespace).withName(newPod).get(), is(nullValue())); + assertThat(client.pods().inNamespace(namespace).withName(newPod).get(), is(nullValue())); - Kafka scaledUpKafka = new KafkaBuilder(cluster) - .editSpec() + Kafka scaledUpKafka = new KafkaBuilder(cluster) + .editSpec() .editKafka() - .withReplicas(scaleUpTo) + .withReplicas(scaleUpTo) .endKafka() - .endSpec() - .build(); - kafkaAssembly(namespace, CLUSTER_NAME).patch(PatchContext.of(PatchType.JSON), scaledUpKafka); - - LOGGER.info("Scaling up to {} Kafka pods", scaleUpTo); - }))) - .compose(v -> operator.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, namespace, CLUSTER_NAME))) - .onComplete(context.succeeding(v -> context.verify(() -> { - assertThat(supplier.strimziPodSetOperator.client().inNamespace(namespace).withName(CLUSTER_NAME + "-kafka").get().getSpec().getPods().size(), - is(scaleUpTo)); - assertThat("Expected pod " + newPod + " to have been created", - 
client.pods().inNamespace(namespace).withName(newPod).get(), - is(notNullValue())); - - // adding one pod, the related private and public keys, keystore and password should be added to the Secrets - assertThat(client.secrets().inNamespace(namespace).withName(KafkaResources.kafkaSecretName(CLUSTER_NAME)).get().getData(), - aMapWithSize(brokersInternalCertsCount.get() + 4)); - - // TODO assert no rolling update - async.flag(); - }))); + .endSpec() + .build(); + kafkaAssembly(namespace, CLUSTER_NAME).patch(PatchContext.of(PatchType.JSON), scaledUpKafka); + + LOGGER.info("Scaling up to {} Kafka pods", scaleUpTo); + }))) + .compose(v -> operator.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, namespace, CLUSTER_NAME))) + .onComplete(context.succeeding(v -> context.verify(() -> { + assertThat(supplier.strimziPodSetOperator.client().inNamespace(namespace).withName(CLUSTER_NAME + "-kafka").get().getSpec().getPods().size(), + is(scaleUpTo)); + assertThat("Expected pod " + newPod + " to have been created", + client.pods().inNamespace(namespace).withName(newPod).get(), + is(notNullValue())); + + // adding one pod, the related private and public keys, keystore and password should be added to the Secrets + assertThat(client.secrets().inNamespace(namespace).withName(KafkaResources.kafkaSecretName(CLUSTER_NAME)).get().getData(), + aMapWithSize(brokersInternalCertsCount.get() + 4)); + + // TODO assert no rolling update + async.flag(); + }))); } -} +} \ No newline at end of file diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaAssemblyOperatorPodSetTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaAssemblyOperatorPodSetTest.java index 51090337362..b7d69e5bb54 100644 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaAssemblyOperatorPodSetTest.java +++ 
b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaAssemblyOperatorPodSetTest.java @@ -14,8 +14,6 @@ import io.strimzi.api.kafka.model.kafka.KafkaResources; import io.strimzi.api.kafka.model.kafka.KafkaStatus; import io.strimzi.api.kafka.model.kafka.Storage; -import io.strimzi.api.kafka.model.kafka.listener.GenericKafkaListenerBuilder; -import io.strimzi.api.kafka.model.kafka.listener.KafkaListenerType; import io.strimzi.api.kafka.model.podset.StrimziPodSet; import io.strimzi.certs.CertManager; import io.strimzi.certs.OpenSslCertManager; @@ -88,7 +86,7 @@ @ExtendWith(VertxExtension.class) @SuppressWarnings("checkstyle:ClassFanOutComplexity") -public class KafkaAssemblyOperatorPodSetTest { +public abstract class KafkaAssemblyOperatorPodSetTest { private static final KafkaVersion.Lookup VERSIONS = KafkaVersionTestUtils.getKafkaVersionLookup(); private static final SharedEnvironmentProvider SHARED_ENV_PROVIDER = new MockSharedEnvironmentProvider(); private static final KubernetesVersion KUBERNETES_VERSION = KubernetesVersion.MINIMAL_SUPPORTED_VERSION; @@ -101,34 +99,12 @@ public class KafkaAssemblyOperatorPodSetTest { VERSIONS.defaultVersion().messageVersion(), VERSIONS.defaultVersion().metadataVersion() ); - private static final String NAMESPACE = "my-ns"; - private static final String CLUSTER_NAME = "my-cluster"; - private static final Kafka KAFKA = new KafkaBuilder() - .withNewMetadata() - .withName(CLUSTER_NAME) - .withNamespace(NAMESPACE) - .endMetadata() - .withNewSpec() - .withNewKafka() - .withReplicas(3) - .withListeners(new GenericKafkaListenerBuilder() - .withName("plain") - .withPort(9092) - .withType(KafkaListenerType.INTERNAL) - .withTls(false) - .build()) - .withNewEphemeralStorage() - .endEphemeralStorage() - .endKafka() - .withNewZookeeper() - .withReplicas(3) - .withNewEphemeralStorage() - .endEphemeralStorage() - .endZookeeper() - .endSpec() - .build(); - private static final List POOLS = 
NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, KAFKA, null, Map.of(), Map.of(), false, SHARED_ENV_PROVIDER); - private static final KafkaCluster KAFKA_CLUSTER = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, KAFKA, POOLS, VERSIONS, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, KafkaMetadataConfigurationState.ZK, null, SHARED_ENV_PROVIDER); + protected final String namespace = "my-ns"; + protected final String clusterName = "my-cluster"; + private final Kafka kafka = initKafka(); + + private final List pools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, kafka, null, Map.of(), Map.of(), false, SHARED_ENV_PROVIDER); + private final KafkaCluster kafkaCluster = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, pools, VERSIONS, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, KafkaMetadataConfigurationState.ZK, null, SHARED_ENV_PROVIDER); private static final Map> ADVERTISED_HOSTNAMES = Map.of( 0, Map.of("PLAIN_9092", "broker-0"), @@ -146,23 +122,23 @@ public class KafkaAssemblyOperatorPodSetTest { 4, Map.of("PLAIN_9092", "10004") ); - private final static ClusterCa CLUSTER_CA = new ClusterCa( + private final ClusterCa clusterCa = new ClusterCa( Reconciliation.DUMMY_RECONCILIATION, CERT_MANAGER, PASSWORD_GENERATOR, - CLUSTER_NAME, - ResourceUtils.createInitialCaCertSecret(NAMESPACE, CLUSTER_NAME, AbstractModel.clusterCaCertSecretName(CLUSTER_NAME), MockCertManager.clusterCaCert(), MockCertManager.clusterCaCertStore(), "123456"), - ResourceUtils.createInitialCaKeySecret(NAMESPACE, CLUSTER_NAME, AbstractModel.clusterCaKeySecretName(CLUSTER_NAME), MockCertManager.clusterCaKey()) + clusterName, + ResourceUtils.createInitialCaCertSecret(namespace, clusterName, AbstractModel.clusterCaCertSecretName(clusterName), MockCertManager.clusterCaCert(), MockCertManager.clusterCaCertStore(), "123456"), + ResourceUtils.createInitialCaKeySecret(namespace, clusterName, 
AbstractModel.clusterCaKeySecretName(clusterName), MockCertManager.clusterCaKey()) ); - private final static ClientsCa CLIENTS_CA = new ClientsCa( + private final ClientsCa clientsCa = new ClientsCa( Reconciliation.DUMMY_RECONCILIATION, new OpenSslCertManager(), new PasswordGenerator(10, "a", "a"), - KafkaResources.clientsCaCertificateSecretName(CLUSTER_NAME), - ResourceUtils.createInitialCaCertSecret(NAMESPACE, CLUSTER_NAME, AbstractModel.clusterCaCertSecretName(CLUSTER_NAME), MockCertManager.clusterCaCert(), MockCertManager.clusterCaCertStore(), "123456"), - KafkaResources.clientsCaKeySecretName(CLUSTER_NAME), - ResourceUtils.createInitialCaKeySecret(NAMESPACE, CLUSTER_NAME, AbstractModel.clusterCaKeySecretName(CLUSTER_NAME), MockCertManager.clusterCaKey()), + KafkaResources.clientsCaCertificateSecretName(clusterName), + ResourceUtils.createInitialCaCertSecret(namespace, clusterName, AbstractModel.clusterCaCertSecretName(clusterName), MockCertManager.clusterCaCert(), MockCertManager.clusterCaCertStore(), "123456"), + KafkaResources.clientsCaKeySecretName(clusterName), + ResourceUtils.createInitialCaKeySecret(namespace, clusterName, AbstractModel.clusterCaKeySecretName(clusterName), MockCertManager.clusterCaKey()), 365, 30, true, @@ -172,6 +148,8 @@ public class KafkaAssemblyOperatorPodSetTest { protected static Vertx vertx; private static WorkerExecutor sharedWorkerExecutor; + abstract Kafka initKafka(); + @BeforeAll public static void beforeAll() { vertx = Vertx.vertx(); @@ -191,9 +169,9 @@ public static void afterAll() { */ @Test public void testRegularReconciliation(VertxTestContext context) { - ZookeeperCluster zkCluster = ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, KAFKA, VERSIONS, SHARED_ENV_PROVIDER); - StrimziPodSet zkPodSet = zkCluster.generatePodSet(KAFKA.getSpec().getZookeeper().getReplicas(), false, null, null, podNum -> null); - StrimziPodSet kafkaPodSet = KAFKA_CLUSTER.generatePodSets(false, null, null, brokerId -> null).get(0); + 
ZookeeperCluster zkCluster = ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, VERSIONS, SHARED_ENV_PROVIDER); + StrimziPodSet zkPodSet = zkCluster.generatePodSet(kafka.getSpec().getZookeeper().getReplicas(), false, null, null, podNum -> null); + StrimziPodSet kafkaPodSet = kafkaCluster.generatePodSets(false, null, null, brokerId -> null).get(0); ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(false); @@ -202,7 +180,7 @@ public void testRegularReconciliation(VertxTestContext context) { when(secretOps.listAsync(any(), any(Labels.class))).thenReturn(Future.succeededFuture(List.of())); ConfigMapOperator mockCmOps = supplier.configMapOperations; - when(mockCmOps.listAsync(any(), eq(KAFKA_CLUSTER.getSelectorLabels()))).thenReturn(Future.succeededFuture(List.of())); + when(mockCmOps.listAsync(any(), eq(kafkaCluster.getSelectorLabels()))).thenReturn(Future.succeededFuture(List.of())); ArgumentCaptor cmReconciliationCaptor = ArgumentCaptor.forClass(String.class); when(mockCmOps.reconcile(any(), any(), cmReconciliationCaptor.capture(), any())).thenReturn(Future.succeededFuture()); ArgumentCaptor cmDeletionCaptor = ArgumentCaptor.forClass(String.class); @@ -213,8 +191,8 @@ public void testRegularReconciliation(VertxTestContext context) { when(mockPodSetOps.getAsync(any(), eq(zkCluster.getComponentName()))).thenReturn(Future.succeededFuture(zkPodSet)); when(mockPodSetOps.reconcile(any(), any(), eq(zkCluster.getComponentName()), any())).thenReturn(Future.succeededFuture(ReconcileResult.noop(zkPodSet))); // Kafka - when(mockPodSetOps.listAsync(any(), eq(KAFKA_CLUSTER.getSelectorLabels()))).thenReturn(Future.succeededFuture(List.of(kafkaPodSet))); - when(mockPodSetOps.batchReconcile(any(), any(), any(), eq(KAFKA_CLUSTER.getSelectorLabels()))).thenAnswer(i -> { + when(mockPodSetOps.listAsync(any(), eq(kafkaCluster.getSelectorLabels()))).thenReturn(Future.succeededFuture(List.of(kafkaPodSet))); + when(mockPodSetOps.batchReconcile(any(), 
any(), any(), eq(kafkaCluster.getSelectorLabels()))).thenAnswer(i -> { List podSets = i.getArgument(2); HashMap> result = new HashMap<>(); @@ -227,42 +205,42 @@ public void testRegularReconciliation(VertxTestContext context) { StatefulSetOperator mockStsOps = supplier.stsOperations; when(mockStsOps.getAsync(any(), eq(zkCluster.getComponentName()))).thenReturn(Future.succeededFuture(null)); // Zoo STS is queried and deleted if it still exists - when(mockStsOps.getAsync(any(), eq(KAFKA_CLUSTER.getComponentName()))).thenReturn(Future.succeededFuture(null)); // Kafka STS is queried and deleted if it still exists + when(mockStsOps.getAsync(any(), eq(kafkaCluster.getComponentName()))).thenReturn(Future.succeededFuture(null)); // Kafka STS is queried and deleted if it still exists PodOperator mockPodOps = supplier.podOperations; when(mockPodOps.listAsync(any(), eq(zkCluster.getSelectorLabels()))).thenReturn(Future.succeededFuture(Collections.emptyList())); - when(mockPodOps.listAsync(any(), eq(KAFKA_CLUSTER.getSelectorLabels()))).thenReturn(Future.succeededFuture(Collections.emptyList())); + when(mockPodOps.listAsync(any(), eq(kafkaCluster.getSelectorLabels()))).thenReturn(Future.succeededFuture(Collections.emptyList())); when(mockPodOps.listAsync(any(), any(Labels.class))).thenReturn(Future.succeededFuture(Collections.emptyList())); CrdOperator mockKafkaOps = supplier.kafkaOperator; - when(mockKafkaOps.getAsync(eq(NAMESPACE), eq(CLUSTER_NAME))).thenReturn(Future.succeededFuture(KAFKA)); - when(mockKafkaOps.get(eq(NAMESPACE), eq(CLUSTER_NAME))).thenReturn(KAFKA); + when(mockKafkaOps.getAsync(eq(namespace), eq(clusterName))).thenReturn(Future.succeededFuture(kafka)); + when(mockKafkaOps.get(eq(namespace), eq(clusterName))).thenReturn(kafka); when(mockKafkaOps.updateStatusAsync(any(), any())).thenReturn(Future.succeededFuture()); ClusterOperatorConfig config = ResourceUtils.dummyClusterOperatorConfig(VERSIONS); MockZooKeeperReconciler zr = new MockZooKeeperReconciler( - new 
Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME), + new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, namespace, clusterName), vertx, config, supplier, new PlatformFeaturesAvailability(false, KUBERNETES_VERSION), - KAFKA, + kafka, VERSION_CHANGE, null, 0, - CLUSTER_CA); + clusterCa); MockKafkaReconciler kr = new MockKafkaReconciler( - new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME), + new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, namespace, clusterName), vertx, config, supplier, new PlatformFeaturesAvailability(false, KUBERNETES_VERSION), - KAFKA, - KAFKA_CLUSTER, - CLUSTER_CA, - CLIENTS_CA); + kafka, + kafkaCluster, + clusterCa, + clientsCa); MockKafkaAssemblyOperator kao = new MockKafkaAssemblyOperator( vertx, new PlatformFeaturesAvailability(false, KUBERNETES_VERSION), @@ -274,7 +252,7 @@ vertx, new PlatformFeaturesAvailability(false, KUBERNETES_VERSION), kr); Checkpoint async = context.checkpoint(); - kao.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME)) + kao.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, namespace, clusterName)) .onComplete(context.succeeding(v -> context.verify(() -> { assertThat(zr.maybeRollZooKeeperInvocations, is(1)); assertThat(zr.zooPodNeedsRestart.apply(podFromPodSet(zkPodSet, "my-cluster-zookeeper-0")), empty()); @@ -302,9 +280,9 @@ vertx, new PlatformFeaturesAvailability(false, KUBERNETES_VERSION), */ @Test public void testFirstReconciliation(VertxTestContext context) { - ZookeeperCluster zkCluster = ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, KAFKA, VERSIONS, SHARED_ENV_PROVIDER); - StrimziPodSet zkPodSet = zkCluster.generatePodSet(KAFKA.getSpec().getZookeeper().getReplicas(), false, null, null, podNum -> null); - StrimziPodSet kafkaPodSet = KAFKA_CLUSTER.generatePodSets(false, null, null, brokerId -> null).get(0); + ZookeeperCluster zkCluster = 
ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, VERSIONS, SHARED_ENV_PROVIDER); + StrimziPodSet zkPodSet = zkCluster.generatePodSet(kafka.getSpec().getZookeeper().getReplicas(), false, null, null, podNum -> null); + StrimziPodSet kafkaPodSet = kafkaCluster.generatePodSets(false, null, null, brokerId -> null).get(0); ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(false); @@ -313,7 +291,7 @@ public void testFirstReconciliation(VertxTestContext context) { when(secretOps.listAsync(any(), any(Labels.class))).thenReturn(Future.succeededFuture(List.of())); ConfigMapOperator mockCmOps = supplier.configMapOperations; - when(mockCmOps.listAsync(any(), eq(KAFKA_CLUSTER.getSelectorLabels()))).thenReturn(Future.succeededFuture(KAFKA_CLUSTER.generatePerBrokerConfigurationConfigMaps(new MetricsAndLogging(null, null), ADVERTISED_HOSTNAMES, ADVERTISED_PORTS))); + when(mockCmOps.listAsync(any(), eq(kafkaCluster.getSelectorLabels()))).thenReturn(Future.succeededFuture(kafkaCluster.generatePerBrokerConfigurationConfigMaps(new MetricsAndLogging(null, null), ADVERTISED_HOSTNAMES, ADVERTISED_PORTS))); ArgumentCaptor cmReconciliationCaptor = ArgumentCaptor.forClass(String.class); when(mockCmOps.reconcile(any(), any(), cmReconciliationCaptor.capture(), any())).thenReturn(Future.succeededFuture()); ArgumentCaptor cmDeletionCaptor = ArgumentCaptor.forClass(String.class); @@ -324,8 +302,8 @@ public void testFirstReconciliation(VertxTestContext context) { when(mockPodSetOps.getAsync(any(), eq(zkCluster.getComponentName()))).thenReturn(Future.succeededFuture(null)); // The PodSet does not exist yet in the first reconciliation when(mockPodSetOps.reconcile(any(), any(), eq(zkCluster.getComponentName()), any())).thenReturn(Future.succeededFuture(ReconcileResult.created(zkPodSet))); // Kafka - when(mockPodSetOps.listAsync(any(), eq(KAFKA_CLUSTER.getSelectorLabels()))).thenReturn(Future.succeededFuture(List.of())); - when(mockPodSetOps.batchReconcile(any(), 
any(), any(), eq(KAFKA_CLUSTER.getSelectorLabels()))).thenAnswer(i -> { + when(mockPodSetOps.listAsync(any(), eq(kafkaCluster.getSelectorLabels()))).thenReturn(Future.succeededFuture(List.of())); + when(mockPodSetOps.batchReconcile(any(), any(), any(), eq(kafkaCluster.getSelectorLabels()))).thenAnswer(i -> { List podSets = i.getArgument(2); HashMap> result = new HashMap<>(); @@ -338,46 +316,46 @@ public void testFirstReconciliation(VertxTestContext context) { StatefulSetOperator mockStsOps = supplier.stsOperations; // Zoo - when(mockStsOps.getAsync(any(), eq(zkCluster.getComponentName()))).thenReturn(Future.succeededFuture(new StatefulSetBuilder().withNewMetadata().withName(zkCluster.getComponentName()).withNamespace(NAMESPACE).endMetadata().build())); // Zoo STS still exists in the first reconciliation + when(mockStsOps.getAsync(any(), eq(zkCluster.getComponentName()))).thenReturn(Future.succeededFuture(new StatefulSetBuilder().withNewMetadata().withName(zkCluster.getComponentName()).withNamespace(namespace).endMetadata().build())); // Zoo STS still exists in the first reconciliation when(mockStsOps.deleteAsync(any(), any(), eq(zkCluster.getComponentName()), eq(false))).thenReturn(Future.succeededFuture()); // The Zoo STS will be deleted during the reconciliation // Kafka - when(mockStsOps.getAsync(any(), eq(KAFKA_CLUSTER.getComponentName()))).thenReturn(Future.succeededFuture(new StatefulSetBuilder().withNewMetadata().withName(KAFKA_CLUSTER.getComponentName()).withNamespace(NAMESPACE).endMetadata().build())); - when(mockStsOps.deleteAsync(any(), any(), eq(KAFKA_CLUSTER.getComponentName()), eq(false))).thenReturn(Future.succeededFuture()); // The Kafka STS will be deleted during the reconciliation + when(mockStsOps.getAsync(any(), eq(kafkaCluster.getComponentName()))).thenReturn(Future.succeededFuture(new StatefulSetBuilder().withNewMetadata().withName(kafkaCluster.getComponentName()).withNamespace(namespace).endMetadata().build())); + 
when(mockStsOps.deleteAsync(any(), any(), eq(kafkaCluster.getComponentName()), eq(false))).thenReturn(Future.succeededFuture()); // The Kafka STS will be deleted during the reconciliation PodOperator mockPodOps = supplier.podOperations; when(mockPodOps.listAsync(any(), eq(zkCluster.getSelectorLabels()))).thenReturn(Future.succeededFuture(Collections.emptyList())); - when(mockPodOps.listAsync(any(), eq(KAFKA_CLUSTER.getSelectorLabels()))).thenReturn(Future.succeededFuture(Collections.emptyList())); + when(mockPodOps.listAsync(any(), eq(kafkaCluster.getSelectorLabels()))).thenReturn(Future.succeededFuture(Collections.emptyList())); when(mockPodOps.listAsync(any(), any(Labels.class))).thenReturn(Future.succeededFuture(Collections.emptyList())); CrdOperator mockKafkaOps = supplier.kafkaOperator; - when(mockKafkaOps.getAsync(eq(NAMESPACE), eq(CLUSTER_NAME))).thenReturn(Future.succeededFuture(KAFKA)); - when(mockKafkaOps.get(eq(NAMESPACE), eq(CLUSTER_NAME))).thenReturn(KAFKA); + when(mockKafkaOps.getAsync(eq(namespace), eq(clusterName))).thenReturn(Future.succeededFuture(kafka)); + when(mockKafkaOps.get(eq(namespace), eq(clusterName))).thenReturn(kafka); when(mockKafkaOps.updateStatusAsync(any(), any())).thenReturn(Future.succeededFuture()); ClusterOperatorConfig config = ResourceUtils.dummyClusterOperatorConfig(VERSIONS); MockZooKeeperReconciler zr = new MockZooKeeperReconciler( - new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME), + new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, namespace, clusterName), vertx, config, supplier, new PlatformFeaturesAvailability(false, KUBERNETES_VERSION), - KAFKA, + kafka, VERSION_CHANGE, null, 0, - CLUSTER_CA); + clusterCa); MockKafkaReconciler kr = new MockKafkaReconciler( - new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME), + new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, namespace, clusterName), vertx, config, supplier, new 
PlatformFeaturesAvailability(false, KUBERNETES_VERSION), - KAFKA, - KAFKA_CLUSTER, - CLUSTER_CA, - CLIENTS_CA); + kafka, + kafkaCluster, + clusterCa, + clientsCa); MockKafkaAssemblyOperator kao = new MockKafkaAssemblyOperator( vertx, new PlatformFeaturesAvailability(false, KUBERNETES_VERSION), @@ -389,7 +367,7 @@ vertx, new PlatformFeaturesAvailability(false, KUBERNETES_VERSION), kr); Checkpoint async = context.checkpoint(); - kao.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME)) + kao.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, namespace, clusterName)) .onComplete(context.succeeding(v -> context.verify(() -> { // Test that the old Zoo STS was deleted verify(mockStsOps, times(1)).deleteAsync(any(), any(), eq(zkCluster.getComponentName()), eq(false)); @@ -420,7 +398,7 @@ vertx, new PlatformFeaturesAvailability(false, KUBERNETES_VERSION), */ @Test public void testReconciliationWithRoll(VertxTestContext context) { - Kafka oldKafka = new KafkaBuilder(KAFKA) + Kafka oldKafka = new KafkaBuilder(kafka) .editSpec() .editZookeeper() .withImage("old-image:latest") @@ -432,12 +410,12 @@ public void testReconciliationWithRoll(VertxTestContext context) { .build(); ZookeeperCluster oldZkCluster = ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, oldKafka, VERSIONS, SHARED_ENV_PROVIDER); - StrimziPodSet oldZkPodSet = oldZkCluster.generatePodSet(KAFKA.getSpec().getZookeeper().getReplicas(), false, null, null, podNum -> null); + StrimziPodSet oldZkPodSet = oldZkCluster.generatePodSet(kafka.getSpec().getZookeeper().getReplicas(), false, null, null, podNum -> null); List oldPools = NodePoolUtils.createKafkaPools(Reconciliation.DUMMY_RECONCILIATION, oldKafka, null, Map.of(), Map.of(), false, SHARED_ENV_PROVIDER); KafkaCluster oldKafkaCluster = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, oldKafka, oldPools, VERSIONS, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, 
KafkaMetadataConfigurationState.ZK, null, SHARED_ENV_PROVIDER); StrimziPodSet oldKafkaPodSet = oldKafkaCluster.generatePodSets(false, null, null, brokerId -> null).get(0); - ZookeeperCluster newZkCluster = ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, KAFKA, VERSIONS, SHARED_ENV_PROVIDER); + ZookeeperCluster newZkCluster = ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, VERSIONS, SHARED_ENV_PROVIDER); ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(false); @@ -453,8 +431,8 @@ public void testReconciliationWithRoll(VertxTestContext context) { StrimziPodSetOperator mockPodSetOps = supplier.strimziPodSetOperator; when(mockPodSetOps.getAsync(any(), eq(newZkCluster.getComponentName()))).thenReturn(Future.succeededFuture(oldZkPodSet)); when(mockPodSetOps.reconcile(any(), any(), eq(newZkCluster.getComponentName()), any())).thenAnswer(i -> Future.succeededFuture(ReconcileResult.noop(i.getArgument(3)))); - when(mockPodSetOps.listAsync(any(), eq(KAFKA_CLUSTER.getSelectorLabels()))).thenReturn(Future.succeededFuture(List.of(oldKafkaPodSet))); - when(mockPodSetOps.batchReconcile(any(), any(), any(), eq(KAFKA_CLUSTER.getSelectorLabels()))).thenAnswer(i -> { + when(mockPodSetOps.listAsync(any(), eq(kafkaCluster.getSelectorLabels()))).thenReturn(Future.succeededFuture(List.of(oldKafkaPodSet))); + when(mockPodSetOps.batchReconcile(any(), any(), any(), eq(kafkaCluster.getSelectorLabels()))).thenAnswer(i -> { List podSets = i.getArgument(2); HashMap> result = new HashMap<>(); @@ -467,42 +445,42 @@ public void testReconciliationWithRoll(VertxTestContext context) { StatefulSetOperator mockStsOps = supplier.stsOperations; when(mockStsOps.getAsync(any(), eq(newZkCluster.getComponentName()))).thenReturn(Future.succeededFuture(null)); // Zoo STS is queried and deleted if it still exists - when(mockStsOps.getAsync(any(), eq(KAFKA_CLUSTER.getComponentName()))).thenReturn(Future.succeededFuture(null)); // Kafka STS is queried and 
deleted if it still exists + when(mockStsOps.getAsync(any(), eq(kafkaCluster.getComponentName()))).thenReturn(Future.succeededFuture(null)); // Kafka STS is queried and deleted if it still exists PodOperator mockPodOps = supplier.podOperations; when(mockPodOps.listAsync(any(), eq(newZkCluster.getSelectorLabels()))).thenReturn(Future.succeededFuture(Collections.emptyList())); - when(mockPodOps.listAsync(any(), eq(KAFKA_CLUSTER.getSelectorLabels()))).thenReturn(Future.succeededFuture(Collections.emptyList())); + when(mockPodOps.listAsync(any(), eq(kafkaCluster.getSelectorLabels()))).thenReturn(Future.succeededFuture(Collections.emptyList())); when(mockPodOps.listAsync(any(), any(Labels.class))).thenReturn(Future.succeededFuture(Collections.emptyList())); CrdOperator mockKafkaOps = supplier.kafkaOperator; - when(mockKafkaOps.getAsync(eq(NAMESPACE), eq(CLUSTER_NAME))).thenReturn(Future.succeededFuture(KAFKA)); - when(mockKafkaOps.get(eq(NAMESPACE), eq(CLUSTER_NAME))).thenReturn(KAFKA); + when(mockKafkaOps.getAsync(eq(namespace), eq(clusterName))).thenReturn(Future.succeededFuture(kafka)); + when(mockKafkaOps.get(eq(namespace), eq(clusterName))).thenReturn(kafka); when(mockKafkaOps.updateStatusAsync(any(), any())).thenReturn(Future.succeededFuture()); ClusterOperatorConfig config = ResourceUtils.dummyClusterOperatorConfig(VERSIONS); MockZooKeeperReconciler zr = new MockZooKeeperReconciler( - new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME), + new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, namespace, clusterName), vertx, config, supplier, new PlatformFeaturesAvailability(false, KUBERNETES_VERSION), - KAFKA, + kafka, VERSION_CHANGE, null, 0, - CLUSTER_CA); + clusterCa); MockKafkaReconciler kr = new MockKafkaReconciler( - new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME), + new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, namespace, clusterName), vertx, config, supplier, new 
PlatformFeaturesAvailability(false, KUBERNETES_VERSION), - KAFKA, - KAFKA_CLUSTER, - CLUSTER_CA, - CLIENTS_CA); + kafka, + kafkaCluster, + clusterCa, + clientsCa); MockKafkaAssemblyOperator kao = new MockKafkaAssemblyOperator( vertx, new PlatformFeaturesAvailability(false, KUBERNETES_VERSION), @@ -514,7 +492,7 @@ vertx, new PlatformFeaturesAvailability(false, KUBERNETES_VERSION), kr); Checkpoint async = context.checkpoint(); - kao.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME)) + kao.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, namespace, clusterName)) .onComplete(context.succeeding(v -> context.verify(() -> { assertThat(zr.maybeRollZooKeeperInvocations, is(1)); assertThat(zr.zooPodNeedsRestart.apply(podFromPodSet(oldZkPodSet, "my-cluster-zookeeper-0")), is(List.of("Pod has old revision"))); @@ -537,7 +515,7 @@ vertx, new PlatformFeaturesAvailability(false, KUBERNETES_VERSION), */ @Test public void testScaleUp(VertxTestContext context) { - Kafka oldKafka = new KafkaBuilder(KAFKA) + Kafka oldKafka = new KafkaBuilder(kafka) .editSpec() .editZookeeper() .withReplicas(1) @@ -554,7 +532,7 @@ public void testScaleUp(VertxTestContext context) { KafkaCluster oldKafkaCluster = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, oldKafka, oldPools, VERSIONS, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, KafkaMetadataConfigurationState.ZK, null, SHARED_ENV_PROVIDER); StrimziPodSet oldKafkaPodSet = oldKafkaCluster.generatePodSets(false, null, null, brokerId -> null).get(0); - ZookeeperCluster zkCluster = ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, KAFKA, VERSIONS, SHARED_ENV_PROVIDER); + ZookeeperCluster zkCluster = ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, VERSIONS, SHARED_ENV_PROVIDER); ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(false); @@ -576,10 +554,10 @@ public void testScaleUp(VertxTestContext context) { ArgumentCaptor 
zkPodSetCaptor = ArgumentCaptor.forClass(StrimziPodSet.class); when(mockPodSetOps.reconcile(any(), any(), eq(zkCluster.getComponentName()), zkPodSetCaptor.capture())).thenAnswer(i -> Future.succeededFuture(ReconcileResult.noop(i.getArgument(3)))); // Zoo - when(mockPodSetOps.listAsync(any(), eq(KAFKA_CLUSTER.getSelectorLabels()))).thenReturn(Future.succeededFuture(List.of(oldKafkaPodSet))); + when(mockPodSetOps.listAsync(any(), eq(kafkaCluster.getSelectorLabels()))).thenReturn(Future.succeededFuture(List.of(oldKafkaPodSet))); @SuppressWarnings("unchecked") ArgumentCaptor> kafkaPodSetBatchCaptor = ArgumentCaptor.forClass(List.class); - when(mockPodSetOps.batchReconcile(any(), any(), kafkaPodSetBatchCaptor.capture(), eq(KAFKA_CLUSTER.getSelectorLabels()))).thenAnswer(i -> { + when(mockPodSetOps.batchReconcile(any(), any(), kafkaPodSetBatchCaptor.capture(), eq(kafkaCluster.getSelectorLabels()))).thenAnswer(i -> { List podSets = i.getArgument(2); HashMap> result = new HashMap<>(); @@ -592,43 +570,43 @@ public void testScaleUp(VertxTestContext context) { StatefulSetOperator mockStsOps = supplier.stsOperations; when(mockStsOps.getAsync(any(), eq(zkCluster.getComponentName()))).thenReturn(Future.succeededFuture(null)); // Zoo STS is queried and deleted if it still exists - when(mockStsOps.getAsync(any(), eq(KAFKA_CLUSTER.getComponentName()))).thenReturn(Future.succeededFuture(null)); // Kafka STS is queried and deleted if it still exists + when(mockStsOps.getAsync(any(), eq(kafkaCluster.getComponentName()))).thenReturn(Future.succeededFuture(null)); // Kafka STS is queried and deleted if it still exists PodOperator mockPodOps = supplier.podOperations; when(mockPodOps.listAsync(any(), eq(zkCluster.getSelectorLabels()))).thenReturn(Future.succeededFuture(Collections.emptyList())); - when(mockPodOps.listAsync(any(), eq(KAFKA_CLUSTER.getSelectorLabels()))).thenReturn(Future.succeededFuture(Collections.emptyList())); + when(mockPodOps.listAsync(any(), 
eq(kafkaCluster.getSelectorLabels()))).thenReturn(Future.succeededFuture(Collections.emptyList())); when(mockPodOps.listAsync(any(), any(Labels.class))).thenReturn(Future.succeededFuture(Collections.emptyList())); when(mockPodOps.readiness(any(), any(), any(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); CrdOperator mockKafkaOps = supplier.kafkaOperator; - when(mockKafkaOps.getAsync(eq(NAMESPACE), eq(CLUSTER_NAME))).thenReturn(Future.succeededFuture(KAFKA)); - when(mockKafkaOps.get(eq(NAMESPACE), eq(CLUSTER_NAME))).thenReturn(KAFKA); + when(mockKafkaOps.getAsync(eq(namespace), eq(clusterName))).thenReturn(Future.succeededFuture(kafka)); + when(mockKafkaOps.get(eq(namespace), eq(clusterName))).thenReturn(kafka); when(mockKafkaOps.updateStatusAsync(any(), any())).thenReturn(Future.succeededFuture()); ClusterOperatorConfig config = ResourceUtils.dummyClusterOperatorConfig(VERSIONS); MockZooKeeperReconciler zr = new MockZooKeeperReconciler( - new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME), + new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, namespace, clusterName), vertx, config, supplier, new PlatformFeaturesAvailability(false, KUBERNETES_VERSION), - KAFKA, + kafka, VERSION_CHANGE, null, 1, - CLUSTER_CA); + clusterCa); MockKafkaReconciler kr = new MockKafkaReconciler( - new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME), + new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, namespace, clusterName), vertx, config, supplier, new PlatformFeaturesAvailability(false, KUBERNETES_VERSION), - KAFKA, - KAFKA_CLUSTER, - CLUSTER_CA, - CLIENTS_CA); + kafka, + kafkaCluster, + clusterCa, + clientsCa); MockKafkaAssemblyOperator kao = new MockKafkaAssemblyOperator( vertx, new PlatformFeaturesAvailability(false, KUBERNETES_VERSION), @@ -640,7 +618,7 @@ vertx, new PlatformFeaturesAvailability(false, KUBERNETES_VERSION), kr); Checkpoint async = context.checkpoint(); - kao.reconcile(new 
Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME)) + kao.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, namespace, clusterName)) .onComplete(context.succeeding(v -> context.verify(() -> { // Scale-up of Zoo is done pod by pod => the reconcile method is called 3 times with 1, 2 and 3 pods. assertThat(zkPodSetCaptor.getAllValues().size(), is(3)); @@ -676,7 +654,7 @@ vertx, new PlatformFeaturesAvailability(false, KUBERNETES_VERSION), */ @Test public void testScaleDown(VertxTestContext context) { - Kafka oldKafka = new KafkaBuilder(KAFKA) + Kafka oldKafka = new KafkaBuilder(kafka) .editSpec() .editZookeeper() .withReplicas(5) @@ -693,7 +671,7 @@ public void testScaleDown(VertxTestContext context) { KafkaCluster oldKafkaCluster = KafkaCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, oldKafka, oldPools, VERSIONS, KafkaVersionTestUtils.DEFAULT_ZOOKEEPER_VERSION_CHANGE, KafkaMetadataConfigurationState.ZK, null, SHARED_ENV_PROVIDER); StrimziPodSet oldKafkaPodSet = oldKafkaCluster.generatePodSets(false, null, null, brokerId -> null).get(0); - ZookeeperCluster zkCluster = ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, KAFKA, VERSIONS, SHARED_ENV_PROVIDER); + ZookeeperCluster zkCluster = ZookeeperCluster.fromCrd(Reconciliation.DUMMY_RECONCILIATION, kafka, VERSIONS, SHARED_ENV_PROVIDER); ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(false); @@ -715,10 +693,10 @@ public void testScaleDown(VertxTestContext context) { ArgumentCaptor zkPodSetCaptor = ArgumentCaptor.forClass(StrimziPodSet.class); when(mockPodSetOps.reconcile(any(), any(), eq(zkCluster.getComponentName()), zkPodSetCaptor.capture())).thenAnswer(i -> Future.succeededFuture(ReconcileResult.noop(i.getArgument(3)))); // Kafka - when(mockPodSetOps.listAsync(any(), eq(KAFKA_CLUSTER.getSelectorLabels()))).thenReturn(Future.succeededFuture(List.of(oldKafkaPodSet))); + when(mockPodSetOps.listAsync(any(), 
eq(kafkaCluster.getSelectorLabels()))).thenReturn(Future.succeededFuture(List.of(oldKafkaPodSet))); @SuppressWarnings("unchecked") ArgumentCaptor> kafkaPodSetBatchCaptor = ArgumentCaptor.forClass(List.class); - when(mockPodSetOps.batchReconcile(any(), any(), kafkaPodSetBatchCaptor.capture(), eq(KAFKA_CLUSTER.getSelectorLabels()))).thenAnswer(i -> { + when(mockPodSetOps.batchReconcile(any(), any(), kafkaPodSetBatchCaptor.capture(), eq(kafkaCluster.getSelectorLabels()))).thenAnswer(i -> { List podSets = i.getArgument(2); HashMap> result = new HashMap<>(); @@ -729,48 +707,48 @@ public void testScaleDown(VertxTestContext context) { return Future.succeededFuture(result); }); ArgumentCaptor kafkaPodSetCaptor = ArgumentCaptor.forClass(StrimziPodSet.class); - when(mockPodSetOps.reconcile(any(), any(), eq(KAFKA_CLUSTER.getComponentName()), kafkaPodSetCaptor.capture())).thenAnswer(i -> Future.succeededFuture(ReconcileResult.noop(i.getArgument(3)))); + when(mockPodSetOps.reconcile(any(), any(), eq(kafkaCluster.getComponentName()), kafkaPodSetCaptor.capture())).thenAnswer(i -> Future.succeededFuture(ReconcileResult.noop(i.getArgument(3)))); StatefulSetOperator mockStsOps = supplier.stsOperations; when(mockStsOps.getAsync(any(), eq(zkCluster.getComponentName()))).thenReturn(Future.succeededFuture(null)); // Zoo STS is queried and deleted if it still exists - when(mockStsOps.getAsync(any(), eq(KAFKA_CLUSTER.getComponentName()))).thenReturn(Future.succeededFuture(null)); // Kafka STS is queried and deleted if it still exists + when(mockStsOps.getAsync(any(), eq(kafkaCluster.getComponentName()))).thenReturn(Future.succeededFuture(null)); // Kafka STS is queried and deleted if it still exists PodOperator mockPodOps = supplier.podOperations; when(mockPodOps.listAsync(any(), eq(zkCluster.getSelectorLabels()))).thenReturn(Future.succeededFuture(Collections.emptyList())); - when(mockPodOps.listAsync(any(), 
eq(KAFKA_CLUSTER.getSelectorLabels()))).thenReturn(Future.succeededFuture(Collections.emptyList())); + when(mockPodOps.listAsync(any(), eq(kafkaCluster.getSelectorLabels()))).thenReturn(Future.succeededFuture(Collections.emptyList())); when(mockPodOps.listAsync(any(), any(Labels.class))).thenReturn(Future.succeededFuture(Collections.emptyList())); when(mockPodOps.readiness(any(), any(), any(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); when(mockPodOps.waitFor(any(), any(), any(), any(), anyLong(), anyLong(), any())).thenReturn(Future.succeededFuture()); CrdOperator mockKafkaOps = supplier.kafkaOperator; - when(mockKafkaOps.getAsync(eq(NAMESPACE), eq(CLUSTER_NAME))).thenReturn(Future.succeededFuture(KAFKA)); - when(mockKafkaOps.get(eq(NAMESPACE), eq(CLUSTER_NAME))).thenReturn(KAFKA); + when(mockKafkaOps.getAsync(eq(namespace), eq(clusterName))).thenReturn(Future.succeededFuture(kafka)); + when(mockKafkaOps.get(eq(namespace), eq(clusterName))).thenReturn(kafka); when(mockKafkaOps.updateStatusAsync(any(), any())).thenReturn(Future.succeededFuture()); ClusterOperatorConfig config = ResourceUtils.dummyClusterOperatorConfig(VERSIONS); MockZooKeeperReconciler zr = new MockZooKeeperReconciler( - new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME), + new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, namespace, clusterName), vertx, config, supplier, new PlatformFeaturesAvailability(false, KUBERNETES_VERSION), - KAFKA, + kafka, VERSION_CHANGE, null, 5, - CLUSTER_CA); + clusterCa); MockKafkaReconciler kr = new MockKafkaReconciler( - new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME), + new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, namespace, clusterName), vertx, config, supplier, new PlatformFeaturesAvailability(false, KUBERNETES_VERSION), - KAFKA, - KAFKA_CLUSTER, - CLUSTER_CA, - CLIENTS_CA); + kafka, + kafkaCluster, + clusterCa, + clientsCa); MockKafkaAssemblyOperator kao = new 
MockKafkaAssemblyOperator( vertx, new PlatformFeaturesAvailability(false, KUBERNETES_VERSION), @@ -782,7 +760,7 @@ vertx, new PlatformFeaturesAvailability(false, KUBERNETES_VERSION), kr); Checkpoint async = context.checkpoint(); - kao.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME)) + kao.reconcile(new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, namespace, clusterName)) .onComplete(context.succeeding(v -> context.verify(() -> { // Scale-down of Zoo is done pod by pod => the reconcile method is called 3 times with 1, 2 and 3 pods. assertThat(zkPodSetCaptor.getAllValues().size(), is(3)); @@ -816,7 +794,7 @@ vertx, new PlatformFeaturesAvailability(false, KUBERNETES_VERSION), @Test public void testScaleDownWithEmptyBrokersWithBrokerScaleDownCheckEnabled(VertxTestContext context) { - Kafka oldKafka = new KafkaBuilder(KAFKA) + Kafka oldKafka = new KafkaBuilder(kafka) .editSpec() .editZookeeper() .withReplicas(5) @@ -827,7 +805,7 @@ public void testScaleDownWithEmptyBrokersWithBrokerScaleDownCheckEnabled(VertxTe .endSpec() .build(); - Kafka patchKafka = new KafkaBuilder(KAFKA) + Kafka patchKafka = new KafkaBuilder(kafka) .editMetadata() .addToAnnotations(Map.of(Annotations.ANNO_STRIMZI_IO_SKIP_BROKER_SCALEDOWN_CHECK, "false")) .endMetadata() @@ -854,10 +832,10 @@ public void testScaleDownWithEmptyBrokersWithBrokerScaleDownCheckEnabled(VertxTe StrimziPodSetOperator mockPodSetOps = supplier.strimziPodSetOperator; // Kafka - when(mockPodSetOps.listAsync(any(), eq(KAFKA_CLUSTER.getSelectorLabels()))).thenReturn(Future.succeededFuture(List.of(oldKafkaPodSet))); + when(mockPodSetOps.listAsync(any(), eq(kafkaCluster.getSelectorLabels()))).thenReturn(Future.succeededFuture(List.of(oldKafkaPodSet))); @SuppressWarnings("unchecked") ArgumentCaptor> kafkaPodSetBatchCaptor = ArgumentCaptor.forClass(List.class); - when(mockPodSetOps.batchReconcile(any(), any(), kafkaPodSetBatchCaptor.capture(), 
eq(KAFKA_CLUSTER.getSelectorLabels()))).thenAnswer(i -> { + when(mockPodSetOps.batchReconcile(any(), any(), kafkaPodSetBatchCaptor.capture(), eq(kafkaCluster.getSelectorLabels()))).thenAnswer(i -> { List podSets = i.getArgument(2); HashMap> result = new HashMap<>(); @@ -868,13 +846,13 @@ public void testScaleDownWithEmptyBrokersWithBrokerScaleDownCheckEnabled(VertxTe return Future.succeededFuture(result); }); ArgumentCaptor kafkaPodSetCaptor = ArgumentCaptor.forClass(StrimziPodSet.class); - when(mockPodSetOps.reconcile(any(), any(), eq(KAFKA_CLUSTER.getComponentName()), kafkaPodSetCaptor.capture())).thenAnswer(i -> Future.succeededFuture(ReconcileResult.noop(i.getArgument(3)))); + when(mockPodSetOps.reconcile(any(), any(), eq(kafkaCluster.getComponentName()), kafkaPodSetCaptor.capture())).thenAnswer(i -> Future.succeededFuture(ReconcileResult.noop(i.getArgument(3)))); StatefulSetOperator mockStsOps = supplier.stsOperations; - when(mockStsOps.getAsync(any(), eq(KAFKA_CLUSTER.getComponentName()))).thenReturn(Future.succeededFuture(null)); // Kafka STS is queried and deleted if it still exists + when(mockStsOps.getAsync(any(), eq(kafkaCluster.getComponentName()))).thenReturn(Future.succeededFuture(null)); // Kafka STS is queried and deleted if it still exists PodOperator mockPodOps = supplier.podOperations; - when(mockPodOps.listAsync(any(), eq(KAFKA_CLUSTER.getSelectorLabels()))).thenReturn(Future.succeededFuture(Collections.emptyList())); + when(mockPodOps.listAsync(any(), eq(kafkaCluster.getSelectorLabels()))).thenReturn(Future.succeededFuture(Collections.emptyList())); when(mockPodOps.listAsync(any(), any(Labels.class))).thenReturn(Future.succeededFuture(Collections.emptyList())); when(mockPodOps.readiness(any(), any(), any(), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); when(mockPodOps.waitFor(any(), any(), any(), any(), anyLong(), anyLong(), any())).thenReturn(Future.succeededFuture()); @@ -882,15 +860,15 @@ public void 
testScaleDownWithEmptyBrokersWithBrokerScaleDownCheckEnabled(VertxTe ClusterOperatorConfig config = ResourceUtils.dummyClusterOperatorConfig(VERSIONS); MockKafkaReconciler kr = new MockKafkaReconciler( - new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME), + new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, namespace, clusterName), vertx, config, supplier, new PlatformFeaturesAvailability(false, KUBERNETES_VERSION), patchKafka, - KAFKA_CLUSTER, - CLUSTER_CA, - CLIENTS_CA); + kafkaCluster, + clusterCa, + clientsCa); KafkaStatus status = new KafkaStatus(); Checkpoint async = context.checkpoint(); diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaAssemblyOperatorPodSetWithSSATest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaAssemblyOperatorPodSetWithSSATest.java new file mode 100644 index 00000000000..163654d67cb --- /dev/null +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaAssemblyOperatorPodSetWithSSATest.java @@ -0,0 +1,65 @@ +/* + * Copyright Strimzi authors. + * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). 
+ */ +package io.strimzi.operator.cluster.operator.assembly; + +import io.fabric8.kubernetes.api.model.ManagedFieldsEntryBuilder; +import io.strimzi.api.kafka.model.kafka.Kafka; +import io.strimzi.api.kafka.model.kafka.KafkaBuilder; +import io.strimzi.api.kafka.model.kafka.listener.GenericKafkaListenerBuilder; +import io.strimzi.api.kafka.model.kafka.listener.KafkaListenerType; +import io.vertx.junit5.VertxExtension; +import org.junit.jupiter.api.extension.ExtendWith; + +import java.time.OffsetDateTime; +import java.time.ZoneOffset; +import java.util.Map; + +@ExtendWith(VertxExtension.class) +@SuppressWarnings("checkstyle:ClassFanOutComplexity") +public class KafkaAssemblyOperatorPodSetWithSSATest extends KafkaAssemblyOperatorPodSetTest { + Kafka initKafka() { + return new KafkaBuilder() + .withNewMetadata() + .withName(clusterName) + .withNamespace(namespace) + .withManagedFields( + new ManagedFieldsEntryBuilder() + .withManager("test") + .withOperation("Apply") + .withApiVersion("v1") + .withTime(OffsetDateTime.now(ZoneOffset.UTC).toString()) + .withFieldsType("FieldsV1").withNewFieldsV1() + .addToAdditionalProperties( + Map.of("f:metadata", + Map.of("f:labels", + Map.of("f:test-label", Map.of()) + ) + ) + ) + .endFieldsV1() + .build() + ) + .endMetadata() + .withNewSpec() + .withNewKafka() + .withReplicas(3) + .withListeners(new GenericKafkaListenerBuilder() + .withName("plain") + .withPort(9092) + .withType(KafkaListenerType.INTERNAL) + .withTls(false) + .build()) + .withNewEphemeralStorage() + .endEphemeralStorage() + .endKafka() + .withNewZookeeper() + .withReplicas(3) + .withNewEphemeralStorage() + .endEphemeralStorage() + .endZookeeper() + .endSpec() + .build(); + } +} diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaAssemblyOperatorPodSetWithoutSSATest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaAssemblyOperatorPodSetWithoutSSATest.java new file mode 100644 index 
00000000000..4e5ed110206 --- /dev/null +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaAssemblyOperatorPodSetWithoutSSATest.java @@ -0,0 +1,44 @@ +/* + * Copyright Strimzi authors. + * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). + */ +package io.strimzi.operator.cluster.operator.assembly; + +import io.strimzi.api.kafka.model.kafka.Kafka; +import io.strimzi.api.kafka.model.kafka.KafkaBuilder; +import io.strimzi.api.kafka.model.kafka.listener.GenericKafkaListenerBuilder; +import io.strimzi.api.kafka.model.kafka.listener.KafkaListenerType; +import io.vertx.junit5.VertxExtension; +import org.junit.jupiter.api.extension.ExtendWith; + +@ExtendWith(VertxExtension.class) +@SuppressWarnings("checkstyle:ClassFanOutComplexity") +public class KafkaAssemblyOperatorPodSetWithoutSSATest extends KafkaAssemblyOperatorPodSetTest { + @Override + Kafka initKafka() { + return new KafkaBuilder() + .withNewMetadata() + .withName(clusterName) + .withNamespace(namespace) + .endMetadata() + .withNewSpec() + .withNewKafka() + .withReplicas(3) + .withListeners(new GenericKafkaListenerBuilder() + .withName("plain") + .withPort(9092) + .withType(KafkaListenerType.INTERNAL) + .withTls(false) + .build()) + .withNewEphemeralStorage() + .endEphemeralStorage() + .endKafka() + .withNewZookeeper() + .withReplicas(3) + .withNewEphemeralStorage() + .endEphemeralStorage() + .endZookeeper() + .endSpec() + .build(); + } +} diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaAssemblyOperatorWithPoolsKRafWithSSAMockTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaAssemblyOperatorWithPoolsKRafWithSSAMockTest.java new file mode 100644 index 00000000000..3ce1d842474 --- /dev/null +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaAssemblyOperatorWithPoolsKRafWithSSAMockTest.java @@ -0,0 +1,20 @@ 
+/* + * Copyright Strimzi authors. + * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). + */ +package io.strimzi.operator.cluster.operator.assembly; + +import io.vertx.junit5.VertxExtension; +import org.junit.jupiter.api.extension.ExtendWith; + +/** + * Tests in this class mirror KafkaAssemblyOperatorWithPoolsKRaftMockTest with +UseServerSideApply + */ +@ExtendWith(VertxExtension.class) +@SuppressWarnings("checkstyle:ClassFanOutComplexity") +public class KafkaAssemblyOperatorWithPoolsKRafWithSSAMockTest extends KafkaAssemblyOperatorWithPoolsKRaftMockTest { + @Override + protected boolean getSSA() { + return true; + } +} \ No newline at end of file diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaAssemblyOperatorWithPoolsKRaftMockTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaAssemblyOperatorWithPoolsKRaftMockTest.java index bfdf0dae0ef..8afe05267e9 100644 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaAssemblyOperatorWithPoolsKRaftMockTest.java +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaAssemblyOperatorWithPoolsKRaftMockTest.java @@ -57,7 +57,7 @@ @ExtendWith(VertxExtension.class) @SuppressWarnings("checkstyle:ClassFanOutComplexity") -public class KafkaAssemblyOperatorWithPoolsKRaftMockTest { +public abstract class KafkaAssemblyOperatorWithPoolsKRaftMockTest { private static final String CLUSTER_NAME = "my-cluster"; private static final KafkaVersion.Lookup VERSIONS = KafkaVersionTestUtils.getKafkaVersionLookup(); @@ -71,6 +71,8 @@ public class KafkaAssemblyOperatorWithPoolsKRaftMockTest { private StrimziPodSetController podSetController; private KafkaAssemblyOperator operator; + protected abstract boolean getSSA(); + @BeforeAll public static void beforeAll() { // Configure the Kubernetes Mock @@ -104,60 +106,60 @@ public void
beforeEach(TestInfo testInfo) { Kafka cluster = new KafkaBuilder() .withNewMetadata() - .withName(CLUSTER_NAME) - .withNamespace(namespace) - .withAnnotations(Map.of( - Annotations.ANNO_STRIMZI_IO_NODE_POOLS, "enabled", - Annotations.ANNO_STRIMZI_IO_KRAFT, "enabled" - )) + .withName(CLUSTER_NAME) + .withNamespace(namespace) + .withAnnotations(Map.of( + Annotations.ANNO_STRIMZI_IO_NODE_POOLS, "enabled", + Annotations.ANNO_STRIMZI_IO_KRAFT, "enabled" + )) .endMetadata() .withNewSpec() - .withNewKafka() - .withConfig(new HashMap<>()) - .withListeners(new GenericKafkaListenerBuilder() - .withName("tls") - .withPort(9092) - .withType(KafkaListenerType.INTERNAL) - .withTls(true) - .build()) - .endKafka() + .withNewKafka() + .withConfig(new HashMap<>()) + .withListeners(new GenericKafkaListenerBuilder() + .withName("tls") + .withPort(9092) + .withType(KafkaListenerType.INTERNAL) + .withTls(true) + .build()) + .endKafka() .endSpec() .withNewStatus() - .withClusterId("CLUSTERID") // Needed to avoid CLuster ID conflicts => should be the same as used in the Kafka Admin API + .withClusterId("CLUSTERID") // Needed to avoid CLuster ID conflicts => should be the same as used in the Kafka Admin API .endStatus() .build(); KafkaNodePool poolA = new KafkaNodePoolBuilder() .withNewMetadata() - .withName("controllers") - .withNamespace(namespace) - .withLabels(Map.of(Labels.STRIMZI_CLUSTER_LABEL, CLUSTER_NAME)) - .withGeneration(1L) + .withName("controllers") + .withNamespace(namespace) + .withLabels(Map.of(Labels.STRIMZI_CLUSTER_LABEL, CLUSTER_NAME)) + .withGeneration(1L) .endMetadata() .withNewSpec() - .withReplicas(3) - .withNewJbodStorage() - .withVolumes(new PersistentClaimStorageBuilder().withId(0).withSize("100Gi").withStorageClass("gp99").build()) - .endJbodStorage() - .withRoles(ProcessRoles.CONTROLLER) - .withResources(new ResourceRequirementsBuilder().withRequests(Map.of("cpu", new Quantity("4"))).build()) + .withReplicas(3) + .withNewJbodStorage() + .withVolumes(new 
PersistentClaimStorageBuilder().withId(0).withSize("100Gi").withStorageClass("gp99").build()) + .endJbodStorage() + .withRoles(ProcessRoles.CONTROLLER) + .withResources(new ResourceRequirementsBuilder().withRequests(Map.of("cpu", new Quantity("4"))).build()) .endSpec() .build(); KafkaNodePool poolB = new KafkaNodePoolBuilder() .withNewMetadata() - .withName("brokers") - .withNamespace(namespace) - .withLabels(Map.of(Labels.STRIMZI_CLUSTER_LABEL, CLUSTER_NAME)) - .withGeneration(1L) + .withName("brokers") + .withNamespace(namespace) + .withLabels(Map.of(Labels.STRIMZI_CLUSTER_LABEL, CLUSTER_NAME)) + .withGeneration(1L) .endMetadata() .withNewSpec() - .withReplicas(3) - .withNewJbodStorage() - .withVolumes(new PersistentClaimStorageBuilder().withId(0).withSize("200Gi").withStorageClass("gp99").build()) - .endJbodStorage() - .withRoles(ProcessRoles.BROKER) - .withResources(new ResourceRequirementsBuilder().withRequests(Map.of("cpu", new Quantity("6"))).build()) + .withReplicas(3) + .withNewJbodStorage() + .withVolumes(new PersistentClaimStorageBuilder().withId(0).withSize("200Gi").withStorageClass("gp99").build()) + .endJbodStorage() + .withRoles(ProcessRoles.BROKER) + .withResources(new ResourceRequirementsBuilder().withRequests(Map.of("cpu", new Quantity("6"))).build()) .endSpec() .build(); @@ -185,7 +187,7 @@ public void beforeEach(TestInfo testInfo) { private ResourceOperatorSupplier supplierWithMocks() { return new ResourceOperatorSupplier(vertx, client, ResourceUtils.zookeeperLeaderFinder(vertx, client), ResourceUtils.adminClientProvider(), ResourceUtils.zookeeperScalerProvider(), ResourceUtils.kafkaAgentClientProvider(), - ResourceUtils.metricsProvider(), new PlatformFeaturesAvailability(false, KubernetesVersion.MINIMAL_SUPPORTED_VERSION), 2_000); + ResourceUtils.metricsProvider(), new PlatformFeaturesAvailability(false, KubernetesVersion.MINIMAL_SUPPORTED_VERSION), 2_000, getSSA()); } @AfterEach diff --git 
a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaAssemblyOperatorWithPoolsKRaftWithoutSSAMockTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaAssemblyOperatorWithPoolsKRaftWithoutSSAMockTest.java new file mode 100644 index 00000000000..631d90e993f --- /dev/null +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaAssemblyOperatorWithPoolsKRaftWithoutSSAMockTest.java @@ -0,0 +1,17 @@ +/* + * Copyright Strimzi authors. + * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). + */ +package io.strimzi.operator.cluster.operator.assembly; + +import io.vertx.junit5.VertxExtension; +import org.junit.jupiter.api.extension.ExtendWith; + +@ExtendWith(VertxExtension.class) +@SuppressWarnings("checkstyle:ClassFanOutComplexity") +public class KafkaAssemblyOperatorWithPoolsKRaftWithoutSSAMockTest extends KafkaAssemblyOperatorWithPoolsKRaftMockTest { + @Override + protected boolean getSSA() { + return false; + } +} diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaAssemblyOperatorWithPoolsMockTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaAssemblyOperatorWithPoolsMockTest.java index d299e059a6a..0409a35bc78 100644 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaAssemblyOperatorWithPoolsMockTest.java +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaAssemblyOperatorWithPoolsMockTest.java @@ -70,7 +70,7 @@ @ExtendWith(VertxExtension.class) @SuppressWarnings("checkstyle:ClassFanOutComplexity") -public class KafkaAssemblyOperatorWithPoolsMockTest { +public abstract class KafkaAssemblyOperatorWithPoolsMockTest { private static final Logger LOGGER = LogManager.getLogger(KafkaAssemblyOperatorWithPoolsMockTest.class); private static final String CLUSTER_NAME = 
"my-cluster"; @@ -204,10 +204,12 @@ public void afterEach() { ResourceUtils.cleanUpTemporaryTLSFiles(); } + protected abstract boolean getSSA(); + private ResourceOperatorSupplier supplierWithMocks() { return new ResourceOperatorSupplier(vertx, client, ResourceUtils.zookeeperLeaderFinder(vertx, client), ResourceUtils.adminClientProvider(), ResourceUtils.zookeeperScalerProvider(), ResourceUtils.kafkaAgentClientProvider(), - ResourceUtils.metricsProvider(), new PlatformFeaturesAvailability(false, KubernetesVersion.MINIMAL_SUPPORTED_VERSION), 2_000); + ResourceUtils.metricsProvider(), new PlatformFeaturesAvailability(false, KubernetesVersion.MINIMAL_SUPPORTED_VERSION), 2_000, getSSA()); } diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaAssemblyOperatorWithPoolsWithSSAMockTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaAssemblyOperatorWithPoolsWithSSAMockTest.java new file mode 100644 index 00000000000..75d2b6d81ac --- /dev/null +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaAssemblyOperatorWithPoolsWithSSAMockTest.java @@ -0,0 +1,12 @@ +/* + * Copyright Strimzi authors. + * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). 
+ */ +package io.strimzi.operator.cluster.operator.assembly; + +public class KafkaAssemblyOperatorWithPoolsWithSSAMockTest extends KafkaAssemblyOperatorWithPoolsMockTest { + @Override + protected boolean getSSA() { + return true; + } +} diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaAssemblyOperatorWithPoolsWithoutSSAMockTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaAssemblyOperatorWithPoolsWithoutSSAMockTest.java new file mode 100644 index 00000000000..f3bdb5d2e88 --- /dev/null +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaAssemblyOperatorWithPoolsWithoutSSAMockTest.java @@ -0,0 +1,12 @@ +/* + * Copyright Strimzi authors. + * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). + */ +package io.strimzi.operator.cluster.operator.assembly; + +public class KafkaAssemblyOperatorWithPoolsWithoutSSAMockTest extends KafkaAssemblyOperatorWithPoolsMockTest { + @Override + protected boolean getSSA() { + return false; + } +} diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaAssemblyOperatorWithSSAMockTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaAssemblyOperatorWithSSAMockTest.java new file mode 100644 index 00000000000..74339701a1b --- /dev/null +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaAssemblyOperatorWithSSAMockTest.java @@ -0,0 +1,20 @@ +/* + * Copyright Strimzi authors. + * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). 
+ */ +package io.strimzi.operator.cluster.operator.assembly; + +import io.vertx.junit5.VertxExtension; +import org.junit.jupiter.api.extension.ExtendWith; + +/** + * Tests in this class mirror KafkaAssemblyOperatorMockTest with +UseServerSideApply + */ +@ExtendWith(VertxExtension.class) +@SuppressWarnings("checkstyle:ClassFanOutComplexity") +public class KafkaAssemblyOperatorWithSSAMockTest extends KafkaAssemblyOperatorMockTest { + @Override + protected boolean getSSA() { + return true; + } +} diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaAssemblyOperatorWithoutSSAMockTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaAssemblyOperatorWithoutSSAMockTest.java new file mode 100644 index 00000000000..8ccfacf9cec --- /dev/null +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaAssemblyOperatorWithoutSSAMockTest.java @@ -0,0 +1,17 @@ +/* + * Copyright Strimzi authors. + * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). 
+ */ +package io.strimzi.operator.cluster.operator.assembly; + +import io.vertx.junit5.VertxExtension; +import org.junit.jupiter.api.extension.ExtendWith; + +@ExtendWith(VertxExtension.class) +@SuppressWarnings("checkstyle:ClassFanOutComplexity") +public class KafkaAssemblyOperatorWithoutSSAMockTest extends KafkaAssemblyOperatorMockTest { + @Override + protected boolean getSSA() { + return false; + } +} diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaConnectAssemblyOperatorMockTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaConnectAssemblyOperatorMockTest.java index 48c0ef7a569..5e22b7c32ad 100644 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaConnectAssemblyOperatorMockTest.java +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaConnectAssemblyOperatorMockTest.java @@ -57,7 +57,7 @@ import static org.mockito.Mockito.when; @ExtendWith(VertxExtension.class) -public class KafkaConnectAssemblyOperatorMockTest { +public abstract class KafkaConnectAssemblyOperatorMockTest { private static final Logger LOGGER = LogManager.getLogger(KafkaConnectAssemblyOperatorMockTest.class); private static final KafkaVersion.Lookup VERSIONS = KafkaVersionTestUtils.getKafkaVersionLookup(); private static final PlatformFeaturesAvailability PFA = new PlatformFeaturesAvailability(false, KubernetesVersion.MINIMAL_SUPPORTED_VERSION); @@ -103,7 +103,7 @@ public void beforeEach(TestInfo testInfo) { namespace = testInfo.getTestMethod().orElseThrow().getName().toLowerCase(Locale.ROOT); mockKube.prepareNamespace(namespace); - supplier = new ResourceOperatorSupplier(vertx, client, ResourceUtils.zookeeperLeaderFinder(vertx, client), ResourceUtils.adminClientProvider(), ResourceUtils.zookeeperScalerProvider(), ResourceUtils.kafkaAgentClientProvider(), ResourceUtils.metricsProvider(), PFA, 2_000); + supplier = new 
ResourceOperatorSupplier(vertx, client, ResourceUtils.zookeeperLeaderFinder(vertx, client), ResourceUtils.adminClientProvider(), ResourceUtils.zookeeperScalerProvider(), ResourceUtils.kafkaAgentClientProvider(), ResourceUtils.metricsProvider(), PFA, 2_000, getSSA()); podSetController = new StrimziPodSetController(namespace, Labels.EMPTY, supplier.kafkaOperator, supplier.connectOperator, supplier.mirrorMaker2Operator, supplier.strimziPodSetOperator, supplier.podOperations, supplier.metricsProvider, Integer.parseInt(ClusterOperatorConfig.POD_SET_CONTROLLER_WORK_QUEUE_SIZE.defaultValue())); podSetController.start(); } @@ -114,6 +114,8 @@ public void afterEach() { client.namespaces().withName(namespace).delete(); } + protected abstract boolean getSSA(); + private Future createConnectCluster(VertxTestContext context, KafkaConnectApi kafkaConnectApi, boolean reconciliationPaused) { ClusterOperatorConfig config = ResourceUtils.dummyClusterOperatorConfig(VERSIONS); this.kco = new KafkaConnectAssemblyOperator(vertx, PFA, supplier, config, foo -> kafkaConnectApi); diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaConnectAssemblyOperatorWithSSAMockTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaConnectAssemblyOperatorWithSSAMockTest.java new file mode 100644 index 00000000000..18aef4ab798 --- /dev/null +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaConnectAssemblyOperatorWithSSAMockTest.java @@ -0,0 +1,19 @@ +/* + * Copyright Strimzi authors. + * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). 
+ */ +package io.strimzi.operator.cluster.operator.assembly; + +import io.vertx.junit5.VertxExtension; +import org.junit.jupiter.api.extension.ExtendWith; + +/** + * Tests in this class mirrors KafkaConnectAssemblyOperatorMockTest with +UserServerSideApply + */ +@ExtendWith(VertxExtension.class) +public class KafkaConnectAssemblyOperatorWithSSAMockTest extends KafkaConnectAssemblyOperatorMockTest { + @Override + protected boolean getSSA() { + return true; + } +} diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaConnectAssemblyOperatorWithoutSSAMockTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaConnectAssemblyOperatorWithoutSSAMockTest.java new file mode 100644 index 00000000000..526bc6d69ce --- /dev/null +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaConnectAssemblyOperatorWithoutSSAMockTest.java @@ -0,0 +1,16 @@ +/* + * Copyright Strimzi authors. + * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). 
+ */ +package io.strimzi.operator.cluster.operator.assembly; + +import io.vertx.junit5.VertxExtension; +import org.junit.jupiter.api.extension.ExtendWith; + +@ExtendWith(VertxExtension.class) +public class KafkaConnectAssemblyOperatorWithoutSSAMockTest extends KafkaConnectAssemblyOperatorMockTest { + @Override + protected boolean getSSA() { + return false; + } +} diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaMirrorMaker2AssemblyOperatorPodSetTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaMirrorMaker2AssemblyOperatorPodSetTest.java index e4037b96873..389ed376d4a 100644 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaMirrorMaker2AssemblyOperatorPodSetTest.java +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaMirrorMaker2AssemblyOperatorPodSetTest.java @@ -90,27 +90,17 @@ @SuppressWarnings({"checkstyle:ClassFanOutComplexity"}) @ExtendWith(VertxExtension.class) -public class KafkaMirrorMaker2AssemblyOperatorPodSetTest { - private final static String NAME = "my-mm2"; +public abstract class KafkaMirrorMaker2AssemblyOperatorPodSetTest { + protected final static String NAME = "my-mm2"; private final static String COMPONENT_NAME = NAME + "-mirrormaker2"; - private final static String NAMESPACE = "my-namespace"; + protected final static String NAMESPACE = "my-namespace"; private static final KafkaVersion.Lookup VERSIONS = KafkaVersionTestUtils.getKafkaVersionLookup(); private static final KubernetesVersion KUBERNETES_VERSION = KubernetesVersion.MINIMAL_SUPPORTED_VERSION; private static final Reconciliation RECONCILIATION = new Reconciliation("test", "KafkaMirrorMaker2", NAMESPACE, NAME); private static final SharedEnvironmentProvider SHARED_ENV_PROVIDER = new MockSharedEnvironmentProvider(); - private static final KafkaMirrorMaker2 MM2 = new KafkaMirrorMaker2Builder() - .withNewMetadata() - 
.withName(NAME) - .withNamespace(NAMESPACE) - .endMetadata() - .withNewSpec() - .withReplicas(3) - .withClusters(List.of()) - .withMirrors(List.of()) - .endSpec() - .build(); - private static final KafkaMirrorMaker2Cluster CLUSTER = KafkaMirrorMaker2Cluster.fromCrd(RECONCILIATION, MM2, VERSIONS, SHARED_ENV_PROVIDER); + private final KafkaMirrorMaker2 mm2 = initKafkaMirrorMaker(); + private final KafkaMirrorMaker2Cluster cluster = KafkaMirrorMaker2Cluster.fromCrd(RECONCILIATION, mm2, VERSIONS, SHARED_ENV_PROVIDER); protected static Vertx vertx; @@ -124,9 +114,11 @@ public static void after() { vertx.close(); } + abstract KafkaMirrorMaker2 initKafkaMirrorMaker(); + @Test public void testCreateCluster(VertxTestContext context) { - KafkaMirrorMaker2 mm2 = new KafkaMirrorMaker2Builder(MM2).build(); + KafkaMirrorMaker2 mm2 = new KafkaMirrorMaker2Builder(this.mm2).build(); ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(false); // Mock deployment @@ -240,8 +232,8 @@ public void testCreateCluster(VertxTestContext context) { @Test public void testScaleUpCluster(VertxTestContext context) { - KafkaMirrorMaker2 mm2 = new KafkaMirrorMaker2Builder(MM2).build(); - StrimziPodSet oldPodSet = CLUSTER.generatePodSet(1, null, null, false, null, null, null); + KafkaMirrorMaker2 mm2 = new KafkaMirrorMaker2Builder(this.mm2).build(); + StrimziPodSet oldPodSet = cluster.generatePodSet(1, null, null, false, null, null, null); List oldPods = PodSetUtils.podSetToPods(oldPodSet); ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(false); @@ -337,8 +329,8 @@ public void testScaleUpCluster(VertxTestContext context) { @Test public void testScaleDownCluster(VertxTestContext context) { - KafkaMirrorMaker2 mm2 = new KafkaMirrorMaker2Builder(MM2).build(); - StrimziPodSet oldPodSet = CLUSTER.generatePodSet(5, null, null, false, null, null, null); + KafkaMirrorMaker2 mm2 = new KafkaMirrorMaker2Builder(this.mm2).build(); + StrimziPodSet oldPodSet = 
cluster.generatePodSet(5, null, null, false, null, null, null); List oldPods = PodSetUtils.podSetToPods(oldPodSet); ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(false); @@ -434,13 +426,13 @@ public void testScaleDownCluster(VertxTestContext context) { @Test public void testScaleClusterToZero(VertxTestContext context) { - KafkaMirrorMaker2 mm2 = new KafkaMirrorMaker2Builder(MM2) + KafkaMirrorMaker2 mm2 = new KafkaMirrorMaker2Builder(this.mm2) .editSpec() .withReplicas(0) .endSpec() .build(); - StrimziPodSet oldPodSet = CLUSTER.generatePodSet(3, null, null, false, null, null, null); + StrimziPodSet oldPodSet = cluster.generatePodSet(3, null, null, false, null, null, null); List oldPods = PodSetUtils.podSetToPods(oldPodSet); ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(false); @@ -536,8 +528,8 @@ public void testScaleClusterToZero(VertxTestContext context) { @Test public void testUpdateClusterNoDiff(VertxTestContext context) { - KafkaMirrorMaker2 mm2 = new KafkaMirrorMaker2Builder(MM2).build(); - StrimziPodSet oldPodSet = CLUSTER.generatePodSet(3, null, null, false, null, null, null); + KafkaMirrorMaker2 mm2 = new KafkaMirrorMaker2Builder(this.mm2).build(); + StrimziPodSet oldPodSet = cluster.generatePodSet(3, null, null, false, null, null, null); List oldPods = PodSetUtils.podSetToPods(oldPodSet); ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(false); @@ -650,13 +642,13 @@ public void testUpdateClusterNoDiff(VertxTestContext context) { @Test public void testUpdateCluster(VertxTestContext context) { - KafkaMirrorMaker2 mm2 = new KafkaMirrorMaker2Builder(MM2) + KafkaMirrorMaker2 mm2 = new KafkaMirrorMaker2Builder(this.mm2) .editSpec() .withImage("some/different:image") .withResources(new ResourceRequirementsBuilder().withRequests(Map.of("Memory", new Quantity("1Gi"))).build()) .endSpec() .build(); - StrimziPodSet oldPodSet = CLUSTER.generatePodSet(3, null, null, false, null, null, null); + StrimziPodSet 
oldPodSet = cluster.generatePodSet(3, null, null, false, null, null, null); List oldPods = PodSetUtils.podSetToPods(oldPodSet); ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(false); @@ -773,8 +765,8 @@ public void testUpdateCluster(VertxTestContext context) { @Test public void testUpdateWithFailure(VertxTestContext context) { - KafkaMirrorMaker2 mm2 = new KafkaMirrorMaker2Builder(MM2).build(); - StrimziPodSet oldPodSet = CLUSTER.generatePodSet(3, null, null, false, null, null, null); + KafkaMirrorMaker2 mm2 = new KafkaMirrorMaker2Builder(this.mm2).build(); + StrimziPodSet oldPodSet = cluster.generatePodSet(3, null, null, false, null, null, null); List oldPods = PodSetUtils.podSetToPods(oldPodSet); ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(false); @@ -903,7 +895,7 @@ public Future createOrUpdate(Reconciliation reconciliat @Test public void testClusterMigrationToPodSets(VertxTestContext context) { - KafkaMirrorMaker2 mm2 = new KafkaMirrorMaker2Builder(MM2).build(); + KafkaMirrorMaker2 mm2 = new KafkaMirrorMaker2Builder(this.mm2).build(); Deployment deployment = new DeploymentBuilder() .withNewMetadata() @@ -1190,7 +1182,7 @@ private ArgumentCaptor createMirrorMaker2CaptorMock(String ta @Test public void testManualRollingUpdate(VertxTestContext context) { - StrimziPodSet oldPodSet = CLUSTER.generatePodSet(3, null, null, false, null, null, null); + StrimziPodSet oldPodSet = cluster.generatePodSet(3, null, null, false, null, null, null); oldPodSet.getMetadata().getAnnotations().put(Annotations.ANNO_STRIMZI_IO_MANUAL_ROLLING_UPDATE, "true"); // We want the pods to roll manually List oldPods = PodSetUtils.podSetToPods(oldPodSet); @@ -1242,8 +1234,8 @@ public void testManualRollingUpdate(VertxTestContext context) { // Mock KafkaMirrorMaker2 CRs CrdOperator mockConnectOps = supplier.mirrorMaker2Operator; - when(mockConnectOps.get(eq(NAMESPACE), eq(NAME))).thenReturn(new KafkaMirrorMaker2Builder(MM2).build()); - 
when(mockConnectOps.getAsync(eq(NAMESPACE), eq(NAME))).thenReturn(Future.succeededFuture(new KafkaMirrorMaker2Builder(MM2).build())); + when(mockConnectOps.get(eq(NAMESPACE), eq(NAME))).thenReturn(new KafkaMirrorMaker2Builder(mm2).build()); + when(mockConnectOps.getAsync(eq(NAMESPACE), eq(NAME))).thenReturn(Future.succeededFuture(new KafkaMirrorMaker2Builder(mm2).build())); ArgumentCaptor mm2StatusCaptor = ArgumentCaptor.forClass(KafkaMirrorMaker2.class); when(mockConnectOps.updateStatusAsync(any(), mm2StatusCaptor.capture())).thenReturn(Future.succeededFuture()); @@ -1278,7 +1270,7 @@ public void testManualRollingUpdate(VertxTestContext context) { @Test public void testManualRollingUpdateAtScaleUp(VertxTestContext context) { - StrimziPodSet oldPodSet = CLUSTER.generatePodSet(1, null, null, false, null, null, null); + StrimziPodSet oldPodSet = cluster.generatePodSet(1, null, null, false, null, null, null); oldPodSet.getMetadata().getAnnotations().put(Annotations.ANNO_STRIMZI_IO_MANUAL_ROLLING_UPDATE, "true"); // We want the pods to roll manually List oldPods = PodSetUtils.podSetToPods(oldPodSet); @@ -1330,8 +1322,8 @@ public void testManualRollingUpdateAtScaleUp(VertxTestContext context) { // Mock KafkaMirrorMaker2 CRs CrdOperator mockConnectOps = supplier.mirrorMaker2Operator; - when(mockConnectOps.get(eq(NAMESPACE), eq(NAME))).thenReturn(new KafkaMirrorMaker2Builder(MM2).build()); - when(mockConnectOps.getAsync(eq(NAMESPACE), eq(NAME))).thenReturn(Future.succeededFuture(new KafkaMirrorMaker2Builder(MM2).build())); + when(mockConnectOps.get(eq(NAMESPACE), eq(NAME))).thenReturn(new KafkaMirrorMaker2Builder(mm2).build()); + when(mockConnectOps.getAsync(eq(NAMESPACE), eq(NAME))).thenReturn(Future.succeededFuture(new KafkaMirrorMaker2Builder(mm2).build())); ArgumentCaptor mm2StatusCaptor = ArgumentCaptor.forClass(KafkaMirrorMaker2.class); when(mockConnectOps.updateStatusAsync(any(), mm2StatusCaptor.capture())).thenReturn(Future.succeededFuture()); @@ -1367,7 +1359,7 
@@ public void testManualRollingUpdateAtScaleUp(VertxTestContext context) { @Test public void testManualRollingUpdatePerPod(VertxTestContext context) { - StrimziPodSet oldPodSet = CLUSTER.generatePodSet(3, null, null, false, null, null, null); + StrimziPodSet oldPodSet = cluster.generatePodSet(3, null, null, false, null, null, null); List oldPods = PodSetUtils.podSetToPods(oldPodSet); oldPods.get(1).getMetadata().getAnnotations().put(Annotations.ANNO_STRIMZI_IO_MANUAL_ROLLING_UPDATE, "true"); // We want the pod to roll manually @@ -1419,8 +1411,8 @@ public void testManualRollingUpdatePerPod(VertxTestContext context) { // Mock KafkaMirrorMaker2 CRs CrdOperator mockConnectOps = supplier.mirrorMaker2Operator; - when(mockConnectOps.get(eq(NAMESPACE), eq(NAME))).thenReturn(new KafkaMirrorMaker2Builder(MM2).build()); - when(mockConnectOps.getAsync(eq(NAMESPACE), eq(NAME))).thenReturn(Future.succeededFuture(new KafkaMirrorMaker2Builder(MM2).build())); + when(mockConnectOps.get(eq(NAMESPACE), eq(NAME))).thenReturn(new KafkaMirrorMaker2Builder(mm2).build()); + when(mockConnectOps.getAsync(eq(NAMESPACE), eq(NAME))).thenReturn(Future.succeededFuture(new KafkaMirrorMaker2Builder(mm2).build())); ArgumentCaptor mm2StatusCaptor = ArgumentCaptor.forClass(KafkaMirrorMaker2.class); when(mockConnectOps.updateStatusAsync(any(), mm2StatusCaptor.capture())).thenReturn(Future.succeededFuture()); diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaMirrorMaker2AssemblyOperatorPodSetWithSSATest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaMirrorMaker2AssemblyOperatorPodSetWithSSATest.java new file mode 100644 index 00000000000..587f7df56d0 --- /dev/null +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaMirrorMaker2AssemblyOperatorPodSetWithSSATest.java @@ -0,0 +1,52 @@ +/* + * Copyright Strimzi authors. 
+ * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). + */ +package io.strimzi.operator.cluster.operator.assembly; + +import io.fabric8.kubernetes.api.model.ManagedFieldsEntryBuilder; +import io.strimzi.api.kafka.model.mirrormaker2.KafkaMirrorMaker2; +import io.strimzi.api.kafka.model.mirrormaker2.KafkaMirrorMaker2Builder; +import io.vertx.junit5.VertxExtension; +import org.junit.jupiter.api.extension.ExtendWith; + +import java.time.OffsetDateTime; +import java.time.ZoneOffset; +import java.util.List; +import java.util.Map; + +@SuppressWarnings({"checkstyle:ClassFanOutComplexity"}) +@ExtendWith(VertxExtension.class) +public class KafkaMirrorMaker2AssemblyOperatorPodSetWithSSATest extends KafkaMirrorMaker2AssemblyOperatorPodSetTest { + @Override + KafkaMirrorMaker2 initKafkaMirrorMaker() { + return new KafkaMirrorMaker2Builder() + .withNewMetadata() + .withName(NAME) + .withNamespace(NAMESPACE) + .withManagedFields( + new ManagedFieldsEntryBuilder() + .withManager("test") + .withOperation("Apply") + .withApiVersion("v1") + .withTime(OffsetDateTime.now(ZoneOffset.UTC).toString()) + .withFieldsType("FieldsV1").withNewFieldsV1() + .addToAdditionalProperties( + Map.of("f:metadata", + Map.of("f:labels", + Map.of("f:test-label", Map.of()) + ) + ) + ) + .endFieldsV1() + .build() + ) + .endMetadata() + .withNewSpec() + .withReplicas(3) + .withClusters(List.of()) + .withMirrors(List.of()) + .endSpec() + .build(); + } +} diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaMirrorMaker2AssemblyOperatorPodSetWithoutSSATest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaMirrorMaker2AssemblyOperatorPodSetWithoutSSATest.java new file mode 100644 index 00000000000..b0994d1b99c --- /dev/null +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaMirrorMaker2AssemblyOperatorPodSetWithoutSSATest.java @@ -0,0 +1,31 @@ +/* 
+ * Copyright Strimzi authors. + * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). + */ +package io.strimzi.operator.cluster.operator.assembly; + +import io.strimzi.api.kafka.model.mirrormaker2.KafkaMirrorMaker2; +import io.strimzi.api.kafka.model.mirrormaker2.KafkaMirrorMaker2Builder; +import io.vertx.junit5.VertxExtension; +import org.junit.jupiter.api.extension.ExtendWith; + +import java.util.List; + +@SuppressWarnings({"checkstyle:ClassFanOutComplexity"}) +@ExtendWith(VertxExtension.class) +public class KafkaMirrorMaker2AssemblyOperatorPodSetWithoutSSATest extends KafkaMirrorMaker2AssemblyOperatorPodSetTest { + @Override + KafkaMirrorMaker2 initKafkaMirrorMaker() { + return new KafkaMirrorMaker2Builder() + .withNewMetadata() + .withName(NAME) + .withNamespace(NAMESPACE) + .endMetadata() + .withNewSpec() + .withReplicas(3) + .withClusters(List.of()) + .withMirrors(List.of()) + .endSpec() + .build(); + } +} diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaReconcilerKRaftMigrationTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaReconcilerKRaftMigrationTest.java index f1a0e997190..8788214aabb 100644 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaReconcilerKRaftMigrationTest.java +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaReconcilerKRaftMigrationTest.java @@ -184,7 +184,8 @@ public void testMigrationFromZooKeeperToKRaftPostMigrationState(VertxTestContext List.of(BROKERS, CONTROLLERS), supplier, versionChange, - kafkaMetadataStateManager); + kafkaMetadataStateManager, + vertx); KafkaStatus status = new KafkaStatus(); @@ -231,7 +232,8 @@ public void testMigrationFromKRaftPostMigrationToKRaft(VertxTestContext context) List.of(BROKERS, CONTROLLERS), supplier, versionChange, - kafkaMetadataStateManager); + kafkaMetadataStateManager, + vertx); 
KafkaStatus status = new KafkaStatus(); @@ -273,7 +275,8 @@ public void testRollbackFromKRaftPostMigrationToKRaftDualWriting(VertxTestContex List.of(BROKERS, CONTROLLERS), supplier, versionChange, - kafkaMetadataStateManager); + kafkaMetadataStateManager, + vertx); KafkaStatus status = new KafkaStatus(); @@ -312,7 +315,8 @@ public void testRollbackFromKRaftDualWritingToZooKeeper(VertxTestContext context List.of(BROKERS), supplier, versionChange, - kafkaMetadataStateManager); + kafkaMetadataStateManager, + vertx); KafkaStatus status = new KafkaStatus(); @@ -326,7 +330,7 @@ public void testRollbackFromKRaftDualWritingToZooKeeper(VertxTestContext context static class MockKafkaReconciler extends KafkaReconciler { private static int count = 0; - public MockKafkaReconciler(Reconciliation reconciliation, Kafka kafkaCr, List nodePools, ResourceOperatorSupplier supplier, KafkaVersionChange versionChange, KafkaMetadataStateManager kafkaMetadataStateManager) { + public MockKafkaReconciler(Reconciliation reconciliation, Kafka kafkaCr, List nodePools, ResourceOperatorSupplier supplier, KafkaVersionChange versionChange, KafkaMetadataStateManager kafkaMetadataStateManager, Vertx vertx) { super(reconciliation, kafkaCr, nodePools, createKafkaCluster(reconciliation, supplier, kafkaCr, nodePools, versionChange, kafkaMetadataStateManager), CLUSTER_CA, CLIENTS_CA, CO_CONFIG, supplier, PFA, vertx, kafkaMetadataStateManager); } diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaReconcilerStatusTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaReconcilerStatusTest.java index b0a86ba0cc3..9bcc7933270 100644 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaReconcilerStatusTest.java +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaReconcilerStatusTest.java @@ -65,10 +65,10 @@ import static org.mockito.Mockito.when; 
@ExtendWith(VertxExtension.class) -public class KafkaReconcilerStatusTest { - private final static String NAMESPACE = "testns"; - private final static String CLUSTER_NAME = "testkafka"; - private final static KafkaVersion.Lookup VERSIONS = KafkaVersionTestUtils.getKafkaVersionLookup(); +public abstract class KafkaReconcilerStatusTest { + protected final static String NAMESPACE = "testns"; + protected final static String CLUSTER_NAME = "testkafka"; + protected final static KafkaVersion.Lookup VERSIONS = KafkaVersionTestUtils.getKafkaVersionLookup(); private final static PlatformFeaturesAvailability PFA = new PlatformFeaturesAvailability(true, KubernetesVersion.MINIMAL_SUPPORTED_VERSION); private final static KafkaVersionChange VERSION_CHANGE = new KafkaVersionChange( VERSIONS.defaultVersion(), @@ -77,7 +77,7 @@ public class KafkaReconcilerStatusTest { VERSIONS.defaultVersion().messageVersion(), VERSIONS.defaultVersion().metadataVersion() ); - private final static ClusterOperatorConfig CO_CONFIG = ResourceUtils.dummyClusterOperatorConfig(); + private final ClusterOperatorConfig coConfig = getClusterOperatorConfig(); private final static ClusterCa CLUSTER_CA = new ClusterCa( Reconciliation.DUMMY_RECONCILIATION, new OpenSslCertManager(), @@ -99,30 +99,10 @@ public class KafkaReconcilerStatusTest { true, null ); - private final static Kafka KAFKA = new KafkaBuilder() - .withNewMetadata() - .withName(CLUSTER_NAME) - .withNamespace(NAMESPACE) - .endMetadata() - .withNewSpec() - .withNewKafka() - .withReplicas(3) - .withListeners(new GenericKafkaListenerBuilder() - .withName("tls") - .withPort(9092) - .withType(KafkaListenerType.INTERNAL) - .withTls(true) - .build()) - .withNewEphemeralStorage() - .endEphemeralStorage() - .endKafka() - .withNewZookeeper() - .withReplicas(3) - .withNewEphemeralStorage() - .endEphemeralStorage() - .endZookeeper() - .endSpec() - .build(); + private final Kafka kafka = getKafkaCrd(); + abstract ClusterOperatorConfig getClusterOperatorConfig(); 
+ + abstract Kafka getKafkaCrd(); private static Vertx vertx; private static WorkerExecutor sharedWorkerExecutor; @@ -141,7 +121,7 @@ public static void afterAll() { @Test public void testKafkaReconcilerStatus(VertxTestContext context) { - Kafka kafka = new KafkaBuilder(KAFKA) + Kafka kafka = new KafkaBuilder(this.kafka) .editOrNewSpec() .editOrNewKafka() .withReplicas(1) @@ -158,10 +138,13 @@ public void testKafkaReconcilerStatus(VertxTestContext context) { when(mockSecretOps.getAsync(eq(NAMESPACE), eq(KafkaResources.secretName(CLUSTER_NAME)))).thenReturn(Future.succeededFuture(secret)); // Run the test + Reconciliation reconciliation = new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME); KafkaReconciler reconciler = new MockKafkaReconcilerStatusTasks( - new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME), + reconciliation, supplier, - kafka + kafka, + vertx, + coConfig ); KafkaStatus status = new KafkaStatus(); @@ -187,7 +170,7 @@ public void testKafkaReconcilerStatus(VertxTestContext context) { @Test public void testKafkaReconcilerStatusUpdateVersion(VertxTestContext context) { - Kafka kafka = new KafkaBuilder(KAFKA) + Kafka kafka = new KafkaBuilder(this.kafka) .editOrNewSpec() .editOrNewKafka() .withReplicas(1) @@ -207,10 +190,13 @@ public void testKafkaReconcilerStatusUpdateVersion(VertxTestContext context) { when(mockSecretOps.getAsync(eq(NAMESPACE), eq(KafkaResources.secretName(CLUSTER_NAME)))).thenReturn(Future.succeededFuture(secret)); // Run the test + Reconciliation reconciliation = new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME); KafkaReconciler reconciler = new MockKafkaReconcilerStatusTasks( - new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME), + reconciliation, supplier, - kafka + kafka, + vertx, + coConfig ); KafkaStatus status = new KafkaStatus(); @@ -227,7 +213,7 @@ public void 
testKafkaReconcilerStatusUpdateVersion(VertxTestContext context) { @Test public void testKafkaReconcilerStatusDoesNotUpdateVersionOnFailure(VertxTestContext context) { - Kafka kafka = new KafkaBuilder(KAFKA) + Kafka kafka = new KafkaBuilder(this.kafka) .editOrNewSpec() .editOrNewKafka() .withReplicas(1) @@ -247,10 +233,13 @@ public void testKafkaReconcilerStatusDoesNotUpdateVersionOnFailure(VertxTestCont when(mockSecretOps.getAsync(eq(NAMESPACE), eq(KafkaResources.secretName(CLUSTER_NAME)))).thenReturn(Future.succeededFuture(secret)); // Run the test + Reconciliation reconciliation = new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME); KafkaReconciler reconciler = new MockKafkaReconcilerStatusTasks( - new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME), + reconciliation, supplier, - kafka + kafka, + vertx, + coConfig ); KafkaStatus status = new KafkaStatus(); @@ -267,7 +256,7 @@ public void testKafkaReconcilerStatusDoesNotUpdateVersionOnFailure(VertxTestCont @Test public void testKafkaReconcilerStatusCustomKafkaVersion(VertxTestContext context) { - Kafka kafka = new KafkaBuilder(KAFKA) + Kafka kafka = new KafkaBuilder(this.kafka) .editOrNewSpec() .editOrNewKafka() .withVersion(KafkaVersionTestUtils.PREVIOUS_KAFKA_VERSION) @@ -285,10 +274,13 @@ public void testKafkaReconcilerStatusCustomKafkaVersion(VertxTestContext context when(mockSecretOps.getAsync(eq(NAMESPACE), eq(KafkaResources.secretName(CLUSTER_NAME)))).thenReturn(Future.succeededFuture(secret)); // Run the test + Reconciliation reconciliation = new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME); KafkaReconciler reconciler = new MockKafkaReconcilerStatusTasks( - new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME), + reconciliation, supplier, - kafka + kafka, + vertx, + coConfig ); KafkaStatus status = new KafkaStatus(); @@ -314,10 +306,13 @@ public void 
testKafkaReconcilerStatusWithSpecCheckerWarnings(VertxTestContext co when(mockSecretOps.getAsync(eq(NAMESPACE), eq(KafkaResources.secretName(CLUSTER_NAME)))).thenReturn(Future.succeededFuture(secret)); // Run the test + Reconciliation reconciliation = new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME); KafkaReconciler reconciler = new MockKafkaReconcilerStatusTasks( - new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME), + reconciliation, supplier, - KAFKA + kafka, + vertx, + coConfig ); KafkaStatus status = new KafkaStatus(); @@ -339,7 +334,7 @@ public void testKafkaReconcilerStatusWithSpecCheckerWarnings(VertxTestContext co @Test public void testKafkaReconcilerStatusWithNodePorts(VertxTestContext context) { - Kafka kafka = new KafkaBuilder(KAFKA) + Kafka kafka = new KafkaBuilder(this.kafka) .editOrNewSpec() .editOrNewKafka() .withListeners(new GenericKafkaListenerBuilder() @@ -401,10 +396,13 @@ public void testKafkaReconcilerStatusWithNodePorts(VertxTestContext context) { when(mockNodeOps.listAsync(any(Labels.class))).thenReturn(Future.succeededFuture(kubernetesWorkerNodes())); // Run the test + Reconciliation reconciliation = new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME); KafkaReconciler reconciler = new MockKafkaReconcilerStatusTasks( - new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME), + reconciliation, supplier, - kafka + kafka, + vertx, + coConfig ); KafkaStatus status = new KafkaStatus(); @@ -440,7 +438,7 @@ public void testKafkaReconcilerStatusWithNodePortsAndOverrides(VertxTestContext .withAdvertisedHost("my-address-1") .build(); - Kafka kafka = new KafkaBuilder(KAFKA) + Kafka kafka = new KafkaBuilder(this.kafka) .editOrNewSpec() .editOrNewKafka() .withListeners(new GenericKafkaListenerBuilder() @@ -505,10 +503,13 @@ public void testKafkaReconcilerStatusWithNodePortsAndOverrides(VertxTestContext 
when(mockNodeOps.listAsync(any(Labels.class))).thenReturn(Future.succeededFuture(kubernetesWorkerNodes())); // Run the test + Reconciliation reconciliation = new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME); KafkaReconciler reconciler = new MockKafkaReconcilerStatusTasks( - new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME), + reconciliation, supplier, - kafka + kafka, + vertx, + coConfig ); KafkaStatus status = new KafkaStatus(); @@ -534,7 +535,7 @@ public void testKafkaReconcilerStatusWithNodePortsAndOverrides(VertxTestContext @Test public void testKafkaReconcilerStatusWithNodePortsWithPreferredAddressType(VertxTestContext context) { - Kafka kafka = new KafkaBuilder(KAFKA) + Kafka kafka = new KafkaBuilder(this.kafka) .editOrNewSpec() .editOrNewKafka() .withListeners(new GenericKafkaListenerBuilder() @@ -599,10 +600,13 @@ public void testKafkaReconcilerStatusWithNodePortsWithPreferredAddressType(Vertx when(mockNodeOps.listAsync(any(Labels.class))).thenReturn(Future.succeededFuture(kubernetesWorkerNodes())); // Run the test + Reconciliation reconciliation = new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME); KafkaReconciler reconciler = new MockKafkaReconcilerStatusTasks( - new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME), + reconciliation, supplier, - kafka + kafka, + vertx, + coConfig ); KafkaStatus status = new KafkaStatus(); @@ -628,7 +632,7 @@ public void testKafkaReconcilerStatusWithNodePortsWithPreferredAddressType(Vertx @Test public void testKafkaReconcilerStatusWithNodePortsOnSameNode(VertxTestContext context) { - Kafka kafka = new KafkaBuilder(KAFKA) + Kafka kafka = new KafkaBuilder(this.kafka) .editOrNewSpec() .editOrNewKafka() .withListeners(new GenericKafkaListenerBuilder() @@ -690,10 +694,13 @@ public void testKafkaReconcilerStatusWithNodePortsOnSameNode(VertxTestContext co 
when(mockNodeOps.listAsync(any(Labels.class))).thenReturn(Future.succeededFuture(kubernetesWorkerNodes())); // Run the test + Reconciliation reconciliation = new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME); KafkaReconciler reconciler = new MockKafkaReconcilerStatusTasks( - new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME), + reconciliation, supplier, - kafka + kafka, + vertx, + coConfig ); KafkaStatus status = new KafkaStatus(); @@ -716,7 +723,7 @@ public void testKafkaReconcilerStatusWithNodePortsOnSameNode(VertxTestContext co @Test public void testKafkaReconcilerStatusWithNodePortsAndMissingNode(VertxTestContext context) { - Kafka kafka = new KafkaBuilder(KAFKA) + Kafka kafka = new KafkaBuilder(this.kafka) .editOrNewSpec() .editOrNewKafka() .withListeners(new GenericKafkaListenerBuilder() @@ -778,10 +785,13 @@ public void testKafkaReconcilerStatusWithNodePortsAndMissingNode(VertxTestContex when(mockNodeOps.listAsync(any(Labels.class))).thenReturn(Future.succeededFuture(kubernetesWorkerNodes())); // Run the test + Reconciliation reconciliation = new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME); KafkaReconciler reconciler = new MockKafkaReconcilerStatusTasks( - new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME), + reconciliation, supplier, - kafka + kafka, + vertx, + coConfig ); KafkaStatus status = new KafkaStatus(); @@ -858,24 +868,24 @@ private static List kubernetesWorkerNodes() { return nodes; } - static class MockKafkaReconcilerStatusTasks extends KafkaReconciler { - private static final ReconciliationLogger LOGGER = ReconciliationLogger.create(MockKafkaReconcilerStatusTasks.class.getName()); + private KafkaCluster createKafkaCluster(Reconciliation reconciliation, ResourceOperatorSupplier supplier, Kafka kafkaCr) { + return KafkaClusterCreator.createKafkaCluster( + reconciliation, + kafkaCr, + null, + Map.of(), + Map.of(), + 
VERSION_CHANGE, + new KafkaMetadataStateManager(reconciliation, kafkaCr, coConfig.featureGates().useKRaftEnabled()).getMetadataConfigurationState(), + VERSIONS, + supplier.sharedEnvironmentProvider); + } - public MockKafkaReconcilerStatusTasks(Reconciliation reconciliation, ResourceOperatorSupplier supplier, Kafka kafkaCr) { - super(reconciliation, kafkaCr, null, createKafkaCluster(reconciliation, supplier, kafkaCr), CLUSTER_CA, CLIENTS_CA, CO_CONFIG, supplier, PFA, vertx, new KafkaMetadataStateManager(reconciliation, kafkaCr, CO_CONFIG.featureGates().useKRaftEnabled())); - } + class MockKafkaReconcilerStatusTasks extends KafkaReconciler { + private static final ReconciliationLogger LOGGER = ReconciliationLogger.create(MockKafkaReconcilerStatusTasks.class.getName()); - private static KafkaCluster createKafkaCluster(Reconciliation reconciliation, ResourceOperatorSupplier supplier, Kafka kafkaCr) { - return KafkaClusterCreator.createKafkaCluster( - reconciliation, - kafkaCr, - null, - Map.of(), - Map.of(), - VERSION_CHANGE, - new KafkaMetadataStateManager(reconciliation, kafkaCr, CO_CONFIG.featureGates().useKRaftEnabled()).getMetadataConfigurationState(), - VERSIONS, - supplier.sharedEnvironmentProvider); + public MockKafkaReconcilerStatusTasks(Reconciliation reconciliation, ResourceOperatorSupplier supplier, Kafka kafkaCr, Vertx vertx, ClusterOperatorConfig coConfig) { + super(reconciliation, kafkaCr, null, createKafkaCluster(reconciliation, supplier, kafkaCr), CLUSTER_CA, CLIENTS_CA, coConfig, supplier, PFA, vertx, new KafkaMetadataStateManager(reconciliation, kafkaCr, coConfig.featureGates().useKRaftEnabled())); } @Override diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaReconcilerStatusWithSSATest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaReconcilerStatusWithSSATest.java new file mode 100644 index 00000000000..02324bf49fc --- /dev/null +++ 
b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaReconcilerStatusWithSSATest.java @@ -0,0 +1,72 @@ +/* + * Copyright Strimzi authors. + * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). + */ +package io.strimzi.operator.cluster.operator.assembly; + +import io.fabric8.kubernetes.api.model.ManagedFieldsEntryBuilder; +import io.strimzi.api.kafka.model.kafka.Kafka; +import io.strimzi.api.kafka.model.kafka.KafkaBuilder; +import io.strimzi.api.kafka.model.kafka.listener.GenericKafkaListenerBuilder; +import io.strimzi.api.kafka.model.kafka.listener.KafkaListenerType; +import io.strimzi.operator.cluster.ClusterOperatorConfig; +import io.strimzi.operator.cluster.ResourceUtils; +import io.vertx.junit5.VertxExtension; +import org.junit.jupiter.api.extension.ExtendWith; + +import java.time.OffsetDateTime; +import java.time.ZoneOffset; +import java.util.Map; + +@ExtendWith(VertxExtension.class) +public class KafkaReconcilerStatusWithSSATest extends KafkaReconcilerStatusTest { + @Override + ClusterOperatorConfig getClusterOperatorConfig() { + return ResourceUtils.dummyClusterOperatorConfig(VERSIONS, "+UseServerSideApply"); + } + + @Override + Kafka getKafkaCrd() { + return new KafkaBuilder() + .withNewMetadata() + .withName(CLUSTER_NAME) + .withNamespace(NAMESPACE) + .withManagedFields( + new ManagedFieldsEntryBuilder() + .withManager("test") + .withOperation("Apply") + .withApiVersion("v1") + .withTime(OffsetDateTime.now(ZoneOffset.UTC).toString()) + .withFieldsType("FieldsV1").withNewFieldsV1() + .addToAdditionalProperties( + Map.of("f:metadata", + Map.of("f:labels", + Map.of("f:test-label", Map.of()) + ) + ) + ) + .endFieldsV1() + .build() + ) + .endMetadata() + .withNewSpec() + .withNewKafka() + .withReplicas(3) + .withListeners(new GenericKafkaListenerBuilder() + .withName("tls") + .withPort(9092) + .withType(KafkaListenerType.INTERNAL) + .withTls(true) + .build()) + 
.withNewEphemeralStorage() + .endEphemeralStorage() + .endKafka() + .withNewZookeeper() + .withReplicas(3) + .withNewEphemeralStorage() + .endEphemeralStorage() + .endZookeeper() + .endSpec() + .build(); + } +} diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaReconcilerStatusWithoutSSATest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaReconcilerStatusWithoutSSATest.java new file mode 100644 index 00000000000..7b7fea5a335 --- /dev/null +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaReconcilerStatusWithoutSSATest.java @@ -0,0 +1,50 @@ +/* + * Copyright Strimzi authors. + * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). + */ +package io.strimzi.operator.cluster.operator.assembly; + +import io.strimzi.api.kafka.model.kafka.Kafka; +import io.strimzi.api.kafka.model.kafka.KafkaBuilder; +import io.strimzi.api.kafka.model.kafka.listener.GenericKafkaListenerBuilder; +import io.strimzi.api.kafka.model.kafka.listener.KafkaListenerType; +import io.strimzi.operator.cluster.ClusterOperatorConfig; +import io.strimzi.operator.cluster.ResourceUtils; +import io.vertx.junit5.VertxExtension; +import org.junit.jupiter.api.extension.ExtendWith; + +@ExtendWith(VertxExtension.class) +public class KafkaReconcilerStatusWithoutSSATest extends KafkaReconcilerStatusTest { + @Override + ClusterOperatorConfig getClusterOperatorConfig() { + return ResourceUtils.dummyClusterOperatorConfig(VERSIONS); + } + + @Override + Kafka getKafkaCrd() { + return new KafkaBuilder() + .withNewMetadata() + .withName(CLUSTER_NAME) + .withNamespace(NAMESPACE) + .endMetadata() + .withNewSpec() + .withNewKafka() + .withReplicas(3) + .withListeners(new GenericKafkaListenerBuilder() + .withName("tls") + .withPort(9092) + .withType(KafkaListenerType.INTERNAL) + .withTls(true) + .build()) + .withNewEphemeralStorage() + .endEphemeralStorage() 
+ .endKafka() + .withNewZookeeper() + .withReplicas(3) + .withNewEphemeralStorage() + .endEphemeralStorage() + .endZookeeper() + .endSpec() + .build(); + } +} diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaReconcilerUpgradeDowngradeTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaReconcilerUpgradeDowngradeTest.java index 3da6055faef..9a366f25a43 100644 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaReconcilerUpgradeDowngradeTest.java +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaReconcilerUpgradeDowngradeTest.java @@ -148,7 +148,8 @@ public void testWithAllVersionsInCR(VertxTestContext context) { new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME), supplier, kafka, - versionChange + versionChange, + vertx ); KafkaStatus status = new KafkaStatus(); @@ -190,7 +191,8 @@ public void testWithoutAnyVersionsInCR(VertxTestContext context) { new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME), supplier, KAFKA, - versionChange + versionChange, + vertx ); KafkaStatus status = new KafkaStatus(); @@ -242,7 +244,8 @@ public void testUpgradingWithSpecificProtocolAndMessageFormatVersions(VertxTestC new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME), supplier, kafka, - versionChange + versionChange, + vertx ); KafkaStatus status = new KafkaStatus(); @@ -267,7 +270,7 @@ public void testUpgradingWithSpecificProtocolAndMessageFormatVersions(VertxTestC } static class MockKafkaReconciler extends KafkaReconciler { - public MockKafkaReconciler(Reconciliation reconciliation, ResourceOperatorSupplier supplier, Kafka kafkaCr, KafkaVersionChange versionChange) { + public MockKafkaReconciler(Reconciliation reconciliation, ResourceOperatorSupplier supplier, Kafka kafkaCr, KafkaVersionChange versionChange, Vertx vertx) { 
super(reconciliation, kafkaCr, null, createKafkaCluster(reconciliation, supplier, kafkaCr, versionChange), CLUSTER_CA, CLIENTS_CA, CO_CONFIG, supplier, PFA, vertx, new KafkaMetadataStateManager(reconciliation, kafkaCr, CO_CONFIG.featureGates().useKRaftEnabled())); listenerReconciliationResults = new KafkaListenersReconciler.ReconciliationResult(); } diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaStatusTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaStatusTest.java index ce99ffbbb01..f4e000465d3 100644 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaStatusTest.java +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaStatusTest.java @@ -5,13 +5,10 @@ package io.strimzi.operator.cluster.operator.assembly; import io.fabric8.kubernetes.client.KubernetesClient; -import io.strimzi.api.kafka.model.common.ConditionBuilder; import io.strimzi.api.kafka.model.kafka.Kafka; import io.strimzi.api.kafka.model.kafka.KafkaBuilder; import io.strimzi.api.kafka.model.kafka.KafkaList; import io.strimzi.api.kafka.model.kafka.KafkaStatus; -import io.strimzi.api.kafka.model.kafka.listener.GenericKafkaListenerBuilder; -import io.strimzi.api.kafka.model.kafka.listener.KafkaListenerType; import io.strimzi.api.kafka.model.kafka.listener.ListenerAddressBuilder; import io.strimzi.api.kafka.model.kafka.listener.ListenerStatus; import io.strimzi.api.kafka.model.kafka.listener.ListenerStatusBuilder; @@ -24,7 +21,6 @@ import io.strimzi.operator.cluster.operator.resource.ResourceOperatorSupplier; import io.strimzi.operator.common.Reconciliation; import io.strimzi.operator.common.model.PasswordGenerator; -import io.strimzi.operator.common.model.StatusUtils; import io.strimzi.operator.common.operator.MockCertManager; import io.strimzi.operator.common.operator.resource.CrdOperator; import io.strimzi.platform.KubernetesVersion; @@ -39,7 
+35,6 @@ import org.junit.jupiter.api.extension.ExtendWith; import org.mockito.ArgumentCaptor; -import java.time.Instant; import java.util.ArrayList; import java.util.List; @@ -55,14 +50,14 @@ @SuppressWarnings({"checkstyle:ClassFanOutComplexity", "checkstyle:ClassDataAbstractionCoupling"}) @ExtendWith(VertxExtension.class) -public class KafkaStatusTest { +public abstract class KafkaStatusTest { private final KubernetesVersion kubernetesVersion = KubernetesVersion.MINIMAL_SUPPORTED_VERSION; private final MockCertManager certManager = new MockCertManager(); private final PasswordGenerator passwordGenerator = new PasswordGenerator(10, "a", "a"); - private final ClusterOperatorConfig config = ResourceUtils.dummyClusterOperatorConfig(VERSIONS); - private static final KafkaVersion.Lookup VERSIONS = KafkaVersionTestUtils.getKafkaVersionLookup(); - private final String namespace = "testns"; - private final String clusterName = "testkafka"; + private final ClusterOperatorConfig config = getClusterOperatorConfig(); + protected static final KafkaVersion.Lookup VERSIONS = KafkaVersionTestUtils.getKafkaVersionLookup(); + protected final String namespace = "testns"; + protected final String clusterName = "testkafka"; protected static Vertx vertx; @BeforeAll @@ -75,42 +70,9 @@ public static void after() { vertx.close(); } - public Kafka getKafkaCrd() { - return new KafkaBuilder() - .withNewMetadata() - .withName(clusterName) - .withNamespace(namespace) - .withGeneration(2L) - .endMetadata() - .withNewSpec() - .withNewKafka() - .withReplicas(3) - .withListeners(new GenericKafkaListenerBuilder() - .withName("plain") - .withPort(9092) - .withType(KafkaListenerType.INTERNAL) - .withTls(false) - .build()) - .withNewEphemeralStorage() - .endEphemeralStorage() - .endKafka() - .withNewZookeeper() - .withReplicas(3) - .withNewEphemeralStorage() - .endEphemeralStorage() - .endZookeeper() - .endSpec() - .withNewStatus() - .withObservedGeneration(1L) - .withConditions(new ConditionBuilder() 
- .withLastTransitionTime(StatusUtils.iso8601(Instant.parse("2011-01-01T00:00:00Z"))) - .withType("NotReady") - .withStatus("True") - .build()) - .withClusterId("my-cluster-id") - .endStatus() - .build(); - } + abstract ClusterOperatorConfig getClusterOperatorConfig(); + + abstract Kafka getKafkaCrd(); @Test public void testStatusAfterSuccessfulReconciliationWithPreviousFailure(VertxTestContext context) { diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaStatusWithSSATest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaStatusWithSSATest.java new file mode 100644 index 00000000000..62111705799 --- /dev/null +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaStatusWithSSATest.java @@ -0,0 +1,86 @@ +/* + * Copyright Strimzi authors. + * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). + */ +package io.strimzi.operator.cluster.operator.assembly; + +import io.fabric8.kubernetes.api.model.ManagedFieldsEntryBuilder; +import io.strimzi.api.kafka.model.common.ConditionBuilder; +import io.strimzi.api.kafka.model.kafka.Kafka; +import io.strimzi.api.kafka.model.kafka.KafkaBuilder; +import io.strimzi.api.kafka.model.kafka.listener.GenericKafkaListenerBuilder; +import io.strimzi.api.kafka.model.kafka.listener.KafkaListenerType; +import io.strimzi.operator.cluster.ClusterOperatorConfig; +import io.strimzi.operator.cluster.ResourceUtils; +import io.strimzi.operator.common.model.StatusUtils; +import io.vertx.junit5.VertxExtension; +import org.junit.jupiter.api.extension.ExtendWith; + +import java.time.Instant; +import java.time.OffsetDateTime; +import java.time.ZoneOffset; +import java.util.Map; + +@SuppressWarnings({"checkstyle:ClassFanOutComplexity", "checkstyle:ClassDataAbstractionCoupling"}) +@ExtendWith(VertxExtension.class) +public class KafkaStatusWithSSATest extends KafkaStatusTest { + @Override + 
ClusterOperatorConfig getClusterOperatorConfig() { + return ResourceUtils.dummyClusterOperatorConfig(VERSIONS, "+UseServerSideApply"); + } + + @Override + Kafka getKafkaCrd() { + return new KafkaBuilder() + .withNewMetadata() + .withName(clusterName) + .withNamespace(namespace) + .withGeneration(2L) + .withManagedFields( + new ManagedFieldsEntryBuilder() + .withManager("test") + .withOperation("Apply") + .withApiVersion("v1") + .withTime(OffsetDateTime.now(ZoneOffset.UTC).toString()) + .withFieldsType("FieldsV1").withNewFieldsV1() + .addToAdditionalProperties( + Map.of("f:metadata", + Map.of("f:labels", + Map.of("f:test-label", Map.of()) + ) + ) + ) + .endFieldsV1() + .build() + ) + .endMetadata() + .withNewSpec() + .withNewKafka() + .withReplicas(3) + .withListeners(new GenericKafkaListenerBuilder() + .withName("plain") + .withPort(9092) + .withType(KafkaListenerType.INTERNAL) + .withTls(false) + .build()) + .withNewEphemeralStorage() + .endEphemeralStorage() + .endKafka() + .withNewZookeeper() + .withReplicas(3) + .withNewEphemeralStorage() + .endEphemeralStorage() + .endZookeeper() + .endSpec() + .withNewStatus() + .withObservedGeneration(1L) + .withConditions(new ConditionBuilder() + .withLastTransitionTime(StatusUtils.iso8601(Instant.parse("2011-01-01T00:00:00Z"))) + .withType("NotReady") + .withStatus("True") + .build()) + .withClusterId("my-cluster-id") + .endStatus() + .build(); + } +} diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaStatusWithoutSSATest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaStatusWithoutSSATest.java new file mode 100644 index 00000000000..a4be2072508 --- /dev/null +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/KafkaStatusWithoutSSATest.java @@ -0,0 +1,65 @@ +/* + * Copyright Strimzi authors. + * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). 
+ */ +package io.strimzi.operator.cluster.operator.assembly; + +import io.strimzi.api.kafka.model.common.ConditionBuilder; +import io.strimzi.api.kafka.model.kafka.Kafka; +import io.strimzi.api.kafka.model.kafka.KafkaBuilder; +import io.strimzi.api.kafka.model.kafka.listener.GenericKafkaListenerBuilder; +import io.strimzi.api.kafka.model.kafka.listener.KafkaListenerType; +import io.strimzi.operator.cluster.ClusterOperatorConfig; +import io.strimzi.operator.cluster.ResourceUtils; +import io.strimzi.operator.common.model.StatusUtils; +import io.vertx.junit5.VertxExtension; +import org.junit.jupiter.api.extension.ExtendWith; + +import java.time.Instant; + +@SuppressWarnings({"checkstyle:ClassFanOutComplexity", "checkstyle:ClassDataAbstractionCoupling"}) +@ExtendWith(VertxExtension.class) +public class KafkaStatusWithoutSSATest extends KafkaStatusTest { + @Override + ClusterOperatorConfig getClusterOperatorConfig() { + return ResourceUtils.dummyClusterOperatorConfig(VERSIONS); + } + + @Override + Kafka getKafkaCrd() { + return new KafkaBuilder() + .withNewMetadata() + .withName(clusterName) + .withNamespace(namespace) + .withGeneration(2L) + .endMetadata() + .withNewSpec() + .withNewKafka() + .withReplicas(3) + .withListeners(new GenericKafkaListenerBuilder() + .withName("plain") + .withPort(9092) + .withType(KafkaListenerType.INTERNAL) + .withTls(false) + .build()) + .withNewEphemeralStorage() + .endEphemeralStorage() + .endKafka() + .withNewZookeeper() + .withReplicas(3) + .withNewEphemeralStorage() + .endEphemeralStorage() + .endZookeeper() + .endSpec() + .withNewStatus() + .withObservedGeneration(1L) + .withConditions(new ConditionBuilder() + .withLastTransitionTime(StatusUtils.iso8601(Instant.parse("2011-01-01T00:00:00Z"))) + .withType("NotReady") + .withStatus("True") + .build()) + .withClusterId("my-cluster-id") + .endStatus() + .build(); + } +} diff --git 
a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/StrimziPodSetControllerMockTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/StrimziPodSetControllerMockTest.java index 76291fa491b..5a110981fe0 100644 --- a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/StrimziPodSetControllerMockTest.java +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/StrimziPodSetControllerMockTest.java @@ -68,7 +68,7 @@ import static org.hamcrest.Matchers.not; @ExtendWith(VertxExtension.class) -public class StrimziPodSetControllerMockTest { +public abstract class StrimziPodSetControllerMockTest { private static final String KAFKA_NAME = "foo"; private static final Map MATCHING_LABELS = Map.of("selector", "matching"); private static final String OTHER_KAFKA_NAME = "bar"; @@ -116,11 +116,11 @@ public void beforeEach(TestInfo testInfo) { vertx = Vertx.vertx(); sharedWorkerExecutor = vertx.createSharedWorkerExecutor("kubernetes-ops-pool"); - kafkaOperator = new CrdOperator<>(vertx, client, Kafka.class, KafkaList.class, Kafka.RESOURCE_KIND); - kafkaConnectOperator = new CrdOperator<>(vertx, client, KafkaConnect.class, KafkaConnectList.class, KafkaConnect.RESOURCE_KIND); - kafkaMirrorMaker2Operator = new CrdOperator<>(vertx, client, KafkaMirrorMaker2.class, KafkaMirrorMaker2List.class, KafkaMirrorMaker2.RESOURCE_KIND); - podSetOperator = new StrimziPodSetOperator(vertx, client); - podOperator = new PodOperator(vertx, client); + kafkaOperator = new CrdOperator<>(vertx, client, Kafka.class, KafkaList.class, Kafka.RESOURCE_KIND, getSSA()); + kafkaConnectOperator = new CrdOperator<>(vertx, client, KafkaConnect.class, KafkaConnectList.class, KafkaConnect.RESOURCE_KIND, getSSA()); + kafkaMirrorMaker2Operator = new CrdOperator<>(vertx, client, KafkaMirrorMaker2.class, KafkaMirrorMaker2List.class, KafkaMirrorMaker2.RESOURCE_KIND, getSSA()); + podSetOperator = new 
StrimziPodSetOperator(vertx, client, getSSA()); + podOperator = new PodOperator(vertx, client, getSSA()); metricsProvider = ResourceUtils.metricsProvider(); kafkaOp().inNamespace(namespace).resource(kafka(namespace, KAFKA_NAME, MATCHING_LABELS)).create(); @@ -139,6 +139,8 @@ public void afterEach() { vertx.close(); } + protected abstract boolean getSSA(); + /* * Util methods */ diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/StrimziPodSetControllerWithSSAMockTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/StrimziPodSetControllerWithSSAMockTest.java new file mode 100644 index 00000000000..3f75a7654b9 --- /dev/null +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/StrimziPodSetControllerWithSSAMockTest.java @@ -0,0 +1,16 @@ +/* + * Copyright Strimzi authors. + * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). + */ +package io.strimzi.operator.cluster.operator.assembly; + +import io.vertx.junit5.VertxExtension; +import org.junit.jupiter.api.extension.ExtendWith; + +@ExtendWith(VertxExtension.class) +public class StrimziPodSetControllerWithSSAMockTest extends StrimziPodSetControllerMockTest { + @Override + protected boolean getSSA() { + return true; + } +} diff --git a/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/StrimziPodSetControllerWithoutSSAMockTest.java b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/StrimziPodSetControllerWithoutSSAMockTest.java new file mode 100644 index 00000000000..9c8afb1a6f2 --- /dev/null +++ b/cluster-operator/src/test/java/io/strimzi/operator/cluster/operator/assembly/StrimziPodSetControllerWithoutSSAMockTest.java @@ -0,0 +1,16 @@ +/* + * Copyright Strimzi authors. + * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). 
+ */ +package io.strimzi.operator.cluster.operator.assembly; + +import io.vertx.junit5.VertxExtension; +import org.junit.jupiter.api.extension.ExtendWith; + +@ExtendWith(VertxExtension.class) +public class StrimziPodSetControllerWithoutSSAMockTest extends StrimziPodSetControllerMockTest { + @Override + protected boolean getSSA() { + return false; + } +} diff --git a/operator-common/src/main/java/io/strimzi/operator/common/config/FeatureGate.java b/operator-common/src/main/java/io/strimzi/operator/common/config/FeatureGate.java new file mode 100644 index 00000000000..a2d5d8ed5f0 --- /dev/null +++ b/operator-common/src/main/java/io/strimzi/operator/common/config/FeatureGate.java @@ -0,0 +1,62 @@ +/* + * Copyright Strimzi authors. + * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). + */ +package io.strimzi.operator.common.config; + +/** + * Feature gate class represents individual feature gate + */ +public class FeatureGate { + private final String name; + private final boolean defaultValue; + private Boolean value = null; + + /** + * Feature gate constructor + * + * @param name Name of the feature gate + * @param defaultValue Default value of the feature gate + */ + public FeatureGate(String name, boolean defaultValue) { + this.name = name; + this.defaultValue = defaultValue; + } + + /** + * @return The name of the feature gate + */ + public String getName() { + return name; + } + + /** + * @return Returns true if the value for this feature gate is already set or false if it is still null + */ + public boolean isSet() { + return value != null; + } + + /** + * Sets the value of the feature gate + * + * @param value Value of the feature gate + */ + public void setValue(boolean value) { + this.value = value; + } + + /** + * @return True if the feature gate is enabled. False otherwise. + */ + public boolean isEnabled() { + return value == null ? 
defaultValue : value; + } + + /** + * @return Returns True if this feature gate is enabled by default. False otherwise. + */ + public boolean isEnabledByDefault() { + return defaultValue; + } +} \ No newline at end of file diff --git a/operator-common/src/main/java/io/strimzi/operator/common/config/FeatureGatesParser.java b/operator-common/src/main/java/io/strimzi/operator/common/config/FeatureGatesParser.java new file mode 100644 index 00000000000..b83894d641c --- /dev/null +++ b/operator-common/src/main/java/io/strimzi/operator/common/config/FeatureGatesParser.java @@ -0,0 +1,64 @@ +/* + * Copyright Strimzi authors. + * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). + */ +package io.strimzi.operator.common.config; + +import io.strimzi.operator.common.InvalidConfigurationException; + +import java.util.Collections; +import java.util.List; +import java.util.Map; + +import static java.util.Arrays.asList; + +/** + * Parses String like "+FeatureA,-FeatureB" and sets the result in a map with possible feature gates. 
+ */ +public class FeatureGatesParser { + private List featureGates = Collections.emptyList(); + + /** + * @param featureGateConfig String with all feature gates joined by "," + */ + public FeatureGatesParser(String featureGateConfig) { + if (featureGateConfig != null && !featureGateConfig.trim().isEmpty()) { + if (featureGateConfig.matches("(\\s*[+-][a-zA-Z0-9]+\\s*,)*\\s*[+-][a-zA-Z0-9]+\\s*")) { + featureGates = asList(featureGateConfig.trim().split("\\s*,+\\s*")); + } else { + throw new InvalidConfigurationException(featureGateConfig + " is not a valid feature gate configuration"); + } + } + } + + /** + * @param possibleFeatureGateWithDefaultValues Map of possible feature gates and their default values + */ + public void applyFor(Map possibleFeatureGateWithDefaultValues) { + for (String featureGate : featureGates) { + boolean value = '+' == featureGate.charAt(0); + featureGate = featureGate.substring(1); + + if (possibleFeatureGateWithDefaultValues.containsKey(featureGate)) { + setValueOnlyOnce(possibleFeatureGateWithDefaultValues.get(featureGate), value); + } else { + throw new InvalidConfigurationException("Unknown feature gate " + featureGate + " found in the configuration"); + } + } + } + + /** + * Sets the feature gate value if it was not set yet. But if it is already set, then it throws an exception. This + * helps to ensure that each feature gate is configured always only once. 
+ * + * @param gate Feature gate which is being configured + * @param value Value which should be set + */ + private void setValueOnlyOnce(FeatureGate gate, boolean value) { + if (gate.isSet()) { + throw new InvalidConfigurationException("Feature gate " + gate.getName() + " is configured multiple times"); + } + + gate.setValue(value); + } +} diff --git a/operator-common/src/main/java/io/strimzi/operator/common/model/Ca.java b/operator-common/src/main/java/io/strimzi/operator/common/model/Ca.java index f035b36594a..25980be5578 100644 --- a/operator-common/src/main/java/io/strimzi/operator/common/model/Ca.java +++ b/operator-common/src/main/java/io/strimzi/operator/common/model/Ca.java @@ -595,6 +595,7 @@ public void createRenewOrReplace(String namespace, String clusterName, Map certAnnotations = new HashMap<>(2); certAnnotations.put(ANNO_STRIMZI_IO_CA_CERT_GENERATION, String.valueOf(caCertGeneration)); + certAnnotations.put(Annotations.ANNO_STRIMZI_IO_FORCE_RENEW, "false"); if (renewalType.equals(RenewalType.POSTPONED) && this.caCertSecret.getMetadata() != null @@ -604,6 +605,7 @@ public void createRenewOrReplace(String namespace, String clusterName, Map keyAnnotations = new HashMap<>(2); keyAnnotations.put(ANNO_STRIMZI_IO_CA_KEY_GENERATION, String.valueOf(caKeyGeneration)); + keyAnnotations.put(Annotations.ANNO_STRIMZI_IO_FORCE_REPLACE, "false"); if (renewalType.equals(RenewalType.POSTPONED) && this.caKeySecret.getMetadata() != null diff --git a/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/AbstractNamespacedResourceOperator.java b/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/AbstractNamespacedResourceOperator.java index fa7454466ac..4d9bde1d3de 100644 --- a/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/AbstractNamespacedResourceOperator.java +++ b/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/AbstractNamespacedResourceOperator.java @@ -9,6 +9,7 @@ 
import io.fabric8.kubernetes.api.model.KubernetesResourceList; import io.fabric8.kubernetes.api.model.LabelSelector; import io.fabric8.kubernetes.client.KubernetesClient; +import io.fabric8.kubernetes.client.KubernetesClientException; import io.fabric8.kubernetes.client.Watcher; import io.fabric8.kubernetes.client.dsl.FilterWatchListDeletable; import io.fabric8.kubernetes.client.dsl.Informable; @@ -53,14 +54,28 @@ public abstract class AbstractNamespacedResourceOperator operation(); @@ -97,27 +112,42 @@ public Future> reconcile(Reconciliation reconciliation, Strin return Future.failedFuture("Given name " + name + " incompatible with desired name " + desired.getMetadata().getName()); } - return getAsync(namespace, name) - .compose(current -> { - if (desired != null) { - if (current == null) { - LOGGER.debugCr(reconciliation, "{} {}/{} does not exist, creating it", resourceKind, namespace, name); - return internalCreate(reconciliation, namespace, name, desired); - } else { - LOGGER.debugCr(reconciliation, "{} {}/{} already exists, updating it", resourceKind, namespace, name); - return internalUpdate(reconciliation, namespace, name, current, desired); - } - } else { - if (current != null) { - // Deletion is desired - LOGGER.debugCr(reconciliation, "{} {}/{} exist, deleting it", resourceKind, namespace, name); - return internalDelete(reconciliation, namespace, name); + if (useServerSideApply) { + if (desired != null) { + if (desired.getMetadata().getManagedFields() != null && !desired.getMetadata().getManagedFields().isEmpty()) { + LOGGER.debugCr(reconciliation, "Deleting managedFields from request before patching resource {} {}/{}", resourceKind, namespace, name); + desired.getMetadata().setManagedFields(null); + } + + LOGGER.debugCr(reconciliation, "{} {}/{} desired, patching it", resourceKind, namespace, name); + return internalPatch(reconciliation, namespace, name, desired); + } else { + LOGGER.debugCr(reconciliation, "{} {}/{} no longer desired, deleting it", 
resourceKind, namespace, name); + return internalDelete(reconciliation, namespace, name); + } + } else { + return getAsync(namespace, name) + .compose(current -> { + if (desired != null) { + if (current == null) { + LOGGER.debugCr(reconciliation, "{} {}/{} does not exist, creating it", resourceKind, namespace, name); + return internalCreate(reconciliation, namespace, name, desired); + } else { + LOGGER.debugCr(reconciliation, "{} {}/{} already exists, updating it", resourceKind, namespace, name); + return internalUpdate(reconciliation, namespace, name, current, desired); + } } else { - LOGGER.debugCr(reconciliation, "{} {}/{} does not exist, noop", resourceKind, namespace, name); - return Future.succeededFuture(ReconcileResult.noop(null)); + if (current != null) { + // Deletion is desired + LOGGER.debugCr(reconciliation, "{} {}/{} exist, deleting it", resourceKind, namespace, name); + return internalDelete(reconciliation, namespace, name); + } else { + LOGGER.debugCr(reconciliation, "{} {}/{} does not exist, noop", resourceKind, namespace, name); + return Future.succeededFuture(ReconcileResult.noop(null)); + } } - } - }); + }); + } } /** @@ -248,6 +278,17 @@ protected Future> internalUpdate(Reconciliation reconciliatio } } + protected Future> internalPatch(Reconciliation reconciliation, String namespace, String name, T desired) { + try { + T result = patch(reconciliation, namespace, name, desired); + LOGGER.debugCr(reconciliation, "{} {} in namespace {} has been patched", resourceKind, name, namespace); + return Future.succeededFuture(ReconcileResult.patchedUsingServerSideApply(result)); + } catch (Exception e) { + LOGGER.debugCr(reconciliation, "Caught exception while patching {} {} in namespace {}", resourceKind, name, namespace, e); + return Future.failedFuture(e); + } + } + /** * Method for patching or replacing a resource. By default, is using JSON-type patch. Overriding this method can be * used to use replace instead of patch or different patch strategies. 
@@ -262,6 +303,23 @@ protected T patchOrReplace(String namespace, String name, T desired) { return operation().inNamespace(namespace).withName(name).patch(PatchContext.of(PatchType.JSON), desired); } + protected T patch(Reconciliation reconciliation, String namespace, String name, T desired) { + try { + return operation().inNamespace(namespace).withName(name).patch(serverSideApplyPatchContext(false), desired); + } catch (KubernetesClientException e) { + LOGGER.warnCr(reconciliation, "{} {} in namespace {} failed to apply, using force", resourceKind, name, namespace, e); + return operation().inNamespace(namespace).withName(name).patch(serverSideApplyPatchContext(true), desired); + } + } + + protected PatchContext serverSideApplyPatchContext(boolean force) { + return new PatchContext.Builder() + .withPatchType(PatchType.SERVER_SIDE_APPLY) + .withFieldManager("strimzi-cluster-operator") + .withForce(force) + .build(); + } + /** * Creates a resource with the given namespace and name with the given desired state * and completes the given future accordingly. 
diff --git a/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/AbstractNonNamespacedResourceOperator.java b/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/AbstractNonNamespacedResourceOperator.java index 9cc5926595f..5b713f44338 100644 --- a/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/AbstractNonNamespacedResourceOperator.java +++ b/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/AbstractNonNamespacedResourceOperator.java @@ -7,6 +7,7 @@ import io.fabric8.kubernetes.api.model.HasMetadata; import io.fabric8.kubernetes.api.model.KubernetesResourceList; import io.fabric8.kubernetes.client.KubernetesClient; +import io.fabric8.kubernetes.client.KubernetesClientException; import io.fabric8.kubernetes.client.Watcher; import io.fabric8.kubernetes.client.dsl.NonNamespaceOperation; import io.fabric8.kubernetes.client.dsl.Resource; @@ -36,14 +37,29 @@ public abstract class AbstractNonNamespacedResourceOperator { private static final ReconciliationLogger LOGGER = ReconciliationLogger.create(AbstractNonNamespacedResourceOperator.class); + private boolean useServerSideApply; + + /** * Constructor. * @param vertx The vertx instance. * @param client The kubernetes client. * @param resourceKind The mind of Kubernetes resource (used for logging). + * @param useServerSideApply Whether to use server side apply */ - public AbstractNonNamespacedResourceOperator(Vertx vertx, C client, String resourceKind) { + public AbstractNonNamespacedResourceOperator(Vertx vertx, C client, String resourceKind, boolean useServerSideApply) { super(vertx, client, resourceKind); + this.useServerSideApply = useServerSideApply; + } + + /** + * Constructor. + * @param vertx The vertx instance. + * @param client The kubernetes client. + * @param resourceKind The mind of Kubernetes resource (used for logging). 
+ */ + public AbstractNonNamespacedResourceOperator(Vertx vertx, C client, String resourceKind) { + this(vertx, client, resourceKind, false); } protected abstract NonNamespaceOperation operation(); @@ -78,27 +94,37 @@ public Future> reconcile(Reconciliation reconciliation, Strin + desired.getMetadata().getName()); } - return getAsync(name) - .compose(current -> { - if (desired != null) { - if (current == null) { - LOGGER.debugCr(reconciliation, "{} {} does not exist, creating it", resourceKind, name); - return internalCreate(reconciliation, name, desired); - } else { - LOGGER.debugCr(reconciliation, "{} {} already exists, updating it", resourceKind, name); - return internalUpdate(reconciliation, name, current, desired); - } - } else { - if (current != null) { - // Deletion is desired - LOGGER.debugCr(reconciliation, "{} {} exist, deleting it", resourceKind, name); - return internalDelete(reconciliation, name); + if (useServerSideApply) { + if (desired != null) { + LOGGER.debugCr(reconciliation, "{} {}/{} desired, patching it", resourceKind, name); + return internalPatch(reconciliation, name, desired); + } else { + LOGGER.debugCr(reconciliation, "{} {}/{} no longer desired, deleting it", resourceKind, name); + return internalDelete(reconciliation, name); + } + } else { + return getAsync(name) + .compose(current -> { + if (desired != null) { + if (current == null) { + LOGGER.debugCr(reconciliation, "{} {} does not exist, creating it", resourceKind, name); + return internalCreate(reconciliation, name, desired); + } else { + LOGGER.debugCr(reconciliation, "{} {} already exists, updating it", resourceKind, name); + return internalUpdate(reconciliation, name, current, desired); + } } else { - LOGGER.debugCr(reconciliation, "{} {} does not exist, noop", resourceKind, name); - return Future.succeededFuture(ReconcileResult.noop(null)); + if (current != null) { + // Deletion is desired + LOGGER.debugCr(reconciliation, "{} {} exist, deleting it", resourceKind, name); + return 
internalDelete(reconciliation, name); + } else { + LOGGER.debugCr(reconciliation, "{} {} does not exist, noop", resourceKind, name); + return Future.succeededFuture(ReconcileResult.noop(null)); + } } - } - }); + }); + } } /** @@ -163,6 +189,17 @@ protected Future> internalUpdate(Reconciliation reconciliatio } } + protected Future> internalPatch(Reconciliation reconciliation, String name, T desired) { + try { + T result = patch(reconciliation, name, desired); + LOGGER.debugCr(reconciliation, "{} {} in namespace {} has been patched", resourceKind, name); + return Future.succeededFuture(ReconcileResult.patchedUsingServerSideApply(result)); + } catch (Exception e) { + LOGGER.debugCr(reconciliation, "Caught exception while patching {} {} in namespace {}", resourceKind, name, e); + return Future.failedFuture(e); + } + } + /** * Method for patching or replacing a resource. By default, is using JSON-type patch. Overriding this method can be * used to use replace instead of patch or different patch strategies. 
@@ -176,6 +213,23 @@ protected T patchOrReplace(String name, T desired) { return operation().withName(name).patch(PatchContext.of(PatchType.JSON), desired); } + protected T patch(Reconciliation reconciliation, String name, T desired) { + try { + return operation().withName(name).patch(serverSideApplyPatchContext(false), desired); + } catch (KubernetesClientException e) { + LOGGER.warnCr(reconciliation, "{} {} in namespace {} failed to apply, using force", resourceKind, name, e); + return operation().withName(name).patch(serverSideApplyPatchContext(true), desired); + } + } + + protected PatchContext serverSideApplyPatchContext(boolean force) { + return new PatchContext.Builder() + .withPatchType(PatchType.SERVER_SIDE_APPLY) + .withFieldManager("strimzi-cluster-operator") //TODO find if/where this is configured for the other operations that strimzi is doing + .withForce(force) + .build(); + } + /** * Creates a resource with the name with the given desired state * and completes the given future accordingly. diff --git a/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/AbstractReadyNamespacedResourceOperator.java b/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/AbstractReadyNamespacedResourceOperator.java index 5a87feff7dd..e9811907510 100644 --- a/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/AbstractReadyNamespacedResourceOperator.java +++ b/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/AbstractReadyNamespacedResourceOperator.java @@ -27,6 +27,18 @@ public abstract class AbstractReadyNamespacedResourceOperator> extends AbstractNamespacedResourceOperator { + /** + * Constructor. + * + * @param vertx The vertx instance. + * @param client The kubernetes client. + * @param resourceKind The mind of Kubernetes resource (used for logging). 
+ * @param useServerSideApply whether to use server side apply patch requests + */ + public AbstractReadyNamespacedResourceOperator(Vertx vertx, C client, String resourceKind, boolean useServerSideApply) { + super(vertx, client, resourceKind, useServerSideApply); + } + /** * Constructor. * @@ -35,7 +47,7 @@ public abstract class AbstractReadyNamespacedResourceOperator watcher) { return operation().inAnyNamespace().watch(watcher); diff --git a/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/AbstractWatchableStatusedNamespacedResourceOperator.java b/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/AbstractWatchableStatusedNamespacedResourceOperator.java index 7cabddbaaf0..3acd1ffa1d6 100644 --- a/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/AbstractWatchableStatusedNamespacedResourceOperator.java +++ b/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/AbstractWatchableStatusedNamespacedResourceOperator.java @@ -37,6 +37,17 @@ public abstract class AbstractWatchableStatusedNamespacedResourceOperator< public AbstractWatchableStatusedNamespacedResourceOperator(Vertx vertx, C client, String resourceKind) { super(vertx, client, resourceKind); } + /** + * Constructor. + * + * @param vertx The vertx instance. + * @param client The kubernetes client. + * @param resourceKind The mind of Kubernetes resource (used for logging). 
+ * @param useServerSideApply Whether to use server side apply + */ + public AbstractWatchableStatusedNamespacedResourceOperator(Vertx vertx, C client, String resourceKind, boolean useServerSideApply) { + super(vertx, client, resourceKind, useServerSideApply); + } /** * Updates status of the resource diff --git a/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/BuildConfigOperator.java b/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/BuildConfigOperator.java index 79f8e405be4..f6a80005728 100644 --- a/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/BuildConfigOperator.java +++ b/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/BuildConfigOperator.java @@ -29,6 +29,16 @@ public BuildConfigOperator(Vertx vertx, OpenShiftClient client) { super(vertx, client, "BuildConfig"); } + /** + * Constructor + * @param vertx The Vertx instance + * @param client The OpenShift client + * @param useServerSideApply Whether to use server side apply + */ + public BuildConfigOperator(Vertx vertx, OpenShiftClient client, boolean useServerSideApply) { + super(vertx, client, "BuildConfig", useServerSideApply); + } + @Override protected MixedOperation> operation() { return client.buildConfigs(); diff --git a/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/BuildOperator.java b/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/BuildOperator.java index ab4d2e8e0cf..1357473cb62 100644 --- a/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/BuildOperator.java +++ b/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/BuildOperator.java @@ -25,6 +25,17 @@ public BuildOperator(Vertx vertx, OpenShiftClient client) { super(vertx, client, "Build"); } + /** + * Constructor + * + * @param vertx The Vertx instance + * @param client The OpenShift client + * @param useServerSideApply Whether to use 
server side apply + */ + public BuildOperator(Vertx vertx, OpenShiftClient client, boolean useServerSideApply) { + super(vertx, client, "Build", useServerSideApply); + } + @Override protected MixedOperation operation() { return client.builds(); diff --git a/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/ClusterRoleBindingOperator.java b/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/ClusterRoleBindingOperator.java index 6c1737eb4ba..ff0ed39c4dd 100644 --- a/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/ClusterRoleBindingOperator.java +++ b/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/ClusterRoleBindingOperator.java @@ -26,6 +26,16 @@ public ClusterRoleBindingOperator(Vertx vertx, KubernetesClient client) { super(vertx, client, "ClusterRoleBinding"); } + /** + * Constructor. + * @param vertx The Vertx instance. + * @param client The Kubernetes client. + * @param useServerSideApply Whether to use server side apply + */ + public ClusterRoleBindingOperator(Vertx vertx, KubernetesClient client, boolean useServerSideApply) { + super(vertx, client, "ClusterRoleBinding", useServerSideApply); + } + @Override protected NonNamespaceOperation> operation() { return client.rbac().clusterRoleBindings(); diff --git a/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/ConfigMapOperator.java b/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/ConfigMapOperator.java index d397f0eb927..f34945d322b 100644 --- a/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/ConfigMapOperator.java +++ b/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/ConfigMapOperator.java @@ -34,6 +34,17 @@ public ConfigMapOperator(Vertx vertx, KubernetesClient client) { super(vertx, client, "ConfigMap"); } + /** + * Constructor + * + * @param vertx The Vertx instance + * @param client The Kubernetes 
client + * @param useServerSideApply Whether to use server side apply + */ + public ConfigMapOperator(Vertx vertx, KubernetesClient client, boolean useServerSideApply) { + super(vertx, client, "ConfigMap", useServerSideApply); + } + @Override protected MixedOperation> operation() { return client.configMaps(); diff --git a/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/CrdOperator.java b/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/CrdOperator.java index 1acc984d9be..5c2aa15cd23 100644 --- a/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/CrdOperator.java +++ b/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/CrdOperator.java @@ -49,6 +49,21 @@ public CrdOperator(Vertx vertx, C client, Class cls, Class listCls, String this.listCls = listCls; } + /** + * Constructor + * @param vertx The Vertx instance + * @param client The Kubernetes client + * @param cls The class of the CR + * @param listCls The class of the list. 
+ * @param kind The Kind of the CR for which this operator should be used + * @param useServerSideApply Whether to use server side apply + */ + public CrdOperator(Vertx vertx, C client, Class cls, Class listCls, String kind, boolean useServerSideApply) { + super(vertx, client, kind, useServerSideApply); + this.cls = cls; + this.listCls = listCls; + } + @Override protected MixedOperation> operation() { return client.resources(cls, listCls); diff --git a/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/DeploymentOperator.java b/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/DeploymentOperator.java index 6f2fe3e9820..af226815d6a 100644 --- a/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/DeploymentOperator.java +++ b/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/DeploymentOperator.java @@ -33,6 +33,17 @@ public DeploymentOperator(Vertx vertx, KubernetesClient client) { this(vertx, client, new PodOperator(vertx, client)); } + /** + * Constructor + * + * @param vertx The Vertx instance + * @param client The Kubernetes client + * @param useServerSideApply Whether to use server side apply + */ + public DeploymentOperator(Vertx vertx, KubernetesClient client, boolean useServerSideApply) { + this(vertx, client, new PodOperator(vertx, client), useServerSideApply); + } + /** * Constructor * @@ -45,6 +56,19 @@ public DeploymentOperator(Vertx vertx, KubernetesClient client, PodOperator podO this.podOperations = podOperations; } + /** + * Constructor + * + * @param vertx Vert.x instance + * @param client Kubernetes client + * @param podOperations Pod Operator for managing pods + * @param useServerSideApply Whether to use server side apply + */ + public DeploymentOperator(Vertx vertx, KubernetesClient client, PodOperator podOperations, boolean useServerSideApply) { + super(vertx, client, "Deployment", useServerSideApply); + this.podOperations = podOperations; + } + 
@Override protected MixedOperation> operation() { return client.apps().deployments(); diff --git a/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/ImageStreamOperator.java b/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/ImageStreamOperator.java index 45c79938385..53953e09fd3 100644 --- a/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/ImageStreamOperator.java +++ b/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/ImageStreamOperator.java @@ -23,6 +23,15 @@ public class ImageStreamOperator extends AbstractNamespacedResourceOperator> operation() { diff --git a/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/IngressOperator.java b/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/IngressOperator.java index 0a77083f56d..fd9d6d3b037 100644 --- a/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/IngressOperator.java +++ b/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/IngressOperator.java @@ -27,6 +27,16 @@ public IngressOperator(Vertx vertx, KubernetesClient client) { super(vertx, client, "Ingress"); } + /** + * Constructor + * @param vertx The Vertx instance + * @param client The Kubernetes client + * @param useServerSideApply Whether to use server side apply + */ + public IngressOperator(Vertx vertx, KubernetesClient client, boolean useServerSideApply) { + super(vertx, client, "Ingress", useServerSideApply); + } + @Override protected MixedOperation> operation() { return client.network().v1().ingresses(); diff --git a/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/NetworkPolicyOperator.java b/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/NetworkPolicyOperator.java index ff7ef34ad21..234109609d2 100644 --- 
a/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/NetworkPolicyOperator.java +++ b/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/NetworkPolicyOperator.java @@ -38,6 +38,17 @@ public NetworkPolicyOperator(Vertx vertx, KubernetesClient client) { super(vertx, client, "NetworkPolicy"); } + /** + * Constructs the Network Policy Operator + * + * @param vertx Vert.x instance + * @param client Kubernetes client + * @param useServerSideApply Whether to use server side apply + */ + public NetworkPolicyOperator(Vertx vertx, KubernetesClient client, boolean useServerSideApply) { + super(vertx, client, "NetworkPolicy", useServerSideApply); + } + @Override protected MixedOperation> operation() { return client.network().networkPolicies(); diff --git a/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/NodeOperator.java b/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/NodeOperator.java index 97c6083e154..ba63ca88945 100644 --- a/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/NodeOperator.java +++ b/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/NodeOperator.java @@ -26,6 +26,17 @@ public NodeOperator(Vertx vertx, KubernetesClient client) { super(vertx, client, "Node"); } + /** + * Constructor. + * + * @param vertx The Vertx instance. + * @param client The Kubernetes client. 
+ * @param useServerSideApply Whether to use server side apply + */ + public NodeOperator(Vertx vertx, KubernetesClient client, boolean useServerSideApply) { + super(vertx, client, "Node", useServerSideApply); + } + @Override protected NonNamespaceOperation> operation() { return client.nodes(); diff --git a/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/PodDisruptionBudgetOperator.java b/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/PodDisruptionBudgetOperator.java index df287df0b73..b7ca12c074b 100644 --- a/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/PodDisruptionBudgetOperator.java +++ b/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/PodDisruptionBudgetOperator.java @@ -24,6 +24,16 @@ public class PodDisruptionBudgetOperator extends AbstractNamespacedResourceOpera public PodDisruptionBudgetOperator(Vertx vertx, KubernetesClient client) { super(vertx, client, "PodDisruptionBudget"); } + /** + * Constructs the PDB operator + * + * @param vertx Vert.x instance + * @param client Kubernetes client + * @param useServerSideApply Whether to use server side apply + */ + public PodDisruptionBudgetOperator(Vertx vertx, KubernetesClient client, boolean useServerSideApply) { + super(vertx, client, "PodDisruptionBudget", useServerSideApply); + } @Override protected MixedOperation> operation() { diff --git a/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/PodOperator.java b/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/PodOperator.java index 9e09243f564..7b224042275 100644 --- a/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/PodOperator.java +++ b/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/PodOperator.java @@ -24,13 +24,23 @@ public class PodOperator extends AbstractReadyNamespacedResourceOperator> operation() { return 
client.persistentVolumeClaims(); diff --git a/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/ReconcileResult.java b/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/ReconcileResult.java index dea4d700e7f..17f72ab7a63 100644 --- a/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/ReconcileResult.java +++ b/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/ReconcileResult.java @@ -58,8 +58,23 @@ public String toString() { * @param Resource type for which the result is being indicated */ public static class Patched extends ReconcileResult { + private final boolean usingServerSideApply; + private Patched(R resource) { super(Optional.of(resource)); + usingServerSideApply = false; + } + + private Patched(R resource, boolean usingServerSideApply) { + super(Optional.ofNullable(resource)); + this.usingServerSideApply = usingServerSideApply; + } + + /** + * @return whether patch was applied using Server Side Apply + */ + public boolean isUsingServerSideApply() { + return usingServerSideApply; } @Override @@ -78,6 +93,15 @@ public static final Patched patched(D resource) { return new Patched<>(resource); } + /** + * @param resource The patched resource + * @param The type of resource. + * @return a reconciliation result that indicates the resource was patched using server side apply + */ + public static ReconcileResult patchedUsingServerSideApply(D resource) { + return new Patched<>(resource, true); + } + /** * Return a reconciliation result that indicates the resource was created. * @return a reconciliation result that indicates the resource was created. 
diff --git a/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/RoleBindingOperator.java b/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/RoleBindingOperator.java index a0a337f2ab5..921b619048a 100644 --- a/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/RoleBindingOperator.java +++ b/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/RoleBindingOperator.java @@ -26,6 +26,16 @@ public RoleBindingOperator(Vertx vertx, KubernetesClient client) { super(vertx, client, "RoleBinding"); } + /** + * Constructor + * @param vertx The Vertx instance + * @param client The Kubernetes client + * @param useServerSideApply Whether to use server side apply + */ + public RoleBindingOperator(Vertx vertx, KubernetesClient client, boolean useServerSideApply) { + super(vertx, client, "RoleBinding", useServerSideApply); + } + @Override protected MixedOperation> operation() { return client.rbac().roleBindings(); diff --git a/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/RoleOperator.java b/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/RoleOperator.java index cdbad60f20f..6855145786c 100644 --- a/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/RoleOperator.java +++ b/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/RoleOperator.java @@ -28,6 +28,16 @@ public RoleOperator(Vertx vertx, KubernetesClient client) { super(vertx, client, "Role"); } + /** + * Constructor + * @param vertx The Vertx instance + * @param client The Kubernetes client + * @param useServerSideApply Whether to use server side apply + */ + public RoleOperator(Vertx vertx, KubernetesClient client, boolean useServerSideApply) { + super(vertx, client, "Role", useServerSideApply); + } + @Override protected MixedOperation> operation() { return client.rbac().roles(); diff --git 
a/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/RouteOperator.java b/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/RouteOperator.java index 1e1f3e3eb97..36bd28ee6b5 100644 --- a/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/RouteOperator.java +++ b/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/RouteOperator.java @@ -25,6 +25,15 @@ public class RouteOperator extends AbstractNamespacedResourceOperator> operation() { diff --git a/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/SecretOperator.java b/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/SecretOperator.java index b82982e55ed..447a9547411 100644 --- a/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/SecretOperator.java +++ b/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/SecretOperator.java @@ -25,6 +25,16 @@ public SecretOperator(Vertx vertx, KubernetesClient client) { super(vertx, client, "Secret"); } + /** + * Constructor + * @param vertx The Vertx instance + * @param client The Kubernetes client + * @param useServerSideApply Whether to use server side apply + */ + public SecretOperator(Vertx vertx, KubernetesClient client, boolean useServerSideApply) { + super(vertx, client, "Secret", useServerSideApply); + } + @Override protected MixedOperation> operation() { return client.secrets(); diff --git a/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/ServiceAccountOperator.java b/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/ServiceAccountOperator.java index 97524ca62b9..b8aa56031ce 100644 --- a/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/ServiceAccountOperator.java +++ b/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/ServiceAccountOperator.java @@ -26,6 +26,16 @@ public 
ServiceAccountOperator(Vertx vertx, KubernetesClient client) { super(vertx, client, "ServiceAccount"); } + /** + * Constructor + * @param vertx The Vertx instance + * @param client The Kubernetes client + * @param useServerSideApply Whether to use server side apply + */ + public ServiceAccountOperator(Vertx vertx, KubernetesClient client, boolean useServerSideApply) { + super(vertx, client, "ServiceAccount", useServerSideApply); + } + @Override protected MixedOperation operation() { return client.serviceAccounts(); diff --git a/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/ServiceOperator.java b/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/ServiceOperator.java index 9ce15800115..64e5ab486b6 100644 --- a/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/ServiceOperator.java +++ b/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/ServiceOperator.java @@ -55,6 +55,18 @@ public ServiceOperator(Vertx vertx, KubernetesClient client) { this.endpointOperations = new EndpointOperator(vertx, client); } + /** + * Constructor + * + * @param vertx The Vertx instance + * @param client The Kubernetes client + * @param useServerSideApply Whether to use server side apply + */ + public ServiceOperator(Vertx vertx, KubernetesClient client, boolean useServerSideApply) { + super(vertx, client, "Service", useServerSideApply); + this.endpointOperations = new EndpointOperator(vertx, client); + } + @Override protected MixedOperation> operation() { return client.services(); diff --git a/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/StorageClassOperator.java b/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/StorageClassOperator.java index 52af5a8884d..66e52e4c3f1 100644 --- a/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/StorageClassOperator.java +++ 
b/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/StorageClassOperator.java @@ -22,11 +22,21 @@ public class StorageClassOperator extends AbstractNonNamespacedResourceOperator< * @param vertx The Vertx instance. * @param client The Kubernetes client. */ - public StorageClassOperator(Vertx vertx, KubernetesClient client) { super(vertx, client, "StorageClass"); } + /** + * Constructor. + * + * @param vertx The Vertx instance. + * @param client The Kubernetes client. + * @param useServerSideApply Whether to use server side apply + */ + public StorageClassOperator(Vertx vertx, KubernetesClient client, boolean useServerSideApply) { + super(vertx, client, "StorageClass", useServerSideApply); + } + @Override protected NonNamespaceOperation> operation() { diff --git a/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/StrimziPodSetOperator.java b/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/StrimziPodSetOperator.java index f4777eeed44..4efcf831495 100644 --- a/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/StrimziPodSetOperator.java +++ b/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/StrimziPodSetOperator.java @@ -26,6 +26,17 @@ public StrimziPodSetOperator(Vertx vertx, KubernetesClient client) { super(vertx, client, StrimziPodSet.class, StrimziPodSetList.class, StrimziPodSet.RESOURCE_KIND); } + /** + * Constructs the StrimziPodSet operator + * + * @param vertx The Vertx instance. + * @param client The Kubernetes client. + * @param useServerSideApply Whether to use server side apply + */ + public StrimziPodSetOperator(Vertx vertx, KubernetesClient client, boolean useServerSideApply) { + super(vertx, client, StrimziPodSet.class, StrimziPodSetList.class, StrimziPodSet.RESOURCE_KIND, useServerSideApply); + } + /** * StrimziPodSetOperator overrides this method in order to use replace instead of patch. 
* diff --git a/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/concurrent/AbstractNamespacedResourceOperator.java b/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/concurrent/AbstractNamespacedResourceOperator.java index 05c476c5480..d424a39f0f8 100644 --- a/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/concurrent/AbstractNamespacedResourceOperator.java +++ b/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/concurrent/AbstractNamespacedResourceOperator.java @@ -54,15 +54,19 @@ public abstract class AbstractNamespacedResourceOperator operation(); @@ -93,14 +97,29 @@ public CompletionStage> createOrUpdate(Reconciliation reconci * @return A CompletionStage which completes when the resource has been updated. */ public CompletionStage> reconcile(Reconciliation reconciliation, String namespace, String name, T desired) { - if (desired != null && !namespace.equals(desired.getMetadata().getNamespace())) { - return CompletableFuture.failedStage(new IllegalArgumentException("Given namespace " + namespace + " incompatible with desired namespace " + desired.getMetadata().getNamespace())); - } else if (desired != null && !name.equals(desired.getMetadata().getName())) { - return CompletableFuture.failedStage(new IllegalArgumentException("Given name " + name + " incompatible with desired name " + desired.getMetadata().getName())); - } + if (useServerSideApply) { + if (desired != null) { + if (desired.getMetadata().getManagedFields() != null && !desired.getMetadata().getManagedFields().isEmpty()) { + LOGGER.debugCr(reconciliation, "Deleting managedFields from request before pathing resource {} {}/{}", resourceKind, namespace, name); + desired.getMetadata().setManagedFields(null); + } - return CompletableFuture.supplyAsync(() -> operation().inNamespace(namespace).withName(name).get(), asyncExecutor) - .thenCompose(current -> this.reconcile(reconciliation, namespace, name, 
current, desired)); + LOGGER.debugCr(reconciliation, "{} {}/{} desired, patching it", resourceKind, namespace, name); + return internalPatch(reconciliation, namespace, name, desired); + } else { + LOGGER.debugCr(reconciliation, "{} {}/{} no longer desired, deleting it", resourceKind, namespace, name); + return internalDelete(reconciliation, namespace, name); + } + } else { + if (desired != null && !namespace.equals(desired.getMetadata().getNamespace())) { + return CompletableFuture.failedStage(new IllegalArgumentException("Given namespace " + namespace + " incompatible with desired namespace " + desired.getMetadata().getNamespace())); + } else if (desired != null && !name.equals(desired.getMetadata().getName())) { + return CompletableFuture.failedStage(new IllegalArgumentException("Given name " + name + " incompatible with desired name " + desired.getMetadata().getName())); + } + + return CompletableFuture.supplyAsync(() -> operation().inNamespace(namespace).withName(name).get(), asyncExecutor) + .thenCompose(current -> this.reconcile(reconciliation, namespace, name, current, desired)); + } } /** @@ -115,22 +134,37 @@ public CompletionStage> reconcile(Reconciliation reconciliati * @return A CompletionStage which completes when the resource has been updated. 
*/ public CompletionStage> reconcile(Reconciliation reconciliation, String namespace, String name, T current, T desired) { - if (desired != null) { - if (current == null) { - LOGGER.debugCr(reconciliation, "{} {}/{} does not exist, creating it", resourceKind, namespace, name); - return internalCreate(reconciliation, namespace, name, desired); + if (useServerSideApply) { + if (desired != null) { + if (desired.getMetadata().getManagedFields() != null && !desired.getMetadata().getManagedFields().isEmpty()) { + LOGGER.debugCr(reconciliation, "Deleting managedFields from request before patching resource {} {}/{}", resourceKind, namespace, name); + desired.getMetadata().setManagedFields(null); + } + + LOGGER.debugCr(reconciliation, "{} {}/{} desired, patching it", resourceKind, namespace, name); + return internalPatch(reconciliation, namespace, name, desired); } else { - LOGGER.debugCr(reconciliation, "{} {}/{} already exists, updating it", resourceKind, namespace, name); - return internalUpdate(reconciliation, namespace, name, current, desired); + LOGGER.debugCr(reconciliation, "{} {}/{} no longer desired, deleting it", resourceKind, namespace, name); + return internalDelete(reconciliation, namespace, name); } } else { - if (current != null) { - // Deletion is desired - LOGGER.debugCr(reconciliation, "{} {}/{} exist, deleting it", resourceKind, namespace, name); - return internalDelete(reconciliation, namespace, name); + if (desired != null) { + if (current == null) { + LOGGER.debugCr(reconciliation, "{} {}/{} does not exist, creating it", resourceKind, namespace, name); + return internalCreate(reconciliation, namespace, name, desired); + } else { + LOGGER.debugCr(reconciliation, "{} {}/{} already exists, updating it", resourceKind, namespace, name); + return internalUpdate(reconciliation, namespace, name, current, desired); + } } else { - LOGGER.debugCr(reconciliation, "{} {}/{} does not exist, noop", resourceKind, namespace, name); - return 
CompletableFuture.completedStage(ReconcileResult.noop(null)); + if (current != null) { + // Deletion is desired + LOGGER.debugCr(reconciliation, "{} {}/{} exist, deleting it", resourceKind, namespace, name); + return internalDelete(reconciliation, namespace, name); + } else { + LOGGER.debugCr(reconciliation, "{} {}/{} does not exist, noop", resourceKind, namespace, name); + return CompletableFuture.completedStage(ReconcileResult.noop(null)); + } } } } @@ -175,6 +209,40 @@ public CompletionStage batchReconcile(Reconciliation reconciliation, Strin }); } + protected CompletionStage> internalPatch(Reconciliation reconciliation, String namespace, String name, T desired) { + try { + return CompletableFuture.supplyAsync(() -> patch(reconciliation, namespace, name, desired), asyncExecutor) + .thenApply(ReconcileResult::patchedUsingServerSideApply) + .whenComplete((result, error) -> { + if (error == null) { + LOGGER.debugCr(reconciliation, "{} {} in namespace {} has been patched", resourceKind, name, namespace); + } else { + LOGGER.debugCr(reconciliation, "Caught exception while patching {} {} in namespace {}", resourceKind, name, namespace, error); + } + }); + } catch (Exception e) { + LOGGER.debugCr(reconciliation, "Caught exception while patching {} {} in namespace {}", resourceKind, name, namespace, e); + return CompletableFuture.failedStage(e); + } + } + + protected T patch(Reconciliation reconciliation, String namespace, String name, T desired) { + try { + return operation().inNamespace(namespace).withName(name).patch(serverSideApplyPatchContext(false), desired); + } catch (KubernetesClientException e) { + LOGGER.warnCr(reconciliation, "{} {} in namespace {} failed to apply, using force", resourceKind, name, namespace, e); + return operation().inNamespace(namespace).withName(name).patch(serverSideApplyPatchContext(true), desired); + } + } + + protected PatchContext serverSideApplyPatchContext(boolean force) { + return new PatchContext.Builder() + 
.withPatchType(PatchType.SERVER_SIDE_APPLY) + .withFieldManager("strimzi-cluster-operator") + .withForce(force) + .build(); + } + /** * Deletes the resource with the given namespace and name and completes the * given CompletionStage accordingly. This method will do a cascading delete. diff --git a/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/concurrent/AbstractWatchableNamespacedResourceOperator.java b/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/concurrent/AbstractWatchableNamespacedResourceOperator.java index da890720c7b..f8850125121 100644 --- a/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/concurrent/AbstractWatchableNamespacedResourceOperator.java +++ b/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/concurrent/AbstractWatchableNamespacedResourceOperator.java @@ -35,9 +35,10 @@ public abstract class AbstractWatchableNamespacedResourceOperator< * @param asyncExecutor Executor to use for asynchronous subroutines * @param client The kubernetes client. * @param resourceKind The mind of Kubernetes resource (used for logging). 
+ * @param useServerSideApply Whether to use server side apply */ - protected AbstractWatchableNamespacedResourceOperator(Executor asyncExecutor, C client, String resourceKind) { - super(asyncExecutor, client, resourceKind); + protected AbstractWatchableNamespacedResourceOperator(Executor asyncExecutor, C client, String resourceKind, boolean useServerSideApply) { + super(asyncExecutor, client, resourceKind, useServerSideApply); } protected Watch watchInAnyNamespace(Watcher watcher) { diff --git a/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/concurrent/AbstractWatchableStatusedNamespacedResourceOperator.java b/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/concurrent/AbstractWatchableStatusedNamespacedResourceOperator.java index 77c2347f2c2..cff61a0ea18 100644 --- a/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/concurrent/AbstractWatchableStatusedNamespacedResourceOperator.java +++ b/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/concurrent/AbstractWatchableStatusedNamespacedResourceOperator.java @@ -34,9 +34,10 @@ public abstract class AbstractWatchableStatusedNamespacedResourceOperator< * @param asyncExecutor Executor to use for asynchronous subroutines * @param client The kubernetes client. * @param resourceKind The mind of Kubernetes resource (used for logging). 
+ * @param useServerSideApply Whether to use server side apply */ - protected AbstractWatchableStatusedNamespacedResourceOperator(Executor asyncExecutor, C client, String resourceKind) { - super(asyncExecutor, client, resourceKind); + protected AbstractWatchableStatusedNamespacedResourceOperator(Executor asyncExecutor, C client, String resourceKind, boolean useServerSideApply) { + super(asyncExecutor, client, resourceKind, useServerSideApply); } /** diff --git a/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/concurrent/CrdOperator.java b/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/concurrent/CrdOperator.java index 00968937214..585152fa85d 100644 --- a/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/concurrent/CrdOperator.java +++ b/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/concurrent/CrdOperator.java @@ -44,11 +44,11 @@ public class CrdOperator cls, Class listCls, String kind) { - super(asyncExecutor, client, kind); + public CrdOperator(Executor asyncExecutor, C client, Class cls, Class listCls, String kind, boolean useServerSideApply) { + super(asyncExecutor, client, kind, useServerSideApply); this.cls = cls; this.listCls = listCls; } diff --git a/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/concurrent/SecretOperator.java b/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/concurrent/SecretOperator.java index 6f280164a2d..6f87dde5dcb 100644 --- a/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/concurrent/SecretOperator.java +++ b/operator-common/src/main/java/io/strimzi/operator/common/operator/resource/concurrent/SecretOperator.java @@ -22,9 +22,10 @@ public class SecretOperator extends AbstractNamespacedResourceOperator expectedAnnotations = new HashMap<>(); + expectedAnnotations.putAll(startingPod.getMetadata().getAnnotations()); + 
expectedAnnotations.putAll(createdPod.getMetadata().getAnnotations()); + + Checkpoint async = context.checkpoint(1); + pr.createOrUpdate(Reconciliation.DUMMY_RECONCILIATION, createdPod).onComplete(createResult -> { + context.verify(() -> assertThat(createResult.succeeded(), is(true))); + context.verify(() -> assertThat(pr.list(NAMESPACE, Labels.EMPTY).stream() + .map(p -> p.getMetadata().getName()) + .collect(Collectors.toList()), hasItem(name))); + + context.verify(() -> assertThat(pr.list(NAMESPACE, Labels.EMPTY).stream() + .filter(pod -> pod.getMetadata().getName().equals(name)) + .map(p -> p.getMetadata().getAnnotations()) + .collect(Collectors.toList()), is(singletonList(expectedAnnotations)))); + async.flag(); + }); + } + + @Test + public void testShouldForceFieldsToBeManagedByStrimzi(VertxTestContext context) { + vertx.createSharedWorkerExecutor("kubernetes-ops-pool", 10); + PodOperator pr = new PodOperator(vertx, client, true); + + String name = RESOURCE_NAME; + + Pod startingPod = STARTING_POD.withNewMetadata() + .withNamespace(NAMESPACE) + .withName(name) + .withAnnotations(Map.of("test-annotation", "test-value")).endMetadata().build(); + client.pods().inNamespace(NAMESPACE).resource(startingPod).create(); + + List managedFields = pr.list(NAMESPACE, Labels.EMPTY).stream() + .filter(pod -> pod.getMetadata().getName().equals(name)) + .flatMap(p -> p.getMetadata().getManagedFields().stream()) + .filter(entry -> entry.getManager().equals("fabric8-kubernetes-client")) + .toList(); + + assertThat(managedFields.isEmpty(), is(false)); + + Pod createdPod = STARTING_POD.withNewMetadata() + .withNamespace(NAMESPACE) + .withName(name) + .withAnnotations(Map.of("test-annotation", "a-new-value")).endMetadata().build(); + + Map expectedAnnotations = new HashMap<>(); + expectedAnnotations.putAll(createdPod.getMetadata().getAnnotations()); + + Checkpoint async = context.checkpoint(1); + pr.createOrUpdate(Reconciliation.DUMMY_RECONCILIATION, 
createdPod).onComplete(createResult -> { + context.verify(() -> assertThat(createResult.succeeded(), is(true))); + context.verify(() -> assertThat(pr.list(NAMESPACE, Labels.EMPTY).stream() + .map(p -> p.getMetadata().getName()) + .collect(Collectors.toList()), hasItem(name))); + + context.verify(() -> assertThat(pr.list(NAMESPACE, Labels.EMPTY).stream() + .filter(pod -> pod.getMetadata().getName().equals(name)) + .map(p -> p.getMetadata().getAnnotations()) + .collect(Collectors.toList()), is(singletonList(expectedAnnotations)))); + + //assert that strimzi manages the new annotation + context.verify(() -> { + var foundManagedFields = pr.list(NAMESPACE, Labels.EMPTY).stream() + .filter(pod -> pod.getMetadata().getName().equals(name)) + .flatMap(p -> p.getMetadata().getManagedFields().stream()) + .toList(); + assertThat(annotationManagedBy("test-annotation", "strimzi-cluster-operator", foundManagedFields), is(true)); + }); + + async.flag(); + }); + } + + private boolean annotationManagedBy(String annotationName, String owner, List managedFieldsEntries) { + return managedFieldsEntries.stream() + .filter(managedFieldsEntry -> managedFieldsEntry.getManager().equals(owner)) + .anyMatch(managedFieldsEntry -> { + var properties = managedFieldsEntry.getFieldsV1().getAdditionalProperties(); + Map metadata = (Map) properties.get("f:metadata"); + Map annotations = (Map) metadata.get("f:annotations"); + return annotations.containsKey("f:" + annotationName); + }); + } + + @Test + public void testShouldRemoveAnnotationsFromPreviousPod(VertxTestContext context) { + vertx.createSharedWorkerExecutor("kubernetes-ops-pool", 10); + PodOperator pr = new PodOperator(vertx, client, true); + + String name = RESOURCE_NAME; + Pod startingPod = STARTING_POD.withNewMetadata() + .withNamespace(NAMESPACE) + .withName(name) + .withAnnotations(Map.of("existing-annotation", "test-value")).endMetadata().build(); + client.pods().inNamespace(NAMESPACE).resource(startingPod).create(); + + Pod createdPod 
= STARTING_POD.withNewMetadata() + .withNamespace(NAMESPACE) + .withName(name) + .withAnnotations(Map.of("created-annotation", "test-value")).endMetadata().build(); + + Pod updatedPod = STARTING_POD.withNewMetadata() + .withNamespace(NAMESPACE) + .withName(name) + .withAnnotations(Map.of("updated-annotation", "test-value")).endMetadata().build(); + + Map expectedAnnotationsAfterUpdate = new HashMap<>(); + expectedAnnotationsAfterUpdate.putAll(startingPod.getMetadata().getAnnotations()); + expectedAnnotationsAfterUpdate.putAll(createdPod.getMetadata().getAnnotations()); + + Map expectedAnnotationsAfterReconcile = new HashMap<>(); + expectedAnnotationsAfterReconcile.putAll(startingPod.getMetadata().getAnnotations()); + expectedAnnotationsAfterReconcile.putAll(updatedPod.getMetadata().getAnnotations()); + + Checkpoint async = context.checkpoint(1); + pr.createOrUpdate(Reconciliation.DUMMY_RECONCILIATION, createdPod).onComplete(createResult -> { + context.verify(() -> assertThat(createResult.succeeded(), is(true))); + context.verify(() -> assertThat(pr.list(NAMESPACE, Labels.EMPTY).stream() + .map(p -> p.getMetadata().getName()) + .collect(Collectors.toList()), hasItem(name))); + + context.verify(() -> assertThat(pr.list(NAMESPACE, Labels.EMPTY).stream() + .filter(pod -> pod.getMetadata().getName().equals(name)) + .map(p -> p.getMetadata().getAnnotations()) + .collect(Collectors.toList()), is(singletonList(expectedAnnotationsAfterUpdate)))); + + + pr.reconcile(Reconciliation.DUMMY_RECONCILIATION, NAMESPACE, name, updatedPod).onComplete(updatedResult -> { + context.verify(() -> assertThat(updatedResult.succeeded(), is(true))); + + context.verify(() -> assertThat(pr.list(NAMESPACE, Labels.EMPTY).stream() + .filter(pod -> pod.getMetadata().getName().equals(name)) + .map(p -> p.getMetadata().getAnnotations()) + .collect(Collectors.toList()), is(singletonList(expectedAnnotationsAfterReconcile)))); + + async.flag(); + }); + }); + } + + @Test + public void 
testShouldCreatePodWhenOneDoesNotExist(VertxTestContext context) { + vertx.createSharedWorkerExecutor("kubernetes-ops-pool", 10); + PodOperator pr = new PodOperator(vertx, client, true); + + String name = RESOURCE_NAME; + Pod createdPod = STARTING_POD.withNewMetadata() + .withNamespace(NAMESPACE) + .withName(name) + .withAnnotations(Map.of("created-annotation", "test-value")).endMetadata().build(); + + Map expectedAnnotations = new HashMap<>(); + expectedAnnotations.putAll(createdPod.getMetadata().getAnnotations()); + + Checkpoint async = context.checkpoint(1); + pr.createOrUpdate(Reconciliation.DUMMY_RECONCILIATION, createdPod).onComplete(createResult -> { + context.verify(() -> assertThat(createResult.succeeded(), is(true))); + context.verify(() -> assertThat(pr.list(NAMESPACE, Labels.EMPTY).stream() + .map(p -> p.getMetadata().getName()) + .collect(Collectors.toList()), hasItem(name))); + + context.verify(() -> assertThat(pr.list(NAMESPACE, Labels.EMPTY).stream() + .filter(pod -> pod.getMetadata().getName().equals(name)) + .map(p -> p.getMetadata().getAnnotations()) + .collect(Collectors.toList()), is(singletonList(expectedAnnotations)))); + async.flag(); + }); + } +} diff --git a/operator-common/src/test/java/io/strimzi/operator/common/operator/resource/PodOperatorServerSideApplyTest.java b/operator-common/src/test/java/io/strimzi/operator/common/operator/resource/PodOperatorServerSideApplyTest.java new file mode 100644 index 00000000000..2335adb75f5 --- /dev/null +++ b/operator-common/src/test/java/io/strimzi/operator/common/operator/resource/PodOperatorServerSideApplyTest.java @@ -0,0 +1,310 @@ +/* + * Copyright Strimzi authors. + * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). 
+ */ +package io.strimzi.operator.common.operator.resource; + +import io.fabric8.kubernetes.api.model.DeletionPropagation; +import io.fabric8.kubernetes.api.model.KubernetesResourceList; +import io.fabric8.kubernetes.api.model.Pod; +import io.fabric8.kubernetes.api.model.PodBuilder; +import io.fabric8.kubernetes.api.model.PodList; +import io.fabric8.kubernetes.client.GracePeriodConfigurable; +import io.fabric8.kubernetes.client.KubernetesClient; +import io.fabric8.kubernetes.client.Watch; +import io.fabric8.kubernetes.client.Watcher; +import io.fabric8.kubernetes.client.dsl.Deletable; +import io.fabric8.kubernetes.client.dsl.FilterWatchListDeletable; +import io.fabric8.kubernetes.client.dsl.MixedOperation; +import io.fabric8.kubernetes.client.dsl.NonNamespaceOperation; +import io.fabric8.kubernetes.client.dsl.PodResource; +import io.fabric8.kubernetes.client.dsl.Resource; +import io.fabric8.kubernetes.client.dsl.base.PatchContext; +import io.strimzi.operator.common.Reconciliation; +import io.strimzi.operator.common.model.Labels; +import io.vertx.core.Vertx; +import io.vertx.junit5.Checkpoint; +import io.vertx.junit5.VertxTestContext; +import org.junit.jupiter.api.Disabled; +import org.junit.jupiter.api.Test; + +import java.util.List; +import java.util.Map; +import java.util.concurrent.atomic.AtomicBoolean; + +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.ArgumentMatchers.matches; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.anyLong; +import static org.mockito.Mockito.anyString; +import static org.mockito.Mockito.atLeast; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +public class PodOperatorServerSideApplyTest extends + AbstractReadyResourceOperatorTest { + @Override + protected Class clientType() { + return KubernetesClient.class; + } + + 
@Override + protected Class resourceType() { + return Resource.class; + } + + @Override + protected Pod resource(String name) { + return new PodBuilder() + .withNewMetadata() + .withNamespace(NAMESPACE) + .withName(name) + .endMetadata() + .withNewSpec() + .withHostname("foo") + .endSpec() + .build(); + } + + @Override + protected Pod modifiedResource(String name) { + return new PodBuilder(resource(name)) + .editSpec() + .withHostname("bar") + .endSpec() + .build(); + } + + @Override + protected void mocker(KubernetesClient client, MixedOperation op) { + when(client.pods()).thenReturn(op); + } + + @Override + protected PodOperator createResourceOperations(Vertx vertx, KubernetesClient mockClient) { + return new PodOperator(vertx, mockClient, true); + } + + @Override + @Test + public void testSuccessfulCreation(VertxTestContext context) { + //this is rewritten as using ServerSideApply the get and create will no longer be called + Pod resource = resource(); + Resource mockResource = mock(resourceType()); + + when(mockResource.get()).thenReturn(null); + when(mockResource.patch(any(PatchContext.class), any(Pod.class))).thenReturn(resource); + + NonNamespaceOperation mockNameable = mock(NonNamespaceOperation.class); + when(mockNameable.withName(matches(resource.getMetadata().getName()))).thenReturn(mockResource); + when(mockNameable.resource(eq(resource))).thenReturn(mockResource); + + MixedOperation mockCms = mock(MixedOperation.class); + when(mockCms.inNamespace(matches(resource.getMetadata().getNamespace()))).thenReturn(mockNameable); + + KubernetesClient mockClient = mock(KubernetesClient.class); + mocker(mockClient, mockCms); + + AbstractNamespacedResourceOperator op = createResourceOperationsWithMockedReadiness(vertx, mockClient); + + Checkpoint async = context.checkpoint(); + op.createOrUpdate(Reconciliation.DUMMY_RECONCILIATION, resource).onComplete(context.succeeding(rr -> context.verify(() -> { + verify(mockResource).patch(any(PatchContext.class), 
any(Pod.class)); + async.flag(); + }))); + } + + @Override + @Test + public void testCreateWhenExistsWithChangeIsAPatch(VertxTestContext context) { + //this is rewritten as using ServerSideApply the get will no longer be called + Pod resource = resource(); + Resource mockResource = mock(resourceType()); + when(mockResource.get()).thenReturn(resource); + + when(mockResource.patch(any(), (Pod) any())).thenReturn(resource); + + NonNamespaceOperation mockNameable = mock(NonNamespaceOperation.class); + when(mockNameable.withName(matches(resource.getMetadata().getName()))).thenReturn(mockResource); + + MixedOperation mockCms = mock(MixedOperation.class); + when(mockCms.inNamespace(matches(resource.getMetadata().getNamespace()))).thenReturn(mockNameable); + + KubernetesClient mockClient = mock(KubernetesClient.class); + mocker(mockClient, mockCms); + + AbstractNamespacedResourceOperator op = createResourceOperations(vertx, mockClient); + + Checkpoint async = context.checkpoint(); + op.createOrUpdate(Reconciliation.DUMMY_RECONCILIATION, modifiedResource()).onComplete(context.succeeding(rr -> context.verify(() -> { + verify(mockResource).patch(any(PatchContext.class), any(Pod.class)); + verify(mockResource, never()).create(); + async.flag(); + }))); + } + + @Override + @Disabled + public void testCreateOrUpdateThrowsWhenCreateThrows(VertxTestContext context) { + //not valid as we no longer perform the create (this is done as a patch) + } + + @Override + @Disabled + public void testExistenceCheckThrows(VertxTestContext context) { + //not valid as we no longer perform the get (the get is only done prior to delete) + } + + @Test + public void testDeleteWhenResourceDoesNotExistIsANop(VertxTestContext context) { + //this is rewritten as using ServerSideApply the delete will now be called whenever desired is null + Deletable mockDeletable = mock(Deletable.class); + when(mockDeletable.delete()).thenReturn(List.of()); + GracePeriodConfigurable mockDeletableGrace = 
mock(GracePeriodConfigurable.class); + when(mockDeletableGrace.delete()).thenReturn(List.of()); + + Pod resource = resource(); + Resource mockResource = mock(resourceType()); + when(mockResource.withPropagationPolicy(eq(DeletionPropagation.FOREGROUND))).thenReturn(mockDeletableGrace); + when(mockDeletableGrace.withGracePeriod(anyLong())).thenReturn(mockDeletable); + AtomicBoolean watchClosed = new AtomicBoolean(false); + when(mockResource.watch(any())).thenAnswer(invocation -> { + Watcher watcher = invocation.getArgument(0); + watcher.eventReceived(Watcher.Action.DELETED, resource); + return (Watch) () -> { + watchClosed.set(true); + }; + }); + + NonNamespaceOperation mockNameable = mock(NonNamespaceOperation.class); + when(mockNameable.withName(matches(RESOURCE_NAME))).thenReturn(mockResource); + + MixedOperation mockCms = mock(MixedOperation.class); + when(mockCms.inNamespace(matches(NAMESPACE))).thenReturn(mockNameable); + + KubernetesClient mockClient = mock(KubernetesClient.class); + mocker(mockClient, mockCms); + + AbstractNamespacedResourceOperator op = createResourceOperations(vertx, mockClient); + + Checkpoint async = context.checkpoint(); + op.reconcile(Reconciliation.DUMMY_RECONCILIATION, resource.getMetadata().getNamespace(), resource.getMetadata().getName(), null) + .onComplete(context.succeeding(rr -> context.verify(() -> { + verify(mockDeletable).delete(); + async.flag(); + }))); + } + + @Test + public void testCreateWhenExistsWithoutChangeIsNotAPatch(VertxTestContext context) { + //this is rewritten as using ServerSideApply the patch will now be called whenever desired is not null + Pod resource = resource(); + Resource mockResource = mock(resourceType()); + when(mockResource.withPropagationPolicy(DeletionPropagation.FOREGROUND)).thenReturn(mockResource); + when(mockResource.patch(any(PatchContext.class), any(Pod.class))).thenReturn(resource); + + NonNamespaceOperation mockNameable = mock(NonNamespaceOperation.class); + 
when(mockNameable.withName(matches(resource.getMetadata().getName()))).thenReturn(mockResource); + + MixedOperation mockCms = mock(MixedOperation.class); + when(mockCms.inNamespace(matches(resource.getMetadata().getNamespace()))).thenReturn(mockNameable); + + KubernetesClient mockClient = mock(KubernetesClient.class); + mocker(mockClient, mockCms); + + AbstractNamespacedResourceOperator op = createResourceOperations(vertx, mockClient); + + Checkpoint async = context.checkpoint(); + op.createOrUpdate(Reconciliation.DUMMY_RECONCILIATION, resource()).onComplete(context.succeeding(rr -> context.verify(() -> { + verify(mockResource, never()).get(); + verify(mockResource).patch(any(PatchContext.class), any(Pod.class)); + verify(mockResource, never()).create(); + async.flag(); + }))); + } + + @Test + public void testBatchReconciliation(VertxTestContext context) { + Map selector = Map.of("labelA", "a", "labelB", "b"); + + Pod resource1 = resource("resource-1"); + Pod resource2 = resource("resource-2"); + Pod resource2Mod = modifiedResource("resource-2"); + Pod resource3 = resource("resource-3"); + + // For resource1 we need to mock the async deletion process as well + Deletable mockDeletable1 = mock(Deletable.class); + when(mockDeletable1.delete()).thenReturn(List.of()); + GracePeriodConfigurable mockDeletableGrace1 = mock(GracePeriodConfigurable.class); + when(mockDeletableGrace1.withGracePeriod(anyLong())).thenReturn(mockDeletable1); + Resource mockResource1 = mock(resourceType()); + AtomicBoolean watchClosed = new AtomicBoolean(false); + AtomicBoolean watchCreated = new AtomicBoolean(false); + when(mockResource1.get()).thenAnswer(invocation -> { + // First get needs to return the resource to trigger deletion + // Next gets return null since the resource was already deleted + if (watchCreated.get()) { + return null; + } else { + return resource1; + } + }); + when(mockResource1.withPropagationPolicy(DeletionPropagation.FOREGROUND)).thenReturn(mockDeletableGrace1); + 
when(mockResource1.watch(any())).thenAnswer(invocation -> { + watchCreated.set(true); + return (Watch) () -> { + watchClosed.set(true); + }; + }); + + Resource mockResource2 = mock(resourceType()); + when(mockResource2.get()).thenReturn(resource2); + when(mockResource2.patch(any(PatchContext.class), eq(resource2Mod))).thenReturn(resource2Mod); + + Resource mockResource3 = mock(resourceType()); + when(mockResource3.get()).thenReturn(null); + when(mockResource3.patch(any(PatchContext.class), eq(resource3))).thenReturn(resource3); + + KubernetesResourceList mockResourceList = mock(KubernetesResourceList.class); + when(mockResourceList.getItems()).thenReturn(List.of(resource1, resource2)); + + FilterWatchListDeletable mockListable = mock(FilterWatchListDeletable.class); + when(mockListable.list(any())).thenReturn(mockResourceList); + + NonNamespaceOperation mockNameable = mock(NonNamespaceOperation.class); + when(mockNameable.withLabels(eq(selector))).thenReturn(mockListable); + when(mockNameable.withName(eq("resource-1"))).thenReturn(mockResource1); + when(mockNameable.withName(eq("resource-2"))).thenReturn(mockResource2); + when(mockNameable.withName(eq("resource-3"))).thenReturn(mockResource3); + when(mockNameable.resource(eq(resource3))).thenReturn(mockResource3); + + MixedOperation mockCms = mock(MixedOperation.class); + when(mockCms.inNamespace(anyString())).thenReturn(mockNameable); + + KubernetesClient mockClient = mock(KubernetesClient.class); + mocker(mockClient, mockCms); + + AbstractNamespacedResourceOperator op = createResourceOperations(vertx, mockClient); + + Checkpoint async = context.checkpoint(); + op.batchReconcile(Reconciliation.DUMMY_RECONCILIATION, NAMESPACE, List.of(resource2Mod, resource3), Labels.fromMap(selector)).onComplete(context.succeeding(i -> context.verify(() -> { + verify(mockResource1, atLeast(1)).get(); + verify(mockResource1, never()).patch(any(), any()); + verify(mockResource1, never()).create(); + verify(mockDeletable1, 
times(1)).delete(); + + verify(mockResource2, times(1)).patch(any(), eq(resource2Mod)); + verify(mockResource2, never()).create(); + verify(mockResource2, never()).delete(); + + verify(mockResource3, never()).patch(any(), any()); + verify(mockResource3, times(1)).patch(any(), eq(resource3)); + verify(mockResource3, never()).delete(); + + async.flag(); + }))); + } +} diff --git a/operator-common/src/test/java/io/strimzi/operator/common/operator/resource/StrimziPodSetCrdOperatorServerSideApplyIT.java b/operator-common/src/test/java/io/strimzi/operator/common/operator/resource/StrimziPodSetCrdOperatorServerSideApplyIT.java new file mode 100644 index 00000000000..15223b5dab9 --- /dev/null +++ b/operator-common/src/test/java/io/strimzi/operator/common/operator/resource/StrimziPodSetCrdOperatorServerSideApplyIT.java @@ -0,0 +1,137 @@ +/* + * Copyright Strimzi authors. + * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). + */ +package io.strimzi.operator.common.operator.resource; + +import com.fasterxml.jackson.databind.ObjectMapper; +import io.fabric8.kubernetes.api.model.ManagedFieldsEntry; +import io.strimzi.api.kafka.model.podset.StrimziPodSet; +import io.strimzi.api.kafka.model.podset.StrimziPodSetBuilder; +import io.strimzi.operator.common.Reconciliation; +import io.vertx.core.Promise; +import io.vertx.junit5.Checkpoint; +import io.vertx.junit5.VertxExtension; +import io.vertx.junit5.VertxTestContext; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; + +import java.util.List; +import java.util.Map; + +import static org.hamcrest.CoreMatchers.is; +import static org.hamcrest.MatcherAssert.assertThat; + +/** + * The main purpose of the Integration Tests for the operators is to test them against a real Kubernetes cluster. 
+ * Real Kubernetes cluster has often some quirks such as some fields being immutable, some fields in the spec section + * being created by the Kubernetes API etc. These things are hard to test with mocks. These IT tests make it easy to + * test them against real clusters. + */ +@ExtendWith(VertxExtension.class) +public class StrimziPodSetCrdOperatorServerSideApplyIT extends StrimziPodSetCrdOperatorIT { + protected static final Logger LOGGER = LogManager.getLogger(StrimziPodSetCrdOperatorServerSideApplyIT.class); + + private final ObjectMapper mapper = new ObjectMapper(); + + @Override + protected StrimziPodSetOperator operator() { + return new StrimziPodSetOperator(vertx, client, true); + } + + @Test + public void testStrimziPodSetAnnotationsAreUpdated(VertxTestContext context) { + String resourceName = getResourceName(RESOURCE_NAME); + Checkpoint async = context.checkpoint(); + String namespace = getNamespace(); + + StrimziPodSetOperator op = operator(); + + Promise updateAnnotations = Promise.promise(); + + op.reconcile(Reconciliation.DUMMY_RECONCILIATION, namespace, resourceName, getResource(resourceName)) + .onComplete(context.succeeding(i -> { + })) + .compose(rrCreated -> { + //we don't use the cluster resource as it already contains the managedFields property and we can't send that in a patch + StrimziPodSet updated = getResourceWithModifiedAnnotations(getResource(resourceName)); + return op.reconcile(Reconciliation.DUMMY_RECONCILIATION, namespace, resourceName, updated); + }) + .compose(i -> op.getAsync(namespace, resourceName)) // We need to get it again + .onComplete(context.succeeding(result -> context.verify(() -> { + updateAnnotations.complete(); + assertThat(result.getMetadata().getAnnotations().containsKey("new-test-annotation"), is(true)); + }))); + + updateAnnotations.future() + .compose(v -> { + return op.reconcile(Reconciliation.DUMMY_RECONCILIATION, namespace, resourceName, null); + }) + .onComplete(context.succeeding(v -> async.flag())); + } + + 
@Test + public void testStrimziPodSetManagedFieldsAreOwnedByStrimziCorrectly(VertxTestContext context) { + String resourceName = getResourceName(RESOURCE_NAME); + Checkpoint async = context.checkpoint(); + String namespace = getNamespace(); + + StrimziPodSetOperator op = operator(); + + Promise updateAnnotations = Promise.promise(); + + client.resource(getResourceWithStartingAnnotations(getResource(resourceName))).create(); + + op.reconcile(Reconciliation.DUMMY_RECONCILIATION, namespace, resourceName, getResourceWithModifiedAnnotations(getResource(resourceName))) + .onComplete(context.succeeding(result -> context.verify(() -> { + updateAnnotations.complete(); + assertThat(result.resource().getMetadata().getAnnotations().containsKey("test-annotation"), is(true)); + assertThat(result.resource().getMetadata().getAnnotations().containsKey("new-test-annotation"), is(true)); + assertThat(annotationManagedBy("test-annotation", "strimzi-cluster-operator", result.resource().getMetadata().getManagedFields()), is(false)); + assertThat(annotationManagedBy("new-test-annotation", "strimzi-cluster-operator", result.resource().getMetadata().getManagedFields()), is(true)); + }))); + + updateAnnotations.future() + .compose(v -> { + return op.reconcile(Reconciliation.DUMMY_RECONCILIATION, namespace, resourceName, null); + }) + .onComplete(context.succeeding(v -> async.flag())); + } + + private StrimziPodSet getResourceWithStartingAnnotations(StrimziPodSet previousResource) { + return new StrimziPodSetBuilder(previousResource) + .withNewMetadata() + .withName(previousResource.getMetadata().getName()) + .withNamespace(previousResource.getMetadata().getNamespace()) + .withAnnotations(Map.of("test-annotation", "test-value")) + .endMetadata() + .build(); + } + + private StrimziPodSet getResourceWithModifiedAnnotations(StrimziPodSet previousResource) { + return new StrimziPodSetBuilder(previousResource) + .withNewMetadata() + .withName(previousResource.getMetadata().getName()) + 
.withNamespace(previousResource.getMetadata().getNamespace()) + .withAnnotations(Map.of("new-test-annotation", "new-test-value")) + .endMetadata() + .build(); + } + + private boolean annotationManagedBy(String annotationName, String owner, List managedFieldsEntries) { + return managedFieldsEntries.stream() + .filter(managedFieldsEntry -> managedFieldsEntry.getManager().equals(owner)) + .anyMatch(managedFieldsEntry -> { + var properties = managedFieldsEntry.getFieldsV1().getAdditionalProperties(); + Map metadata = (Map) properties.get("f:metadata"); + if (metadata != null) { + Map annotations = (Map) metadata.get("f:annotations"); + return annotations.containsKey("f:" + annotationName); + } else { + return false; + } + }); + } +} \ No newline at end of file diff --git a/operator-common/src/test/java/io/strimzi/operator/common/operator/resource/concurrent/KafkaUserCrdOperatorIT.java b/operator-common/src/test/java/io/strimzi/operator/common/operator/resource/concurrent/KafkaUserCrdOperatorIT.java index f7a9e332a94..478615bb79f 100644 --- a/operator-common/src/test/java/io/strimzi/operator/common/operator/resource/concurrent/KafkaUserCrdOperatorIT.java +++ b/operator-common/src/test/java/io/strimzi/operator/common/operator/resource/concurrent/KafkaUserCrdOperatorIT.java @@ -28,7 +28,7 @@ public class KafkaUserCrdOperatorIT extends AbstractCustomResourceOperatorIT operator() { - return new CrdOperator<>(ForkJoinPool.commonPool(), client, KafkaUser.class, KafkaUserList.class, KafkaUser.RESOURCE_KIND); + return new CrdOperator<>(ForkJoinPool.commonPool(), client, KafkaUser.class, KafkaUserList.class, KafkaUser.RESOURCE_KIND, false); } @Override diff --git a/operator-common/src/test/java/io/strimzi/operator/common/operator/resource/concurrent/SecretOperatorIT.java b/operator-common/src/test/java/io/strimzi/operator/common/operator/resource/concurrent/SecretOperatorIT.java index 376d7d25a73..6d6848e5183 100644 --- 
a/operator-common/src/test/java/io/strimzi/operator/common/operator/resource/concurrent/SecretOperatorIT.java +++ b/operator-common/src/test/java/io/strimzi/operator/common/operator/resource/concurrent/SecretOperatorIT.java @@ -48,6 +48,6 @@ void assertResources(Secret expected, Secret actual) { @Override AbstractNamespacedResourceOperator> operator() { - return new SecretOperator(asyncExecutor, client); + return new SecretOperator(asyncExecutor, client, false); } } diff --git a/operator-common/src/test/java/io/strimzi/operator/common/operator/resource/concurrent/SecretOperatorTest.java b/operator-common/src/test/java/io/strimzi/operator/common/operator/resource/concurrent/SecretOperatorTest.java index f6616afb3d4..34b7b1b2d02 100644 --- a/operator-common/src/test/java/io/strimzi/operator/common/operator/resource/concurrent/SecretOperatorTest.java +++ b/operator-common/src/test/java/io/strimzi/operator/common/operator/resource/concurrent/SecretOperatorTest.java @@ -52,6 +52,6 @@ protected void mocker(KubernetesClient mockClient, MixedOperation> createResourceOperations(KubernetesClient mockClient) { - return new SecretOperator(asyncExecutor, mockClient); + return new SecretOperator(asyncExecutor, mockClient, false); } } diff --git a/systemtest/src/main/java/io/strimzi/systemtest/utils/kubeUtils/objects/SecretUtils.java b/systemtest/src/main/java/io/strimzi/systemtest/utils/kubeUtils/objects/SecretUtils.java index 0c0ca04e485..45ea4ca9e18 100644 --- a/systemtest/src/main/java/io/strimzi/systemtest/utils/kubeUtils/objects/SecretUtils.java +++ b/systemtest/src/main/java/io/strimzi/systemtest/utils/kubeUtils/objects/SecretUtils.java @@ -28,10 +28,12 @@ import java.security.cert.CertificateException; import java.security.cert.CertificateFactory; import java.security.cert.X509Certificate; +import java.util.Arrays; import java.util.Base64; import java.util.Collections; import java.util.HashMap; import java.util.Map; +import java.util.Objects; import static 
io.strimzi.test.k8s.KubeClusterResource.kubeClient; @@ -209,9 +211,14 @@ public static void waitForUserPasswordChange(String namespaceName, String secret } public static String annotateSecret(String namespaceName, String secretName, String annotationKey, String annotationValue) { + return annotateSecret(namespaceName, secretName, annotationKey, annotationValue, false); + } + + public static String annotateSecret(String namespaceName, String secretName, String annotationKey, String annotationValue, boolean force) { LOGGER.info("Annotating Secret: {}/{} with annotation {}={}", namespaceName, secretName, annotationKey, annotationValue); + String[] cmd = {"annotate", force ? "--overwrite" : null, "secret", secretName, annotationKey + "=" + annotationValue}; return ResourceManager.cmdKubeClient().namespace(namespaceName) - .execInCurrentNamespace("annotate", "secret", secretName, annotationKey + "=" + annotationValue) + .execInCurrentNamespace(Arrays.stream(cmd).filter(Objects::nonNull).toArray(String[]::new)) .out() .trim(); } diff --git a/systemtest/src/test/java/io/strimzi/systemtest/mirrormaker/MirrorMaker2ST.java b/systemtest/src/test/java/io/strimzi/systemtest/mirrormaker/MirrorMaker2ST.java index 1b110645151..f3c989d28ec 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/mirrormaker/MirrorMaker2ST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/mirrormaker/MirrorMaker2ST.java @@ -1299,13 +1299,13 @@ void testKMM2RollAfterSecretsCertsUpdateTLS() { LOGGER.info("Renew Clients CA secret for Source cluster via annotation"); String sourceClientsCaSecretName = KafkaResources.clientsCaCertificateSecretName(testStorage.getSourceClusterName()); - SecretUtils.annotateSecret(testStorage.getNamespaceName(), sourceClientsCaSecretName, Annotations.ANNO_STRIMZI_IO_FORCE_RENEW, "true"); + SecretUtils.annotateSecret(testStorage.getNamespaceName(), sourceClientsCaSecretName, Annotations.ANNO_STRIMZI_IO_FORCE_RENEW, "true", true); brokerSourcePods = 
RollingUpdateUtils.waitTillComponentHasRolledAndPodsReady(testStorage.getNamespaceName(), brokerSourceSelector, 1, brokerSourcePods); mmSnapshot = RollingUpdateUtils.waitTillComponentHasRolledAndPodsReady(testStorage.getNamespaceName(), testStorage.getMM2Selector(), 1, mmSnapshot); LOGGER.info("Renew Clients CA secret for target cluster via annotation"); String targetClientsCaSecretName = KafkaResources.clientsCaCertificateSecretName(testStorage.getTargetClusterName()); - SecretUtils.annotateSecret(testStorage.getNamespaceName(), targetClientsCaSecretName, Annotations.ANNO_STRIMZI_IO_FORCE_RENEW, "true"); + SecretUtils.annotateSecret(testStorage.getNamespaceName(), targetClientsCaSecretName, Annotations.ANNO_STRIMZI_IO_FORCE_RENEW, "true", true); brokerTargetPods = RollingUpdateUtils.waitTillComponentHasRolledAndPodsReady(testStorage.getNamespaceName(), controlTargetSelector, 1, brokerTargetPods); mmSnapshot = RollingUpdateUtils.waitTillComponentHasRolledAndPodsReady(testStorage.getNamespaceName(), testStorage.getMM2Selector(), 1, mmSnapshot); @@ -1334,7 +1334,7 @@ void testKMM2RollAfterSecretsCertsUpdateTLS() { LOGGER.info("Renew Cluster CA secret for Source clusters via annotation"); String sourceClusterCaSecretName = KafkaResources.clusterCaCertificateSecretName(testStorage.getSourceClusterName()); - SecretUtils.annotateSecret(testStorage.getNamespaceName(), sourceClusterCaSecretName, Annotations.ANNO_STRIMZI_IO_FORCE_RENEW, "true"); + SecretUtils.annotateSecret(testStorage.getNamespaceName(), sourceClusterCaSecretName, Annotations.ANNO_STRIMZI_IO_FORCE_RENEW, "true", true); if (!Environment.isKRaftModeEnabled()) { RollingUpdateUtils.waitTillComponentHasRolledAndPodsReady(testStorage.getNamespaceName(), controlSourceSelector, 1, controlSourcePods); @@ -1345,7 +1345,7 @@ void testKMM2RollAfterSecretsCertsUpdateTLS() { LOGGER.info("Renew Cluster CA secret for target clusters via annotation"); String targetClusterCaSecretName = 
KafkaResources.clusterCaCertificateSecretName(testStorage.getTargetClusterName()); - SecretUtils.annotateSecret(testStorage.getNamespaceName(), targetClusterCaSecretName, Annotations.ANNO_STRIMZI_IO_FORCE_RENEW, "true"); + SecretUtils.annotateSecret(testStorage.getNamespaceName(), targetClusterCaSecretName, Annotations.ANNO_STRIMZI_IO_FORCE_RENEW, "true", true); if (!Environment.isKRaftModeEnabled()) { RollingUpdateUtils.waitTillComponentHasRolledAndPodsReady(testStorage.getNamespaceName(), controlTargetSelector, 1, controlTargetPods); diff --git a/systemtest/src/test/java/io/strimzi/systemtest/rollingupdate/AlternativeReconcileTriggersST.java b/systemtest/src/test/java/io/strimzi/systemtest/rollingupdate/AlternativeReconcileTriggersST.java index a1059c43b2b..40bb7ea2461 100644 --- a/systemtest/src/test/java/io/strimzi/systemtest/rollingupdate/AlternativeReconcileTriggersST.java +++ b/systemtest/src/test/java/io/strimzi/systemtest/rollingupdate/AlternativeReconcileTriggersST.java @@ -52,6 +52,7 @@ import java.util.Base64; import java.util.Collections; import java.util.Map; +import java.util.function.Function; import java.util.stream.Collectors; import static io.strimzi.systemtest.TestConstants.INTERNAL_CLIENTS_USED; @@ -148,10 +149,16 @@ void testManualTriggeringRollingUpdate() { RollingUpdateUtils.waitTillComponentHasRolled(testStorage.getNamespaceName(), testStorage.getBrokerSelector(), 3, brokerPods); - // wait when annotation will be removed from kafka + // checks that annotation is absent or "false" + Function annotationAbsentOrFalse = (String resource) -> { + var annotationValue = StrimziPodSetUtils + .getAnnotationsOfStrimziPodSet(testStorage.getNamespaceName(), testStorage.getBrokerComponentName()) + .getOrDefault(Annotations.ANNO_STRIMZI_IO_MANUAL_ROLLING_UPDATE, "true"); + return "false".equalsIgnoreCase(annotationValue); + }; + TestUtils.waitFor("CO removes rolling update annotation", TestConstants.WAIT_FOR_ROLLING_UPDATE_INTERVAL, 
TestConstants.GLOBAL_TIMEOUT, - () -> StrimziPodSetUtils.getAnnotationsOfStrimziPodSet(testStorage.getNamespaceName(), testStorage.getBrokerComponentName()) == null - || !StrimziPodSetUtils.getAnnotationsOfStrimziPodSet(testStorage.getNamespaceName(), testStorage.getBrokerComponentName()).containsKey(Annotations.ANNO_STRIMZI_IO_MANUAL_ROLLING_UPDATE)); + () -> annotationAbsentOrFalse.apply(testStorage.getNamespaceName())); resourceManager.createResourceWithWait(clients.consumerTlsStrimzi(testStorage.getClusterName())); ClientUtils.waitForConsumerClientSuccess(testStorage); @@ -169,10 +176,9 @@ void testManualTriggeringRollingUpdate() { RollingUpdateUtils.waitTillComponentHasRolled(testStorage.getNamespaceName(), testStorage.getControllerSelector(), 3, controllerPods); - // wait when annotation will be removed + // wait when annotation will be removed / updated to false TestUtils.waitFor("CO removes rolling update annotation", TestConstants.WAIT_FOR_ROLLING_UPDATE_INTERVAL, TestConstants.GLOBAL_TIMEOUT, - () -> StrimziPodSetUtils.getAnnotationsOfStrimziPodSet(testStorage.getNamespaceName(), testStorage.getControllerComponentName()) == null - || !StrimziPodSetUtils.getAnnotationsOfStrimziPodSet(testStorage.getNamespaceName(), testStorage.getControllerComponentName()).containsKey(Annotations.ANNO_STRIMZI_IO_MANUAL_ROLLING_UPDATE)); + () -> annotationAbsentOrFalse.apply(testStorage.getNamespaceName())); } clients = new KafkaClientsBuilder(clients) diff --git a/user-operator/src/main/java/io/strimzi/operator/user/FeatureGates.java b/user-operator/src/main/java/io/strimzi/operator/user/FeatureGates.java new file mode 100644 index 00000000000..c7aa6f97eaa --- /dev/null +++ b/user-operator/src/main/java/io/strimzi/operator/user/FeatureGates.java @@ -0,0 +1,47 @@ +/* + * Copyright Strimzi authors. + * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). 
+ */ +package io.strimzi.operator.user; + +import io.strimzi.operator.common.config.FeatureGate; +import io.strimzi.operator.common.config.FeatureGatesParser; + +import java.util.Map; +import java.util.stream.Collectors; + +/** + * Class for handling the configuration of feature gates + */ +public class FeatureGates { + private static final String USE_SERVER_SIDE_APPLY = "UseServerSideApply"; + + private final Map featureGates = Map.ofEntries( + Map.entry(USE_SERVER_SIDE_APPLY, new FeatureGate(USE_SERVER_SIDE_APPLY, false)) + ); + + /** + * Constructs the feature gates configuration. + * + * @param featureGateConfig String with comma separated list of enabled or disabled feature gates + */ + public FeatureGates(String featureGateConfig) { + new FeatureGatesParser(featureGateConfig).applyFor(featureGates); + } + + /** + * @return Returns true when the UseServerSideApply feature gate is enabled + */ + public boolean useServerSideApply() { + return featureGates.get(USE_SERVER_SIDE_APPLY).isEnabled(); + } + + @Override + public String toString() { + String featureGatesValues = featureGates.entrySet() + .stream() + .map(featureGate -> featureGate.getKey() + "=" + featureGate.getValue().isEnabled()) + .collect(Collectors.joining(",")); + return "FeatureGates(%s)".formatted(featureGatesValues); + } +} diff --git a/user-operator/src/main/java/io/strimzi/operator/user/Main.java b/user-operator/src/main/java/io/strimzi/operator/user/Main.java index 32607908cd7..9571e23d646 100644 --- a/user-operator/src/main/java/io/strimzi/operator/user/Main.java +++ b/user-operator/src/main/java/io/strimzi/operator/user/Main.java @@ -71,14 +71,15 @@ public static void main(String[] args) { // Create and log UserOperatorConfig UserOperatorConfig config = UserOperatorConfig.buildFromMap(System.getenv()); + FeatureGates featureGates = config.featureGates(); LOGGER.info("UserOperator configuration is {}", config); // Create KubernetesClient, AdminClient and KafkaUserOperator classes 
ExecutorService kafkaUserOperatorExecutor = Executors.newFixedThreadPool(config.getUserOperationsThreadPoolSize(), new OperatorWorkThreadFactory()); KubernetesClient client = new OperatorKubernetesClientBuilder("strimzi-user-operator", Main.class.getPackage().getImplementationVersion()).build(); - SecretOperator secretOperator = new SecretOperator(kafkaUserOperatorExecutor, client); + SecretOperator secretOperator = new SecretOperator(kafkaUserOperatorExecutor, client, featureGates.useServerSideApply()); Admin adminClient = createAdminClient(config, secretOperator, new DefaultAdminClientProvider()); - var kafkaUserCrdOperator = new CrdOperator<>(kafkaUserOperatorExecutor, client, KafkaUser.class, KafkaUserList.class, "KafkaUser"); + var kafkaUserCrdOperator = new CrdOperator<>(kafkaUserOperatorExecutor, client, KafkaUser.class, KafkaUserList.class, "KafkaUser", featureGates.useServerSideApply()); KafkaUserOperator kafkaUserOperator = new KafkaUserOperator( config, diff --git a/user-operator/src/main/java/io/strimzi/operator/user/UserOperatorConfig.java b/user-operator/src/main/java/io/strimzi/operator/user/UserOperatorConfig.java index 3cd5f05b566..d59f46fa036 100644 --- a/user-operator/src/main/java/io/strimzi/operator/user/UserOperatorConfig.java +++ b/user-operator/src/main/java/io/strimzi/operator/user/UserOperatorConfig.java @@ -6,6 +6,7 @@ import io.strimzi.operator.common.model.Labels; import io.strimzi.operator.common.operator.resource.ConfigParameter; +import io.strimzi.operator.common.operator.resource.ConfigParameterParser; import java.util.Collections; import java.util.HashMap; @@ -128,6 +129,11 @@ public class UserOperatorConfig { */ public static final ConfigParameter> MAINTENANCE_TIME_WINDOWS = new ConfigParameter<>("STRIMZI_MAINTENANCE_TIME_WINDOWS", SEMICOLON_SEPARATED_LIST, "", CONFIG_VALUES); + /** + * Configuration string with feature gates settings + */ + public static final ConfigParameter FEATURE_GATES = new 
ConfigParameter<>("STRIMZI_FEATURE_GATES", parseFeatureGates(), "", CONFIG_VALUES); + private final Map map; /** @@ -154,6 +160,10 @@ public static UserOperatorConfig buildFromMap(Map map) { return new UserOperatorConfig(generatedMap); } + static ConfigParameterParser parseFeatureGates() { + return FeatureGates::new; + } + /** * @return Set of configuration key/names */ @@ -366,6 +376,12 @@ public int getClientsCaRenewalDays() { return get(CERTS_RENEWAL_DAYS); } + /** + * @return Feature gates configuration + */ + public FeatureGates featureGates() { + return get(FEATURE_GATES); + } @Override public String toString() { diff --git a/user-operator/src/test/java/io/strimzi/operator/user/UserControllerMockTest.java b/user-operator/src/test/java/io/strimzi/operator/user/UserControllerMockTest.java index 6cf29b3b924..b480ffbc9a2 100644 --- a/user-operator/src/test/java/io/strimzi/operator/user/UserControllerMockTest.java +++ b/user-operator/src/test/java/io/strimzi/operator/user/UserControllerMockTest.java @@ -34,7 +34,6 @@ import java.util.concurrent.CountDownLatch; import java.util.concurrent.ForkJoinPool; import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicBoolean; import static org.hamcrest.CoreMatchers.is; import static org.hamcrest.CoreMatchers.notNullValue; @@ -81,8 +80,8 @@ public void beforeEach(TestInfo testInfo) { namespace = testInfo.getTestMethod().orElseThrow().getName().toLowerCase(Locale.ROOT); mockKube.prepareNamespace(namespace); - secretOperator = new SecretOperator(ForkJoinPool.commonPool(), client); - kafkaUserOps = new CrdOperator<>(ForkJoinPool.commonPool(), client, KafkaUser.class, KafkaUserList.class, "KafkaUser"); + secretOperator = new SecretOperator(ForkJoinPool.commonPool(), client, false); + kafkaUserOps = new CrdOperator<>(ForkJoinPool.commonPool(), client, KafkaUser.class, KafkaUserList.class, "KafkaUser", false); mockKafkaUserOperator = mock(KafkaUserOperator.class); } @@ -458,12 +457,12 @@ private void 
testReconciliationWithClientErrorStatusUpdate(int errorCode, String return CompletableFuture.completedFuture(status); }); - AtomicBoolean statusUpdateInvoked = new AtomicBoolean(false); + var statusUpdateInvokedLatch = new CountDownLatch(1); var spiedKafkaUserOps = spy(kafkaUserOps); doAnswer(i -> { KubernetesClientException error = new KubernetesClientException(errorDescription + " (expected)", errorCode, null); - statusUpdateInvoked.set(true); + statusUpdateInvokedLatch.countDown(); return CompletableFuture.failedStage(error); }).when(spiedKafkaUserOps).updateStatusAsync(any(), any()); @@ -481,7 +480,13 @@ private void testReconciliationWithClientErrorStatusUpdate(int errorCode, String // Test try { kafkaUserOps.resource(namespace, ResourceUtils.createKafkaUserTls(namespace)).create(); - kafkaUserOps.resource(namespace, NAME).waitUntilCondition(i -> statusUpdateInvoked.get(), 10_000, TimeUnit.MILLISECONDS); + kafkaUserOps.resource(namespace, NAME).waitUntilCondition(i -> { + try { + return statusUpdateInvokedLatch.await(10, TimeUnit.SECONDS); + } catch (InterruptedException e) { + return false; + } + }, 10_000, TimeUnit.MILLISECONDS); KafkaUser user = kafkaUserOps.get(namespace, NAME); @@ -513,10 +518,10 @@ void testReconciliationWithRuntimeErrorStatusUpdate() { }); var spiedKafkaUserOps = spy(kafkaUserOps); - AtomicBoolean statusUpdateInvoked = new AtomicBoolean(false); + var statusUpdateInvokedLatch = new CountDownLatch(1); doAnswer(i -> { - statusUpdateInvoked.set(true); + statusUpdateInvokedLatch.countDown(); return CompletableFuture.failedStage(new RuntimeException("Test exception (expected)")); }).when(spiedKafkaUserOps).updateStatusAsync(any(), any()); @@ -534,7 +539,13 @@ void testReconciliationWithRuntimeErrorStatusUpdate() { // Test try { kafkaUserOps.resource(namespace, ResourceUtils.createKafkaUserTls(namespace)).create(); - kafkaUserOps.resource(namespace, NAME).waitUntilCondition(i -> statusUpdateInvoked.get(), 10_000, TimeUnit.MILLISECONDS); + 
kafkaUserOps.resource(namespace, NAME).waitUntilCondition(i -> { + try { + return statusUpdateInvokedLatch.await(10, TimeUnit.SECONDS); + } catch (InterruptedException e) { + return false; + } + }, 10_000, TimeUnit.MILLISECONDS); KafkaUser user = kafkaUserOps.get(namespace, NAME); diff --git a/user-operator/src/test/java/io/strimzi/operator/user/UserOperatorConfigTest.java b/user-operator/src/test/java/io/strimzi/operator/user/UserOperatorConfigTest.java index 9a558b3717f..862bbf66959 100644 --- a/user-operator/src/test/java/io/strimzi/operator/user/UserOperatorConfigTest.java +++ b/user-operator/src/test/java/io/strimzi/operator/user/UserOperatorConfigTest.java @@ -32,6 +32,7 @@ public class UserOperatorConfigTest { ENV_VARS.put(UserOperatorConfig.CERTS_RENEWAL_DAYS.key(), "10"); ENV_VARS.put(UserOperatorConfig.ACLS_ADMIN_API_SUPPORTED.key(), "false"); ENV_VARS.put(UserOperatorConfig.SCRAM_SHA_PASSWORD_LENGTH.key(), "20"); + ENV_VARS.put(UserOperatorConfig.FEATURE_GATES.key(), "+UseServerSideApply"); Map labels = new HashMap<>(2); @@ -65,6 +66,14 @@ public void testFromMap() { assertThat(config.getUserOperationsThreadPoolSize(), is(4)); } + @Test + public void shouldParseCorrectlyFeatureGates() { + UserOperatorConfig config = UserOperatorConfig.buildFromMap(ENV_VARS); + FeatureGates featureGates = config.featureGates(); + + assertThat(featureGates.useServerSideApply(), is(true)); + } + @Test public void testFromMapNamespaceEnvVarMissingThrows() { Map envVars = new HashMap<>(UserOperatorConfigTest.ENV_VARS); diff --git a/user-operator/src/test/java/io/strimzi/operator/user/operator/KafkaUserOperatorMockTest.java b/user-operator/src/test/java/io/strimzi/operator/user/operator/KafkaUserOperatorMockTest.java index 3cf3da389bf..f453eed5627 100644 --- a/user-operator/src/test/java/io/strimzi/operator/user/operator/KafkaUserOperatorMockTest.java +++ b/user-operator/src/test/java/io/strimzi/operator/user/operator/KafkaUserOperatorMockTest.java @@ -63,7 +63,7 @@ import 
static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; -public class KafkaUserOperatorMockTest { +public abstract class KafkaUserOperatorMockTest { private final static ExecutorService EXECUTOR = Executors.newSingleThreadExecutor(); private final CertManager mockCertManager = new MockCertManager(); @@ -107,8 +107,8 @@ public void beforeEach(TestInfo testInfo) { namespace = testInfo.getTestMethod().orElseThrow().getName().toLowerCase(Locale.ROOT); mockKube.prepareNamespace(namespace); - secretOps = new SecretOperator(EXECUTOR, client); - kafkaUserOps = new CrdOperator<>(EXECUTOR, client, KafkaUser.class, KafkaUserList.class, "KafkaUser"); + secretOps = new SecretOperator(EXECUTOR, client, getSSA()); + kafkaUserOps = new CrdOperator<>(EXECUTOR, client, KafkaUser.class, KafkaUserList.class, "KafkaUser", getSSA()); mockCaSecrets(); mockKafka(); @@ -119,6 +119,8 @@ public void afterEach() { client.namespaces().withName(namespace).delete(); } + protected abstract boolean getSSA(); + private void mockCaSecrets() { secretOps.resource(namespace, ResourceUtils.createClientsCaCertSecret(namespace)).create(); secretOps.resource(namespace, ResourceUtils.createClientsCaKeySecret(namespace)).create(); @@ -169,7 +171,6 @@ private void mockKafka() { when(quotasOps.getAllUsers()).thenReturn(CompletableFuture.completedStage(Set.of("quotas-user-1", "quotas-user-2"))); } - @Test public void testCreateTlsUser() throws ExecutionException, InterruptedException { KafkaUser user = ResourceUtils.createKafkaUserTls(namespace); user = Crds.kafkaUserOperation(client).resource(user).create(); @@ -240,8 +241,8 @@ public void testCreateTlsUser() throws ExecutionException, InterruptedException public void testCreateExternalTlsUser() throws ExecutionException, InterruptedException { KafkaUser user = new KafkaUserBuilder(ResourceUtils.createKafkaUserTls(namespace)) .editSpec() - .withNewKafkaUserTlsExternalClientAuthentication() - .endKafkaUserTlsExternalClientAuthentication() + 
.withNewKafkaUserTlsExternalClientAuthentication() + .endKafkaUserTlsExternalClientAuthentication() .endSpec() .build(); user = Crds.kafkaUserOperation(client).resource(user).create(); @@ -300,16 +301,16 @@ public void testCreateExternalTlsUser() throws ExecutionException, InterruptedEx public void testCreateTlsUserWithACLsDisabled() throws ExecutionException, InterruptedException { KafkaUser user = new KafkaUserBuilder() .withNewMetadata() - .withName(ResourceUtils.NAME) - .withNamespace(namespace) + .withName(ResourceUtils.NAME) + .withNamespace(namespace) .endMetadata() .withNewSpec() - .withNewKafkaUserTlsClientAuthentication() - .endKafkaUserTlsClientAuthentication() - .withNewQuotas() - .withConsumerByteRate(1_024 * 1_024) - .withProducerByteRate(1_024 * 1_024) - .endQuotas() + .withNewKafkaUserTlsClientAuthentication() + .endKafkaUserTlsClientAuthentication() + .withNewQuotas() + .withConsumerByteRate(1_024 * 1_024) + .withProducerByteRate(1_024 * 1_024) + .endQuotas() .endSpec() .build(); user = Crds.kafkaUserOperation(client).resource(user).create(); @@ -511,7 +512,7 @@ public void testCreateScramShaUserWithProvidedPassword() throws ExecutionExcepti final String desiredPassword = "12345678"; Secret desiredPasswordSecret = new SecretBuilder() .withNewMetadata() - .withName("my-secret") + .withName("my-secret") .withNamespace(namespace) .endMetadata() .addToData("my-password", Base64.getEncoder().encodeToString(desiredPassword.getBytes(StandardCharsets.UTF_8))) @@ -519,16 +520,16 @@ public void testCreateScramShaUserWithProvidedPassword() throws ExecutionExcepti secretOps.resource(namespace, desiredPasswordSecret).create(); KafkaUser user = new KafkaUserBuilder(ResourceUtils.createKafkaUserScramSha(namespace)) - .editSpec() + .editSpec() .withNewKafkaUserScramSha512ClientAuthentication() - .withNewPassword() - .withNewValueFrom() - .withNewSecretKeyRef("my-password", "my-secret", false) - .endValueFrom() - .endPassword() + .withNewPassword() + 
.withNewValueFrom() + .withNewSecretKeyRef("my-password", "my-secret", false) + .endValueFrom() + .endPassword() .endKafkaUserScramSha512ClientAuthentication() - .endSpec() - .build(); + .endSpec() + .build(); user = Crds.kafkaUserOperation(client).resource(user).create(); KafkaUserOperator op = new KafkaUserOperator(ResourceUtils.createUserOperatorConfig(namespace), mockCertManager, secretOps, kafkaUserOps, scramOps, quotasOps, aclOps); @@ -595,16 +596,16 @@ public void testCreateScramShaUserWithProvidedPassword() throws ExecutionExcepti @Test public void testCreateScramShaUserWithMissingPassword() { KafkaUser user = new KafkaUserBuilder(ResourceUtils.createKafkaUserScramSha(namespace)) - .editSpec() + .editSpec() .withNewKafkaUserScramSha512ClientAuthentication() - .withNewPassword() - .withNewValueFrom() - .withNewSecretKeyRef("my-password", "my-secret", false) - .endValueFrom() - .endPassword() + .withNewPassword() + .withNewValueFrom() + .withNewSecretKeyRef("my-password", "my-secret", false) + .endValueFrom() + .endPassword() .endKafkaUserScramSha512ClientAuthentication() - .endSpec() - .build(); + .endSpec() + .build(); user = Crds.kafkaUserOperation(client).resource(user).create(); KafkaUserOperator op = new KafkaUserOperator(ResourceUtils.createUserOperatorConfig(namespace), mockCertManager, secretOps, kafkaUserOps, scramOps, quotasOps, aclOps); @@ -837,8 +838,8 @@ public void testUpdateTlsUserToTlsExternal() throws ExecutionException, Interrup KafkaUser user = new KafkaUserBuilder(ResourceUtils.createKafkaUserTls(namespace)) .editSpec() - .withNewKafkaUserTlsExternalClientAuthentication() - .endKafkaUserTlsExternalClientAuthentication() + .withNewKafkaUserTlsExternalClientAuthentication() + .endKafkaUserTlsExternalClientAuthentication() .endSpec() .build(); user = Crds.kafkaUserOperation(client).resource(user).create(); @@ -1314,7 +1315,7 @@ public void testDeleteTlsUserWithSecretPrefix() throws ExecutionException, Inter String secretPrefix = "my-test-"; 
Secret existingUserSecret = new SecretBuilder(ResourceUtils.createUserSecretTls(namespace)) .editMetadata() - .withName(secretPrefix + ResourceUtils.NAME) + .withName(secretPrefix + ResourceUtils.NAME) .endMetadata() .build(); secretOps.resource(namespace, existingUserSecret).create(); @@ -1479,14 +1480,14 @@ public void testStatusNotReadyWhenACLReconciliationFails() { public void testReconcileAll() throws ExecutionException, InterruptedException { KafkaUser user1 = new KafkaUserBuilder(ResourceUtils.createKafkaUserTls(namespace)) .editMetadata() - .withName("cr-user-1") + .withName("cr-user-1") .endMetadata() .build(); Crds.kafkaUserOperation(client).inNamespace(namespace).resource(user1).create(); KafkaUser user2 = new KafkaUserBuilder(ResourceUtils.createKafkaUserScramSha(namespace)) .editMetadata() - .withName("cr-user-2") + .withName("cr-user-2") .endMetadata() .build(); Crds.kafkaUserOperation(client).inNamespace(namespace).resource(user2).create(); @@ -1507,14 +1508,14 @@ public void testReconcileAll() throws ExecutionException, InterruptedException { public void testReconcileAllWithoutACLs() throws ExecutionException, InterruptedException { KafkaUser user1 = new KafkaUserBuilder(ResourceUtils.createKafkaUserTls(namespace)) .editMetadata() - .withName("cr-user-1") + .withName("cr-user-1") .endMetadata() .build(); Crds.kafkaUserOperation(client).inNamespace(namespace).resource(user1).create(); KafkaUser user2 = new KafkaUserBuilder(ResourceUtils.createKafkaUserScramSha(namespace)) .editMetadata() - .withName("cr-user-2") + .withName("cr-user-2") .endMetadata() .build(); Crds.kafkaUserOperation(client).inNamespace(namespace).resource(user2).create(); diff --git a/user-operator/src/test/java/io/strimzi/operator/user/operator/KafkaUserOperatorWithSSAMockTest.java b/user-operator/src/test/java/io/strimzi/operator/user/operator/KafkaUserOperatorWithSSAMockTest.java new file mode 100644 index 00000000000..5b855d21c44 --- /dev/null +++ 
b/user-operator/src/test/java/io/strimzi/operator/user/operator/KafkaUserOperatorWithSSAMockTest.java @@ -0,0 +1,12 @@ +/* + * Copyright Strimzi authors. + * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). + */ +package io.strimzi.operator.user.operator; + +public class KafkaUserOperatorWithSSAMockTest extends KafkaUserOperatorMockTest { + @Override + protected boolean getSSA() { + return true; + } +} diff --git a/user-operator/src/test/java/io/strimzi/operator/user/operator/KafkaUserOperatorWithoutSSAMockTest.java b/user-operator/src/test/java/io/strimzi/operator/user/operator/KafkaUserOperatorWithoutSSAMockTest.java new file mode 100644 index 00000000000..c7b6749cd1a --- /dev/null +++ b/user-operator/src/test/java/io/strimzi/operator/user/operator/KafkaUserOperatorWithoutSSAMockTest.java @@ -0,0 +1,12 @@ +/* + * Copyright Strimzi authors. + * License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html). + */ +package io.strimzi.operator.user.operator; + +public class KafkaUserOperatorWithoutSSAMockTest extends KafkaUserOperatorMockTest { + @Override + protected boolean getSSA() { + return false; + } +} \ No newline at end of file