diff --git a/exp/runtime/hooks/api/v1alpha1/common_types.go b/exp/runtime/hooks/api/v1alpha1/common_types.go
index c7ace37dc81a..0836ce5cfc3a 100644
--- a/exp/runtime/hooks/api/v1alpha1/common_types.go
+++ b/exp/runtime/hooks/api/v1alpha1/common_types.go
@@ -20,6 +20,9 @@ import (
 	"k8s.io/apimachinery/pkg/runtime"
 )
 
+// PendingHooksAnnotation is the annotation used to keep track of pending runtime hooks.
+const PendingHooksAnnotation = "hooks.cluster.x-k8s.io/pending-hooks"
+
 // ResponseObject is a runtime object extended with methods to handle response-specific fields.
 // +kubebuilder:object:generate=false
 type ResponseObject interface {
diff --git a/internal/controllers/topology/cluster/desired_state.go b/internal/controllers/topology/cluster/desired_state.go
index 609ff2745a5e..5eb5979581f1 100644
--- a/internal/controllers/topology/cluster/desired_state.go
+++ b/internal/controllers/topology/cluster/desired_state.go
@@ -35,6 +35,7 @@ import (
 	"sigs.k8s.io/cluster-api/feature"
 	"sigs.k8s.io/cluster-api/internal/contract"
 	"sigs.k8s.io/cluster-api/internal/controllers/topology/cluster/scope"
+	"sigs.k8s.io/cluster-api/internal/hooks"
 	tlog "sigs.k8s.io/cluster-api/internal/log"
 	runtimecatalog "sigs.k8s.io/cluster-api/internal/runtime/catalog"
 )
@@ -313,6 +314,33 @@ func (r *Reconciler) computeControlPlaneVersion(ctx context.Context, s *scope.Sc
 	// Nb. We do not return early in the function if the control plane is already at the desired version so as
 	// to know if the control plane is being upgraded. This information
 	// is required when updating the TopologyReconciled condition on the cluster.
+
+	// Let's call the AfterControlPlaneUpgrade hook now that the control plane is upgraded.
+	if feature.Gates.Enabled(feature.RuntimeSDK) {
+		// Call the hook only if it is marked. If it is not marked it means we don't need to call the
+		// hook, because either we didn't go through an upgrade or we already called the hook after the upgrade.
+		if hooks.IsPending(runtimehooksv1.AfterControlPlaneUpgrade, s.Current.Cluster) {
+			hookRequest := &runtimehooksv1.AfterControlPlaneUpgradeRequest{
+				Cluster:           *s.Current.Cluster,
+				KubernetesVersion: desiredVersion,
+			}
+			hookResponse := &runtimehooksv1.AfterControlPlaneUpgradeResponse{}
+			if err := r.RuntimeClient.CallAllExtensions(ctx, runtimehooksv1.AfterControlPlaneUpgrade, s.Current.Cluster, hookRequest, hookResponse); err != nil {
+				return "", errors.Wrapf(err, "error calling the %s hook", runtimecatalog.HookName(runtimehooksv1.AfterControlPlaneUpgrade))
+			}
+			s.HookResponseTracker.Add(runtimehooksv1.AfterControlPlaneUpgrade, hookResponse)
+			if hookResponse.RetryAfterSeconds != 0 {
+				// We have to block the upgrade of the machine deployments.
+				s.UpgradeTracker.MachineDeployments.HoldUpgrades(true)
+			} else {
+				// We are done with the hook for now. We don't need to call it anymore. Unmark it.
+				if err := hooks.MarkAsDone(ctx, r.Client, s.Current.Cluster, runtimehooksv1.AfterControlPlaneUpgrade); err != nil {
+					return "", errors.Wrapf(err, "failed to unmark the %s hook", runtimecatalog.HookName(runtimehooksv1.AfterControlPlaneUpgrade))
+				}
+			}
+		}
+	}
+
 	return *currentVersion, nil
 }
 
@@ -354,9 +382,15 @@ func (r *Reconciler) computeControlPlaneVersion(ctx context.Context, s *scope.Sc
 			// Cannot pickup the new version right now. Need to try again later.
 			return *currentVersion, nil
 		}
+
+		// We are picking up the new version here.
+		// Mark the AfterControlPlaneUpgrade and the AfterClusterUpgrade hooks so that we call them once we are done with the upgrade.
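+		// The hooks are tracked as pending in the runtimehooksv1.PendingHooksAnnotation annotation
+		// on the Cluster object; the marking/unmarking helpers live in internal/hooks.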
+		if err := hooks.MarkAsPending(ctx, r.Client, s.Current.Cluster, runtimehooksv1.AfterControlPlaneUpgrade, runtimehooksv1.AfterClusterUpgrade); err != nil {
+			return "", errors.Wrapf(err, "failed to mark the %s hooks", []string{runtimecatalog.HookName(runtimehooksv1.AfterControlPlaneUpgrade), runtimecatalog.HookName(runtimehooksv1.AfterClusterUpgrade)})
+		}
 	}
 
-	// Control plane and machine deployments are stable.
+	// Control plane and machine deployments are stable. All the required hooks have been called.
 	// Ready to pick up the topology version.
 	return desiredVersion, nil
 }
diff --git a/internal/controllers/topology/cluster/desired_state_test.go b/internal/controllers/topology/cluster/desired_state_test.go
index 925793da6a37..0480d136a77d 100644
--- a/internal/controllers/topology/cluster/desired_state_test.go
+++ b/internal/controllers/topology/cluster/desired_state_test.go
@@ -30,6 +30,7 @@ import (
 	"k8s.io/apimachinery/pkg/util/intstr"
 	utilfeature "k8s.io/component-base/featuregate/testing"
 	"k8s.io/utils/pointer"
+	"sigs.k8s.io/controller-runtime/pkg/client/fake"
 	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
 
 	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
@@ -37,6 +38,7 @@ import (
 	"sigs.k8s.io/cluster-api/feature"
 	"sigs.k8s.io/cluster-api/internal/contract"
 	"sigs.k8s.io/cluster-api/internal/controllers/topology/cluster/scope"
+	"sigs.k8s.io/cluster-api/internal/hooks"
 	runtimecatalog "sigs.k8s.io/cluster-api/internal/runtime/catalog"
 	fakeruntimeclient "sigs.k8s.io/cluster-api/internal/runtime/client/fake"
 	"sigs.k8s.io/cluster-api/internal/test/builder"
@@ -549,273 +551,660 @@ func TestComputeControlPlane(t *testing.T) {
 }
 
 func TestComputeControlPlaneVersion(t *testing.T) {
-	defer utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.RuntimeSDK, true)()
+	t.Run("Compute control plane version under various circumstances", func(t *testing.T) {
+		defer utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.RuntimeSDK, true)()
+
+		// Note: the version used by the machine deployments does
+		// not affect how we determine the control plane version.
+		// We only want to know if the machine deployments are stable.
+		//
+		// A machine deployment is considered stable if all the following are true:
+		// - md.spec.replicas == md.status.replicas
+		// - md.spec.replicas == md.status.updatedReplicas
+		// - md.spec.replicas == md.status.readyReplicas
+		// - md.Generation < md.status.observedGeneration
+		//
+		// A machine deployment is considered upgrading if any of the above conditions
+		// is false.
+		machineDeploymentStable := builder.MachineDeployment("test-namespace", "md1").
+			WithGeneration(int64(1)).
+			WithReplicas(int32(2)).
+			WithStatus(clusterv1.MachineDeploymentStatus{
+				ObservedGeneration: 2,
+				Replicas:           2,
+				UpdatedReplicas:    2,
+				AvailableReplicas:  2,
+				ReadyReplicas:      2,
+			}).
+			Build()
+		machineDeploymentRollingOut := builder.MachineDeployment("test-namespace", "md2").
+			WithGeneration(int64(1)).
+			WithReplicas(int32(2)).
+			WithStatus(clusterv1.MachineDeploymentStatus{
+				ObservedGeneration: 2,
+				Replicas:           1,
+				UpdatedReplicas:    1,
+				AvailableReplicas:  1,
+				ReadyReplicas:      1,
+			}).
+			Build()

-	// Note: the version used by the machine deployments does
-	// not affect how we determining the control plane version.
-	// We only want to know if the machine deployments are stable.
- // - // A machine deployment is considered stable if all the following are true: - // - md.spec.replicas == md.status.replicas - // - md.spec.replicas == md.status.updatedReplicas - // - md.spec.replicas == md.status.readyReplicas - // - md.Generation < md.status.observedGeneration - // - // A machine deployment is considered upgrading if any of the above conditions - // is false. - machineDeploymentStable := builder.MachineDeployment("test-namespace", "md1"). - WithGeneration(int64(1)). - WithReplicas(int32(2)). - WithStatus(clusterv1.MachineDeploymentStatus{ - ObservedGeneration: 2, - Replicas: 2, - UpdatedReplicas: 2, - AvailableReplicas: 2, - ReadyReplicas: 2, - }). - Build() - machineDeploymentRollingOut := builder.MachineDeployment("test-namespace", "md2"). - WithGeneration(int64(1)). - WithReplicas(int32(2)). - WithStatus(clusterv1.MachineDeploymentStatus{ - ObservedGeneration: 2, - Replicas: 1, - UpdatedReplicas: 1, - AvailableReplicas: 1, - ReadyReplicas: 1, - }). - Build() + nonBlockingBeforeClusterUpgradeResponse := &runtimehooksv1.BeforeClusterUpgradeResponse{ + CommonRetryResponse: runtimehooksv1.CommonRetryResponse{ + CommonResponse: runtimehooksv1.CommonResponse{ + Status: runtimehooksv1.ResponseStatusSuccess, + }, + }, + } - nonBlockingBeforeClusterUpgradeResponse := &runtimehooksv1.BeforeClusterUpgradeResponse{ - CommonRetryResponse: runtimehooksv1.CommonRetryResponse{ - CommonResponse: runtimehooksv1.CommonResponse{ - Status: runtimehooksv1.ResponseStatusSuccess, + blockingBeforeClusterUpgradeResponse := &runtimehooksv1.BeforeClusterUpgradeResponse{ + CommonRetryResponse: runtimehooksv1.CommonRetryResponse{ + CommonResponse: runtimehooksv1.CommonResponse{ + Status: runtimehooksv1.ResponseStatusSuccess, + }, + RetryAfterSeconds: int32(10), }, - }, - } + } - blockingBeforeClusterUpgradeResponse := &runtimehooksv1.BeforeClusterUpgradeResponse{ - CommonRetryResponse: runtimehooksv1.CommonRetryResponse{ - CommonResponse: runtimehooksv1.CommonResponse{ - Status: runtimehooksv1.ResponseStatusSuccess, + failureBeforeClusterUpgradeResponse := &runtimehooksv1.BeforeClusterUpgradeResponse{ + CommonRetryResponse: runtimehooksv1.CommonRetryResponse{ + CommonResponse: runtimehooksv1.CommonResponse{ + Status: runtimehooksv1.ResponseStatusFailure, + }, }, - RetryAfterSeconds: int32(10), - }, - } + } - failureBeforeClusterUpgradeResponse := &runtimehooksv1.BeforeClusterUpgradeResponse{ - CommonRetryResponse: runtimehooksv1.CommonRetryResponse{ - CommonResponse: runtimehooksv1.CommonResponse{ - Status: runtimehooksv1.ResponseStatusFailure, + catalog := runtimecatalog.New() + _ = runtimehooksv1.AddToCatalog(catalog) + + beforeClusterUpgradeGVH, err := catalog.GroupVersionHook(runtimehooksv1.BeforeClusterUpgrade) + if err != nil { + panic("unable to compute GVH") + } + + tests := []struct { + name string + hookResponse *runtimehooksv1.BeforeClusterUpgradeResponse + topologyVersion string + controlPlaneObj *unstructured.Unstructured + machineDeploymentsState scope.MachineDeploymentsStateMap + expectedVersion string + wantErr bool + }{ + { + name: "should return cluster.spec.topology.version if creating a new control plane", + topologyVersion: "v1.2.3", + controlPlaneObj: nil, + expectedVersion: "v1.2.3", }, - }, - } + { + // Control plane is not upgrading implies that controlplane.spec.version is equal to controlplane.status.version. 
+			// Control plane is not scaling implies that controlplane.spec.replicas is equal to controlplane.status.replicas,
+			// controlplane.status.updatedReplicas and controlplane.status.readyReplicas.
+			name:            "should return cluster.spec.topology.version if the control plane is not upgrading and not scaling",
+			hookResponse:    nonBlockingBeforeClusterUpgradeResponse,
+			topologyVersion: "v1.2.3",
+			controlPlaneObj: builder.ControlPlane("test1", "cp1").
+				WithSpecFields(map[string]interface{}{
+					"spec.version":  "v1.2.2",
+					"spec.replicas": int64(2),
+				}).
+				WithStatusFields(map[string]interface{}{
+					"status.version":         "v1.2.2",
+					"status.replicas":        int64(2),
+					"status.updatedReplicas": int64(2),
+					"status.readyReplicas":   int64(2),
+				}).
+				Build(),
+			expectedVersion: "v1.2.3",
+		},
+		{
+			// Control plane is considered upgrading if controlplane.spec.version is not equal to controlplane.status.version.
+			name:            "should return controlplane.spec.version if the control plane is upgrading",
+			topologyVersion: "v1.2.3",
+			controlPlaneObj: builder.ControlPlane("test1", "cp1").
+				WithSpecFields(map[string]interface{}{
+					"spec.version": "v1.2.2",
+				}).
+				WithStatusFields(map[string]interface{}{
+					"status.version": "v1.2.1",
+				}).
+				Build(),
+			expectedVersion: "v1.2.2",
+		},
+		{
+			// Control plane is considered scaling if controlplane.spec.replicas is not equal to any of
+			// controlplane.status.replicas, controlplane.status.readyReplicas, controlplane.status.updatedReplicas.
+			name:            "should return controlplane.spec.version if the control plane is scaling",
+			topologyVersion: "v1.2.3",
+			controlPlaneObj: builder.ControlPlane("test1", "cp1").
+				WithSpecFields(map[string]interface{}{
+					"spec.version":  "v1.2.2",
+					"spec.replicas": int64(2),
+				}).
+				WithStatusFields(map[string]interface{}{
+					"status.version":         "v1.2.2",
+					"status.replicas":        int64(1),
+					"status.updatedReplicas": int64(1),
+					"status.readyReplicas":   int64(1),
+				}).
+				Build(),
+			expectedVersion: "v1.2.2",
+		},
+		{
+			name:            "should return controlplane.spec.version if control plane is not upgrading and not scaling and one of the machine deployments is rolling out",
+			topologyVersion: "v1.2.3",
+			controlPlaneObj: builder.ControlPlane("test1", "cp1").
+				WithSpecFields(map[string]interface{}{
+					"spec.version":  "v1.2.2",
+					"spec.replicas": int64(2),
+				}).
+				WithStatusFields(map[string]interface{}{
+					"status.version":         "v1.2.2",
+					"status.replicas":        int64(2),
+					"status.updatedReplicas": int64(2),
+					"status.readyReplicas":   int64(2),
+				}).
+				Build(),
+			machineDeploymentsState: scope.MachineDeploymentsStateMap{
+				"md1": &scope.MachineDeploymentState{Object: machineDeploymentStable},
+				"md2": &scope.MachineDeploymentState{Object: machineDeploymentRollingOut},
+			},
+			expectedVersion: "v1.2.2",
+		},
+		{
+			name:            "should return cluster.spec.topology.version if control plane is not upgrading and not scaling and none of the machine deployments are rolling out - hook returns non-blocking response",
+			hookResponse:    nonBlockingBeforeClusterUpgradeResponse,
+			topologyVersion: "v1.2.3",
+			controlPlaneObj: builder.ControlPlane("test1", "cp1").
+				WithSpecFields(map[string]interface{}{
+					"spec.version":  "v1.2.2",
+					"spec.replicas": int64(2),
+				}).
+				WithStatusFields(map[string]interface{}{
+					"status.version":         "v1.2.2",
+					"status.replicas":        int64(2),
+					"status.updatedReplicas": int64(2),
+					"status.readyReplicas":   int64(2),
+				}).
+				Build(),
+			machineDeploymentsState: scope.MachineDeploymentsStateMap{
+				"md1": &scope.MachineDeploymentState{Object: machineDeploymentStable},
+				"md2": &scope.MachineDeploymentState{Object: machineDeploymentStable},
+			},
+			expectedVersion: "v1.2.3",
+		},
+		{
+			name:            "should return the controlplane.spec.version if the BeforeClusterUpgrade hook returns a blocking response",
+			hookResponse:    blockingBeforeClusterUpgradeResponse,
+			topologyVersion: "v1.2.3",
+			controlPlaneObj: builder.ControlPlane("test1", "cp1").
+				WithSpecFields(map[string]interface{}{
+					"spec.version":  "v1.2.2",
+					"spec.replicas": int64(2),
+				}).
+				WithStatusFields(map[string]interface{}{
+					"status.version":         "v1.2.2",
+					"status.replicas":        int64(2),
+					"status.updatedReplicas": int64(2),
+					"status.readyReplicas":   int64(2),
+				}).
+				Build(),
+			machineDeploymentsState: scope.MachineDeploymentsStateMap{
+				"md1": &scope.MachineDeploymentState{Object: machineDeploymentStable},
+				"md2": &scope.MachineDeploymentState{Object: machineDeploymentStable},
+			},
+			expectedVersion: "v1.2.2",
+		},
+		{
+			name:            "should fail if the BeforeClusterUpgrade hook returns a failure response",
+			hookResponse:    failureBeforeClusterUpgradeResponse,
+			topologyVersion: "v1.2.3",
+			controlPlaneObj: builder.ControlPlane("test1", "cp1").
+				WithSpecFields(map[string]interface{}{
+					"spec.version":  "v1.2.2",
+					"spec.replicas": int64(2),
+				}).
+				WithStatusFields(map[string]interface{}{
+					"status.version":         "v1.2.2",
+					"status.replicas":        int64(2),
+					"status.updatedReplicas": int64(2),
+					"status.readyReplicas":   int64(2),
+				}).
+				Build(),
+			machineDeploymentsState: scope.MachineDeploymentsStateMap{
+				"md1": &scope.MachineDeploymentState{Object: machineDeploymentStable},
+				"md2": &scope.MachineDeploymentState{Object: machineDeploymentStable},
+			},
+			expectedVersion: "v1.2.2",
+			wantErr:         true,
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			g := NewWithT(t)

-	catalog := runtimecatalog.New()
-	_ = runtimehooksv1.AddToCatalog(catalog)
+			s := &scope.Scope{
+				Blueprint: &scope.ClusterBlueprint{Topology: &clusterv1.Topology{
+					Version: tt.topologyVersion,
+					ControlPlane: clusterv1.ControlPlaneTopology{
+						Replicas: pointer.Int32(2),
+					},
+				}},
+				Current: &scope.ClusterState{
+					Cluster: &clusterv1.Cluster{
+						ObjectMeta: metav1.ObjectMeta{
+							Name:      "test-cluster",
+							Namespace: "test-ns",
+						},
+					},
+					ControlPlane:       &scope.ControlPlaneState{Object: tt.controlPlaneObj},
+					MachineDeployments: tt.machineDeploymentsState,
+				},
+				UpgradeTracker:      scope.NewUpgradeTracker(),
+				HookResponseTracker: scope.NewHookResponseTracker(),
+			}

-	beforeClusterUpgradeGVH, err := catalog.GroupVersionHook(runtimehooksv1.BeforeClusterUpgrade)
-	if err != nil {
-		panic("unable to compute GVH")
-	}
+			runtimeClient := fakeruntimeclient.NewRuntimeClientBuilder().
+				WithCatalog(catalog).
+				WithCallAllExtensionResponses(map[runtimecatalog.GroupVersionHook]runtimehooksv1.ResponseObject{
+					beforeClusterUpgradeGVH: tt.hookResponse,
+				}).
+ Build() - tests := []struct { - name string - hookResponse *runtimehooksv1.BeforeClusterUpgradeResponse - topologyVersion string - controlPlaneObj *unstructured.Unstructured - machineDeploymentsState scope.MachineDeploymentsStateMap - expectedVersion string - wantErr bool - }{ - { - name: "should return cluster.spec.topology.version if creating a new control plane", - topologyVersion: "v1.2.3", - controlPlaneObj: nil, - expectedVersion: "v1.2.3", - }, - { - // Control plane is not upgrading implies that controlplane.spec.version is equal to controlplane.status.version. - // Control plane is not scaling implies that controlplane.spec.replicas is equal to controlplane.status.replicas, - // Controlplane.status.updatedReplicas and controlplane.status.readyReplicas. - name: "should return cluster.spec.topology.version if the control plane is not upgrading and not scaling", - hookResponse: nonBlockingBeforeClusterUpgradeResponse, - topologyVersion: "v1.2.3", - controlPlaneObj: builder.ControlPlane("test1", "cp1"). - WithSpecFields(map[string]interface{}{ - "spec.version": "v1.2.2", - "spec.replicas": int64(2), - }). - WithStatusFields(map[string]interface{}{ - "status.version": "v1.2.2", - "status.replicas": int64(2), - "status.updatedReplicas": int64(2), - "status.readyReplicas": int64(2), - }). - Build(), - expectedVersion: "v1.2.3", - }, - { - // Control plane is considered upgrading if controlplane.spec.version is not equal to controlplane.status.version. - name: "should return controlplane.spec.version if the control plane is upgrading", - topologyVersion: "v1.2.3", - controlPlaneObj: builder.ControlPlane("test1", "cp1"). - WithSpecFields(map[string]interface{}{ - "spec.version": "v1.2.2", - }). - WithStatusFields(map[string]interface{}{ - "status.version": "v1.2.1", - }). - Build(), - expectedVersion: "v1.2.2", - }, - { - // Control plane is considered scaling if controlplane.spec.replicas is not equal to any of - // controlplane.status.replicas, controlplane.status.readyReplicas, controlplane.status.updatedReplicas. - name: "should return controlplane.spec.version if the control plane is scaling", - topologyVersion: "v1.2.3", - controlPlaneObj: builder.ControlPlane("test1", "cp1"). - WithSpecFields(map[string]interface{}{ - "spec.version": "v1.2.2", - "spec.replicas": int64(2), - }). - WithStatusFields(map[string]interface{}{ - "status.version": "v1.2.2", - "status.replicas": int64(1), - "status.updatedReplicas": int64(1), - "status.readyReplicas": int64(1), - }). - Build(), - expectedVersion: "v1.2.2", - }, - { - name: "should return controlplane.spec.version if control plane is not upgrading and not scaling and one of the machine deployments is rolling out", - topologyVersion: "v1.2.3", - controlPlaneObj: builder.ControlPlane("test1", "cp1"). - WithSpecFields(map[string]interface{}{ - "spec.version": "v1.2.2", - "spec.replicas": int64(2), - }). - WithStatusFields(map[string]interface{}{ - "status.version": "v1.2.2", - "status.replicas": int64(2), - "status.updatedReplicas": int64(2), - "status.readyReplicas": int64(2), - }). 
- Build(), - machineDeploymentsState: scope.MachineDeploymentsStateMap{ - "md1": &scope.MachineDeploymentState{Object: machineDeploymentStable}, - "md2": &scope.MachineDeploymentState{Object: machineDeploymentRollingOut}, + fakeClient := fake.NewClientBuilder().WithObjects(s.Current.Cluster).Build() + + r := &Reconciler{ + Client: fakeClient, + APIReader: fakeClient, + RuntimeClient: runtimeClient, + } + version, err := r.computeControlPlaneVersion(ctx, s) + if tt.wantErr { + g.Expect(err).NotTo(BeNil()) + } else { + g.Expect(err).To(BeNil()) + g.Expect(version).To(Equal(tt.expectedVersion)) + } + }) + } + }) + + t.Run("Calling AfterControlPlaneUpgrade hook", func(t *testing.T) { + defer utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.RuntimeSDK, true)() + + catalog := runtimecatalog.New() + _ = runtimehooksv1.AddToCatalog(catalog) + + afterControlPlaneUpgradeGVH, err := catalog.GroupVersionHook(runtimehooksv1.AfterControlPlaneUpgrade) + if err != nil { + panic(err) + } + + blockingResponse := &runtimehooksv1.AfterControlPlaneUpgradeResponse{ + CommonRetryResponse: runtimehooksv1.CommonRetryResponse{ + RetryAfterSeconds: int32(10), + CommonResponse: runtimehooksv1.CommonResponse{ + Status: runtimehooksv1.ResponseStatusSuccess, + }, }, - expectedVersion: "v1.2.2", - }, - { - name: "should return cluster.spec.topology.version if control plane is not upgrading and not scaling and none of the machine deployments are rolling out", - hookResponse: nonBlockingBeforeClusterUpgradeResponse, - topologyVersion: "v1.2.3", - controlPlaneObj: builder.ControlPlane("test1", "cp1"). - WithSpecFields(map[string]interface{}{ - "spec.version": "v1.2.2", - "spec.replicas": int64(2), - }). - WithStatusFields(map[string]interface{}{ - "status.version": "v1.2.2", - "status.replicas": int64(2), - "status.updatedReplicas": int64(2), - "status.readyReplicas": int64(2), - }). - Build(), - machineDeploymentsState: scope.MachineDeploymentsStateMap{ - "md1": &scope.MachineDeploymentState{Object: machineDeploymentStable}, - "md2": &scope.MachineDeploymentState{Object: machineDeploymentStable}, + } + nonBlockingResponse := &runtimehooksv1.AfterControlPlaneUpgradeResponse{ + CommonRetryResponse: runtimehooksv1.CommonRetryResponse{ + RetryAfterSeconds: int32(0), + CommonResponse: runtimehooksv1.CommonResponse{ + Status: runtimehooksv1.ResponseStatusSuccess, + }, }, - expectedVersion: "v1.2.3", - }, - { - name: "should return the controlplane.spec.version if the BeforeClusterUpgrade hooks returns a blocking response", - hookResponse: blockingBeforeClusterUpgradeResponse, - topologyVersion: "v1.2.3", - controlPlaneObj: builder.ControlPlane("test1", "cp1"). - WithSpecFields(map[string]interface{}{ - "spec.version": "v1.2.2", - "spec.replicas": int64(2), - }). - WithStatusFields(map[string]interface{}{ - "status.version": "v1.2.2", - "status.replicas": int64(2), - "status.updatedReplicas": int64(2), - "status.readyReplicas": int64(2), - }). 
- Build(), - machineDeploymentsState: scope.MachineDeploymentsStateMap{ - "md1": &scope.MachineDeploymentState{Object: machineDeploymentStable}, - "md2": &scope.MachineDeploymentState{Object: machineDeploymentStable}, + } + failureResponse := &runtimehooksv1.AfterControlPlaneUpgradeResponse{ + CommonRetryResponse: runtimehooksv1.CommonRetryResponse{ + CommonResponse: runtimehooksv1.CommonResponse{ + Status: runtimehooksv1.ResponseStatusFailure, + }, }, - expectedVersion: "v1.2.2", - }, - { - name: "should fail if the BeforeClusterUpgrade hooks returns a failure response", - hookResponse: failureBeforeClusterUpgradeResponse, - topologyVersion: "v1.2.3", - controlPlaneObj: builder.ControlPlane("test1", "cp1"). - WithSpecFields(map[string]interface{}{ - "spec.version": "v1.2.2", - "spec.replicas": int64(2), - }). - WithStatusFields(map[string]interface{}{ - "status.version": "v1.2.2", - "status.replicas": int64(2), - "status.updatedReplicas": int64(2), - "status.readyReplicas": int64(2), - }). - Build(), - machineDeploymentsState: scope.MachineDeploymentsStateMap{ - "md1": &scope.MachineDeploymentState{Object: machineDeploymentStable}, - "md2": &scope.MachineDeploymentState{Object: machineDeploymentStable}, + } + + topologyVersion := "v1.2.3" + lowerVersion := "v1.2.2" + controlPlaneStable := builder.ControlPlane("test-ns", "cp1"). + WithSpecFields(map[string]interface{}{ + "spec.version": topologyVersion, + "spec.replicas": int64(2), + }). + WithStatusFields(map[string]interface{}{ + "status.version": topologyVersion, + "status.replicas": int64(2), + "status.updatedReplicas": int64(2), + "status.readyReplicas": int64(2), + }). + Build() + + controlPlaneUpgrading := builder.ControlPlane("test-ns", "cp1"). + WithSpecFields(map[string]interface{}{ + "spec.version": topologyVersion, + "spec.replicas": int64(2), + }). + WithStatusFields(map[string]interface{}{ + "status.version": lowerVersion, + "status.replicas": int64(2), + "status.updatedReplicas": int64(2), + "status.readyReplicas": int64(2), + }). + Build() + + controlPlaneProvisioning := builder.ControlPlane("test-ns", "cp1"). + WithSpecFields(map[string]interface{}{ + "spec.version": "v1.2.2", + "spec.replicas": int64(2), + }). + WithStatusFields(map[string]interface{}{ + "status.version": "", + }). 
+ Build() + + tests := []struct { + name string + s *scope.Scope + hookResponse *runtimehooksv1.AfterControlPlaneUpgradeResponse + wantMarked bool + wantHookToBeCalled bool + wantAllowMDUpgrades bool + wantErr bool + }{ + { + name: "should not call hook if it is not marked", + s: &scope.Scope{ + Blueprint: &scope.ClusterBlueprint{ + Topology: &clusterv1.Topology{ + Version: topologyVersion, + ControlPlane: clusterv1.ControlPlaneTopology{}, + }, + }, + Current: &scope.ClusterState{ + Cluster: &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster", + Namespace: "test-ns", + }, + Spec: clusterv1.ClusterSpec{}, + }, + ControlPlane: &scope.ControlPlaneState{ + Object: controlPlaneStable, + }, + }, + UpgradeTracker: scope.NewUpgradeTracker(), + HookResponseTracker: scope.NewHookResponseTracker(), + }, + wantMarked: false, + wantHookToBeCalled: false, + wantErr: false, }, - expectedVersion: "v1.2.2", - wantErr: true, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - g := NewWithT(t) + { + name: "should not call hook if the control plane is provisioning - hook is marked", + s: &scope.Scope{ + Blueprint: &scope.ClusterBlueprint{ + Topology: &clusterv1.Topology{ + Version: topologyVersion, + ControlPlane: clusterv1.ControlPlaneTopology{}, + }, + }, + Current: &scope.ClusterState{ + Cluster: &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster", + Namespace: "test-ns", + Annotations: map[string]string{ + runtimehooksv1.PendingHooksAnnotation: "AfterControlPlaneUpgrade", + }, + }, + Spec: clusterv1.ClusterSpec{}, + }, + ControlPlane: &scope.ControlPlaneState{ + Object: controlPlaneProvisioning, + }, + }, + UpgradeTracker: scope.NewUpgradeTracker(), + HookResponseTracker: scope.NewHookResponseTracker(), + }, + wantMarked: true, + wantHookToBeCalled: false, + wantErr: false, + }, + { + name: "should not call hook if the control plane is upgrading - hook is marked", + s: &scope.Scope{ + Blueprint: &scope.ClusterBlueprint{ + Topology: &clusterv1.Topology{ + Version: topologyVersion, + ControlPlane: clusterv1.ControlPlaneTopology{}, + }, + }, + Current: &scope.ClusterState{ + Cluster: &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster", + Namespace: "test-ns", + Annotations: map[string]string{ + runtimehooksv1.PendingHooksAnnotation: "AfterControlPlaneUpgrade", + }, + }, + Spec: clusterv1.ClusterSpec{}, + }, + ControlPlane: &scope.ControlPlaneState{ + Object: controlPlaneUpgrading, + }, + }, + UpgradeTracker: scope.NewUpgradeTracker(), + HookResponseTracker: scope.NewHookResponseTracker(), + }, + wantMarked: true, + wantHookToBeCalled: false, + wantErr: false, + }, + { + name: "should call hook if the control plane is at desired version - non blocking response should unmark hook and allow MD upgrades", + s: &scope.Scope{ + Blueprint: &scope.ClusterBlueprint{ + Topology: &clusterv1.Topology{ + Version: topologyVersion, + ControlPlane: clusterv1.ControlPlaneTopology{}, + }, + }, + Current: &scope.ClusterState{ + Cluster: &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster", + Namespace: "test-ns", + Annotations: map[string]string{ + runtimehooksv1.PendingHooksAnnotation: "AfterControlPlaneUpgrade", + }, + }, + Spec: clusterv1.ClusterSpec{}, + }, + ControlPlane: &scope.ControlPlaneState{ + Object: controlPlaneStable, + }, + }, + UpgradeTracker: scope.NewUpgradeTracker(), + HookResponseTracker: scope.NewHookResponseTracker(), + }, + hookResponse: nonBlockingResponse, + wantMarked: false, + 
wantHookToBeCalled: true, + wantAllowMDUpgrades: true, + wantErr: false, + }, + { + name: "should call hook if the control plane is at desired version - blocking response should leave the hook as marked and block MD upgrades", + s: &scope.Scope{ + Blueprint: &scope.ClusterBlueprint{ + Topology: &clusterv1.Topology{ + Version: topologyVersion, + ControlPlane: clusterv1.ControlPlaneTopology{}, + }, + }, + Current: &scope.ClusterState{ + Cluster: &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster", + Namespace: "test-ns", + Annotations: map[string]string{ + runtimehooksv1.PendingHooksAnnotation: "AfterControlPlaneUpgrade", + }, + }, + Spec: clusterv1.ClusterSpec{}, + }, + ControlPlane: &scope.ControlPlaneState{ + Object: controlPlaneStable, + }, + }, + UpgradeTracker: scope.NewUpgradeTracker(), + HookResponseTracker: scope.NewHookResponseTracker(), + }, + hookResponse: blockingResponse, + wantMarked: true, + wantHookToBeCalled: true, + wantAllowMDUpgrades: false, + wantErr: false, + }, + { + name: "should call hook if the control plane is at desired version - failure response should leave the hook as marked", + s: &scope.Scope{ + Blueprint: &scope.ClusterBlueprint{ + Topology: &clusterv1.Topology{ + Version: topologyVersion, + ControlPlane: clusterv1.ControlPlaneTopology{}, + }, + }, + Current: &scope.ClusterState{ + Cluster: &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster", + Namespace: "test-ns", + Annotations: map[string]string{ + runtimehooksv1.PendingHooksAnnotation: "AfterControlPlaneUpgrade", + }, + }, + Spec: clusterv1.ClusterSpec{}, + }, + ControlPlane: &scope.ControlPlaneState{ + Object: controlPlaneStable, + }, + }, + UpgradeTracker: scope.NewUpgradeTracker(), + HookResponseTracker: scope.NewHookResponseTracker(), + }, + hookResponse: failureResponse, + wantMarked: true, + wantHookToBeCalled: true, + wantErr: true, + }, + } - s := &scope.Scope{ - Blueprint: &scope.ClusterBlueprint{Topology: &clusterv1.Topology{ - Version: tt.topologyVersion, - ControlPlane: clusterv1.ControlPlaneTopology{ - Replicas: pointer.Int32(2), + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + fakeRuntimeClient := fakeruntimeclient.NewRuntimeClientBuilder(). + WithCallAllExtensionResponses(map[runtimecatalog.GroupVersionHook]runtimehooksv1.ResponseObject{ + afterControlPlaneUpgradeGVH: tt.hookResponse, + }). + WithCatalog(catalog). 
+ Build() + + fakeClient := fake.NewClientBuilder().WithObjects(tt.s.Current.Cluster).Build() + + r := &Reconciler{ + Client: fakeClient, + APIReader: fakeClient, + RuntimeClient: fakeRuntimeClient, + } + + _, err := r.computeControlPlaneVersion(ctx, tt.s) + g.Expect(fakeRuntimeClient.CallAllCount(runtimehooksv1.AfterControlPlaneUpgrade) == 1).To(Equal(tt.wantHookToBeCalled)) + g.Expect(hooks.IsPending(runtimehooksv1.AfterControlPlaneUpgrade, tt.s.Current.Cluster)).To(Equal(tt.wantMarked)) + g.Expect(err != nil).To(Equal(tt.wantErr)) + if tt.wantHookToBeCalled && !tt.wantErr { + g.Expect(tt.s.UpgradeTracker.MachineDeployments.AllowUpgrade()).To(Equal(tt.wantAllowMDUpgrades)) + } + }) + } + }) + + t.Run("marking AfterClusterUpgrade and AfterControlPlaneUpgrade hooks", func(t *testing.T) { + defer utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.RuntimeSDK, true)() + + catalog := runtimecatalog.New() + _ = runtimehooksv1.AddToCatalog(catalog) + beforeClusterUpgradeGVH, err := catalog.GroupVersionHook(runtimehooksv1.BeforeClusterUpgrade) + if err != nil { + panic("unable to compute GVH") + } + beforeClusterUpgradeNonBlockingResponse := &runtimehooksv1.BeforeClusterUpgradeResponse{ + CommonRetryResponse: runtimehooksv1.CommonRetryResponse{ + CommonResponse: runtimehooksv1.CommonResponse{ + Status: runtimehooksv1.ResponseStatusSuccess, + }, + }, + } + + controlPlaneStable := builder.ControlPlane("test-ns", "cp1"). + WithSpecFields(map[string]interface{}{ + "spec.version": "v1.2.2", + "spec.replicas": int64(2), + }). + WithStatusFields(map[string]interface{}{ + "status.version": "v1.2.2", + "status.replicas": int64(2), + "status.updatedReplicas": int64(2), + "status.readyReplicas": int64(2), + }). + Build() + + s := &scope.Scope{ + Blueprint: &scope.ClusterBlueprint{Topology: &clusterv1.Topology{ + Version: "v1.2.3", + ControlPlane: clusterv1.ControlPlaneTopology{ + Replicas: pointer.Int32(2), + }, + }}, + Current: &scope.ClusterState{ + Cluster: &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster", + Namespace: "test-ns", }, - }}, - Current: &scope.ClusterState{ - Cluster: &clusterv1.Cluster{}, - ControlPlane: &scope.ControlPlaneState{Object: tt.controlPlaneObj}, - MachineDeployments: tt.machineDeploymentsState, }, - UpgradeTracker: scope.NewUpgradeTracker(), - HookResponseTracker: scope.NewHookResponseTracker(), - } + ControlPlane: &scope.ControlPlaneState{Object: controlPlaneStable}, + }, + UpgradeTracker: scope.NewUpgradeTracker(), + HookResponseTracker: scope.NewHookResponseTracker(), + } - runtimeClient := fakeruntimeclient.NewRuntimeClientBuilder(). - WithCatalog(catalog). - WithCallAllExtensionResponses(map[runtimecatalog.GroupVersionHook]runtimehooksv1.ResponseObject{ - beforeClusterUpgradeGVH: tt.hookResponse, - }). - Build() + runtimeClient := fakeruntimeclient.NewRuntimeClientBuilder(). + WithCatalog(catalog). + WithCallAllExtensionResponses(map[runtimecatalog.GroupVersionHook]runtimehooksv1.ResponseObject{ + beforeClusterUpgradeGVH: beforeClusterUpgradeNonBlockingResponse, + }). 
+				Build()

-			r := &Reconciler{
-				RuntimeClient: runtimeClient,
-			}
-			version, err := r.computeControlPlaneVersion(ctx, s)
-			if tt.wantErr {
-				g.Expect(err).NotTo(BeNil())
-			} else {
-				g.Expect(err).To(BeNil())
-				g.Expect(version).To(Equal(tt.expectedVersion))
-			}
-		})
-	}
+			fakeClient := fake.NewClientBuilder().WithObjects(s.Current.Cluster).Build()
+
+			r := &Reconciler{
+				Client:        fakeClient,
+				APIReader:     fakeClient,
+				RuntimeClient: runtimeClient,
+			}
+
+			desiredVersion, err := r.computeControlPlaneVersion(ctx, s)
+			g := NewWithT(t)
+			g.Expect(err).To(BeNil())
+			// When successfully picking up the new version, the AfterControlPlaneUpgrade and AfterClusterUpgrade hooks should be marked.
+			g.Expect(desiredVersion).To(Equal("v1.2.3"))
+			g.Expect(hooks.IsPending(runtimehooksv1.AfterControlPlaneUpgrade, s.Current.Cluster)).To(BeTrue())
+			g.Expect(hooks.IsPending(runtimehooksv1.AfterClusterUpgrade, s.Current.Cluster)).To(BeTrue())
+	})
 }
 
 func TestComputeCluster(t *testing.T) {
diff --git a/internal/controllers/topology/cluster/reconcile_state.go b/internal/controllers/topology/cluster/reconcile_state.go
index e5c3384788f8..0cc0b0a5a370 100644
--- a/internal/controllers/topology/cluster/reconcile_state.go
+++ b/internal/controllers/topology/cluster/reconcile_state.go
@@ -32,10 +32,14 @@ import (
 
 	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
 	"sigs.k8s.io/cluster-api/controllers/external"
+	runtimehooksv1 "sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1"
+	"sigs.k8s.io/cluster-api/feature"
 	"sigs.k8s.io/cluster-api/internal/contract"
 	"sigs.k8s.io/cluster-api/internal/controllers/topology/cluster/scope"
 	"sigs.k8s.io/cluster-api/internal/controllers/topology/cluster/structuredmerge"
+	"sigs.k8s.io/cluster-api/internal/hooks"
 	tlog "sigs.k8s.io/cluster-api/internal/log"
+	runtimecatalog "sigs.k8s.io/cluster-api/internal/runtime/catalog"
 	"sigs.k8s.io/cluster-api/internal/topology/check"
 )
 
@@ -60,6 +64,12 @@ func (r *Reconciler) reconcileState(ctx context.Context, s *scope.Scope) error {
 		return err
 	}
 
+	if feature.Gates.Enabled(feature.RuntimeSDK) {
+		if err := r.callAfterHooks(ctx, s); err != nil {
+			return err
+		}
+	}
+
 	// Reconcile desired state of the InfrastructureCluster object.
 	if err := r.reconcileInfrastructureCluster(ctx, s); err != nil {
 		return err
@@ -179,6 +189,112 @@ func getOwnerReferenceFrom(obj, owner client.Object) *metav1.OwnerReference {
 	return nil
 }
 
+func (r *Reconciler) callAfterHooks(ctx context.Context, s *scope.Scope) error {
+	if err := r.callAfterControlPlaneInitialized(ctx, s); err != nil {
+		return err
+	}
+
+	if err := r.callAfterClusterUpgrade(ctx, s); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (r *Reconciler) callAfterControlPlaneInitialized(ctx context.Context, s *scope.Scope) error {
+	/*
+		TODO: Working comment - DELETE LATER:
+		- If the cluster topology is being created then mark the AfterControlPlaneInitialized hook so that we can call it later.
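+		- The hook is then called (and unmarked) once the ControlPlaneInitialized condition on the Cluster is true.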
+	*/
+	if s.Current.Cluster.Spec.InfrastructureRef == nil && s.Current.Cluster.Spec.ControlPlaneRef == nil {
+		if err := hooks.MarkAsPending(ctx, r.Client, s.Current.Cluster, runtimehooksv1.AfterControlPlaneInitialized); err != nil {
+			return errors.Wrapf(err, "failed to mark %s hook", runtimecatalog.HookName(runtimehooksv1.AfterControlPlaneInitialized))
+		}
+	}
+
+	if hooks.IsPending(runtimehooksv1.AfterControlPlaneInitialized, s.Current.Cluster) {
+		if isControlPlaneInitialized(s.Current.Cluster) {
+			hookRequest := &runtimehooksv1.AfterControlPlaneInitializedRequest{
+				Cluster: *s.Current.Cluster,
+			}
+			hookResponse := &runtimehooksv1.AfterControlPlaneInitializedResponse{}
+			if err := r.RuntimeClient.CallAllExtensions(ctx, runtimehooksv1.AfterControlPlaneInitialized, s.Current.Cluster, hookRequest, hookResponse); err != nil {
+				return errors.Wrapf(err, "failed to call %s hook", runtimecatalog.HookName(runtimehooksv1.AfterControlPlaneInitialized))
+			}
+			s.HookResponseTracker.Add(runtimehooksv1.AfterControlPlaneInitialized, hookResponse)
+			if err := hooks.MarkAsDone(ctx, r.Client, s.Current.Cluster, runtimehooksv1.AfterControlPlaneInitialized); err != nil {
+				return errors.Wrapf(err, "failed to unmark %s hook", runtimecatalog.HookName(runtimehooksv1.AfterControlPlaneInitialized))
+			}
+		}
+	}
+
+	return nil
+}
+
+func isControlPlaneInitialized(cluster *clusterv1.Cluster) bool {
+	for _, condition := range cluster.GetConditions() {
+		// TODO: Should we check for the ControlPlaneInitialized condition or the ControlPlaneReadyCondition?
+		// From the description of the hook it looks like it should be the ControlPlaneReadyCondition - but need to double check.
+		if condition.Type == clusterv1.ControlPlaneInitializedCondition {
+			if condition.Status == corev1.ConditionTrue {
+				return true
+			}
+		}
+	}
+	return false
+}
+
+func (r *Reconciler) callAfterClusterUpgrade(ctx context.Context, s *scope.Scope) error {
+	/*
+		TODO: Working comment - DELETE LATER:
+		- If the AfterClusterUpgrade hook is pending then check that the cluster is fully upgraded. If it is fully upgraded then call the hook.
+		- A cluster is fully upgraded if
+		  - Control plane is not upgrading
+		  - Control plane is not scaling
+		  - Control plane is not pending an upgrade
+		  - MachineDeployments are not currently rolling out
+		  - MachineDeployments are not about to roll out
+		  - MachineDeployments are not pending an upgrade
+	*/
+	if hooks.IsPending(runtimehooksv1.AfterClusterUpgrade, s.Current.Cluster) {
+		cpUpgrading, err := contract.ControlPlane().IsUpgrading(s.Current.ControlPlane.Object)
+		if err != nil {
+			return errors.Wrap(err, "failed to check if control plane is upgrading")
+		}
+
+		var cpScaling bool
+		if s.Blueprint.Topology.ControlPlane.Replicas != nil {
+			cpScaling, err = contract.ControlPlane().IsScaling(s.Current.ControlPlane.Object)
+			if err != nil {
+				return errors.Wrap(err, "failed to check if the control plane is scaling")
+			}
+		}
+
+		if !cpUpgrading && !cpScaling && !s.UpgradeTracker.ControlPlane.PendingUpgrade && // Control plane checks
+			len(s.UpgradeTracker.MachineDeployments.RolloutNames()) == 0 && // Machine deployments are not rolling out or about to roll out
+			!s.UpgradeTracker.MachineDeployments.PendingUpgrade() { // Machine deployments are not pending an upgrade
+			// Everything is stable and the cluster can be considered fully upgraded.
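+			// Note: unlike the AfterControlPlaneUpgrade response, the AfterClusterUpgrade response
+			// carries no RetryAfterSeconds, so a successful call simply results in the hook being unmarked.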
+ hookRequest := &runtimehooksv1.AfterClusterUpgradeRequest{ + Cluster: *s.Current.Cluster, + KubernetesVersion: s.Current.Cluster.Spec.Topology.Version, + } + hookResponse := &runtimehooksv1.AfterClusterUpgradeResponse{} + if err := r.RuntimeClient.CallAllExtensions(ctx, runtimehooksv1.AfterClusterUpgrade, s.Current.Cluster, hookRequest, hookResponse); err != nil { + return errors.Wrapf(err, "failed to call %s hook", runtimecatalog.HookName(runtimehooksv1.AfterClusterUpgrade)) + } + s.HookResponseTracker.Add(runtimehooksv1.AfterClusterUpgrade, hookResponse) + // The hook is successfully called. We can unmark the hook. + // TODO: follow up check - what if the cluster object in current is not updated with the latest tracking annotation. + // Is that possible? + if err := hooks.MarkAsDone(ctx, r.Client, s.Current.Cluster, runtimehooksv1.AfterClusterUpgrade); err != nil { + return errors.Wrapf(err, "failed to unmark the %s hook", runtimecatalog.HookName(runtimehooksv1.AfterClusterUpgrade)) + } + } + } + + return nil +} + // reconcileInfrastructureCluster reconciles the desired state of the InfrastructureCluster object. func (r *Reconciler) reconcileInfrastructureCluster(ctx context.Context, s *scope.Scope) error { ctx, _ = tlog.LoggerFrom(ctx).WithObject(s.Desired.InfrastructureCluster).Into(ctx) diff --git a/internal/controllers/topology/cluster/reconcile_state_test.go b/internal/controllers/topology/cluster/reconcile_state_test.go index 6c14652fb496..cb8a2d87d7bc 100644 --- a/internal/controllers/topology/cluster/reconcile_state_test.go +++ b/internal/controllers/topology/cluster/reconcile_state_test.go @@ -31,12 +31,18 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/utils/pointer" "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + runtimehooksv1 "sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1" "sigs.k8s.io/cluster-api/internal/contract" "sigs.k8s.io/cluster-api/internal/controllers/topology/cluster/scope" "sigs.k8s.io/cluster-api/internal/controllers/topology/cluster/structuredmerge" + "sigs.k8s.io/cluster-api/internal/hooks" + runtimecatalog "sigs.k8s.io/cluster-api/internal/runtime/catalog" + fakeruntimeclient "sigs.k8s.io/cluster-api/internal/runtime/client/fake" "sigs.k8s.io/cluster-api/internal/test/builder" . 
"sigs.k8s.io/cluster-api/internal/test/matchers" ) @@ -269,6 +275,583 @@ func TestReconcileShim(t *testing.T) { }) } +func TestReconcile_callAfterControlPlaneInitialized(t *testing.T) { + catalog := runtimecatalog.New() + _ = runtimehooksv1.AddToCatalog(catalog) + + afterControlPlaneInitializedGVH, err := catalog.GroupVersionHook(runtimehooksv1.AfterControlPlaneInitialized) + if err != nil { + panic(err) + } + + successResponse := &runtimehooksv1.AfterControlPlaneInitializedResponse{ + + CommonResponse: runtimehooksv1.CommonResponse{ + Status: runtimehooksv1.ResponseStatusSuccess, + }, + } + failureResponse := &runtimehooksv1.AfterControlPlaneInitializedResponse{ + CommonResponse: runtimehooksv1.CommonResponse{ + Status: runtimehooksv1.ResponseStatusFailure, + }, + } + + tests := []struct { + name string + cluster *clusterv1.Cluster + hookResponse *runtimehooksv1.AfterControlPlaneInitializedResponse + wantMarked bool + wantHookToBeCalled bool + wantError bool + }{ + { + name: "hook should be marked if the cluster is about to be created", + cluster: &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster", + Namespace: "test-ns", + }, + Spec: clusterv1.ClusterSpec{}, + }, + hookResponse: successResponse, + wantMarked: true, + wantHookToBeCalled: false, + wantError: false, + }, + { + name: "hook should be called if it is marked and the control plane is ready - the hook should become unmarked for a success response", + cluster: &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster", + Namespace: "test-ns", + Annotations: map[string]string{ + runtimehooksv1.PendingHooksAnnotation: "AfterControlPlaneInitialized", + }, + }, + Spec: clusterv1.ClusterSpec{ + ControlPlaneRef: &corev1.ObjectReference{}, + InfrastructureRef: &corev1.ObjectReference{}, + }, + Status: clusterv1.ClusterStatus{ + Conditions: clusterv1.Conditions{ + clusterv1.Condition{ + Type: clusterv1.ControlPlaneInitializedCondition, + Status: corev1.ConditionTrue, + }, + }, + }, + }, + hookResponse: successResponse, + wantMarked: false, + wantHookToBeCalled: true, + wantError: false, + }, + { + name: "hook should be called if it is marked and the control plane is ready - the hook should remain marked for a failure response", + cluster: &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster", + Namespace: "test-ns", + Annotations: map[string]string{ + runtimehooksv1.PendingHooksAnnotation: "AfterControlPlaneInitialized", + }, + }, + Spec: clusterv1.ClusterSpec{ + ControlPlaneRef: &corev1.ObjectReference{}, + InfrastructureRef: &corev1.ObjectReference{}, + }, + Status: clusterv1.ClusterStatus{ + Conditions: clusterv1.Conditions{ + clusterv1.Condition{ + Type: clusterv1.ControlPlaneInitializedCondition, + Status: corev1.ConditionTrue, + }, + }, + }, + }, + hookResponse: failureResponse, + wantMarked: true, + wantHookToBeCalled: true, + wantError: true, + }, + { + name: "hook should not be called if it is marked and the control plane is not ready - the hook should remain marked", + cluster: &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster", + Namespace: "test-ns", + Annotations: map[string]string{ + runtimehooksv1.PendingHooksAnnotation: "AfterControlPlaneInitialized", + }, + }, + Spec: clusterv1.ClusterSpec{ + ControlPlaneRef: &corev1.ObjectReference{}, + InfrastructureRef: &corev1.ObjectReference{}, + }, + Status: clusterv1.ClusterStatus{ + Conditions: clusterv1.Conditions{ + clusterv1.Condition{ + Type: clusterv1.ControlPlaneInitializedCondition, + 
Status: corev1.ConditionFalse, + }, + }, + }, + }, + hookResponse: failureResponse, + wantMarked: true, + wantHookToBeCalled: false, + wantError: false, + }, + { + name: "hook should not be called if it is not marked", + cluster: &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster", + Namespace: "test-ns", + }, + Spec: clusterv1.ClusterSpec{ + ControlPlaneRef: &corev1.ObjectReference{}, + InfrastructureRef: &corev1.ObjectReference{}, + }, + Status: clusterv1.ClusterStatus{ + Conditions: clusterv1.Conditions{ + clusterv1.Condition{ + Type: clusterv1.ControlPlaneInitializedCondition, + Status: corev1.ConditionTrue, + }, + }, + }, + }, + hookResponse: failureResponse, + wantMarked: false, + wantHookToBeCalled: false, + wantError: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + + s := &scope.Scope{ + Current: &scope.ClusterState{ + Cluster: tt.cluster, + }, + HookResponseTracker: scope.NewHookResponseTracker(), + } + + fakeRuntimeClient := fakeruntimeclient.NewRuntimeClientBuilder(). + WithCallAllExtensionResponses(map[runtimecatalog.GroupVersionHook]runtimehooksv1.ResponseObject{ + afterControlPlaneInitializedGVH: tt.hookResponse, + }). + WithCatalog(catalog). + Build() + + fakeClient := fake.NewClientBuilder().WithObjects(tt.cluster).Build() + + r := &Reconciler{ + Client: fakeClient, + APIReader: fakeClient, + RuntimeClient: fakeRuntimeClient, + } + + err := r.callAfterControlPlaneInitialized(ctx, s) + g.Expect(fakeRuntimeClient.CallAllCount(runtimehooksv1.AfterControlPlaneInitialized) == 1).To(Equal(tt.wantHookToBeCalled)) + g.Expect(hooks.IsPending(runtimehooksv1.AfterControlPlaneInitialized, tt.cluster)).To(Equal(tt.wantMarked)) + g.Expect(err != nil).To(Equal(tt.wantError)) + }) + } +} + +func TestReconcile_callAfterClusterUpgrade(t *testing.T) { + catalog := runtimecatalog.New() + _ = runtimehooksv1.AddToCatalog(catalog) + + afterClusterUpgradeGVH, err := catalog.GroupVersionHook(runtimehooksv1.AfterClusterUpgrade) + if err != nil { + panic(err) + } + + successResponse := &runtimehooksv1.AfterClusterUpgradeResponse{ + + CommonResponse: runtimehooksv1.CommonResponse{ + Status: runtimehooksv1.ResponseStatusSuccess, + }, + } + failureResponse := &runtimehooksv1.AfterClusterUpgradeResponse{ + CommonResponse: runtimehooksv1.CommonResponse{ + Status: runtimehooksv1.ResponseStatusFailure, + }, + } + + topologyVersion := "v1.2.3" + lowerVersion := "v1.2.2" + controlPlaneStableAtTopologyVersion := builder.ControlPlane("test1", "cp1"). + WithSpecFields(map[string]interface{}{ + "spec.version": topologyVersion, + "spec.replicas": int64(2), + }). + WithStatusFields(map[string]interface{}{ + "status.version": topologyVersion, + "status.replicas": int64(2), + "status.updatedReplicas": int64(2), + "status.readyReplicas": int64(2), + }). + Build() + controlPlaneStableAtLowerVersion := builder.ControlPlane("test1", "cp1"). + WithSpecFields(map[string]interface{}{ + "spec.version": lowerVersion, + "spec.replicas": int64(2), + }). + WithStatusFields(map[string]interface{}{ + "status.version": lowerVersion, + "status.replicas": int64(2), + "status.updatedReplicas": int64(2), + "status.readyReplicas": int64(2), + }). + Build() + controlPlaneUpgrading := builder.ControlPlane("test1", "cp1"). + WithSpecFields(map[string]interface{}{ + "spec.version": topologyVersion, + "spec.replicas": int64(2), + }). 
+ WithStatusFields(map[string]interface{}{ + "status.version": lowerVersion, + "status.replicas": int64(2), + "status.updatedReplicas": int64(2), + "status.readyReplicas": int64(2), + }). + Build() + controlPlaneScaling := builder.ControlPlane("test1", "cp1"). + WithSpecFields(map[string]interface{}{ + "spec.version": topologyVersion, + "spec.replicas": int64(2), + }). + WithStatusFields(map[string]interface{}{ + "status.version": topologyVersion, + "status.replicas": int64(1), + "status.updatedReplicas": int64(1), + "status.readyReplicas": int64(1), + }). + Build() + + tests := []struct { + name string + s *scope.Scope + hookResponse *runtimehooksv1.AfterClusterUpgradeResponse + wantMarked bool + wantHookToBeCalled bool + wantError bool + }{ + { + name: "hook should not be called if it is not marked", + s: &scope.Scope{ + Blueprint: &scope.ClusterBlueprint{ + Topology: &clusterv1.Topology{ + ControlPlane: clusterv1.ControlPlaneTopology{ + Replicas: pointer.Int32(2), + }, + }, + }, + Current: &scope.ClusterState{ + Cluster: &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster", + Namespace: "test-ns", + }, + Spec: clusterv1.ClusterSpec{}, + }, + }, + HookResponseTracker: scope.NewHookResponseTracker(), + UpgradeTracker: scope.NewUpgradeTracker(), + }, + wantMarked: false, + hookResponse: successResponse, + wantHookToBeCalled: false, + wantError: false, + }, + { + name: "hook should not be called if the control plane is upgrading - hook is marked", + s: &scope.Scope{ + Blueprint: &scope.ClusterBlueprint{ + Topology: &clusterv1.Topology{ + ControlPlane: clusterv1.ControlPlaneTopology{ + Replicas: pointer.Int32(2), + }, + }, + }, + Current: &scope.ClusterState{ + Cluster: &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster", + Namespace: "test-ns", + Annotations: map[string]string{ + runtimehooksv1.PendingHooksAnnotation: "AfterClusterUpgrade", + }, + }, + Spec: clusterv1.ClusterSpec{}, + }, + ControlPlane: &scope.ControlPlaneState{ + Object: controlPlaneUpgrading, + }, + }, + HookResponseTracker: scope.NewHookResponseTracker(), + UpgradeTracker: scope.NewUpgradeTracker(), + }, + wantMarked: true, + hookResponse: successResponse, + wantHookToBeCalled: false, + wantError: false, + }, + { + name: "hook should not be called if the control plane is scaling - hook is marked", + s: &scope.Scope{ + Blueprint: &scope.ClusterBlueprint{ + Topology: &clusterv1.Topology{ + ControlPlane: clusterv1.ControlPlaneTopology{ + Replicas: pointer.Int32(2), + }, + }, + }, + Current: &scope.ClusterState{ + Cluster: &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster", + Namespace: "test-ns", + Annotations: map[string]string{ + runtimehooksv1.PendingHooksAnnotation: "AfterClusterUpgrade", + }, + }, + Spec: clusterv1.ClusterSpec{}, + }, + ControlPlane: &scope.ControlPlaneState{ + Object: controlPlaneScaling, + }, + }, + HookResponseTracker: scope.NewHookResponseTracker(), + UpgradeTracker: scope.NewUpgradeTracker(), + }, + wantMarked: true, + hookResponse: successResponse, + wantHookToBeCalled: false, + wantError: false, + }, + { + name: "hook should not be called if the control plane is stable at a lower version and is pending an upgrade - hook is marked", + s: &scope.Scope{ + Blueprint: &scope.ClusterBlueprint{ + Topology: &clusterv1.Topology{ + ControlPlane: clusterv1.ControlPlaneTopology{ + Replicas: pointer.Int32(2), + }, + }, + }, + Current: &scope.ClusterState{ + Cluster: &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: 
"test-cluster", + Namespace: "test-ns", + Annotations: map[string]string{ + runtimehooksv1.PendingHooksAnnotation: "AfterClusterUpgrade", + }, + }, + Spec: clusterv1.ClusterSpec{}, + }, + ControlPlane: &scope.ControlPlaneState{ + Object: controlPlaneStableAtLowerVersion, + }, + }, + HookResponseTracker: scope.NewHookResponseTracker(), + UpgradeTracker: func() *scope.UpgradeTracker { + ut := scope.NewUpgradeTracker() + ut.ControlPlane.PendingUpgrade = true + return ut + }(), + }, + wantMarked: true, + hookResponse: successResponse, + wantHookToBeCalled: false, + wantError: false, + }, + { + name: "hook should not be called if the control plane is stable at desired version but MDs are rolling out - hook is marked", + s: &scope.Scope{ + Blueprint: &scope.ClusterBlueprint{ + Topology: &clusterv1.Topology{ + ControlPlane: clusterv1.ControlPlaneTopology{ + Replicas: pointer.Int32(2), + }, + }, + }, + Current: &scope.ClusterState{ + Cluster: &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster", + Namespace: "test-ns", + Annotations: map[string]string{ + runtimehooksv1.PendingHooksAnnotation: "AfterClusterUpgrade", + }, + }, + Spec: clusterv1.ClusterSpec{}, + }, + ControlPlane: &scope.ControlPlaneState{ + Object: controlPlaneStableAtTopologyVersion, + }, + }, + HookResponseTracker: scope.NewHookResponseTracker(), + UpgradeTracker: func() *scope.UpgradeTracker { + ut := scope.NewUpgradeTracker() + ut.ControlPlane.PendingUpgrade = false + ut.MachineDeployments.MarkRollingOut("md1") + return ut + }(), + }, + wantMarked: true, + hookResponse: successResponse, + wantHookToBeCalled: false, + wantError: false, + }, + { + name: "hook should not be called if the control plane is stable at desired version but MDs are pending upgrade - hook is marked", + s: &scope.Scope{ + Blueprint: &scope.ClusterBlueprint{ + Topology: &clusterv1.Topology{ + ControlPlane: clusterv1.ControlPlaneTopology{ + Replicas: pointer.Int32(2), + }, + }, + }, + Current: &scope.ClusterState{ + Cluster: &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster", + Namespace: "test-ns", + Annotations: map[string]string{ + runtimehooksv1.PendingHooksAnnotation: "AfterClusterUpgrade", + }, + }, + Spec: clusterv1.ClusterSpec{}, + }, + ControlPlane: &scope.ControlPlaneState{ + Object: controlPlaneStableAtTopologyVersion, + }, + }, + HookResponseTracker: scope.NewHookResponseTracker(), + UpgradeTracker: func() *scope.UpgradeTracker { + ut := scope.NewUpgradeTracker() + ut.ControlPlane.PendingUpgrade = false + ut.MachineDeployments.MarkPendingUpgrade("md1") + return ut + }(), + }, + wantMarked: true, + hookResponse: successResponse, + wantHookToBeCalled: false, + wantError: false, + }, + { + name: "hook should be called if the control plane and MDs are stable at the topology version - success response should unmark the hook", + s: &scope.Scope{ + Blueprint: &scope.ClusterBlueprint{ + Topology: &clusterv1.Topology{ + ControlPlane: clusterv1.ControlPlaneTopology{ + Replicas: pointer.Int32(2), + }, + }, + }, + Current: &scope.ClusterState{ + Cluster: &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster", + Namespace: "test-ns", + Annotations: map[string]string{ + runtimehooksv1.PendingHooksAnnotation: "AfterClusterUpgrade", + }, + }, + Spec: clusterv1.ClusterSpec{ + Topology: &clusterv1.Topology{ + Version: topologyVersion, + }, + }, + }, + ControlPlane: &scope.ControlPlaneState{ + Object: controlPlaneStableAtTopologyVersion, + }, + }, + HookResponseTracker: 
scope.NewHookResponseTracker(),
+				UpgradeTracker:      scope.NewUpgradeTracker(),
+			},
+			wantMarked:         false,
+			hookResponse:       successResponse,
+			wantHookToBeCalled: true,
+			wantError:          false,
+		},
+		{
+			name: "hook should be called if the control plane and MDs are stable at the topology version - failure response should leave the hook marked",
+			s: &scope.Scope{
+				Blueprint: &scope.ClusterBlueprint{
+					Topology: &clusterv1.Topology{
+						ControlPlane: clusterv1.ControlPlaneTopology{
+							Replicas: pointer.Int32(2),
+						},
+					},
+				},
+				Current: &scope.ClusterState{
+					Cluster: &clusterv1.Cluster{
+						ObjectMeta: metav1.ObjectMeta{
+							Name:      "test-cluster",
+							Namespace: "test-ns",
+							Annotations: map[string]string{
+								runtimehooksv1.PendingHooksAnnotation: "AfterClusterUpgrade",
+							},
+						},
+						Spec: clusterv1.ClusterSpec{
+							Topology: &clusterv1.Topology{
+								Version: topologyVersion,
+							},
+						},
+					},
+					ControlPlane: &scope.ControlPlaneState{
+						Object: controlPlaneStableAtTopologyVersion,
+					},
+				},
+				HookResponseTracker: scope.NewHookResponseTracker(),
+				UpgradeTracker:      scope.NewUpgradeTracker(),
+			},
+			wantMarked:         true,
+			hookResponse:       failureResponse,
+			wantHookToBeCalled: true,
+			wantError:          true,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			g := NewWithT(t)
+
+			fakeRuntimeClient := fakeruntimeclient.NewRuntimeClientBuilder().
+				WithCallAllExtensionResponses(map[runtimecatalog.GroupVersionHook]runtimehooksv1.ResponseObject{
+					afterClusterUpgradeGVH: tt.hookResponse,
+				}).
+				WithCatalog(catalog).
+				Build()
+
+			fakeClient := fake.NewClientBuilder().WithObjects(tt.s.Current.Cluster).Build()
+
+			r := &Reconciler{
+				Client:        fakeClient,
+				APIReader:     fakeClient,
+				RuntimeClient: fakeRuntimeClient,
+			}
+
+			err := r.callAfterClusterUpgrade(ctx, tt.s)
+			g.Expect(fakeRuntimeClient.CallAllCount(runtimehooksv1.AfterClusterUpgrade) == 1).To(Equal(tt.wantHookToBeCalled))
+			g.Expect(hooks.IsPending(runtimehooksv1.AfterClusterUpgrade, tt.s.Current.Cluster)).To(Equal(tt.wantMarked))
+			g.Expect(err != nil).To(Equal(tt.wantError))
+		})
+	}
+}
+
 func TestReconcileCluster(t *testing.T) {
 	cluster1 := builder.Cluster(metav1.NamespaceDefault, "cluster1").
 		Build()
diff --git a/internal/controllers/topology/cluster/scope/upgradetracker.go b/internal/controllers/topology/cluster/scope/upgradetracker.go
index ca9f82613118..f3c52413dd9b 100644
--- a/internal/controllers/topology/cluster/scope/upgradetracker.go
+++ b/internal/controllers/topology/cluster/scope/upgradetracker.go
@@ -48,6 +48,7 @@ type ControlPlaneUpgradeTracker struct {
 type MachineDeploymentUpgradeTracker struct {
 	pendingNames    sets.String
 	rollingOutNames sets.String
+	holdUpgrades    bool
 }
 
 // NewUpgradeTracker returns an upgrade tracker with empty tracking information.
@@ -77,12 +78,21 @@ func (m *MachineDeploymentUpgradeTracker) RolloutNames() []string {
 	return m.rollingOutNames.List()
 }
 
+// HoldUpgrades sets whether subsequent upgrade operations should be blocked.
+// If HoldUpgrades is called with `true`, AllowUpgrade returns false.
+func (m *MachineDeploymentUpgradeTracker) HoldUpgrades(val bool) {
+	m.holdUpgrades = val
+}
+
 // AllowUpgrade returns true if a MachineDeployment is allowed to upgrade,
 // returns false otherwise.
 // Note: If AllowUpgrade returns true the machine deployment will pick up
 // the topology version. This will eventually trigger a machine deployment
 // rollout.
 func (m *MachineDeploymentUpgradeTracker) AllowUpgrade() bool {
+	if m.holdUpgrades {
+		return false
+	}
 	return m.rollingOutNames.Len() < maxMachineDeploymentUpgradeConcurrency
 }
diff --git a/internal/hooks/tracking.go b/internal/hooks/tracking.go
new file mode 100644
index 000000000000..49317a35e35a
--- /dev/null
+++ b/internal/hooks/tracking.go
@@ -0,0 +1,113 @@
+/*
+Copyright 2021 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package hooks provides helper functions for Runtime Hooks.
+package hooks
+
+import (
+	"context"
+	"strings"
+
+	"github.com/pkg/errors"
+	"k8s.io/apimachinery/pkg/util/sets"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+
+	runtimehooksv1 "sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1"
+	runtimecatalog "sigs.k8s.io/cluster-api/internal/runtime/catalog"
+	"sigs.k8s.io/cluster-api/util/patch"
+)
+
+// MarkAsPending adds the given hooks to the list of pending hooks tracked
+// in the PendingHooksAnnotation of the object.
+func MarkAsPending(ctx context.Context, c client.Client, obj client.Object, hooks ...runtimecatalog.Hook) error {
+	patchHelper, err := patch.NewHelper(obj, c)
+	if err != nil {
+		return errors.Wrap(err, "failed to create patch helper")
+	}
+
+	// Read the annotation of the object and add the hooks to the comma-separated list.
+	hookNames := []string{}
+	for _, hook := range hooks {
+		hookNames = append(hookNames, runtimecatalog.HookName(hook))
+	}
+	annotations := obj.GetAnnotations()
+	if annotations == nil {
+		annotations = map[string]string{}
+	}
+	annotations[runtimehooksv1.PendingHooksAnnotation] = addToCommaSeparatedList(annotations[runtimehooksv1.PendingHooksAnnotation], hookNames...)
+	obj.SetAnnotations(annotations)
+
+	if err := patchHelper.Patch(ctx, obj); err != nil {
+		return errors.Wrap(err, "failed to apply patch")
+	}
+
+	return nil
+}
+
+// IsPending returns true if the hook is marked as pending on the object.
+func IsPending(hook runtimecatalog.Hook, obj client.Object) bool {
+	hookName := runtimecatalog.HookName(hook)
+	annotations := obj.GetAnnotations()
+	if annotations == nil {
+		return false
+	}
+	return isInCommaSeparatedList(annotations[runtimehooksv1.PendingHooksAnnotation], hookName)
+}
+
+// MarkAsDone removes the given hooks from the list of pending hooks tracked
+// in the PendingHooksAnnotation of the object.
+func MarkAsDone(ctx context.Context, c client.Client, obj client.Object, hooks ...runtimecatalog.Hook) error {
+	patchHelper, err := patch.NewHelper(obj, c)
+	if err != nil {
+		return errors.Wrap(err, "failed to create patch helper")
+	}
+
+	// Read the annotation of the object and remove the hooks from the comma-separated list.
+	hookNames := []string{}
+	for _, hook := range hooks {
+		hookNames = append(hookNames, runtimecatalog.HookName(hook))
+	}
+	annotations := obj.GetAnnotations()
+	if annotations == nil {
+		annotations = map[string]string{}
+	}
+	annotations[runtimehooksv1.PendingHooksAnnotation] = removeFromCommaSeparatedList(annotations[runtimehooksv1.PendingHooksAnnotation], hookNames...)
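+	// If the list is now empty, drop the annotation from the object entirely.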
+	if annotations[runtimehooksv1.PendingHooksAnnotation] == "" {
+		delete(annotations, runtimehooksv1.PendingHooksAnnotation)
+	}
+	obj.SetAnnotations(annotations)
+
+	if err := patchHelper.Patch(ctx, obj); err != nil {
+		return errors.Wrap(err, "failed to apply patch")
+	}
+
+	return nil
+}
+
+func addToCommaSeparatedList(list string, items ...string) string {
+	set := sets.NewString(strings.Split(list, ",")...)
+	// Drop the empty string that strings.Split returns for an empty list,
+	// so the annotation value never starts with a stray comma.
+	set.Delete("")
+	set.Insert(items...)
+	return strings.Join(set.List(), ",")
+}
+
+func isInCommaSeparatedList(list, item string) bool {
+	set := sets.NewString(strings.Split(list, ",")...)
+	return set.Has(item)
+}
+
+func removeFromCommaSeparatedList(list string, items ...string) string {
+	set := sets.NewString(strings.Split(list, ",")...)
+	set.Delete(items...)
+	return strings.Join(set.List(), ",")
+}
diff --git a/internal/hooks/tracking_test.go b/internal/hooks/tracking_test.go
new file mode 100644
index 000000000000..95bcaa236b80
--- /dev/null
+++ b/internal/hooks/tracking_test.go
@@ -0,0 +1,213 @@
+/*
+Copyright 2021 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package hooks
+
+import (
+	"context"
+	"testing"
+
+	. "github.com/onsi/gomega"
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/client/fake"
+
+	runtimehooksv1 "sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1"
+	runtimecatalog "sigs.k8s.io/cluster-api/internal/runtime/catalog"
+)
+
+func TestIsMarked(t *testing.T) {
+	tests := []struct {
+		name string
+		obj  client.Object
+		hook runtimecatalog.Hook
+		want bool
+	}{
+		{
+			name: "should return true if the hook is marked",
+			obj: &corev1.ConfigMap{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:      "test-cluster",
+					Namespace: "test-ns",
+					Annotations: map[string]string{
+						runtimehooksv1.PendingHooksAnnotation: "AfterClusterUpgrade",
+					},
+				},
+			},
+			hook: runtimehooksv1.AfterClusterUpgrade,
+			want: true,
+		},
+		{
+			name: "should return true if the hook is marked - other hooks are marked too",
+			obj: &corev1.ConfigMap{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:      "test-cluster",
+					Namespace: "test-ns",
+					Annotations: map[string]string{
+						runtimehooksv1.PendingHooksAnnotation: "AfterClusterUpgrade,AfterControlPlaneUpgrade",
+					},
+				},
+			},
+			hook: runtimehooksv1.AfterClusterUpgrade,
+			want: true,
+		},
+		{
+			name: "should return false if the hook is not marked",
+			obj: &corev1.ConfigMap{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:      "test-cluster",
+					Namespace: "test-ns",
+				},
+			},
+			hook: runtimehooksv1.AfterClusterUpgrade,
+			want: false,
+		},
+		{
+			name: "should return false if the hook is not marked - other hooks are marked",
+			obj: &corev1.ConfigMap{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:      "test-cluster",
+					Namespace: "test-ns",
+					Annotations: map[string]string{
+						runtimehooksv1.PendingHooksAnnotation: "AfterControlPlaneUpgrade",
+					},
+				},
+			},
+			hook: runtimehooksv1.AfterClusterUpgrade,
+			want: false,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t
*testing.T) { + g := NewWithT(t) + g.Expect(IsPending(tt.hook, tt.obj)).To(Equal(tt.want)) + }) + } +} + +func TestMark(t *testing.T) { + tests := []struct { + name string + obj client.Object + hook runtimecatalog.Hook + }{ + { + name: "should add the marker if not already present", + obj: &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster", + Namespace: "test-ns", + }, + }, + hook: runtimehooksv1.AfterClusterUpgrade, + }, + { + name: "should add the marker if not already present - other hooks are present", + obj: &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster", + Namespace: "test-ns", + Annotations: map[string]string{ + runtimehooksv1.PendingHooksAnnotation: "AfterControlPlaneUpgrade", + }, + }, + }, + hook: runtimehooksv1.AfterClusterUpgrade, + }, + { + name: "should pass if the marker is already present", + obj: &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster", + Namespace: "test-ns", + Annotations: map[string]string{ + runtimehooksv1.PendingHooksAnnotation: "AfterClusterUpgrade", + }, + }, + }, + hook: runtimehooksv1.AfterClusterUpgrade, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + fakeClient := fake.NewClientBuilder().WithObjects(tt.obj).Build() + ctx := context.Background() + g.Expect(MarkAsPending(ctx, fakeClient, tt.obj, tt.hook)).To(Succeed()) + annotations := tt.obj.GetAnnotations() + g.Expect(annotations[runtimehooksv1.PendingHooksAnnotation]).To(ContainSubstring(runtimecatalog.HookName(tt.hook))) + }) + } +} + +func TestUnmark(t *testing.T) { + tests := []struct { + name string + obj client.Object + hook runtimecatalog.Hook + }{ + { + name: "should pass if the marker is not already present", + obj: &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster", + Namespace: "test-ns", + }, + }, + hook: runtimehooksv1.AfterClusterUpgrade, + }, + { + name: "should remove if the marker is already present", + obj: &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster", + Namespace: "test-ns", + Annotations: map[string]string{ + runtimehooksv1.PendingHooksAnnotation: "AfterClusterUpgrade", + }, + }, + }, + hook: runtimehooksv1.AfterClusterUpgrade, + }, + { + name: "should remove if the marker is already present among multiple hooks", + obj: &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster", + Namespace: "test-ns", + Annotations: map[string]string{ + runtimehooksv1.PendingHooksAnnotation: "AfterClusterUpgrade,AfterControlPlaneUpgrade", + }, + }, + }, + hook: runtimehooksv1.AfterClusterUpgrade, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + fakeClient := fake.NewClientBuilder().WithObjects(tt.obj).Build() + ctx := context.Background() + g.Expect(MarkAsDone(ctx, fakeClient, tt.obj, tt.hook)).To(Succeed()) + annotations := tt.obj.GetAnnotations() + g.Expect(annotations[runtimehooksv1.PendingHooksAnnotation]).NotTo(ContainSubstring(runtimecatalog.HookName(tt.hook))) + }) + } +} diff --git a/internal/runtime/client/fake/fake_client.go b/internal/runtime/client/fake/fake_client.go index 509f76978d5d..2c0aec5ca01d 100644 --- a/internal/runtime/client/fake/fake_client.go +++ b/internal/runtime/client/fake/fake_client.go @@ -69,26 +69,34 @@ func (f *RuntimeClientBuilder) MarkReady(ready bool) *RuntimeClientBuilder { } // Build returns the fake runtime client. 
-func (f *RuntimeClientBuilder) Build() runtimeclient.Client {
-	return &runtimeClient{
+func (f *RuntimeClientBuilder) Build() *RuntimeClient {
+	return &RuntimeClient{
 		isReady:          f.ready,
 		callAllResponses: f.callAllResponses,
 		callResponses:    f.callResponses,
 		catalog:          f.catalog,
+		callAllTracker:   map[string]int{},
 	}
 }
 
-var _ runtimeclient.Client = &runtimeClient{}
+var _ runtimeclient.Client = &RuntimeClient{}
 
-type runtimeClient struct {
+// RuntimeClient is a fake implementation of runtimeclient.Client.
+type RuntimeClient struct {
 	isReady          bool
 	catalog          *runtimecatalog.Catalog
 	callAllResponses map[runtimecatalog.GroupVersionHook]runtimehooksv1.ResponseObject
 	callResponses    map[string]runtimehooksv1.ResponseObject
+
+	callAllTracker map[string]int
 }
 
 // CallAllExtensions implements Client.
-func (fc *runtimeClient) CallAllExtensions(ctx context.Context, hook runtimecatalog.Hook, forObject metav1.Object, request runtime.Object, response runtimehooksv1.ResponseObject) error {
+func (fc *RuntimeClient) CallAllExtensions(ctx context.Context, hook runtimecatalog.Hook, forObject metav1.Object, request runtime.Object, response runtimehooksv1.ResponseObject) error {
+	defer func() {
+		fc.callAllTracker[runtimecatalog.HookName(hook)]++
+	}()
+
 	gvh, err := fc.catalog.GroupVersionHook(hook)
 	if err != nil {
 		return errors.Wrap(err, "failed to compute GVH")
@@ -109,7 +117,7 @@ func (fc *runtimeClient) CallAllExtensions(ctx context.Context, hook runtimecata
 }
 
 // CallExtension implements Client.
-func (fc *runtimeClient) CallExtension(ctx context.Context, _ runtimecatalog.Hook, _ metav1.Object, name string, request runtime.Object, response runtimehooksv1.ResponseObject) error {
+func (fc *RuntimeClient) CallExtension(ctx context.Context, _ runtimecatalog.Hook, _ metav1.Object, name string, request runtime.Object, response runtimehooksv1.ResponseObject) error {
 	expectedResponse, ok := fc.callResponses[name]
 	if !ok {
 		// This should actually panic because an error here would mean a mistake in the test setup.
@@ -127,25 +135,31 @@ func (fc *runtimeClient) CallExtension(ctx context.Context, _ runtimecatalog.Hoo
 }
 
 // Discover implements Client.
-func (fc *runtimeClient) Discover(context.Context, *runtimev1.ExtensionConfig) (*runtimev1.ExtensionConfig, error) {
+func (fc *RuntimeClient) Discover(context.Context, *runtimev1.ExtensionConfig) (*runtimev1.ExtensionConfig, error) {
 	panic("unimplemented")
 }
 
 // IsReady implements Client.
-func (fc *runtimeClient) IsReady() bool {
+func (fc *RuntimeClient) IsReady() bool {
 	return fc.isReady
 }
 
 // Register implements Client.
-func (fc *runtimeClient) Register(extensionConfig *runtimev1.ExtensionConfig) error {
+func (fc *RuntimeClient) Register(extensionConfig *runtimev1.ExtensionConfig) error {
 	panic("unimplemented")
 }
 
 // Unregister implements Client.
-func (fc *runtimeClient) Unregister(extensionConfig *runtimev1.ExtensionConfig) error {
+func (fc *RuntimeClient) Unregister(extensionConfig *runtimev1.ExtensionConfig) error {
 	panic("unimplemented")
 }
 
-func (fc *runtimeClient) WarmUp(extensionConfigList *runtimev1.ExtensionConfigList) error {
+// WarmUp implements Client.
+func (fc *RuntimeClient) WarmUp(extensionConfigList *runtimev1.ExtensionConfigList) error {
 	panic("unimplemented")
 }
+
+// CallAllCount returns the number of times the given hook was called.
+func (fc *RuntimeClient) CallAllCount(hook runtimecatalog.Hook) int {
+	return fc.callAllTracker[runtimecatalog.HookName(hook)]
+}
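
To make the new internal/hooks API concrete for reviewers, here is a minimal usage sketch. It is not part of the patch above: the example function name is illustrative, and the ConfigMap stand-in mirrors the approach of tracking_test.go (in the controller, the tracked object is the Cluster). It walks a hook through the pending -> done lifecycle against a fake client.

package hooks_test

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/controller-runtime/pkg/client/fake"

	runtimehooksv1 "sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1"
	"sigs.k8s.io/cluster-api/internal/hooks"
)

// Example_pendingHookLifecycle marks a hook as pending, checks it, and unmarks it.
func Example_pendingHookLifecycle() {
	ctx := context.Background()

	// Any client.Object works; corev1 types are registered in the fake
	// client's default scheme, which keeps the sketch self-contained.
	obj := &corev1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{Name: "test-cluster", Namespace: "test-ns"},
	}
	c := fake.NewClientBuilder().WithObjects(obj).Build()

	// MarkAsPending appends the hook name to the comma-separated list stored
	// in the PendingHooksAnnotation and patches the object.
	if err := hooks.MarkAsPending(ctx, c, obj, runtimehooksv1.AfterClusterUpgrade); err != nil {
		panic(err)
	}
	fmt.Println(hooks.IsPending(runtimehooksv1.AfterClusterUpgrade, obj)) // true

	// MarkAsDone removes the hook from the list and drops the annotation
	// entirely once the list is empty.
	if err := hooks.MarkAsDone(ctx, c, obj, runtimehooksv1.AfterClusterUpgrade); err != nil {
		panic(err)
	}
	fmt.Println(hooks.IsPending(runtimehooksv1.AfterClusterUpgrade, obj)) // false
}

Keeping this state in an annotation on the object itself means a half-finished upgrade survives controller restarts: on the next reconcile, IsPending still reports the hook as due, and it is only unmarked after a successful (non-retry) response.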