diff --git a/api/v1beta1/machinedeployment_types.go b/api/v1beta1/machinedeployment_types.go index 73e612e577a5..1ada91dc42ec 100644 --- a/api/v1beta1/machinedeployment_types.go +++ b/api/v1beta1/machinedeployment_types.go @@ -54,8 +54,18 @@ const ( // proportions in case the deployment has surge replicas. MaxReplicasAnnotation = "machinedeployment.clusters.x-k8s.io/max-replicas" - // MachineDeploymentUniqueLabel is the label applied to Machines - // in a MachineDeployment containing the hash of the template. + // MachineDeploymentUniqueLabel is used to uniquely identify the Machines of a MachineSet. + // The MachineDeployment controller will set this label on a MachineSet when it is created. + // The label is also applied to the Machines of the MachineSet and used in the MachineSet selector. + // Note: For the lifetime of the MachineSet the label's value has to stay the same, otherwise the + // MachineSet selector would no longer match its Machines. + // Note: In previous Cluster API versions (< v1.4.0), the label value was the hash of the full machine template. + // With the introduction of in-place mutation the machine template of the MachineSet can change. + // Because of that it is impossible for the label's value to always be the hash of the full machine template. + // (Because the hash changes when the machine template changes). + // As a result, we use the hash of the machine template while ignoring all in-place mutable fields, i.e. the + // machine template with only fields that could trigger a rollout for the machine-template-hash, making it + // independent of the changes to any in-place mutable fields. 
MachineDeploymentUniqueLabel = "machine-template-hash" ) diff --git a/internal/controllers/machinedeployment/machinedeployment_controller.go b/internal/controllers/machinedeployment/machinedeployment_controller.go index 8d77fd5742c1..ca0358eb1014 100644 --- a/internal/controllers/machinedeployment/machinedeployment_controller.go +++ b/internal/controllers/machinedeployment/machinedeployment_controller.go @@ -37,6 +37,7 @@ import ( clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" "sigs.k8s.io/cluster-api/controllers/external" + "sigs.k8s.io/cluster-api/internal/util/ssa" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/annotations" "sigs.k8s.io/cluster-api/util/conditions" @@ -50,6 +51,10 @@ var ( machineDeploymentKind = clusterv1.GroupVersion.WithKind("MachineDeployment") ) +// machineDeploymentManagerName is the manager name used for Server-Side-Apply (SSA) operations +// in the MachineDeployment controller. +const machineDeploymentManagerName = "capi-machinedeployment" + // +kubebuilder:rbac:groups=core,resources=events,verbs=get;list;watch;create;patch // +kubebuilder:rbac:groups=core,resources=secrets,verbs=get;list;watch // +kubebuilder:rbac:groups=core,resources=nodes,verbs=get;list;watch;create;update;patch;delete @@ -248,6 +253,19 @@ func (r *Reconciler) reconcile(ctx context.Context, cluster *clusterv1.Cluster, } } + // Loop over all MachineSets and cleanup managed fields. + // We do this so that MachineSets that were created/patched before (< v1.4.0) the controller adopted + // Server-Side-Apply (SSA) can also work with SSA. Otherwise, fields would be co-owned by our "old" "manager" and + // "capi-machinedeployment" and then we would not be able to e.g. drop labels and annotations. + // Note: We are cleaning up managed fields for all MachineSets, so we're able to remove this code in a few + // Cluster API releases. If we do this only for selected MachineSets, we would have to keep this code forever. 
+ for idx := range msList { + machineSet := msList[idx] + if err := ssa.CleanUpManagedFieldsForSSAAdoption(ctx, machineSet, machineDeploymentManagerName, r.Client); err != nil { + return ctrl.Result{}, errors.Wrapf(err, "failed to clean up managedFields of MachineSet %s", klog.KObj(machineSet)) + } + } + if d.Spec.Paused { return ctrl.Result{}, r.sync(ctx, d, msList) } @@ -302,7 +320,7 @@ func (r *Reconciler) getMachineSetsForDeployment(ctx context.Context, d *cluster continue } - // Attempt to adopt machine if it meets previous conditions and it has no controller references. + // Attempt to adopt MachineSet if it meets previous conditions and it has no controller references. if metav1.GetControllerOf(ms) == nil { if err := r.adoptOrphan(ctx, d, ms); err != nil { log.Error(err, "Failed to adopt MachineSet into MachineDeployment") diff --git a/internal/controllers/machinedeployment/machinedeployment_controller_test.go b/internal/controllers/machinedeployment/machinedeployment_controller_test.go index 7acbb6c90e9d..351c6959fecc 100644 --- a/internal/controllers/machinedeployment/machinedeployment_controller_test.go +++ b/internal/controllers/machinedeployment/machinedeployment_controller_test.go @@ -18,6 +18,7 @@ package machinedeployment import ( "testing" + "time" . 
"github.com/onsi/gomega" corev1 "k8s.io/api/core/v1" @@ -31,6 +32,7 @@ import ( clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" "sigs.k8s.io/cluster-api/controllers/external" + "sigs.k8s.io/cluster-api/internal/util/ssa" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/conditions" ) @@ -140,15 +142,17 @@ func TestMachineDeploymentReconciler(t *testing.T) { } infraTmpl := &unstructured.Unstructured{ Object: map[string]interface{}{ + "kind": "GenericInfrastructureMachineTemplate", + "apiVersion": "infrastructure.cluster.x-k8s.io/v1beta1", + "metadata": map[string]interface{}{ + "name": "md-template", + "namespace": namespace.Name, + }, "spec": map[string]interface{}{ "template": infraResource, }, }, } - infraTmpl.SetKind("GenericInfrastructureMachineTemplate") - infraTmpl.SetAPIVersion("infrastructure.cluster.x-k8s.io/v1beta1") - infraTmpl.SetName("md-template") - infraTmpl.SetNamespace(namespace.Name) t.Log("Creating the infrastructure template") g.Expect(env.Create(ctx, infraTmpl)).To(Succeed()) @@ -264,10 +268,41 @@ func TestMachineDeploymentReconciler(t *testing.T) { }, timeout).Should(BeEquivalentTo(desiredMachineDeploymentReplicas)) // - // Update a MachineDeployment, expect Reconcile to be called and a new MachineSet to appear. + // Update the InfraStructureRef of the MachineDeployment, expect Reconcile to be called and a new MachineSet to appear. // - t.Log("Setting a label on the MachineDeployment") - modifyFunc = func(d *clusterv1.MachineDeployment) { d.Spec.Template.Labels["updated"] = "true" } + + t.Log("Updating the InfrastructureRef on the MachineDeployment") + // Create the InfrastructureTemplate + // Create infrastructure template resource. 
+ infraTmpl2 := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "kind": "GenericInfrastructureMachineTemplate", + "apiVersion": "infrastructure.cluster.x-k8s.io/v1beta1", + "metadata": map[string]interface{}{ + "name": "md-template-2", + "namespace": namespace.Name, + }, + "spec": map[string]interface{}{ + "template": map[string]interface{}{ + "kind": "GenericInfrastructureMachine", + "apiVersion": "infrastructure.cluster.x-k8s.io/v1beta1", + "metadata": map[string]interface{}{}, + "spec": map[string]interface{}{ + "size": "5xlarge", + }, + }, + }, + }, + } + t.Log("Creating the infrastructure template") + g.Expect(env.Create(ctx, infraTmpl2)).To(Succeed()) + + infraTmpl2Ref := corev1.ObjectReference{ + APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1", + Kind: "GenericInfrastructureMachineTemplate", + Name: "md-template-2", + } + modifyFunc = func(d *clusterv1.MachineDeployment) { d.Spec.Template.Spec.InfrastructureRef = infraTmpl2Ref } g.Expect(updateMachineDeployment(ctx, env, deployment, modifyFunc)).To(Succeed()) g.Eventually(func() int { if err := env.List(ctx, machineSets, msListOpts...); err != nil { @@ -276,20 +311,73 @@ func TestMachineDeploymentReconciler(t *testing.T) { return len(machineSets.Items) }, timeout).Should(BeEquivalentTo(2)) + // Update the Labels of the MachineDeployment, expect Reconcile to be called and the MachineSet to be updated in-place. + t.Log("Setting a label on the MachineDeployment") + modifyFunc = func(d *clusterv1.MachineDeployment) { d.Spec.Template.Labels["updated"] = "true" } + g.Expect(updateMachineDeployment(ctx, env, deployment, modifyFunc)).To(Succeed()) + g.Eventually(func(g Gomega) { + g.Expect(env.List(ctx, machineSets, msListOpts...)).To(Succeed()) + // Verify we still only have 2 MachineSets. + g.Expect(machineSets.Items).To(HaveLen(2)) + // Verify that the new MachineSet gets the updated labels. 
+ g.Expect(machineSets.Items[0].Spec.Template.Labels).To(HaveKeyWithValue("updated", "true")) + // Verify that the old MachineSet does not get the updated labels. + g.Expect(machineSets.Items[1].Spec.Template.Labels).ShouldNot(HaveKeyWithValue("updated", "true")) + }, timeout).Should(Succeed()) + + // Update the NodeDrainTimout, NodeDeletionTimeout, NodeVolumeDetachTimeout of the MachineDeployment, + // expect the Reconcile to be called and the MachineSet to be updated in-place. + t.Log("Setting NodeDrainTimout, NodeDeletionTimeout, NodeVolumeDetachTimeout on the MachineDeployment") + duration10s := metav1.Duration{Duration: 10 * time.Second} + modifyFunc = func(d *clusterv1.MachineDeployment) { + d.Spec.Template.Spec.NodeDrainTimeout = &duration10s + d.Spec.Template.Spec.NodeDeletionTimeout = &duration10s + d.Spec.Template.Spec.NodeVolumeDetachTimeout = &duration10s + } + g.Expect(updateMachineDeployment(ctx, env, deployment, modifyFunc)).To(Succeed()) + g.Eventually(func(g Gomega) { + g.Expect(env.List(ctx, machineSets, msListOpts...)).Should(Succeed()) + // Verify we still only have 2 MachineSets. + g.Expect(machineSets.Items).To(HaveLen(2)) + // Verify the NodeDrainTimeout value is updated + g.Expect(machineSets.Items[0].Spec.Template.Spec.NodeDrainTimeout).Should(And( + Not(BeNil()), + HaveValue(Equal(duration10s)), + ), "NodeDrainTimout value does not match expected") + // Verify the NodeDeletionTimeout value is updated + g.Expect(machineSets.Items[0].Spec.Template.Spec.NodeDeletionTimeout).Should(And( + Not(BeNil()), + HaveValue(Equal(duration10s)), + ), "NodeDeletionTimeout value does not match expected") + // Verify the NodeVolumeDetachTimeout value is updated + g.Expect(machineSets.Items[0].Spec.Template.Spec.NodeVolumeDetachTimeout).Should(And( + Not(BeNil()), + HaveValue(Equal(duration10s)), + ), "NodeVolumeDetachTimeout value does not match expected") + + // Verify that the old machine set keeps the old values. 
+ g.Expect(machineSets.Items[1].Spec.Template.Spec.NodeDrainTimeout).Should(BeNil()) + g.Expect(machineSets.Items[1].Spec.Template.Spec.NodeDeletionTimeout).Should(BeNil()) + g.Expect(machineSets.Items[1].Spec.Template.Spec.NodeVolumeDetachTimeout).Should(BeNil()) + }).Should(Succeed()) + + // Update the DeletePolicy of the MachineDeployment, + // expect the Reconcile to be called and the MachineSet to be updated in-place. t.Log("Updating deletePolicy on the MachineDeployment") modifyFunc = func(d *clusterv1.MachineDeployment) { d.Spec.Strategy.RollingUpdate.DeletePolicy = pointer.String("Newest") } g.Expect(updateMachineDeployment(ctx, env, deployment, modifyFunc)).To(Succeed()) - g.Eventually(func() string { - if err := env.List(ctx, machineSets, msListOpts...); err != nil { - return "" - } - return machineSets.Items[0].Spec.DeletePolicy - }, timeout).Should(Equal("Newest")) + g.Eventually(func(g Gomega) { + g.Expect(env.List(ctx, machineSets, msListOpts...)).Should(Succeed()) + // Verify we still only have 2 MachineSets. + g.Expect(machineSets.Items).To(HaveLen(2)) + // Verify the DeletePolicy value is updated + g.Expect(machineSets.Items[0].Spec.DeletePolicy).Should(Equal("Newest")) - // Verify that the old machine set retains its delete policy - g.Expect(machineSets.Items[1].Spec.DeletePolicy).To(Equal("Oldest")) + // Verify that the old machine set retains its delete policy + g.Expect(machineSets.Items[1].Spec.DeletePolicy).To(Equal("Oldest")) + }).Should(Succeed()) // Verify that all the MachineSets have the expected OwnerRef. 
t.Log("Verifying MachineSet owner references") @@ -307,15 +395,15 @@ func TestMachineDeploymentReconciler(t *testing.T) { }, timeout).Should(BeTrue()) t.Log("Locating the newest MachineSet") - var thirdMachineSet *clusterv1.MachineSet + var newestMachineSet *clusterv1.MachineSet for i := range machineSets.Items { ms := &machineSets.Items[i] if ms.UID != secondMachineSet.UID { - thirdMachineSet = ms + newestMachineSet = ms break } } - g.Expect(thirdMachineSet).NotTo(BeNil()) + g.Expect(newestMachineSet).NotTo(BeNil()) t.Log("Verifying the initial MachineSet is deleted") g.Eventually(func() int { @@ -330,7 +418,7 @@ func TestMachineDeploymentReconciler(t *testing.T) { continue } // Skip over Machines controlled by other (previous) MachineSets - if !metav1.IsControlledBy(&m, thirdMachineSet) { + if !metav1.IsControlledBy(&m, newestMachineSet) { continue } providerID := fakeInfrastructureRefReady(m.Spec.InfrastructureRef, infraResource, g) @@ -343,38 +431,10 @@ func TestMachineDeploymentReconciler(t *testing.T) { return len(machineSets.Items) }, timeout*3).Should(BeEquivalentTo(1)) - // - // Update a MachineDeployment spec.Selector.Matchlabels spec.Template.Labels - // expect Reconcile to be called and a new MachineSet to appear - // expect old MachineSets with old labels to be deleted - // - oldLabels := deployment.Spec.Selector.MatchLabels - - // Change labels and selector to a new set of labels which doesn't have any overlap with the previous labels. 
- newLabels := map[string]string{ - "new-key": "new-value", - clusterv1.ClusterNameLabel: testCluster.Name, - } - - t.Log("Updating MachineDeployment labels") - modifyFunc = func(d *clusterv1.MachineDeployment) { - d.Spec.Selector.MatchLabels = newLabels - d.Spec.Template.Labels = newLabels - } - g.Expect(updateMachineDeployment(ctx, env, deployment, modifyFunc)).To(Succeed()) - - t.Log("Verifying if a new MachineSet with updated labels are created") - g.Eventually(func() int { - listOpts := client.MatchingLabels(newLabels) - if err := env.List(ctx, machineSets, listOpts); err != nil { - return -1 - } - return len(machineSets.Items) - }, timeout).Should(BeEquivalentTo(1)) - newms := machineSets.Items[0] - t.Log("Verifying new MachineSet has desired number of replicas") g.Eventually(func() bool { + g.Expect(env.List(ctx, machineSets, msListOpts...)).Should(Succeed()) + newms := machineSets.Items[0] // Set the all non-deleted machines as ready with a NodeRef, so the MachineSet controller can proceed // to properly set AvailableReplicas. 
foundMachines := &clusterv1.MachineList{} @@ -392,23 +452,9 @@ func TestMachineDeploymentReconciler(t *testing.T) { fakeMachineNodeRef(&m, providerID, g) } - listOpts := client.MatchingLabels(newLabels) - if err := env.List(ctx, machineSets, listOpts); err != nil { - return false - } - return machineSets.Items[0].Status.Replicas == desiredMachineDeploymentReplicas + return newms.Status.Replicas == desiredMachineDeploymentReplicas }, timeout*5).Should(BeTrue()) - t.Log("Verifying MachineSets with old labels are deleted") - g.Eventually(func() int { - listOpts := client.MatchingLabels(oldLabels) - if err := env.List(ctx, machineSets, listOpts); err != nil { - return -1 - } - - return len(machineSets.Items) - }, timeout*10).Should(BeEquivalentTo(0)) - t.Log("Verifying MachineDeployment has correct Conditions") g.Eventually(func() bool { key := client.ObjectKey{Name: deployment.Name, Namespace: deployment.Namespace} @@ -421,6 +467,183 @@ func TestMachineDeploymentReconciler(t *testing.T) { }) } +func TestMachineDeploymentReconciler_CleanUpManagedFieldsForSSAAdoption(t *testing.T) { + setup := func(t *testing.T, g *WithT) (*corev1.Namespace, *clusterv1.Cluster) { + t.Helper() + + t.Log("Creating the namespace") + ns, err := env.CreateNamespace(ctx, machineDeploymentNamespace) + g.Expect(err).To(BeNil()) + + t.Log("Creating the Cluster") + cluster := &clusterv1.Cluster{ObjectMeta: metav1.ObjectMeta{Namespace: ns.Name, Name: "test-cluster"}} + g.Expect(env.Create(ctx, cluster)).To(Succeed()) + + t.Log("Creating the Cluster Kubeconfig Secret") + g.Expect(env.CreateKubeconfigSecret(ctx, cluster)).To(Succeed()) + + return ns, cluster + } + + teardown := func(t *testing.T, g *WithT, ns *corev1.Namespace, cluster *clusterv1.Cluster) { + t.Helper() + + t.Log("Deleting the Cluster") + g.Expect(env.Delete(ctx, cluster)).To(Succeed()) + t.Log("Deleting the namespace") + g.Expect(env.Delete(ctx, ns)).To(Succeed()) + } + + g := NewWithT(t) + namespace, testCluster := setup(t, g) + 
defer teardown(t, g, namespace, testCluster) + + labels := map[string]string{ + "foo": "bar", + clusterv1.ClusterNameLabel: testCluster.Name, + } + version := "v1.10.3" + deployment := &clusterv1.MachineDeployment{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "md-", + Namespace: namespace.Name, + Labels: map[string]string{ + clusterv1.ClusterNameLabel: testCluster.Name, + }, + }, + Spec: clusterv1.MachineDeploymentSpec{ + Paused: true, // Set this to true as we do not want to test the other parts of the reconciler in this test. + ClusterName: testCluster.Name, + MinReadySeconds: pointer.Int32(0), + Replicas: pointer.Int32(2), + RevisionHistoryLimit: pointer.Int32(0), + Selector: metav1.LabelSelector{ + // We're using the same labels for spec.selector and spec.template.labels. + MatchLabels: labels, + }, + Strategy: &clusterv1.MachineDeploymentStrategy{ + Type: clusterv1.RollingUpdateMachineDeploymentStrategyType, + RollingUpdate: &clusterv1.MachineRollingUpdateDeployment{ + MaxUnavailable: intOrStrPtr(0), + MaxSurge: intOrStrPtr(1), + DeletePolicy: pointer.String("Oldest"), + }, + }, + Template: clusterv1.MachineTemplateSpec{ + ObjectMeta: clusterv1.ObjectMeta{ + Labels: labels, + }, + Spec: clusterv1.MachineSpec{ + ClusterName: testCluster.Name, + Version: &version, + InfrastructureRef: corev1.ObjectReference{ + APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1", + Kind: "GenericInfrastructureMachineTemplate", + Name: "md-template", + }, + Bootstrap: clusterv1.Bootstrap{ + DataSecretName: pointer.String("data-secret-name"), + }, + }, + }, + }, + } + msListOpts := []client.ListOption{ + client.InNamespace(namespace.Name), + client.MatchingLabels(labels), + } + + // Create infrastructure template resource. 
+ infraResource := map[string]interface{}{ + "kind": "GenericInfrastructureMachine", + "apiVersion": "infrastructure.cluster.x-k8s.io/v1beta1", + "metadata": map[string]interface{}{}, + "spec": map[string]interface{}{ + "size": "3xlarge", + }, + } + infraTmpl := &unstructured.Unstructured{ + Object: map[string]interface{}{ + "kind": "GenericInfrastructureMachineTemplate", + "apiVersion": "infrastructure.cluster.x-k8s.io/v1beta1", + "metadata": map[string]interface{}{ + "name": "md-template", + "namespace": namespace.Name, + }, + "spec": map[string]interface{}{ + "template": infraResource, + }, + }, + } + t.Log("Creating the infrastructure template") + g.Expect(env.Create(ctx, infraTmpl)).To(Succeed()) + + // Create the MachineDeployment object and expect Reconcile to be called. + t.Log("Creating the MachineDeployment") + g.Expect(env.Create(ctx, deployment)).To(Succeed()) + + // Create a MachineSet for the MachineDeployment. + classicManagerMS := &clusterv1.MachineSet{ + TypeMeta: metav1.TypeMeta{ + Kind: "MachineSet", + APIVersion: clusterv1.GroupVersion.String(), + }, + ObjectMeta: metav1.ObjectMeta{ + Name: deployment.Name + "-" + "classic-ms", + Namespace: testCluster.Namespace, + Labels: labels, + }, + Spec: clusterv1.MachineSetSpec{ + ClusterName: testCluster.Name, + Replicas: pointer.Int32(0), + MinReadySeconds: 0, + Selector: metav1.LabelSelector{ + MatchLabels: labels, + }, + Template: clusterv1.MachineTemplateSpec{ + ObjectMeta: clusterv1.ObjectMeta{ + Labels: labels, + }, + Spec: clusterv1.MachineSpec{ + ClusterName: testCluster.Name, + InfrastructureRef: corev1.ObjectReference{ + APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1", + Kind: "GenericInfrastructureMachineTemplate", + Name: "md-template", + }, + Bootstrap: clusterv1.Bootstrap{ + DataSecretName: pointer.String("data-secret-name"), + }, + Version: &version, + }, + }, + }, + } + ssaManagerMS := classicManagerMS.DeepCopy() + ssaManagerMS.Name = deployment.Name + "-" + "ssa-ms" + + // Create 
one using the "old manager". + g.Expect(env.Create(ctx, classicManagerMS, client.FieldOwner("manager"))).To(Succeed()) + + // Create one using SSA. + g.Expect(env.Patch(ctx, ssaManagerMS, client.Apply, client.FieldOwner(machineDeploymentManagerName), client.ForceOwnership)).To(Succeed()) + + // Verify that for both the MachineSets the ManagedFields are updated. + g.Eventually(func(g Gomega) { + machineSets := &clusterv1.MachineSetList{} + g.Expect(env.List(ctx, machineSets, msListOpts...)).To(Succeed()) + + g.Expect(machineSets.Items).To(HaveLen(2)) + for _, ms := range machineSets.Items { + // Verify the ManagedFields are updated. + g.Expect(ms.GetManagedFields()).Should( + ContainElement(ssa.MatchManagedFieldsEntry(machineDeploymentManagerName, metav1.ManagedFieldsOperationApply))) + g.Expect(ms.GetManagedFields()).ShouldNot( + ContainElement(ssa.MatchManagedFieldsEntry("manager", metav1.ManagedFieldsOperationUpdate))) + } + }).Should(Succeed()) +} + func TestMachineSetToDeployments(t *testing.T) { g := NewWithT(t) diff --git a/internal/controllers/machinedeployment/machinedeployment_rolling.go b/internal/controllers/machinedeployment/machinedeployment_rolling.go index 250a3db8e2c2..a40dd5c2fe6c 100644 --- a/internal/controllers/machinedeployment/machinedeployment_rolling.go +++ b/internal/controllers/machinedeployment/machinedeployment_rolling.go @@ -91,7 +91,7 @@ func (r *Reconciler) reconcileNewMachineSet(ctx context.Context, allMSs []*clust return r.scaleMachineSet(ctx, newMS, *(deployment.Spec.Replicas), deployment) } - newReplicasCount, err := mdutil.NewMSNewReplicas(deployment, allMSs, newMS) + newReplicasCount, err := mdutil.NewMSNewReplicas(deployment, allMSs, *newMS.Spec.Replicas) if err != nil { return err } diff --git a/internal/controllers/machinedeployment/machinedeployment_sync.go b/internal/controllers/machinedeployment/machinedeployment_sync.go index af8935cded70..8436bfb7e33b 100644 --- 
a/internal/controllers/machinedeployment/machinedeployment_sync.go +++ b/internal/controllers/machinedeployment/machinedeployment_sync.go @@ -20,19 +20,23 @@ import ( "context" "fmt" "sort" - "strconv" + "time" + "github.com/go-logr/logr" "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + kerrors "k8s.io/apimachinery/pkg/util/errors" apirand "k8s.io/apimachinery/pkg/util/rand" "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/util/retry" "k8s.io/klog/v2" + "k8s.io/utils/pointer" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" "sigs.k8s.io/cluster-api/internal/controllers/machinedeployment/mdutil" @@ -86,173 +90,284 @@ func (r *Reconciler) getAllMachineSetsAndSyncRevision(ctx context.Context, d *cl return newMS, allOldMSs, nil } -// Returns a machine set that matches the intent of the given deployment. Returns nil if the new machine set doesn't exist yet. -// 1. Get existing new MS (the MS that the given deployment targets, whose machine template is the same as deployment's). -// 2. If there's existing new MS, update its revision number if it's smaller than (maxOldRevision + 1), where maxOldRevision is the max revision number among all old MSes. -// 3. If there's no existing new MS and createIfNotExisted is true, create one with appropriate revision number (maxOldRevision + 1) and replicas. -// Note that the machine-template-hash will be added to adopted MSes and machines. 
-func (r *Reconciler) getNewMachineSet(ctx context.Context, d *clusterv1.MachineDeployment, msList, oldMSs []*clusterv1.MachineSet, createIfNotExisted bool) (*clusterv1.MachineSet, error) { - log := ctrl.LoggerFrom(ctx) - - existingNewMS := mdutil.FindNewMachineSet(d, msList) - - // Calculate the max revision number among all old MSes - maxOldRevision := mdutil.MaxRevision(oldMSs, log) - - // Calculate revision number for this new machine set - newRevision := strconv.FormatInt(maxOldRevision+1, 10) - - // Latest machine set exists. We need to sync its annotations (includes copying all but - // annotationsToSkip from the parent deployment, and update revision, desiredReplicas, - // and maxReplicas) and also update the revision annotation in the deployment with the - // latest revision. - if existingNewMS != nil { - msCopy := existingNewMS.DeepCopy() - patchHelper, err := patch.NewHelper(msCopy, r.Client) +// Returns a MachineSet that matches the intent of the given MachineDeployment. +// If there does not exist such a MachineSet and createIfNotExisted is true, create a new MachineSet. +// If there is already such a MachineSet, update it to propagate in-place mutable fields from the MachineDeployment. +func (r *Reconciler) getNewMachineSet(ctx context.Context, d *clusterv1.MachineDeployment, msList, oldMSs []*clusterv1.MachineSet, createIfNotExists bool) (*clusterv1.MachineSet, error) { + // Try to find a MachineSet which matches the MachineDeployments intent, while ignore diffs between + // the in-place mutable fields. + // If we find a matching MachineSet we just update it to propagate any changes to the in-place mutable + // fields and thus we do not trigger an unnecessary rollout (i.e. create a new MachineSet). + // If we don't find a matching MachineSet, we need a rollout and thus create a new MachineSet. + // Note: The in-place mutable fields can be just updated inline, because they do not affect the actual machines + // themselves (i.e. 
the infrastructure and the software running on the Machines not the Machine object). + matchingMS := mdutil.FindNewMachineSet(d, msList) + + // If there is a MachineSet that matches the intent of the MachineDeployment, update the MachineSet + // to propagate all in-place mutable fields from MachineDeployment to the MachineSet. + if matchingMS != nil { + updatedMS, err := r.updateMachineSet(ctx, d, matchingMS, oldMSs) if err != nil { return nil, err } - // Set existing new machine set's annotation - annotationsUpdated := mdutil.SetNewMachineSetAnnotations(d, msCopy, newRevision, true, log) - - minReadySecondsNeedsUpdate := msCopy.Spec.MinReadySeconds != *d.Spec.MinReadySeconds - deletePolicyNeedsUpdate := d.Spec.Strategy.RollingUpdate.DeletePolicy != nil && msCopy.Spec.DeletePolicy != *d.Spec.Strategy.RollingUpdate.DeletePolicy - if annotationsUpdated || minReadySecondsNeedsUpdate || deletePolicyNeedsUpdate { - msCopy.Spec.MinReadySeconds = *d.Spec.MinReadySeconds - - if deletePolicyNeedsUpdate { - msCopy.Spec.DeletePolicy = *d.Spec.Strategy.RollingUpdate.DeletePolicy - } - - return nil, patchHelper.Patch(ctx, msCopy) - } - - // Apply revision annotation from existingNewMS if it is missing from the deployment. + // Ensure MachineDeployment has the latest MachineSet revision in its revision annotation. err = r.updateMachineDeployment(ctx, d, func(innerDeployment *clusterv1.MachineDeployment) { - mdutil.SetDeploymentRevision(d, msCopy.Annotations[clusterv1.RevisionAnnotation]) + mdutil.SetDeploymentRevision(d, updatedMS.Annotations[clusterv1.RevisionAnnotation]) }) - return msCopy, err + if err != nil { + return nil, errors.Wrap(err, "failed to update revision annotation on MachineDeployment") + } + return updatedMS, nil } - if !createIfNotExisted { + if !createIfNotExists { return nil, nil } - // new MachineSet does not exist, create one. 
- newMSTemplate := *d.Spec.Template.DeepCopy() - hash, err := mdutil.ComputeSpewHash(&newMSTemplate) + // Create a new MachineSet and wait until the new MachineSet exists in the cache. + newMS, err := r.createMachineSetAndWait(ctx, d, oldMSs) if err != nil { return nil, err } - machineTemplateSpecHash := fmt.Sprintf("%d", hash) - newMSTemplate.Labels = mdutil.CloneAndAddLabel(d.Spec.Template.Labels, - clusterv1.MachineDeploymentUniqueLabel, machineTemplateSpecHash) - - // Add machineTemplateHash label to selector. - newMSSelector := mdutil.CloneSelectorAndAddLabel(&d.Spec.Selector, - clusterv1.MachineDeploymentUniqueLabel, machineTemplateSpecHash) - minReadySeconds := int32(0) - if d.Spec.MinReadySeconds != nil { - minReadySeconds = *d.Spec.MinReadySeconds + // Ensure MachineDeployment has the latest MachineSet revision in its revision annotation. + err = r.updateMachineDeployment(ctx, d, func(innerDeployment *clusterv1.MachineDeployment) { + mdutil.SetDeploymentRevision(d, newMS.Annotations[clusterv1.RevisionAnnotation]) + }) + if err != nil { + return nil, errors.Wrap(err, "failed to update revision annotation on MachineDeployment") } - // Create new MachineSet - newMS := clusterv1.MachineSet{ - ObjectMeta: metav1.ObjectMeta{ - // Make the name deterministic, to ensure idempotence - Name: d.Name + "-" + apirand.SafeEncodeString(machineTemplateSpecHash), - Namespace: d.Namespace, - Labels: make(map[string]string), - // Note: by setting the ownerRef on creation we signal to the MachineSet controller that this is not a stand-alone MachineSet. - OwnerReferences: []metav1.OwnerReference{*metav1.NewControllerRef(d, machineDeploymentKind)}, - }, - Spec: clusterv1.MachineSetSpec{ - ClusterName: d.Spec.ClusterName, - Replicas: new(int32), - MinReadySeconds: minReadySeconds, - Selector: *newMSSelector, - Template: newMSTemplate, - }, + return newMS, nil +} + +// updateMachineSet updates an existing MachineSet to propagate in-place mutable fields from the MachineDeployment. 
+func (r *Reconciler) updateMachineSet(ctx context.Context, deployment *clusterv1.MachineDeployment, ms *clusterv1.MachineSet, oldMSs []*clusterv1.MachineSet) (*clusterv1.MachineSet, error) { + log := ctrl.LoggerFrom(ctx) + + // Compute the desired MachineSet. + updatedMS, err := r.computeDesiredMachineSet(deployment, ms, oldMSs, log) + if err != nil { + return nil, errors.Wrapf(err, "failed to update MachineSet %q", klog.KObj(ms)) } - // Set the labels from newMSTemplate as top-level labels for the new MS. - // Note: We can't just set `newMSTemplate.Labels` directly and thus "share" the labels map between top-level and - // .spec.template.metadata.labels. This would mean that adding the MachineDeploymentNameLabel later top-level - // would also add the label to .spec.template.metadata.labels. - for k, v := range newMSTemplate.Labels { - newMS.Labels[k] = v + // Update the MachineSet to propagate in-place mutable fields from the MachineDeployment. + patchOptions := []client.PatchOption{ + client.ForceOwnership, + client.FieldOwner(machineDeploymentManagerName), + } + if err := r.Client.Patch(ctx, updatedMS, client.Apply, patchOptions...); err != nil { + r.recorder.Eventf(deployment, corev1.EventTypeWarning, "FailedUpdate", "Failed to update MachineSet %s: %v", klog.KObj(updatedMS), err) + return nil, errors.Wrapf(err, "failed to update MachineSet %s", klog.KObj(updatedMS)) } - // Enforce that the MachineDeploymentNameLabel label is set - // Note: the MachineDeploymentNameLabel is added by the default webhook to MachineDeployment.spec.template.labels if spec.selector is empty. - newMS.Labels[clusterv1.MachineDeploymentNameLabel] = d.Name + log.V(4).Info("Updated MachineSet", "MachineSet", klog.KObj(updatedMS)) + return updatedMS, nil +} + +// createMachineSetAndWait creates a new MachineSet with the desired intent of the MachineDeployment. +// It waits for the cache to be updated with the newly created MachineSet. 
+func (r *Reconciler) createMachineSetAndWait(ctx context.Context, deployment *clusterv1.MachineDeployment, oldMSs []*clusterv1.MachineSet) (*clusterv1.MachineSet, error) { + log := ctrl.LoggerFrom(ctx) - if d.Spec.Strategy.RollingUpdate.DeletePolicy != nil { - newMS.Spec.DeletePolicy = *d.Spec.Strategy.RollingUpdate.DeletePolicy + // Compute the desired MachineSet. + newMS, err := r.computeDesiredMachineSet(deployment, nil, oldMSs, log) + if err != nil { + return nil, errors.Wrap(err, "failed to create new MachineSet") } - // Add foregroundDeletion finalizer to MachineSet if the MachineDeployment has it - finalizerSet := sets.Set[string]{}.Insert(d.Finalizers...) - if finalizerSet.Has(metav1.FinalizerDeleteDependents) { - controllerutil.AddFinalizer(&newMS, metav1.FinalizerDeleteDependents) + // Create the MachineSet. + patchOptions := []client.PatchOption{ + client.ForceOwnership, + client.FieldOwner(machineDeploymentManagerName), } + if err = r.Client.Patch(ctx, newMS, client.Apply, patchOptions...); err != nil { + r.recorder.Eventf(deployment, corev1.EventTypeWarning, "FailedCreate", "Failed to create MachineSet %s: %v", klog.KObj(newMS), err) + return nil, errors.Wrapf(err, "failed to create new MachineSet %s", klog.KObj(newMS)) + } + log.V(4).Info("Created new MachineSet", "MachineSet", klog.KObj(newMS)) + r.recorder.Eventf(deployment, corev1.EventTypeNormal, "SuccessfulCreate", "Created MachineSet %s", klog.KObj(newMS)) - allMSs := append(oldMSs, &newMS) - newReplicasCount, err := mdutil.NewMSNewReplicas(d, allMSs, &newMS) - if err != nil { - return nil, err + // Keep trying to get the MachineSet. This will force the cache to update and prevent any future reconciliation of + // the MachineDeployment to reconcile with an outdated list of MachineSets which could lead to unwanted creation of + // a duplicate MachineSet. 
+	var pollErrors []error
+	if err := wait.PollImmediate(100*time.Millisecond, 10*time.Second, func() (bool, error) {
+		ms := &clusterv1.MachineSet{}
+		if err := r.Client.Get(ctx, client.ObjectKeyFromObject(newMS), ms); err != nil {
+			// Do not return error here. Continue to poll even if we hit an error
+			// so that we avoid exiting because of transient errors like network flakes.
+			// Capture all the errors and return the aggregate error if the poll fails eventually.
+			pollErrors = append(pollErrors, err)
+			return false, nil
+		}
+		return true, nil
+	}); err != nil {
+		return nil, errors.Wrapf(kerrors.NewAggregate(pollErrors), "failed to get the MachineSet %s after creation", klog.KObj(newMS))
 	}
+	return newMS, nil
+}

-	*(newMS.Spec.Replicas) = newReplicasCount
+// computeDesiredMachineSet computes the desired MachineSet.
+// This MachineSet will be used during reconciliation to:
+// * create a MachineSet
+// * update an existing MachineSet
+// Because we are using Server-Side-Apply we always have to calculate the full object.
+// There are small differences in how we calculate the MachineSet depending on if it
+// is a create or update. Example: for a new MachineSet we have to calculate a new name,
+// while for an existing MachineSet we have to use the name of the existing MachineSet.
+func (r *Reconciler) computeDesiredMachineSet(deployment *clusterv1.MachineDeployment, existingMS *clusterv1.MachineSet, oldMSs []*clusterv1.MachineSet, log logr.Logger) (*clusterv1.MachineSet, error) {
+	var name string
+	var uid types.UID
+	var finalizers []string
+	var uniqueIdentifierLabelValue string
+	var machineTemplateSpec clusterv1.MachineSpec
+	var replicas int32
+	var err error
+
+	// For a new MachineSet:
+	// * compute a new uniqueIdentifier, a new MachineSet name, finalizers, replicas and
+	//   machine template spec (take the one from MachineDeployment)
+	if existingMS == nil {
+		// Note: In previous Cluster API versions (< v1.4.0), the label value was the hash of the full machine
+		// template. With the introduction of in-place mutation the machine template of the MachineSet can change.
+		// Because of that it is impossible for the label's value to always be the hash of the full machine template.
+		// (Because the hash changes when the machine template changes).
+		// As a result, we use the hash of the machine template while ignoring all in-place mutable fields, i.e. the
+		// machine template with only fields that could trigger a rollout for the machine-template-hash, making it
+		// independent of the changes to any in-place mutable fields.
+		templateHash, err := mdutil.ComputeSpewHash(mdutil.MachineTemplateDeepCopyRolloutFields(&deployment.Spec.Template))
+		if err != nil {
+			return nil, errors.Wrap(err, "failed to compute desired MachineSet: failed to compute machine template hash")
+		}
+		uniqueIdentifierLabelValue = fmt.Sprintf("%d", templateHash)

-	// Set new machine set's annotation
-	mdutil.SetNewMachineSetAnnotations(d, &newMS, newRevision, false, log)
-	// Create the new MachineSet. If it already exists, then we need to check for possible
-	// hash collisions. If there is any other error, we need to report it in the status of
-	// the Deployment.
- alreadyExists := false - err = r.Client.Create(ctx, &newMS) - createdMS := &newMS - switch { - // We may end up hitting this due to a slow cache or a fast resync of the Deployment. - case apierrors.IsAlreadyExists(err): - alreadyExists = true + name = computeNewMachineSetName(deployment.Name+"-", apirand.SafeEncodeString(uniqueIdentifierLabelValue)) - ms := &clusterv1.MachineSet{} - msErr := r.Client.Get(ctx, client.ObjectKey{Namespace: newMS.Namespace, Name: newMS.Name}, ms) - if msErr != nil { - return nil, msErr + // Add foregroundDeletion finalizer to MachineSet if the MachineDeployment has it. + if sets.New[string](deployment.Finalizers...).Has(metav1.FinalizerDeleteDependents) { + finalizers = []string{metav1.FinalizerDeleteDependents} } - // If the Deployment owns the MachineSet and the MachineSet's MachineTemplateSpec is semantically - // deep equal to the MachineTemplateSpec of the Deployment, it's the Deployment's new MachineSet. - // Otherwise, this is a hash collision and we need to increment the collisionCount field in - // the status of the Deployment and requeue to try the creation in the next sync. 
- controllerRef := metav1.GetControllerOf(ms) - if controllerRef != nil && controllerRef.UID == d.UID && mdutil.EqualMachineTemplate(&d.Spec.Template, &ms.Spec.Template) { - createdMS = ms - break + replicas, err = mdutil.NewMSNewReplicas(deployment, oldMSs, 0) + if err != nil { + return nil, errors.Wrap(err, "failed to compute desired MachineSet") } - return nil, err - case err != nil: - log.Error(err, "Failed to create new MachineSet", "MachineSet", klog.KObj(&newMS)) - r.recorder.Eventf(d, corev1.EventTypeWarning, "FailedCreate", "Failed to create MachineSet %q: %v", newMS.Name, err) - return nil, err + machineTemplateSpec = *deployment.Spec.Template.Spec.DeepCopy() + } else { + // For updating an existing MachineSet: + // * get the uniqueIdentifier from labels of the existingMS + // * use name, uid, finalizers, replicas and machine template spec from existingMS. + // Note: We use the uid, to ensure that the Server-Side-Apply only updates existingMS. + // Note: We carry over those fields because we don't want to mutate them for an existingMS. + var uniqueIdentifierLabelExists bool + uniqueIdentifierLabelValue, uniqueIdentifierLabelExists = existingMS.Labels[clusterv1.MachineDeploymentUniqueLabel] + if !uniqueIdentifierLabelExists { + return nil, errors.Errorf("failed to compute desired MachineSet: failed to get unique identifier from %q annotation", + clusterv1.MachineDeploymentUniqueLabel) + } + + name = existingMS.Name + uid = existingMS.UID + + // Keep foregroundDeletion finalizer if the existingMS has it. + // Note: This case is a little different from the create case. In the update case we preserve + // the finalizer on the MachineSet if it already exists. Because of SSA we should not build + // the finalizer information from the MachineDeployment when updating a MachineSet because that could lead + // to dropping the finalizer from the MachineSet if it is dropped from the MachineDeployment. 
+	// We should not drop the finalizer on the MachineSet if the finalizer is dropped from the MachineDeployment.
+		if sets.New[string](existingMS.Finalizers...).Has(metav1.FinalizerDeleteDependents) {
+			finalizers = []string{metav1.FinalizerDeleteDependents}
+		}
+
+		replicas = *existingMS.Spec.Replicas
+
+		machineTemplateSpec = *existingMS.Spec.Template.Spec.DeepCopy()
+	}
+
+	// Construct the basic MachineSet.
+	desiredMS := &clusterv1.MachineSet{
+		TypeMeta: metav1.TypeMeta{
+			APIVersion: clusterv1.GroupVersion.String(),
+			Kind:       "MachineSet",
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      name,
+			Namespace: deployment.Namespace,
+			// Note: By setting the ownerRef on creation we signal to the MachineSet controller that this is not a stand-alone MachineSet.
+			OwnerReferences: []metav1.OwnerReference{*metav1.NewControllerRef(deployment, machineDeploymentKind)},
+			UID:             uid,
+			Finalizers:      finalizers,
+		},
+		Spec: clusterv1.MachineSetSpec{
+			Replicas:    &replicas,
+			ClusterName: deployment.Spec.ClusterName,
+			Template: clusterv1.MachineTemplateSpec{
+				Spec: machineTemplateSpec,
+			},
+		},
 	}

-	if !alreadyExists {
-		log.V(4).Info("Created new MachineSet", "MachineSet", klog.KObj(createdMS))
-		r.recorder.Eventf(d, corev1.EventTypeNormal, "SuccessfulCreate", "Created MachineSet %q", newMS.Name)
+	// Set the in-place mutable fields.
+	// When we create a new MachineSet we will just create the MachineSet with those fields.
+	// When we update an existing MachineSet we will update the fields on the existing MachineSet (in-place mutate).
+
+	// Set labels and .spec.template.labels.
+	desiredMS.Labels = mdutil.CloneAndAddLabel(deployment.Spec.Template.Labels,
+		clusterv1.MachineDeploymentUniqueLabel, uniqueIdentifierLabelValue)
+	// Always set the MachineDeploymentNameLabel.
+	// Note: If a client tries to create a MachineDeployment without a selector, the MachineDeployment webhook
+	// will add this label automatically.
But we want this label to always be present even if the MachineDeployment + // has a selector which doesn't include it. Therefore, we have to set it here explicitly. + desiredMS.Labels[clusterv1.MachineDeploymentNameLabel] = deployment.Name + desiredMS.Spec.Template.Labels = mdutil.CloneAndAddLabel(deployment.Spec.Template.Labels, + clusterv1.MachineDeploymentUniqueLabel, uniqueIdentifierLabelValue) + + // Set selector. + desiredMS.Spec.Selector = *mdutil.CloneSelectorAndAddLabel(&deployment.Spec.Selector, clusterv1.MachineDeploymentUniqueLabel, uniqueIdentifierLabelValue) + + // Set annotations and .spec.template.annotations. + if desiredMS.Annotations, err = mdutil.ComputeMachineSetAnnotations(log, deployment, oldMSs, existingMS); err != nil { + return nil, errors.Wrap(err, "failed to compute desired MachineSet: failed to compute annotations") } + desiredMS.Spec.Template.Annotations = cloneStringMap(deployment.Spec.Template.Annotations) - err = r.updateMachineDeployment(ctx, d, func(innerDeployment *clusterv1.MachineDeployment) { - mdutil.SetDeploymentRevision(d, newRevision) - }) + // Set all other in-place mutable fields. + desiredMS.Spec.MinReadySeconds = pointer.Int32Deref(deployment.Spec.MinReadySeconds, 0) + desiredMS.Spec.DeletePolicy = pointer.StringDeref(deployment.Spec.Strategy.RollingUpdate.DeletePolicy, "") + desiredMS.Spec.Template.Spec.NodeDrainTimeout = deployment.Spec.Template.Spec.NodeDrainTimeout + desiredMS.Spec.Template.Spec.NodeDeletionTimeout = deployment.Spec.Template.Spec.NodeDeletionTimeout + desiredMS.Spec.Template.Spec.NodeVolumeDetachTimeout = deployment.Spec.Template.Spec.NodeVolumeDetachTimeout - return createdMS, err + return desiredMS, nil +} + +// cloneStringMap clones a string map. 
+func cloneStringMap(in map[string]string) map[string]string { + out := map[string]string{} + for k, v := range in { + out[k] = v + } + return out +} + +const ( + maxNameLength = 63 + randomLength = 5 + maxGeneratedNameLength = maxNameLength - randomLength +) + +// computeNewMachineSetName generates a new name for the MachineSet just like +// the upstream SimpleNameGenerator. +// Note: We had to extract the logic as we want to use the MachineSet name suffix as +// unique identifier for the MachineSet. +func computeNewMachineSetName(base, suffix string) string { + if len(base) > maxGeneratedNameLength { + base = base[:maxGeneratedNameLength] + } + return fmt.Sprintf("%s%s", base, suffix) } // scale scales proportionally in order to mitigate risk. Otherwise, scaling up can increase the size diff --git a/internal/controllers/machinedeployment/machinedeployment_sync_test.go b/internal/controllers/machinedeployment/machinedeployment_sync_test.go index b8219b36c382..86b521c88e91 100644 --- a/internal/controllers/machinedeployment/machinedeployment_sync_test.go +++ b/internal/controllers/machinedeployment/machinedeployment_sync_test.go @@ -20,12 +20,16 @@ import ( "context" "fmt" "testing" + "time" . 
"github.com/onsi/gomega" "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + apirand "k8s.io/apimachinery/pkg/util/rand" "k8s.io/client-go/tools/record" + "k8s.io/klog/v2/klogr" "k8s.io/utils/pointer" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" @@ -495,6 +499,229 @@ func TestSyncDeploymentStatus(t *testing.T) { } } +func TestComputeDesiredMachineSet(t *testing.T) { + duration5s := &metav1.Duration{Duration: 5 * time.Second} + duration10s := &metav1.Duration{Duration: 10 * time.Second} + + infraRef := corev1.ObjectReference{ + Kind: "GenericInfrastructureMachineTemplate", + Name: "infra-template-1", + APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1", + } + bootstrapRef := corev1.ObjectReference{ + Kind: "GenericBootstrapConfigTemplate", + Name: "bootstrap-template-1", + APIVersion: "bootstrap.cluster.x-k8s.io/v1beta1", + } + + deployment := &clusterv1.MachineDeployment{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "md1", + Annotations: map[string]string{"top-level-annotation": "top-level-annotation-value"}, + }, + Spec: clusterv1.MachineDeploymentSpec{ + ClusterName: "test-cluster", + Replicas: pointer.Int32(3), + MinReadySeconds: pointer.Int32(10), + Strategy: &clusterv1.MachineDeploymentStrategy{ + Type: clusterv1.RollingUpdateMachineDeploymentStrategyType, + RollingUpdate: &clusterv1.MachineRollingUpdateDeployment{ + MaxSurge: intOrStrPtr(1), + DeletePolicy: pointer.String("Random"), + MaxUnavailable: intOrStrPtr(0), + }, + }, + Selector: metav1.LabelSelector{ + MatchLabels: map[string]string{"k1": "v1"}, + }, + Template: clusterv1.MachineTemplateSpec{ + ObjectMeta: clusterv1.ObjectMeta{ + Labels: map[string]string{"machine-label1": "machine-value1"}, + Annotations: map[string]string{"machine-annotation1": "machine-value1"}, + }, + Spec: clusterv1.MachineSpec{ + Version: pointer.String("v1.25.3"), + 
InfrastructureRef: infraRef, + Bootstrap: clusterv1.Bootstrap{ + ConfigRef: &bootstrapRef, + }, + NodeDrainTimeout: duration10s, + NodeVolumeDetachTimeout: duration10s, + NodeDeletionTimeout: duration10s, + }, + }, + }, + } + + log := klogr.New() + + skeletonMSBasedOnMD := &clusterv1.MachineSet{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Labels: map[string]string{"machine-label1": "machine-value1"}, + Annotations: map[string]string{"top-level-annotation": "top-level-annotation-value"}, + }, + Spec: clusterv1.MachineSetSpec{ + ClusterName: "test-cluster", + Replicas: pointer.Int32(3), + MinReadySeconds: 10, + DeletePolicy: string(clusterv1.RandomMachineSetDeletePolicy), + Selector: metav1.LabelSelector{MatchLabels: map[string]string{"k1": "v1"}}, + Template: *deployment.Spec.Template.DeepCopy(), + }, + } + + t.Run("should compute a new MachineSet when no old MachineSets exist", func(t *testing.T) { + expectedMS := skeletonMSBasedOnMD.DeepCopy() + + g := NewWithT(t) + actualMS, err := (&Reconciler{}).computeDesiredMachineSet(deployment, nil, nil, log) + g.Expect(err).To(BeNil()) + assertMachineSet(g, actualMS, expectedMS) + }) + + t.Run("should compute a new MachineSet when old MachineSets exist", func(t *testing.T) { + oldMS := skeletonMSBasedOnMD.DeepCopy() + oldMS.Spec.Replicas = pointer.Int32(2) + + expectedMS := skeletonMSBasedOnMD.DeepCopy() + expectedMS.Spec.Replicas = pointer.Int32(2) // 4 (maxsurge+replicas) - 2 (replicas of old ms) = 2 + + g := NewWithT(t) + actualMS, err := (&Reconciler{}).computeDesiredMachineSet(deployment, nil, []*clusterv1.MachineSet{oldMS}, log) + g.Expect(err).To(BeNil()) + assertMachineSet(g, actualMS, expectedMS) + }) + + t.Run("should compute the updated MachineSet when no old MachineSets exists", func(t *testing.T) { + uniqueID := apirand.String(5) + existingMS := skeletonMSBasedOnMD.DeepCopy() + // computeDesiredMachineSet should retain the UID, name and the "machine-template-hash" label value + // of the 
existing machine. + // Other fields like labels, annotations, node timeout, etc are expected to change. + existingMSUID := types.UID("abc-123-uid") + existingMS.UID = existingMSUID + existingMS.Name = deployment.Name + "-" + uniqueID + existingMS.Labels = map[string]string{ + clusterv1.MachineDeploymentUniqueLabel: uniqueID, + "ms-label-1": "ms-value-1", + } + existingMS.Annotations = nil + existingMS.Spec.Template.Labels = map[string]string{ + clusterv1.MachineDeploymentUniqueLabel: uniqueID, + "ms-label-2": "ms-value-2", + } + existingMS.Spec.Template.Annotations = nil + existingMS.Spec.Template.Spec.NodeDrainTimeout = duration5s + existingMS.Spec.Template.Spec.NodeDeletionTimeout = duration5s + existingMS.Spec.Template.Spec.NodeVolumeDetachTimeout = duration5s + existingMS.Spec.DeletePolicy = string(clusterv1.NewestMachineSetDeletePolicy) + existingMS.Spec.MinReadySeconds = 0 + + expectedMS := skeletonMSBasedOnMD.DeepCopy() + expectedMS.UID = existingMSUID + expectedMS.Name = deployment.Name + "-" + uniqueID + expectedMS.Labels[clusterv1.MachineDeploymentUniqueLabel] = uniqueID + expectedMS.Spec.Template.Labels[clusterv1.MachineDeploymentUniqueLabel] = uniqueID + + g := NewWithT(t) + actualMS, err := (&Reconciler{}).computeDesiredMachineSet(deployment, existingMS, nil, log) + g.Expect(err).To(BeNil()) + assertMachineSet(g, actualMS, expectedMS) + }) + + t.Run("should compute the updated MachineSet when old MachineSets exist", func(t *testing.T) { + uniqueID := apirand.String(5) + existingMS := skeletonMSBasedOnMD.DeepCopy() + existingMSUID := types.UID("abc-123-uid") + existingMS.UID = existingMSUID + existingMS.Name = deployment.Name + "-" + uniqueID + existingMS.Labels = map[string]string{ + clusterv1.MachineDeploymentUniqueLabel: uniqueID, + "ms-label-1": "ms-value-1", + } + existingMS.Annotations = nil + existingMS.Spec.Template.Labels = map[string]string{ + clusterv1.MachineDeploymentUniqueLabel: uniqueID, + "ms-label-2": "ms-value-2", + } + 
existingMS.Spec.Template.Annotations = nil + existingMS.Spec.Template.Spec.NodeDrainTimeout = duration5s + existingMS.Spec.Template.Spec.NodeDeletionTimeout = duration5s + existingMS.Spec.Template.Spec.NodeVolumeDetachTimeout = duration5s + existingMS.Spec.DeletePolicy = string(clusterv1.NewestMachineSetDeletePolicy) + existingMS.Spec.MinReadySeconds = 0 + + oldMS := skeletonMSBasedOnMD.DeepCopy() + oldMS.Spec.Replicas = pointer.Int32(2) + + // Note: computeDesiredMachineSet does not modify the replicas on the updated MachineSet. + // Therefore, even though we have the old machineset with replicas 2 the updatedMS does not + // get modified replicas (2 = 4(maxsuge+spec.replica) - 2(oldMS replicas)). + // Nb. The final replicas of the MachineSet are calculated elsewhere. + expectedMS := skeletonMSBasedOnMD.DeepCopy() + expectedMS.UID = existingMSUID + expectedMS.Name = deployment.Name + "-" + uniqueID + expectedMS.Labels[clusterv1.MachineDeploymentUniqueLabel] = uniqueID + expectedMS.Spec.Template.Labels[clusterv1.MachineDeploymentUniqueLabel] = uniqueID + + g := NewWithT(t) + actualMS, err := (&Reconciler{}).computeDesiredMachineSet(deployment, existingMS, []*clusterv1.MachineSet{oldMS}, log) + g.Expect(err).To(BeNil()) + assertMachineSet(g, actualMS, expectedMS) + }) +} + +func assertMachineSet(g *WithT, actualMS *clusterv1.MachineSet, expectedMS *clusterv1.MachineSet) { + // check UID + if expectedMS.UID != "" { + g.Expect(actualMS.UID).Should(Equal(expectedMS.UID)) + } + // Check Name + if expectedMS.Name != "" { + g.Expect(actualMS.Name).Should(Equal(expectedMS.Name)) + } + // Check Namespace + g.Expect(actualMS.Namespace).Should(Equal(expectedMS.Namespace)) + + // Check Replicas + g.Expect(actualMS.Spec.Replicas).ShouldNot(BeNil()) + g.Expect(actualMS.Spec.Replicas).Should(HaveValue(Equal(*expectedMS.Spec.Replicas))) + + // Check ClusterName + g.Expect(actualMS.Spec.ClusterName).Should(Equal(expectedMS.Spec.ClusterName)) + + // Check Labels + for k, v := range 
expectedMS.Labels { + g.Expect(actualMS.Labels).Should(HaveKeyWithValue(k, v)) + } + for k, v := range expectedMS.Spec.Template.Labels { + g.Expect(actualMS.Spec.Template.Labels).Should(HaveKeyWithValue(k, v)) + } + // Verify that the labels also has the unique identifier key. + g.Expect(actualMS.Labels).Should(HaveKey(clusterv1.MachineDeploymentUniqueLabel)) + g.Expect(actualMS.Spec.Template.Labels).Should(HaveKey(clusterv1.MachineDeploymentUniqueLabel)) + + // Check Annotations + // Note: More nuanced validation of the Revision annotation calculations are done when testing `ComputeMachineSetAnnotations`. + for k, v := range expectedMS.Annotations { + g.Expect(actualMS.Annotations).Should(HaveKeyWithValue(k, v)) + } + for k, v := range expectedMS.Spec.Template.Annotations { + g.Expect(actualMS.Spec.Template.Annotations).Should(HaveKeyWithValue(k, v)) + } + + // Check MinReadySeconds + g.Expect(actualMS.Spec.MinReadySeconds).Should(Equal(expectedMS.Spec.MinReadySeconds)) + + // Check DeletePolicy + g.Expect(actualMS.Spec.DeletePolicy).Should(Equal(expectedMS.Spec.DeletePolicy)) + + // Check MachineTemplateSpec + g.Expect(actualMS.Spec.Template.Spec).Should(Equal(expectedMS.Spec.Template.Spec)) +} + // asserts the conditions set on the Getter object. // TODO: replace this with util.condition.MatchConditions (or a new matcher in controller runtime komega). 
func assertConditions(t *testing.T, from conditions.Getter, conditions ...*clusterv1.Condition) { diff --git a/internal/controllers/machinedeployment/mdutil/util.go b/internal/controllers/machinedeployment/mdutil/util.go index 7f05f733d36d..deba1edf90ec 100644 --- a/internal/controllers/machinedeployment/mdutil/util.go +++ b/internal/controllers/machinedeployment/mdutil/util.go @@ -27,6 +27,7 @@ import ( "github.com/davecgh/go-spew/spew" "github.com/go-logr/logr" + "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" apiequality "k8s.io/apimachinery/pkg/api/equality" "k8s.io/apimachinery/pkg/api/meta" @@ -40,6 +41,28 @@ import ( "sigs.k8s.io/cluster-api/util/conversion" ) +// MachineSetsByDecreasingReplicas sorts the list of MachineSets in decreasing order of replicas, +// using creation time (ascending order) and name (alphabetical) as tie breakers. +type MachineSetsByDecreasingReplicas []*clusterv1.MachineSet + +func (o MachineSetsByDecreasingReplicas) Len() int { return len(o) } +func (o MachineSetsByDecreasingReplicas) Swap(i, j int) { o[i], o[j] = o[j], o[i] } +func (o MachineSetsByDecreasingReplicas) Less(i, j int) bool { + if o[i].Spec.Replicas == nil { + return false + } + if o[j].Spec.Replicas == nil { + return true + } + if *o[i].Spec.Replicas == *o[j].Spec.Replicas { + if o[i].CreationTimestamp.Equal(&o[j].CreationTimestamp) { + return o[i].Name < o[j].Name + } + return o[i].CreationTimestamp.Before(&o[j].CreationTimestamp) + } + return *o[i].Spec.Replicas > *o[j].Spec.Replicas +} + // MachineSetsByCreationTimestamp sorts a list of MachineSet by creation timestamp, using their names as a tie breaker. type MachineSetsByCreationTimestamp []*clusterv1.MachineSet @@ -144,27 +167,6 @@ func skipCopyAnnotation(key string) bool { return annotationsToSkip[key] } -// copyDeploymentAnnotationsToMachineSet copies deployment's annotations to machine set's annotations, -// and returns true if machine set's annotation is changed. 
-// Note that apply and revision annotations are not copied. -func copyDeploymentAnnotationsToMachineSet(deployment *clusterv1.MachineDeployment, ms *clusterv1.MachineSet) bool { - msAnnotationsChanged := false - if ms.Annotations == nil { - ms.Annotations = make(map[string]string) - } - for k, v := range deployment.Annotations { - // newMS revision is updated automatically in getNewMachineSet, and the deployment's revision number is then updated - // by copying its newMS revision number. We should not copy deployment's revision to its newMS, since the update of - // deployment revision number may fail (revision becomes stale) and the revision number in newMS is more reliable. - if skipCopyAnnotation(k) || ms.Annotations[k] == v { - continue - } - ms.Annotations[k] = v - msAnnotationsChanged = true - } - return msAnnotationsChanged -} - func getMaxReplicasAnnotation(ms *clusterv1.MachineSet, logger logr.Logger) (int32, bool) { return getIntFromAnnotation(ms, clusterv1.MaxReplicasAnnotation, logger) } @@ -184,59 +186,59 @@ func getIntFromAnnotation(ms *clusterv1.MachineSet, annotationKey string, logger return int32(intValue), true } -// SetNewMachineSetAnnotations sets new machine set's annotations appropriately by updating its revision and -// copying required deployment annotations to it; it returns true if machine set's annotation is changed. -func SetNewMachineSetAnnotations(deployment *clusterv1.MachineDeployment, newMS *clusterv1.MachineSet, newRevision string, exists bool, logger logr.Logger) bool { - logger = logger.WithValues("MachineSet", klog.KObj(newMS)) - - // First, copy deployment's annotations (except for apply and revision annotations) - annotationChanged := copyDeploymentAnnotationsToMachineSet(deployment, newMS) - // Then, update machine set's revision annotation - if newMS.Annotations == nil { - newMS.Annotations = make(map[string]string) +// ComputeMachineSetAnnotations computes the annotations that should be set on the MachineSet. 
+// Note: The passed in newMS is nil if the new MachineSet doesn't exist in the apiserver yet. +func ComputeMachineSetAnnotations(log logr.Logger, deployment *clusterv1.MachineDeployment, oldMSs []*clusterv1.MachineSet, newMS *clusterv1.MachineSet) (map[string]string, error) { + // Copy annotations from Deployment annotations while filtering out some annotations + // that we don't want to propagate. + annotations := map[string]string{} + for k, v := range deployment.Annotations { + if skipCopyAnnotation(k) { + continue + } + annotations[k] = v } - oldRevision, ok := newMS.Annotations[clusterv1.RevisionAnnotation] + // The newMS's revision should be the greatest among all MSes. Usually, its revision number is newRevision (the max revision number - // of all old MSes + 1). However, it's possible that some of the old MSes are deleted after the newMS revision being updated, and - // newRevision becomes smaller than newMS's revision. We should only update newMS revision when it's smaller than newRevision. + // of all old MSes + 1). However, it's possible that some old MSes are deleted after the newMS revision being updated, and + // newRevision becomes smaller than newMS's revision. We will never decrease a revision of a MachineSet. 
+ maxOldRevision := MaxRevision(oldMSs, log) + newRevisionInt := maxOldRevision + 1 + newRevision := strconv.FormatInt(newRevisionInt, 10) + if newMS != nil { + currentRevision, currentRevisionExists := newMS.Annotations[clusterv1.RevisionAnnotation] + if currentRevisionExists { + currentRevisionInt, err := strconv.ParseInt(currentRevision, 10, 64) + if err != nil { + return nil, errors.Wrapf(err, "failed to parse current revision on MachineSet %s", klog.KObj(newMS)) + } + if newRevisionInt < currentRevisionInt { + newRevision = currentRevision + } + } - oldRevisionInt, err := strconv.ParseInt(oldRevision, 10, 64) - if err != nil { - if oldRevision != "" { - logger.Error(err, "Updating machine set revision OldRevision not int") - return false + // Ensure we preserve the revision history annotation in any case if it already exists. + // Note: With Server-Side-Apply not setting the annotation would drop it. + revisionHistory, revisionHistoryExists := newMS.Annotations[clusterv1.RevisionHistoryAnnotation] + if revisionHistoryExists { + annotations[clusterv1.RevisionHistoryAnnotation] = revisionHistory } - // If the MS annotation is empty then initialise it to 0 - oldRevisionInt = 0 - } - newRevisionInt, err := strconv.ParseInt(newRevision, 10, 64) - if err != nil { - logger.Error(err, "Updating machine set revision NewRevision not int") - return false - } - if oldRevisionInt < newRevisionInt { - newMS.Annotations[clusterv1.RevisionAnnotation] = newRevision - annotationChanged = true - logger.V(4).Info("Updating machine set revision", "revision", newRevision) - } - // If a revision annotation already existed and this machine set was updated with a new revision - // then that means we are rolling back to this machine set. We need to preserve the old revisions - // for historical information. 
- if ok && annotationChanged { - revisionHistoryAnnotation := newMS.Annotations[clusterv1.RevisionHistoryAnnotation] - oldRevisions := strings.Split(revisionHistoryAnnotation, ",") - if oldRevisions[0] == "" { - newMS.Annotations[clusterv1.RevisionHistoryAnnotation] = oldRevision - } else { - oldRevisions = append(oldRevisions, oldRevision) - newMS.Annotations[clusterv1.RevisionHistoryAnnotation] = strings.Join(oldRevisions, ",") + + // If the revision changes then add the old revision to the revision history annotation + if currentRevisionExists && currentRevision != newRevision { + oldRevisions := strings.Split(revisionHistory, ",") + if oldRevisions[0] == "" { + annotations[clusterv1.RevisionHistoryAnnotation] = currentRevision + } else { + annotations[clusterv1.RevisionHistoryAnnotation] = strings.Join(append(oldRevisions, currentRevision), ",") + } } } - // If the new machine set is about to be created, we need to add replica annotations to it. - if !exists && SetReplicasAnnotations(newMS, *(deployment.Spec.Replicas), *(deployment.Spec.Replicas)+MaxSurge(*deployment)) { - annotationChanged = true - } - return annotationChanged + + annotations[clusterv1.RevisionAnnotation] = newRevision + annotations[clusterv1.DesiredReplicasAnnotation] = fmt.Sprintf("%d", *deployment.Spec.Replicas) + annotations[clusterv1.MaxReplicasAnnotation] = fmt.Sprintf("%d", *(deployment.Spec.Replicas)+MaxSurge(*deployment)) + return annotations, nil } // FindOneActiveOrLatest returns the only active or the latest machine set in case there is at most one active @@ -368,35 +370,50 @@ func getMachineSetFraction(ms clusterv1.MachineSet, d clusterv1.MachineDeploymen } // EqualMachineTemplate returns true if two given machineTemplateSpec are equal, -// ignoring the diff in value of Labels["machine-template-hash"], and the version from external references. +// ignoring all the in-place propagated fields, and the version from external references. 
func EqualMachineTemplate(template1, template2 *clusterv1.MachineTemplateSpec) bool { - t1Copy := template1.DeepCopy() - t2Copy := template2.DeepCopy() + t1Copy := MachineTemplateDeepCopyRolloutFields(template1) + t2Copy := MachineTemplateDeepCopyRolloutFields(template2) - // Remove `machine-template-hash` from the comparison: - // 1. The hash result would be different upon machineTemplateSpec API changes - // (e.g. the addition of a new field will cause the hash code to change) - // 2. The deployment template won't have hash labels - delete(t1Copy.Labels, clusterv1.MachineDeploymentUniqueLabel) - delete(t2Copy.Labels, clusterv1.MachineDeploymentUniqueLabel) + return apiequality.Semantic.DeepEqual(t1Copy, t2Copy) +} + +// MachineTemplateDeepCopyRolloutFields copies a MachineTemplateSpec +// and sets all fields that should be propagated in-place to nil and drops version from +// external references. +func MachineTemplateDeepCopyRolloutFields(template *clusterv1.MachineTemplateSpec) *clusterv1.MachineTemplateSpec { + templateCopy := template.DeepCopy() + + // Drop labels and annotations + templateCopy.Labels = nil + templateCopy.Annotations = nil + + // Drop node timeout values + templateCopy.Spec.NodeDrainTimeout = nil + templateCopy.Spec.NodeDeletionTimeout = nil + templateCopy.Spec.NodeVolumeDetachTimeout = nil // Remove the version part from the references APIVersion field, // for more details see issue #2183 and #2140. 
- t1Copy.Spec.InfrastructureRef.APIVersion = t1Copy.Spec.InfrastructureRef.GroupVersionKind().Group - if t1Copy.Spec.Bootstrap.ConfigRef != nil { - t1Copy.Spec.Bootstrap.ConfigRef.APIVersion = t1Copy.Spec.Bootstrap.ConfigRef.GroupVersionKind().Group - } - t2Copy.Spec.InfrastructureRef.APIVersion = t2Copy.Spec.InfrastructureRef.GroupVersionKind().Group - if t2Copy.Spec.Bootstrap.ConfigRef != nil { - t2Copy.Spec.Bootstrap.ConfigRef.APIVersion = t2Copy.Spec.Bootstrap.ConfigRef.GroupVersionKind().Group + templateCopy.Spec.InfrastructureRef.APIVersion = templateCopy.Spec.InfrastructureRef.GroupVersionKind().Group + if templateCopy.Spec.Bootstrap.ConfigRef != nil { + templateCopy.Spec.Bootstrap.ConfigRef.APIVersion = templateCopy.Spec.Bootstrap.ConfigRef.GroupVersionKind().Group } - return apiequality.Semantic.DeepEqual(t1Copy, t2Copy) + return templateCopy } -// FindNewMachineSet returns the new MS this given deployment targets (the one with the same machine template). +// FindNewMachineSet returns the new MS this given deployment targets (the one with the same machine template, ignoring in-place mutable fields). +// NOTE: If we find a matching MachineSet which only differs in in-place mutable fields we can use it to +// fulfill the intent of the MachineDeployment by just updating the MachineSet to propagate in-place mutable fields. +// Thus we don't have to create a new MachineSet and we can avoid an unnecessary rollout. +// NOTE: Even after we changed EqualMachineTemplate to ignore fields that are propagated in-place we can guarantee that if there exists a "new machineset" +// using the old logic then a new machineset will definitely exist using the new logic. The new logic is looser. Therefore, we will +// not face a case where there exists a machine set matching the old logic but there does not exist a machineset matching the new logic. +// In fact, a previously non-matching MS can now start matching the target. 
Since there could be multiple matches, let's choose the +// MS with the most replicas so that there is minimum machine churn. func FindNewMachineSet(deployment *clusterv1.MachineDeployment, msList []*clusterv1.MachineSet) *clusterv1.MachineSet { - sort.Sort(MachineSetsByCreationTimestamp(msList)) + sort.Sort(MachineSetsByDecreasingReplicas(msList)) for i := range msList { if EqualMachineTemplate(&msList[i].Spec.Template, &deployment.Spec.Template) { // In rare cases, such as after cluster upgrades, Deployment may end up with @@ -510,7 +527,7 @@ func DeploymentComplete(deployment *clusterv1.MachineDeployment, newStatus *clus // 1) The new MS is saturated: newMS's replicas == deployment's replicas // 2) For RollingUpdateStrategy: Max number of machines allowed is reached: deployment's replicas + maxSurge == all MSs' replicas. // 3) For OnDeleteStrategy: Max number of machines allowed is reached: deployment's replicas == all MSs' replicas. -func NewMSNewReplicas(deployment *clusterv1.MachineDeployment, allMSs []*clusterv1.MachineSet, newMS *clusterv1.MachineSet) (int32, error) { +func NewMSNewReplicas(deployment *clusterv1.MachineDeployment, allMSs []*clusterv1.MachineSet, newMSReplicas int32) (int32, error) { switch deployment.Spec.Strategy.Type { case clusterv1.RollingUpdateMachineDeploymentStrategyType: // Check if we can scale up. @@ -523,26 +540,26 @@ func NewMSNewReplicas(deployment *clusterv1.MachineDeployment, allMSs []*cluster maxTotalMachines := *(deployment.Spec.Replicas) + int32(maxSurge) if currentMachineCount >= maxTotalMachines { // Cannot scale up. - return *(newMS.Spec.Replicas), nil + return newMSReplicas, nil } // Scale up. scaleUpCount := maxTotalMachines - currentMachineCount // Do not exceed the number of desired replicas. 
- scaleUpCount = integer.Int32Min(scaleUpCount, *(deployment.Spec.Replicas)-*(newMS.Spec.Replicas)) - return *(newMS.Spec.Replicas) + scaleUpCount, nil + scaleUpCount = integer.Int32Min(scaleUpCount, *(deployment.Spec.Replicas)-newMSReplicas) + return newMSReplicas + scaleUpCount, nil case clusterv1.OnDeleteMachineDeploymentStrategyType: // Find the total number of machines currentMachineCount := TotalMachineSetsReplicaSum(allMSs) if currentMachineCount >= *(deployment.Spec.Replicas) { // Cannot scale up as more replicas exist than desired number of replicas in the MachineDeployment. - return *(newMS.Spec.Replicas), nil + return newMSReplicas, nil } // Scale up the latest MachineSet so the total amount of replicas across all MachineSets match // the desired number of replicas in the MachineDeployment scaleUpCount := *(deployment.Spec.Replicas) - currentMachineCount - return *(newMS.Spec.Replicas) + scaleUpCount, nil + return newMSReplicas + scaleUpCount, nil default: - return 0, fmt.Errorf("deployment strategy %v isn't supported", deployment.Spec.Strategy.Type) + return 0, fmt.Errorf("failed to compute replicas: deployment strategy %v isn't supported", deployment.Spec.Strategy.Type) } } diff --git a/internal/controllers/machinedeployment/mdutil/util_test.go b/internal/controllers/machinedeployment/mdutil/util_test.go index 1b508d3951a8..049f82d2751b 100644 --- a/internal/controllers/machinedeployment/mdutil/util_test.go +++ b/internal/controllers/machinedeployment/mdutil/util_test.go @@ -19,6 +19,7 @@ package mdutil import ( "fmt" "math/rand" + "sort" "strconv" "testing" "time" @@ -30,6 +31,7 @@ import ( "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/apiserver/pkg/storage/names" "k8s.io/klog/v2/klogr" + "k8s.io/utils/pointer" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" ) @@ -76,175 +78,209 @@ func generateDeployment(image string) clusterv1.MachineDeployment { Annotations: make(map[string]string), }, Spec: clusterv1.MachineDeploymentSpec{ - Replicas: func(i 
int32) *int32 { return &i }(1), + Replicas: pointer.Int32(3), Selector: metav1.LabelSelector{MatchLabels: machineLabels}, Template: clusterv1.MachineTemplateSpec{ ObjectMeta: clusterv1.ObjectMeta{ Labels: machineLabels, }, - Spec: clusterv1.MachineSpec{}, + Spec: clusterv1.MachineSpec{ + NodeDrainTimeout: &metav1.Duration{Duration: 10 * time.Second}, + }, }, }, } } -func generateMachineTemplateSpec(annotations, labels map[string]string) clusterv1.MachineTemplateSpec { - return clusterv1.MachineTemplateSpec{ - ObjectMeta: clusterv1.ObjectMeta{ - Annotations: annotations, - Labels: labels, +func TestMachineSetsByDecreasingReplicas(t *testing.T) { + t0 := time.Now() + t1 := t0.Add(1 * time.Minute) + msAReplicas1T0 := &clusterv1.MachineSet{ + ObjectMeta: metav1.ObjectMeta{ + CreationTimestamp: metav1.Time{Time: t0}, + Name: "ms-a", + }, + Spec: clusterv1.MachineSetSpec{ + Replicas: pointer.Int32(1), + }, + } + + msAAReplicas3T0 := &clusterv1.MachineSet{ + ObjectMeta: metav1.ObjectMeta{ + CreationTimestamp: metav1.Time{Time: t0}, + Name: "ms-aa", + }, + Spec: clusterv1.MachineSetSpec{ + Replicas: pointer.Int32(3), + }, + } + + msBReplicas1T0 := &clusterv1.MachineSet{ + ObjectMeta: metav1.ObjectMeta{ + CreationTimestamp: metav1.Time{Time: t0}, + Name: "ms-b", + }, + Spec: clusterv1.MachineSetSpec{ + Replicas: pointer.Int32(1), + }, + } + + msAReplicas1T1 := &clusterv1.MachineSet{ + ObjectMeta: metav1.ObjectMeta{ + CreationTimestamp: metav1.Time{Time: t1}, + Name: "ms-a", + }, + Spec: clusterv1.MachineSetSpec{ + Replicas: pointer.Int32(1), }, - Spec: clusterv1.MachineSpec{}, + } + + tests := []struct { + name string + machineSets []*clusterv1.MachineSet + want []*clusterv1.MachineSet + }{ + { + name: "machine set with higher replicas should be lower in the list", + machineSets: []*clusterv1.MachineSet{msAReplicas1T0, msAAReplicas3T0}, + want: []*clusterv1.MachineSet{msAAReplicas3T0, msAReplicas1T0}, + }, + { + name: "MachineSet created earlier should be lower in the list 
if replicas are the same", + machineSets: []*clusterv1.MachineSet{msAReplicas1T1, msAReplicas1T0}, + want: []*clusterv1.MachineSet{msAReplicas1T0, msAReplicas1T1}, + }, + { + name: "MachineSet with lower name should be lower in the list if the replicas and creationTimestamp are same", + machineSets: []*clusterv1.MachineSet{msBReplicas1T0, msAReplicas1T0}, + want: []*clusterv1.MachineSet{msAReplicas1T0, msBReplicas1T0}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // sort the machine sets and verify the sorted list + g := NewWithT(t) + sort.Sort(MachineSetsByDecreasingReplicas(tt.machineSets)) + g.Expect(tt.machineSets).To(Equal(tt.want)) + }) } } func TestEqualMachineTemplate(t *testing.T) { + machineTemplate := &clusterv1.MachineTemplateSpec{ + ObjectMeta: clusterv1.ObjectMeta{ + Labels: map[string]string{"l1": "v1"}, + Annotations: map[string]string{"a1": "v1"}, + }, + Spec: clusterv1.MachineSpec{ + NodeDrainTimeout: &metav1.Duration{Duration: 10 * time.Second}, + NodeDeletionTimeout: &metav1.Duration{Duration: 10 * time.Second}, + NodeVolumeDetachTimeout: &metav1.Duration{Duration: 10 * time.Second}, + InfrastructureRef: corev1.ObjectReference{ + Name: "infra1", + Namespace: "default", + Kind: "InfrastructureMachine", + APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1", + }, + Bootstrap: clusterv1.Bootstrap{ + ConfigRef: &corev1.ObjectReference{ + Name: "bootstrap1", + Namespace: "default", + Kind: "BootstrapConfig", + APIVersion: "bootstrap.cluster.x-k8s.io/v1beta1", + }, + }, + }, + } + + machineTemplateWithEmptyLabels := machineTemplate.DeepCopy() + machineTemplateWithEmptyLabels.Labels = map[string]string{} + + machineTemplateWithDifferentLabels := machineTemplate.DeepCopy() + machineTemplateWithDifferentLabels.Labels = map[string]string{"l2": "v2"} + + machineTemplateWithEmptyAnnotations := machineTemplate.DeepCopy() + machineTemplateWithEmptyAnnotations.Annotations = map[string]string{} + + 
machineTemplateWithDifferentAnnotations := machineTemplate.DeepCopy() + machineTemplateWithDifferentAnnotations.Annotations = map[string]string{"a2": "v2"} + + machineTemplateWithDifferentInPlaceMutableSpecFields := machineTemplate.DeepCopy() + machineTemplateWithDifferentInPlaceMutableSpecFields.Spec.NodeDrainTimeout = &metav1.Duration{Duration: 20 * time.Second} + machineTemplateWithDifferentInPlaceMutableSpecFields.Spec.NodeDeletionTimeout = &metav1.Duration{Duration: 20 * time.Second} + machineTemplateWithDifferentInPlaceMutableSpecFields.Spec.NodeVolumeDetachTimeout = &metav1.Duration{Duration: 20 * time.Second} + + machineTemplateWithDifferentInfraRef := machineTemplate.DeepCopy() + machineTemplateWithDifferentInfraRef.Spec.InfrastructureRef.Name = "infra2" + + machineTemplateWithDifferentInfraRefAPIVersion := machineTemplate.DeepCopy() + machineTemplateWithDifferentInfraRefAPIVersion.Spec.InfrastructureRef.APIVersion = "infrastructure.cluster.x-k8s.io/v1beta2" + + machineTemplateWithDifferentBootstrapConfigRef := machineTemplate.DeepCopy() + machineTemplateWithDifferentBootstrapConfigRef.Spec.Bootstrap.ConfigRef.Name = "bootstrap2" + + machineTemplateWithDifferentBootstrapConfigRefAPIVersion := machineTemplate.DeepCopy() + machineTemplateWithDifferentBootstrapConfigRefAPIVersion.Spec.Bootstrap.ConfigRef.APIVersion = "bootstrap.cluster.x-k8s.io/v1beta2" + tests := []struct { Name string - Former, Latter clusterv1.MachineTemplateSpec + Former, Latter *clusterv1.MachineTemplateSpec Expected bool }{ { - Name: "Same spec, same labels", - Former: generateMachineTemplateSpec(map[string]string{}, map[string]string{clusterv1.MachineDeploymentUniqueLabel: "value-1", "something": "else"}), - Latter: generateMachineTemplateSpec(map[string]string{}, map[string]string{clusterv1.MachineDeploymentUniqueLabel: "value-1", "something": "else"}), + Name: "Same spec, except latter does not have labels", + Former: machineTemplate, + Latter: machineTemplateWithEmptyLabels, 
Expected: true, }, { - Name: "Same spec, only machine-template-hash label value is different", - Former: generateMachineTemplateSpec(map[string]string{}, map[string]string{clusterv1.MachineDeploymentUniqueLabel: "value-1", "something": "else"}), - Latter: generateMachineTemplateSpec(map[string]string{}, map[string]string{clusterv1.MachineDeploymentUniqueLabel: "value-2", "something": "else"}), + Name: "Same spec, except latter has different labels", + Former: machineTemplate, + Latter: machineTemplateWithDifferentLabels, Expected: true, }, { - Name: "Same spec, the former doesn't have machine-template-hash label", - Former: generateMachineTemplateSpec(map[string]string{}, map[string]string{"something": "else"}), - Latter: generateMachineTemplateSpec(map[string]string{}, map[string]string{clusterv1.MachineDeploymentUniqueLabel: "value-2", "something": "else"}), + Name: "Same spec, except latter does not have annotations", + Former: machineTemplate, + Latter: machineTemplateWithEmptyAnnotations, Expected: true, }, { - Name: "Same spec, the former doesn't have machine-template-hash label", - Former: generateMachineTemplateSpec(map[string]string{}, map[string]string{"something": "else"}), - Latter: generateMachineTemplateSpec(map[string]string{}, map[string]string{clusterv1.MachineDeploymentUniqueLabel: "value-2", "something": "else"}), + Name: "Same spec, except latter has different annotations", + Former: machineTemplate, + Latter: machineTemplateWithDifferentAnnotations, Expected: true, }, { - Name: "Same spec, the label is different, the former doesn't have machine-template-hash label, same number of labels", - Former: generateMachineTemplateSpec(map[string]string{}, map[string]string{"something": "else"}), - Latter: generateMachineTemplateSpec(map[string]string{}, map[string]string{clusterv1.MachineDeploymentUniqueLabel: "value-2"}), - Expected: false, - }, - { - Name: "Same spec, the label is different, the latter doesn't have machine-template-hash label, same 
number of labels", - Former: generateMachineTemplateSpec(map[string]string{}, map[string]string{clusterv1.MachineDeploymentUniqueLabel: "value-1"}), - Latter: generateMachineTemplateSpec(map[string]string{}, map[string]string{"something": "else"}), - Expected: false, - }, - { - Name: "Same spec, the label is different, and the machine-template-hash label value is the same", - Former: generateMachineTemplateSpec(map[string]string{}, map[string]string{clusterv1.MachineDeploymentUniqueLabel: "value-1"}), - Latter: generateMachineTemplateSpec(map[string]string{}, map[string]string{clusterv1.MachineDeploymentUniqueLabel: "value-1", "something": "else"}), - Expected: false, - }, - { - Name: "Different spec, same labels", - Former: generateMachineTemplateSpec(map[string]string{"former": "value"}, map[string]string{clusterv1.MachineDeploymentUniqueLabel: "value-1", "something": "else"}), - Latter: generateMachineTemplateSpec(map[string]string{"latter": "value"}, map[string]string{clusterv1.MachineDeploymentUniqueLabel: "value-1", "something": "else"}), - Expected: false, - }, - { - Name: "Different spec, different machine-template-hash label value", - Former: generateMachineTemplateSpec(map[string]string{"x": ""}, map[string]string{clusterv1.MachineDeploymentUniqueLabel: "value-1", "something": "else"}), - Latter: generateMachineTemplateSpec(map[string]string{"x": "1"}, map[string]string{clusterv1.MachineDeploymentUniqueLabel: "value-2", "something": "else"}), - Expected: false, + Name: "Spec changes, latter has different in-place mutable spec fields", + Former: machineTemplate, + Latter: machineTemplateWithDifferentInPlaceMutableSpecFields, + Expected: true, }, { - Name: "Different spec, the former doesn't have machine-template-hash label", - Former: generateMachineTemplateSpec(map[string]string{"x": ""}, map[string]string{"something": "else"}), - Latter: generateMachineTemplateSpec(map[string]string{"x": "1"}, map[string]string{clusterv1.MachineDeploymentUniqueLabel: 
"value-2", "something": "else"}), + Name: "Spec changes, latter has different InfrastructureRef", + Former: machineTemplate, + Latter: machineTemplateWithDifferentInfraRef, Expected: false, }, { - Name: "Different spec, different labels", - Former: generateMachineTemplateSpec(map[string]string{}, map[string]string{"something": "else"}), - Latter: generateMachineTemplateSpec(map[string]string{}, map[string]string{"nothing": "else"}), + Name: "Spec changes, latter has different Bootstrap.ConfigRef", + Former: machineTemplate, + Latter: machineTemplateWithDifferentBootstrapConfigRef, Expected: false, }, { - Name: "Same spec, except for references versions", - Former: clusterv1.MachineTemplateSpec{ - ObjectMeta: clusterv1.ObjectMeta{ - Labels: map[string]string{}, - }, - Spec: clusterv1.MachineSpec{ - Bootstrap: clusterv1.Bootstrap{ - ConfigRef: &corev1.ObjectReference{ - APIVersion: "bootstrap.cluster.x-k8s.io/v1alpha2", - Kind: "MachineBootstrap", - }, - }, - InfrastructureRef: corev1.ObjectReference{ - APIVersion: "infrastructure.cluster.x-k8s.io/v1alpha2", - Kind: "MachineInfrastructure", - }, - }, - }, - Latter: clusterv1.MachineTemplateSpec{ - ObjectMeta: clusterv1.ObjectMeta{ - Labels: map[string]string{}, - }, - Spec: clusterv1.MachineSpec{ - Bootstrap: clusterv1.Bootstrap{ - ConfigRef: &corev1.ObjectReference{ - APIVersion: "bootstrap.cluster.x-k8s.io/v1beta1", - Kind: "MachineBootstrap", - }, - }, - InfrastructureRef: corev1.ObjectReference{ - APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1", - Kind: "MachineInfrastructure", - }, - }, - }, + Name: "Same spec, except latter has different InfrastructureRef APIVersion", + Former: machineTemplate, + Latter: machineTemplateWithDifferentInfraRefAPIVersion, Expected: true, }, { - Name: "Same spec, bootstrap references are different kinds", - Former: clusterv1.MachineTemplateSpec{ - ObjectMeta: clusterv1.ObjectMeta{ - Labels: map[string]string{}, - }, - Spec: clusterv1.MachineSpec{ - Bootstrap: 
clusterv1.Bootstrap{ - ConfigRef: &corev1.ObjectReference{ - APIVersion: "bootstrap.cluster.x-k8s.io/v1alpha2", - Kind: "MachineBootstrap1", - }, - }, - InfrastructureRef: corev1.ObjectReference{ - APIVersion: "infrastructure.cluster.x-k8s.io/v1alpha2", - Kind: "MachineInfrastructure", - }, - }, - }, - Latter: clusterv1.MachineTemplateSpec{ - ObjectMeta: clusterv1.ObjectMeta{ - Labels: map[string]string{}, - }, - Spec: clusterv1.MachineSpec{ - Bootstrap: clusterv1.Bootstrap{ - ConfigRef: &corev1.ObjectReference{ - APIVersion: "bootstrap.cluster.x-k8s.io/v1beta1", - Kind: "MachineBootstrap2", - }, - }, - InfrastructureRef: corev1.ObjectReference{ - APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1", - Kind: "MachineInfrastructure", - }, - }, - }, - Expected: false, + Name: "Same spec, except latter has different Bootstrap.ConfigRef APIVersion", + Former: machineTemplate, + Latter: machineTemplateWithDifferentBootstrapConfigRefAPIVersion, + Expected: true, }, } @@ -260,32 +296,26 @@ func TestEqualMachineTemplate(t *testing.T) { g.Expect(t2.Labels).NotTo(BeNil()) } - runTest(&test.Former, &test.Latter) + runTest(test.Former, test.Latter) // Test the same case in reverse order - runTest(&test.Latter, &test.Former) + runTest(test.Latter, test.Former) }) } } func TestFindNewMachineSet(t *testing.T) { - now := metav1.Now() - later := metav1.Time{Time: now.Add(time.Minute)} - deployment := generateDeployment("nginx") - newMS := generateMS(deployment) - newMS.Labels[clusterv1.MachineDeploymentUniqueLabel] = "hash" - newMS.CreationTimestamp = later - newMSDup := generateMS(deployment) - newMSDup.Labels[clusterv1.MachineDeploymentUniqueLabel] = "different-hash" - newMSDup.CreationTimestamp = now + matchingMS := generateMS(deployment) - oldDeployment := generateDeployment("nginx") - oldMS := generateMS(oldDeployment) - oldMS.Spec.Template.Annotations = map[string]string{ - "old": "true", - } - oldMS.Status.FullyLabeledReplicas = *(oldMS.Spec.Replicas) + 
matchingMSHigherReplicas := generateMS(deployment) + matchingMSHigherReplicas.Spec.Replicas = pointer.Int32(2) + + matchingMSDiffersInPlaceMutableFields := generateMS(deployment) + matchingMSDiffersInPlaceMutableFields.Spec.Template.Spec.NodeDrainTimeout = &metav1.Duration{Duration: 20 * time.Second} + + oldMS := generateMS(deployment) + oldMS.Spec.Template.Spec.InfrastructureRef.Name = "changed-infra-ref" tests := []struct { Name string @@ -294,19 +324,25 @@ func TestFindNewMachineSet(t *testing.T) { expected *clusterv1.MachineSet }{ { - Name: "Get new MachineSet with the same template as Deployment spec but different machine-template-hash value", + Name: "Get the MachineSet with the MachineTemplate that matches the intent of the MachineDeployment", + deployment: deployment, + msList: []*clusterv1.MachineSet{&oldMS, &matchingMS}, + expected: &matchingMS, + }, + { + Name: "Get the MachineSet with the higher replicas if multiple MachineSets match the desired intent on the MachineDeployment", deployment: deployment, - msList: []*clusterv1.MachineSet{&newMS, &oldMS}, - expected: &newMS, + msList: []*clusterv1.MachineSet{&oldMS, &matchingMS, &matchingMSHigherReplicas}, + expected: &matchingMSHigherReplicas, }, { - Name: "Get the oldest new MachineSet when there are more than one MachineSet with the same template", + Name: "Get the MachineSet with the MachineTemplate that matches the desired intent on the MachineDeployment, except differs in in-place mutable fields", deployment: deployment, - msList: []*clusterv1.MachineSet{&newMS, &oldMS, &newMSDup}, - expected: &newMSDup, + msList: []*clusterv1.MachineSet{&oldMS, &matchingMSDiffersInPlaceMutableFields}, + expected: &matchingMSDiffersInPlaceMutableFields, }, { - Name: "Get nil new MachineSet", + Name: "Get nil if no MachineSet matches the desired intent of the MachineDeployment", deployment: deployment, msList: []*clusterv1.MachineSet{&oldMS}, expected: nil, @@ -324,34 +360,22 @@ func TestFindNewMachineSet(t 
*testing.T) { } func TestFindOldMachineSets(t *testing.T) { - now := metav1.Now() - later := metav1.Time{Time: now.Add(time.Minute)} - before := metav1.Time{Time: now.Add(-time.Minute)} - deployment := generateDeployment("nginx") + newMS := generateMS(deployment) - *(newMS.Spec.Replicas) = 1 - newMS.Labels[clusterv1.MachineDeploymentUniqueLabel] = "hash" - newMS.CreationTimestamp = later + newMS.Name = "aa" + newMS.Spec.Replicas = pointer.Int32(1) - newMSDup := generateMS(deployment) - newMSDup.Labels[clusterv1.MachineDeploymentUniqueLabel] = "different-hash" - newMSDup.CreationTimestamp = now + newMSHigherReplicas := generateMS(deployment) + newMSHigherReplicas.Spec.Replicas = pointer.Int32(2) + + newMSHigherName := generateMS(deployment) + newMSHigherName.Spec.Replicas = pointer.Int32(1) + newMSHigherName.Name = "ab" oldDeployment := generateDeployment("nginx") + oldDeployment.Spec.Template.Spec.InfrastructureRef.Name = "changed-infra-ref" oldMS := generateMS(oldDeployment) - oldMS.Spec.Template.Annotations = map[string]string{ - "old": "true", - } - oldMS.Status.FullyLabeledReplicas = *(oldMS.Spec.Replicas) - oldMS.CreationTimestamp = before - - oldDeployment = generateDeployment("nginx") - oldDeployment.Spec.Selector.MatchLabels["old-label"] = "old-value" - oldDeployment.Spec.Template.Labels["old-label"] = "old-value" - oldMSwithOldLabel := generateMS(oldDeployment) - oldMSwithOldLabel.Status.FullyLabeledReplicas = *(oldMSwithOldLabel.Spec.Replicas) - oldMSwithOldLabel.CreationTimestamp = before tests := []struct { Name string @@ -375,24 +399,24 @@ func TestFindOldMachineSets(t *testing.T) { expectedRequire: nil, }, { - Name: "Get old MachineSets with two new MachineSets, only the oldest new MachineSet is seen as new MachineSet", + Name: "Get old MachineSets with two new MachineSets, only the MachineSet with higher replicas is seen as new MachineSet", deployment: deployment, - msList: []*clusterv1.MachineSet{&oldMS, &newMS, &newMSDup}, + msList: 
[]*clusterv1.MachineSet{&oldMS, &newMS, &newMSHigherReplicas}, expected: []*clusterv1.MachineSet{&oldMS, &newMS}, expectedRequire: []*clusterv1.MachineSet{&newMS}, }, { - Name: "Get empty old MachineSets", + Name: "Get old MachineSets with two new MachineSets, when replicas are matching only the MachineSet with lower name is seen as new MachineSet", deployment: deployment, - msList: []*clusterv1.MachineSet{&newMS}, - expected: []*clusterv1.MachineSet{}, - expectedRequire: nil, + msList: []*clusterv1.MachineSet{&oldMS, &newMS, &newMSHigherName}, + expected: []*clusterv1.MachineSet{&oldMS, &newMSHigherName}, + expectedRequire: []*clusterv1.MachineSet{&newMSHigherName}, }, { - Name: "Get old MachineSets after label changed in MachineDeployments", + Name: "Get empty old MachineSets", deployment: deployment, - msList: []*clusterv1.MachineSet{&newMS, &oldMSwithOldLabel}, - expected: []*clusterv1.MachineSet{&oldMSwithOldLabel}, + msList: []*clusterv1.MachineSet{&newMS}, + expected: []*clusterv1.MachineSet{}, expectedRequire: nil, }, } @@ -562,7 +586,7 @@ func TestNewMSNewReplicas(t *testing.T) { }(test.maxSurge), } *(newRC.Spec.Replicas) = test.newMSReplicas - ms, err := NewMSNewReplicas(&newDeployment, []*clusterv1.MachineSet{&rs5}, &newRC) + ms, err := NewMSNewReplicas(&newDeployment, []*clusterv1.MachineSet{&rs5}, *newRC.Spec.Replicas) g.Expect(err).NotTo(HaveOccurred()) g.Expect(ms).To(Equal(test.expected)) }) @@ -716,24 +740,10 @@ func TestMaxUnavailable(t *testing.T) { func TestAnnotationUtils(t *testing.T) { // Setup tDeployment := generateDeployment("nginx") + tDeployment.Spec.Replicas = pointer.Int32(1) tMS := generateMS(tDeployment) - tDeployment.Annotations[clusterv1.RevisionAnnotation] = "999" - logger := klogr.New() - - // Test Case 1: Check if anotations are copied properly from deployment to MS - t.Run("SetNewMachineSetAnnotations", func(t *testing.T) { - g := NewWithT(t) - - // Try to set the increment revision from 1 through 20 - for i := 0; i < 20; i++ { 
- nextRevision := fmt.Sprintf("%d", i+1) - SetNewMachineSetAnnotations(&tDeployment, &tMS, nextRevision, true, logger) - // Now the MachineSets Revision Annotation should be i+1 - g.Expect(tMS.Annotations).To(HaveKeyWithValue(clusterv1.RevisionAnnotation, nextRevision)) - } - }) - // Test Case 2: Check if annotations are set properly + // Test Case 1: Check if annotations are set properly t.Run("SetReplicasAnnotations", func(t *testing.T) { g := NewWithT(t) @@ -742,7 +752,7 @@ func TestAnnotationUtils(t *testing.T) { g.Expect(tMS.Annotations).To(HaveKeyWithValue(clusterv1.MaxReplicasAnnotation, "11")) }) - // Test Case 3: Check if annotations reflect deployments state + // Test Case 2: Check if annotations reflect deployments state tMS.Annotations[clusterv1.DesiredReplicasAnnotation] = "1" tMS.Status.AvailableReplicas = 1 tMS.Spec.Replicas = new(int32) @@ -755,6 +765,151 @@ func TestAnnotationUtils(t *testing.T) { }) } +func TestComputeMachineSetAnnotations(t *testing.T) { + deployment := generateDeployment("nginx") + deployment.Spec.Replicas = pointer.Int32(3) + maxSurge := intstr.FromInt(1) + maxUnavailable := intstr.FromInt(0) + deployment.Spec.Strategy = &clusterv1.MachineDeploymentStrategy{ + Type: clusterv1.RollingUpdateMachineDeploymentStrategyType, + RollingUpdate: &clusterv1.MachineRollingUpdateDeployment{ + MaxSurge: &maxSurge, + MaxUnavailable: &maxUnavailable, + }, + } + deployment.Annotations = map[string]string{ + corev1.LastAppliedConfigAnnotation: "last-applied-configuration", + "key1": "value1", + } + + tests := []struct { + name string + deployment *clusterv1.MachineDeployment + oldMSs []*clusterv1.MachineSet + ms *clusterv1.MachineSet + want map[string]string + wantErr bool + }{ + { + name: "Calculating annotations for a new MachineSet", + deployment: &deployment, + oldMSs: nil, + ms: nil, + want: map[string]string{ + "key1": "value1", + clusterv1.RevisionAnnotation: "1", + clusterv1.DesiredReplicasAnnotation: "3", + 
clusterv1.MaxReplicasAnnotation: "4", + }, + wantErr: false, + }, + { + name: "Calculating annotations for a new MachineSet - old MSs exist", + deployment: &deployment, + oldMSs: []*clusterv1.MachineSet{machineSetWithRevisionAndHistory("1", "")}, + ms: nil, + want: map[string]string{ + "key1": "value1", + clusterv1.RevisionAnnotation: "2", + clusterv1.DesiredReplicasAnnotation: "3", + clusterv1.MaxReplicasAnnotation: "4", + }, + wantErr: false, + }, + { + name: "Calculating annotations for a existing MachineSet", + deployment: &deployment, + oldMSs: nil, + ms: machineSetWithRevisionAndHistory("1", ""), + want: map[string]string{ + "key1": "value1", + clusterv1.RevisionAnnotation: "1", + clusterv1.DesiredReplicasAnnotation: "3", + clusterv1.MaxReplicasAnnotation: "4", + }, + wantErr: false, + }, + { + name: "Calculating annotations for a existing MachineSet - old MSs exist", + deployment: &deployment, + oldMSs: []*clusterv1.MachineSet{ + machineSetWithRevisionAndHistory("1", ""), + machineSetWithRevisionAndHistory("2", ""), + }, + ms: machineSetWithRevisionAndHistory("1", ""), + want: map[string]string{ + "key1": "value1", + clusterv1.RevisionAnnotation: "3", + clusterv1.RevisionHistoryAnnotation: "1", + clusterv1.DesiredReplicasAnnotation: "3", + clusterv1.MaxReplicasAnnotation: "4", + }, + wantErr: false, + }, + { + name: "Calculating annotations for a existing MachineSet - old MSs exist - existing revision is greater", + deployment: &deployment, + oldMSs: []*clusterv1.MachineSet{ + machineSetWithRevisionAndHistory("1", ""), + machineSetWithRevisionAndHistory("2", ""), + }, + ms: machineSetWithRevisionAndHistory("4", ""), + want: map[string]string{ + "key1": "value1", + clusterv1.RevisionAnnotation: "4", + clusterv1.DesiredReplicasAnnotation: "3", + clusterv1.MaxReplicasAnnotation: "4", + }, + wantErr: false, + }, + { + name: "Calculating annotations for a existing MachineSet - old MSs exist - ms already has revision history", + deployment: &deployment, + oldMSs: 
[]*clusterv1.MachineSet{ + machineSetWithRevisionAndHistory("3", ""), + machineSetWithRevisionAndHistory("4", ""), + }, + ms: machineSetWithRevisionAndHistory("2", "1"), + want: map[string]string{ + "key1": "value1", + clusterv1.RevisionAnnotation: "5", + clusterv1.RevisionHistoryAnnotation: "1,2", + clusterv1.DesiredReplicasAnnotation: "3", + clusterv1.MaxReplicasAnnotation: "4", + }, + wantErr: false, + }, + } + + log := klogr.New() + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) + got, err := ComputeMachineSetAnnotations(log, tt.deployment, tt.oldMSs, tt.ms) + if tt.wantErr { + g.Expect(err).ShouldNot(BeNil()) + } else { + g.Expect(err).Should(BeNil()) + g.Expect(got).Should(Equal(tt.want)) + } + }) + } +} + +func machineSetWithRevisionAndHistory(revision string, revisionHistory string) *clusterv1.MachineSet { + ms := &clusterv1.MachineSet{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + clusterv1.RevisionAnnotation: revision, + }, + }, + } + if revisionHistory != "" { + ms.Annotations[clusterv1.RevisionHistoryAnnotation] = revisionHistory + } + return ms +} + func TestReplicasAnnotationsNeedUpdate(t *testing.T) { desiredReplicas := fmt.Sprintf("%d", int32(10)) maxReplicas := fmt.Sprintf("%d", int32(20))