From 7b1f2917c6a51a95caafcbe013d97c474ff2986d Mon Sep 17 00:00:00 2001 From: Kenny Leung Date: Tue, 8 May 2018 15:23:43 -0700 Subject: [PATCH] First pass for machine deployment controller Copies/borrows heavily from deployment controller. Things not implemented in this PR: deployment progress orphan adoption --- .../machinedeployment/controller.go | 340 ++++++- .../machinedeployment/controller_test.go | 908 ++++++++++++++++++ .../machinedeployment_suite_test.go | 46 + pkg/controller/machinedeployment/rolling.go | 266 +++++ .../machinedeployment/rolling_test.go | 433 +++++++++ pkg/controller/machinedeployment/sync.go | 458 +++++++++ pkg/controller/machinedeployment/sync_test.go | 435 +++++++++ pkg/controller/machinedeployment/util/util.go | 696 ++++++++++++++ .../machinedeployment/util/util_test.go | 841 ++++++++++++++++ sample/machinedeployment.yaml | 28 +- 10 files changed, 4438 insertions(+), 13 deletions(-) create mode 100644 pkg/controller/machinedeployment/controller_test.go create mode 100644 pkg/controller/machinedeployment/machinedeployment_suite_test.go create mode 100644 pkg/controller/machinedeployment/rolling.go create mode 100644 pkg/controller/machinedeployment/rolling_test.go create mode 100644 pkg/controller/machinedeployment/sync.go create mode 100644 pkg/controller/machinedeployment/sync_test.go create mode 100644 pkg/controller/machinedeployment/util/util.go create mode 100644 pkg/controller/machinedeployment/util/util_test.go diff --git a/pkg/controller/machinedeployment/controller.go b/pkg/controller/machinedeployment/controller.go index 50147b25b43f..29dc93425d62 100644 --- a/pkg/controller/machinedeployment/controller.go +++ b/pkg/controller/machinedeployment/controller.go @@ -1,4 +1,3 @@ - /* Copyright 2018 The Kubernetes Authors. @@ -15,41 +14,360 @@ See the License for the specific language governing permissions and limitations under the License. */ - package machinedeployment import ( - "log" + "fmt" + "reflect" + "github.com/golang/glog" "github.com/kubernetes-incubator/apiserver-builder/pkg/builders" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/types" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/client-go/tools/cache" + + "sigs.k8s.io/cluster-api/pkg/apis/cluster/common" "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" - "sigs.k8s.io/cluster-api/pkg/controller/sharedinformers" + "sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset" listers "sigs.k8s.io/cluster-api/pkg/client/listers_generated/cluster/v1alpha1" + "sigs.k8s.io/cluster-api/pkg/controller/sharedinformers" ) +// controllerKind contains the schema.GroupVersionKind for this controller type. +var controllerKind = v1alpha1.SchemeGroupVersion.WithKind("MachineDeployment") + // +controller:group=cluster,version=v1alpha1,kind=MachineDeployment,resource=machinedeployments type MachineDeploymentControllerImpl struct { builders.DefaultControllerFns + // machineClient a client that knows how to consume Machine resources + machineClient clientset.Interface + // lister indexes properties about MachineDeployment - lister listers.MachineDeploymentLister + mLister listers.MachineLister + mdLister listers.MachineDeploymentLister + msLister listers.MachineSetLister + + informers *sharedinformers.SharedInformers } // Init initializes the controller and is called by the generated code // Register watches for additional resource types here. 
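+// Note: Init below wires the Machine/MachineSet/MachineDeployment listers,
+// registers MachineSet event handlers so that MachineSet changes enqueue the
+// owning MachineDeployment, and builds a clientset from the rest config for
+// writing status and scale updates.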
func (c *MachineDeploymentControllerImpl) Init(arguments sharedinformers.ControllerInitArguments) { // Use the lister for indexing machinedeployments labels - c.lister = arguments.GetSharedInformers().Factory.Cluster().V1alpha1().MachineDeployments().Lister() + c.mLister = arguments.GetSharedInformers().Factory.Cluster().V1alpha1().Machines().Lister() + c.msLister = arguments.GetSharedInformers().Factory.Cluster().V1alpha1().MachineSets().Lister() + c.mdLister = arguments.GetSharedInformers().Factory.Cluster().V1alpha1().MachineDeployments().Lister() + + arguments.GetSharedInformers().Factory.Cluster().V1alpha1().MachineSets().Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ + AddFunc: c.addMachineSet, + UpdateFunc: c.updateMachineSet, + DeleteFunc: c.deleteMachineSet, + }) + + mc, err := clientset.NewForConfig(arguments.GetRestConfig()) + if err != nil { + glog.Fatalf("error building clientset for machineClient: %v", err) + } + c.machineClient = mc + + c.informers = arguments.GetSharedInformers() + + c.waitForCacheSync() } -// Reconcile handles enqueued messages +func (c *MachineDeploymentControllerImpl) waitForCacheSync() { + glog.Infof("Waiting for caches to sync for machine deployment controller") + stopCh := make(chan struct{}) + mListerSynced := c.informers.Factory.Cluster().V1alpha1().Machines().Informer().HasSynced + msListerSynced := c.informers.Factory.Cluster().V1alpha1().MachineSets().Informer().HasSynced + mdListerSynced := c.informers.Factory.Cluster().V1alpha1().MachineDeployments().Informer().HasSynced + if !cache.WaitForCacheSync(stopCh, mListerSynced, msListerSynced, mdListerSynced) { + glog.Warningf("Unable to sync caches for machine deployment controller") + return + } + glog.Infof("Caches are synced for machine deployment controller") +} + +func (c *MachineDeploymentControllerImpl) getMachineSetsForDeployment(d *v1alpha1.MachineDeployment) ([]*v1alpha1.MachineSet, error) { + // List all MachineSets to find those we own but that no longer match our + // selector. + msList, err := c.msLister.MachineSets(d.Namespace).List(labels.Everything()) + if err != nil { + return nil, err + } + + // TODO: flush out machine set adoption. + + var filteredMS []*v1alpha1.MachineSet + for _, ms := range msList { + if metav1.GetControllerOf(ms) == nil || (metav1.GetControllerOf(ms) != nil && !metav1.IsControlledBy(ms, d)) { + glog.V(4).Infof("%s not controlled by %v", ms.Name, d.Name) + continue + } + selector, err := metav1.LabelSelectorAsSelector(&d.Spec.Selector) + if err != nil { + glog.Errorf("Skipping machineset %v, failed to get label selector from spec selector.", ms.Name) + continue + } + // If a deployment with a nil or empty selector creeps in, it should match nothing, not everything. + if selector.Empty() { + glog.Warningf("Skipping machineset %v as the selector is empty.", ms.Name) + continue + } + if !selector.Matches(labels.Set(ms.Labels)) { + glog.V(4).Infof("Skipping machineset %v, label mismatch.", ms.Name) + continue + } + filteredMS = append(filteredMS, ms) + } + return filteredMS, nil +} + +// Reconcile handles reconciling of machine deployment func (c *MachineDeploymentControllerImpl) Reconcile(u *v1alpha1.MachineDeployment) error { - // Implement controller logic here - log.Printf("Running reconcile MachineDeployment for %s\n", u.Name) - return nil + // Deep-copy otherwise we are mutating our cache. 
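+	// The object handed to Reconcile is a pointer into the shared informer
+	// cache; mutating it directly (e.g. the ObservedGeneration update below)
+	// would be visible to every other reader of that cache.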
+	d := u.DeepCopy()
+
+	everything := metav1.LabelSelector{}
+	if reflect.DeepEqual(d.Spec.Selector, everything) {
+		if d.Status.ObservedGeneration < d.Generation {
+			d.Status.ObservedGeneration = d.Generation
+			if _, err := c.machineClient.ClusterV1alpha1().MachineDeployments(d.Namespace).UpdateStatus(d); err != nil {
+				glog.Warningf("Failed to update status for deployment %v. %v", d.Name, err)
+				return err
+			}
+		}
+		return nil
+	}
+
+	msList, err := c.getMachineSetsForDeployment(d)
+	if err != nil {
+		return err
+	}
+
+	machineMap, err := c.getMachineMapForDeployment(d, msList)
+	if err != nil {
+		return err
+	}
+
+	if d.DeletionTimestamp != nil {
+		return c.sync(d, msList, machineMap)
+	}
+
+	if d.Spec.Paused {
+		return c.sync(d, msList, machineMap)
+	}
+
+	switch d.Spec.Strategy.Type {
+	case common.RollingUpdateMachineDeploymentStrategyType:
+		return c.rolloutRolling(d, msList, machineMap)
+	}
+	return fmt.Errorf("unexpected deployment strategy type: %s", d.Spec.Strategy.Type)
 }
 
 func (c *MachineDeploymentControllerImpl) Get(namespace, name string) (*v1alpha1.MachineDeployment, error) {
-	return c.lister.MachineDeployments(namespace).Get(name)
+	return c.mdLister.MachineDeployments(namespace).Get(name)
+}
+
+// addMachineSet enqueues the deployment that manages a MachineSet when the MachineSet is created.
+func (c *MachineDeploymentControllerImpl) addMachineSet(obj interface{}) {
+	ms := obj.(*v1alpha1.MachineSet)
+
+	if ms.DeletionTimestamp != nil {
+		// On a restart of the controller manager, it's possible for an object to
+		// show up in a state that is already pending deletion.
+		c.deleteMachineSet(ms)
+		return
+	}
+
+	// If it has a ControllerRef, that's all that matters.
+	if controllerRef := metav1.GetControllerOf(ms); controllerRef != nil {
+		d := c.resolveControllerRef(ms.Namespace, controllerRef)
+		if d == nil {
+			return
+		}
+		glog.V(4).Infof("MachineSet %s added for deployment %v.", ms.Name, d.Name)
+		c.enqueue(d)
+		return
+	}
+
+	// Otherwise, it's an orphan. Get a list of all matching Deployments and sync
+	// them to see if anyone wants to adopt it.
+	mds := c.getMachineDeploymentsForMachineSet(ms)
+	if len(mds) == 0 {
+		return
+	}
+	glog.V(4).Infof("Orphan MachineSet %s added.", ms.Name)
+	for _, d := range mds {
+		c.enqueue(d)
+	}
+}
+
+// getMachineDeploymentsForMachineSet returns a list of Deployments that potentially
+// match a MachineSet.
+func (c *MachineDeploymentControllerImpl) getMachineDeploymentsForMachineSet(ms *v1alpha1.MachineSet) []*v1alpha1.MachineDeployment {
+	if len(ms.Labels) == 0 {
+		glog.Warningf("no machine deployments found for MachineSet %v because it has no labels", ms.Name)
+		return nil
+	}
+
+	dList, err := c.mdLister.MachineDeployments(ms.Namespace).List(labels.Everything())
+	if err != nil {
+		glog.Warningf("failed to list machine deployments, %v", err)
+		return nil
+	}
+
+	var deployments []*v1alpha1.MachineDeployment
+	for _, d := range dList {
+		selector, err := metav1.LabelSelectorAsSelector(&d.Spec.Selector)
+		if err != nil {
+			continue
+		}
+		// If a deployment with a nil or empty selector creeps in, it should match nothing, not everything.
+		if selector.Empty() || !selector.Matches(labels.Set(ms.Labels)) {
+			continue
+		}
+		deployments = append(deployments, d)
+	}
+
+	return deployments
+}
+
+// updateMachineSet figures out what deployment(s) manage a MachineSet when the MachineSet
+// is updated and wakes them up. If anything on the MachineSet has changed, we need to
+// reconcile its current MachineDeployment. If the MachineSet's controller reference has
+// changed, we must also reconcile its old MachineDeployment.
+func (c *MachineDeploymentControllerImpl) updateMachineSet(old, cur interface{}) {
+	curMS := cur.(*v1alpha1.MachineSet)
+	oldMS := old.(*v1alpha1.MachineSet)
+	if curMS.ResourceVersion == oldMS.ResourceVersion {
+		// Periodic resync will send update events for all known machine sets.
+		// Two different versions of the same machine set will always have different RVs.
+		return
+	}
+
+	curControllerRef := metav1.GetControllerOf(curMS)
+	oldControllerRef := metav1.GetControllerOf(oldMS)
+	controllerRefChanged := !reflect.DeepEqual(curControllerRef, oldControllerRef)
+	if controllerRefChanged && oldControllerRef != nil {
+		// The ControllerRef was changed. Sync the old controller, if any.
+		if d := c.resolveControllerRef(oldMS.Namespace, oldControllerRef); d != nil {
+			c.enqueue(d)
+		}
+	}
+
+	// If it has a ControllerRef, that's all that matters.
+	if curControllerRef != nil {
+		d := c.resolveControllerRef(curMS.Namespace, curControllerRef)
+		if d == nil {
+			return
+		}
+		glog.V(4).Infof("MachineSet %s updated.", curMS.Name)
+		c.enqueue(d)
+		return
+	}
+
+	// Otherwise, it's an orphan. If anything changed, sync matching controllers
+	// to see if anyone wants to adopt it now.
+	labelChanged := !reflect.DeepEqual(curMS.Labels, oldMS.Labels)
+	if labelChanged || controllerRefChanged {
+		mds := c.getMachineDeploymentsForMachineSet(curMS)
+		if len(mds) == 0 {
+			return
+		}
+		glog.V(4).Infof("Orphan MachineSet %s updated.", curMS.Name)
+		for _, d := range mds {
+			c.enqueue(d)
+		}
+	}
+}
+
+// deleteMachineSet enqueues the deployment that manages a MachineSet when
+// the MachineSet is deleted.
+func (c *MachineDeploymentControllerImpl) deleteMachineSet(obj interface{}) {
+	ms := obj.(*v1alpha1.MachineSet)
+
+	controllerRef := metav1.GetControllerOf(ms)
+	if controllerRef == nil {
+		// No controller should care about orphans being deleted.
+		return
+	}
+	d := c.resolveControllerRef(ms.Namespace, controllerRef)
+	if d == nil {
+		return
+	}
+	glog.V(4).Infof("MachineSet %s deleted.", ms.Name)
+	c.enqueue(d)
+}
+
+// resolveControllerRef returns the controller referenced by a ControllerRef,
+// or nil if the ControllerRef could not be resolved to a matching controller
+// of the correct Kind.
+func (c *MachineDeploymentControllerImpl) resolveControllerRef(namespace string, controllerRef *metav1.OwnerReference) *v1alpha1.MachineDeployment {
+	// We can't look up by UID, so look up by Name and then verify UID.
+	// Don't even try to look up by Name if it's the wrong Kind.
+	if controllerRef.Kind != controllerKind.Kind {
+		glog.Warningf("Failed to get machine deployment, controller ref had unexpected kind %v, expected %v", controllerRef.Kind, controllerKind.Kind)
+		return nil
+	}
+	d, err := c.mdLister.MachineDeployments(namespace).Get(controllerRef.Name)
+	if err != nil {
+		glog.Warningf("Failed to get machine deployment with name %v", controllerRef.Name)
+		return nil
+	}
+	if d.UID != controllerRef.UID {
+		// The controller we found with this Name is not the same one that the
+		// ControllerRef points to.
+		glog.Warningf("Failed to get machine deployment, UID mismatch. controller ref UID: %v, found machine deployment UID: %v", controllerRef.UID, d.UID)
+		return nil
+	}
+	return d
+}
+
+// getMachineMapForDeployment returns the Machines managed by a Deployment.
+//
+// It returns a map from MachineSet UID to a list of Machines controlled by that MS,
+// according to the Machine's ControllerRef.
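+//
+// For example (illustrative names and UIDs): with msList = [ms1, ms2] where
+// only ms1 controls machines m1 and m2, the result is
+//
+//	ms1.UID -> MachineList{m1, m2}
+//	ms2.UID -> MachineList{}
+//
+// An empty MachineList entry is pre-created for every MachineSet in msList.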
+func (c *MachineDeploymentControllerImpl) getMachineMapForDeployment(d *v1alpha1.MachineDeployment, msList []*v1alpha1.MachineSet) (map[types.UID]*v1alpha1.MachineList, error) { + // Get all Machines that potentially belong to this Deployment. + selector, err := metav1.LabelSelectorAsSelector(&d.Spec.Selector) + if err != nil { + return nil, err + } + machines, err := c.mLister.Machines(d.Namespace).List(selector) + if err != nil { + return nil, err + } + // Group Machines by their controller (if it's in msList). + machineMap := make(map[types.UID]*v1alpha1.MachineList, len(msList)) + for _, ms := range msList { + machineMap[ms.UID] = &v1alpha1.MachineList{} + } + for _, machine := range machines { + // Do not ignore inactive Machines because Recreate Deployments need to verify that no + // Machines from older versions are running before spinning up new Machines. + controllerRef := metav1.GetControllerOf(machine) + if controllerRef == nil { + continue + } + // Only append if we care about this UID. + if machineList, ok := machineMap[controllerRef.UID]; ok { + machineList.Items = append(machineList.Items, *machine) + } + } + return machineMap, nil +} + +func (c *MachineDeploymentControllerImpl) enqueue(d *v1alpha1.MachineDeployment) { + key, err := cache.MetaNamespaceKeyFunc(d) + if err != nil { + utilruntime.HandleError(fmt.Errorf("Couldn't get key for object %#v: %v", d, err)) + return + } + + c.informers.WorkerQueues["MachineDeployment"].Queue.Add(key) } diff --git a/pkg/controller/machinedeployment/controller_test.go b/pkg/controller/machinedeployment/controller_test.go new file mode 100644 index 000000000000..06bb5fcf9905 --- /dev/null +++ b/pkg/controller/machinedeployment/controller_test.go @@ -0,0 +1,908 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package machinedeployment + +import ( + "fmt" + "reflect" + "sort" + "strconv" + "sync" + "testing" + "time" + + controllerlib "github.com/kubernetes-incubator/apiserver-builder/pkg/controller" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + core "k8s.io/client-go/testing" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/util/workqueue" + + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/apimachinery/pkg/util/uuid" + "sigs.k8s.io/cluster-api/pkg/apis/cluster/common" + "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" + "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1/testutil" + "sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset" + "sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/fake" + v1alpha1listers "sigs.k8s.io/cluster-api/pkg/client/listers_generated/cluster/v1alpha1" + "sigs.k8s.io/cluster-api/pkg/controller/sharedinformers" +) + +var ( + noTimestamp = metav1.Time{} +) + +func ms(name string, replicas int, selector map[string]string, timestamp metav1.Time) *v1alpha1.MachineSet { + return &v1alpha1.MachineSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + CreationTimestamp: timestamp, + Namespace: metav1.NamespaceDefault, + }, + Spec: v1alpha1.MachineSetSpec{ + Replicas: func() *int32 { i := int32(replicas); return &i }(), + Selector: metav1.LabelSelector{MatchLabels: selector}, + Template: v1alpha1.MachineTemplateSpec{}, + }, + } +} + +func newMSWithStatus(name string, specReplicas, statusReplicas int, selector map[string]string) *v1alpha1.MachineSet { + ms := ms(name, specReplicas, selector, noTimestamp) + ms.Status = v1alpha1.MachineSetStatus{ + Replicas: int32(statusReplicas), + } + return ms +} + +func newMachineDeployment(name string, replicas int, revisionHistoryLimit *int32, maxSurge, maxUnavailable *intstr.IntOrString, selector map[string]string) *v1alpha1.MachineDeployment { + localReplicas := int32(replicas) + localMinReadySeconds := int32(300) + defaultMaxSurge := intstr.FromInt(0) + defaultMaxUnavailable := intstr.FromInt(0) + + d := v1alpha1.MachineDeployment{ + TypeMeta: metav1.TypeMeta{APIVersion: "v1alpha1/v1beta1"}, + ObjectMeta: metav1.ObjectMeta{ + UID: uuid.NewUUID(), + Name: name, + Namespace: metav1.NamespaceDefault, + Annotations: make(map[string]string), + }, + Spec: v1alpha1.MachineDeploymentSpec{ + Strategy: v1alpha1.MachineDeploymentStrategy{ + Type: common.RollingUpdateMachineDeploymentStrategyType, + RollingUpdate: &v1alpha1.MachineRollingUpdateDeployment{ + MaxUnavailable: &defaultMaxUnavailable, + MaxSurge: &defaultMaxSurge, + }, + }, + Replicas: &localReplicas, + Selector: metav1.LabelSelector{MatchLabels: selector}, + Template: v1alpha1.MachineTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: selector, + }, + Spec: v1alpha1.MachineSpec{}, + }, + MinReadySeconds: &localMinReadySeconds, + RevisionHistoryLimit: revisionHistoryLimit, + }, + } + if maxSurge != nil { + d.Spec.Strategy.RollingUpdate.MaxSurge = maxSurge + } + if maxUnavailable != nil { + d.Spec.Strategy.RollingUpdate.MaxUnavailable = maxUnavailable + } + return &d +} + +func newMachineSet(d *v1alpha1.MachineDeployment, name string, replicas int) *v1alpha1.MachineSet { + return &v1alpha1.MachineSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + UID: uuid.NewUUID(), + Namespace: metav1.NamespaceDefault, + Labels: d.Spec.Selector.MatchLabels, + OwnerReferences: []metav1.OwnerReference{*metav1.NewControllerRef(d, controllerKind)}, + }, + Spec: 
v1alpha1.MachineSetSpec{ + Selector: d.Spec.Selector, + Replicas: func() *int32 { i := int32(replicas); return &i }(), + Template: d.Spec.Template, + }, + } +} + +func newMinimalMachineSet(name string, replicas int) *v1alpha1.MachineSet { + return &v1alpha1.MachineSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + UID: uuid.NewUUID(), + Namespace: metav1.NamespaceDefault, + }, + Spec: v1alpha1.MachineSetSpec{ + Replicas: func() *int32 { i := int32(replicas); return &i }(), + }, + } +} + +func addDeploymentProperties(d *v1alpha1.MachineDeployment, ms *v1alpha1.MachineSet) *v1alpha1.MachineSet { + ms.ObjectMeta.Labels = d.Spec.Selector.MatchLabels + ms.ObjectMeta.OwnerReferences = []metav1.OwnerReference{*metav1.NewControllerRef(d, controllerKind)} + ms.Spec.Selector = d.Spec.Selector + ms.Spec.Template = d.Spec.Template + return ms +} + +func machineDeploymentControllerReconcile(t *testing.T, cs *clientset.Clientset, controller *MachineDeploymentController) { + instance := v1alpha1.MachineDeployment{} + instance.Name = "instance-1" + replicas := int32(0) + minReadySeconds := int32(0) + instance.Spec.Replicas = &replicas + instance.Spec.MinReadySeconds = &minReadySeconds + instance.Spec.Selector = metav1.LabelSelector{MatchLabels: map[string]string{"foo": "barr"}} + instance.Spec.Template.Labels = map[string]string{"foo": "barr"} + + expectedKey := "default/instance-1" + + // When creating a new object, it should invoke the reconcile method. + cluster := testutil.GetVanillaCluster() + cluster.Name = "cluster-1" + if _, err := cs.ClusterV1alpha1().Clusters(metav1.NamespaceDefault).Create(&cluster); err != nil { + t.Fatal(err) + } + client := cs.ClusterV1alpha1().MachineDeployments(metav1.NamespaceDefault) + before := make(chan struct{}) + after := make(chan struct{}) + var aftOnce, befOnce sync.Once + + actualKey := "" + var actualErr error + + // Setup test callbacks to be called when the message is reconciled. + // Sometimes reconcile is called multiple times, so use Once to prevent closing the channels again. + controller.BeforeReconcile = func(key string) { + actualKey = key + befOnce.Do(func() { close(before) }) + } + controller.AfterReconcile = func(key string, err error) { + actualKey = key + actualErr = err + aftOnce.Do(func() { close(after) }) + } + + // Create an instance + if _, err := client.Create(&instance); err != nil { + t.Fatal(err) + } + defer client.Delete(instance.Name, &metav1.DeleteOptions{}) + + // Verify reconcile function is called against the correct key + select { + case <-before: + if actualKey != expectedKey { + t.Fatalf( + "Reconcile function was not called with the correct key.\nActual:\t%+v\nExpected:\t%+v", + actualKey, expectedKey) + } + if actualErr != nil { + t.Fatal(actualErr) + } + case <-time.After(time.Second * 2): + t.Fatalf("reconcile never called") + } + + select { + case <-after: + if actualKey != expectedKey { + t.Fatalf( + "Reconcile function was not called with the correct key.\nActual:\t%+v\nExpected:\t%+v", + actualKey, expectedKey) + } + if actualErr != nil { + t.Fatal(actualErr) + } + case <-time.After(time.Second * 2): + t.Fatalf("reconcile never finished") + } +} + +type fixture struct { + t *testing.T + + client *fake.Clientset + // Objects to put in the store. + dLister []*v1alpha1.MachineDeployment + msLister []*v1alpha1.MachineSet + machineLister []*v1alpha1.Machine + + // Actions expected to happen on the client. Objects from here are also + // preloaded into NewSimpleFake. 
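+	// Expected and observed actions are matched by verb, resource and
+	// subresource only (see runParams), not by full object equality.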
+ actions []core.Action + objects []runtime.Object +} + +func (f *fixture) expectGetDeploymentAction(d *v1alpha1.MachineDeployment) { + action := core.NewGetAction(schema.GroupVersionResource{Group: "cluster.k8s.io", Version: "v1alpha1", Resource: "machinedeployments"}, d.Namespace, d.Name) + f.actions = append(f.actions, action) +} + +func (f *fixture) expectUpdateDeploymentStatusAction(d *v1alpha1.MachineDeployment) { + action := core.NewUpdateAction(schema.GroupVersionResource{Group: "cluster.k8s.io", Version: "v1alpha1", Resource: "machinedeployments"}, d.Namespace, d) + action.Subresource = "status" + f.actions = append(f.actions, action) +} + +func (f *fixture) expectUpdateDeploymentAction(d *v1alpha1.MachineDeployment) { + action := core.NewUpdateAction(schema.GroupVersionResource{Group: "cluster.k8s.io", Version: "v1alpha1", Resource: "machinedeployments"}, d.Namespace, d) + f.actions = append(f.actions, action) +} + +func (f *fixture) expectCreateMSAction(ms *v1alpha1.MachineSet) { + f.actions = append(f.actions, core.NewCreateAction(schema.GroupVersionResource{Group: "cluster.k8s.io", Version: "v1alpha1", Resource: "machinesets"}, ms.Namespace, ms)) +} + +func newFixture(t *testing.T) *fixture { + f := &fixture{} + f.t = t + f.objects = []runtime.Object{} + return f +} + +func (f *fixture) newController() *MachineDeploymentControllerImpl { + machineIndexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{}) + machineLister := v1alpha1listers.NewMachineLister(machineIndexer) + machineSetIndexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{}) + machineSetLister := v1alpha1listers.NewMachineSetLister(machineSetIndexer) + machineDeploymentIndexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{}) + machineDeploymentLister := v1alpha1listers.NewMachineDeploymentLister(machineDeploymentIndexer) + + fakeClient := fake.NewSimpleClientset(f.objects...) 
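+	// The listers above are served from bare indexers that the fixture fills
+	// in directly below, so no informers need to run; the fake clientset just
+	// records the controller's write actions for later assertions.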
+ f.client = fakeClient + controller := &MachineDeploymentControllerImpl{} + controller.machineClient = fakeClient + controller.mdLister = machineDeploymentLister + controller.msLister = machineSetLister + controller.mLister = machineLister + controller.informers = &sharedinformers.SharedInformers{} + controller.informers.WorkerQueues = map[string]*controllerlib.QueueWorker{} + queue := workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "MachineDeployment") + controller.informers.WorkerQueues["MachineDeployment"] = &controllerlib.QueueWorker{queue, 10, "MachineDeployment", nil} + + for _, d := range f.dLister { + err := machineDeploymentIndexer.Add(d) + if err != nil { + f.t.Fatal(err) + } + } + for _, ms := range f.msLister { + err := machineSetIndexer.Add(ms) + if err != nil { + f.t.Fatal(err) + } + } + for _, machine := range f.machineLister { + err := machineIndexer.Add(machine) + if err != nil { + f.t.Fatal(err) + } + } + return controller +} + +func (f *fixture) runExpectError(deploymentName *v1alpha1.MachineDeployment, startInformers bool) { + f.runParams(deploymentName, startInformers, true) +} + +func (f *fixture) run(deploymentName *v1alpha1.MachineDeployment) { + f.runParams(deploymentName, true, false) +} + +func (f *fixture) runParams(deploymentName *v1alpha1.MachineDeployment, startInformers bool, expectError bool) { + c := f.newController() + + err := c.Reconcile(deploymentName) + if !expectError && err != nil { + f.t.Errorf("error syncing deployment: %v", err) + } else if expectError && err == nil { + f.t.Error("expected error syncing deployment, got nil") + } + + actions := filterInformerActions(f.client.Actions()) + + if len(actions) != len(f.actions) { + f.t.Errorf("Got %d actions, expected %d actions", len(actions), len(f.actions)) + } + + for i, action := range actions { + if len(f.actions) < i+1 { + f.t.Errorf("%d unexpected actions: %+v", len(actions)-len(f.actions), actions[i:]) + break + } + + expectedAction := f.actions[i] + if !(expectedAction.Matches(action.GetVerb(), action.GetResource().Resource) && action.GetSubresource() == expectedAction.GetSubresource()) { + f.t.Errorf("Expected\n\t%#v\ngot\n\t%#v", expectedAction, action) + continue + } + } + + if len(f.actions) > len(actions) { + f.t.Errorf("%d additional expected actions:%+v", len(f.actions)-len(actions), f.actions[len(actions):]) + } +} + +func filterInformerActions(actions []core.Action) []core.Action { + ret := []core.Action{} + for _, action := range actions { + if len(action.GetNamespace()) == 0 && + (action.Matches("list", "machines") || + action.Matches("list", "machinedeployments") || + action.Matches("list", "machinesets") || + action.Matches("watch", "machines") || + action.Matches("watch", "machinedeployments") || + action.Matches("watch", "machinesets")) { + continue + } + ret = append(ret, action) + } + + return ret +} + +func TestSyncDeploymentCreatesMachineSet(t *testing.T) { + f := newFixture(t) + + d := newMachineDeployment("foo", 1, nil, nil, nil, map[string]string{"foo": "bar"}) + f.dLister = append(f.dLister, d) + f.objects = append(f.objects, d) + + ms := newMachineSet(d, "randomName", 1) + + f.expectCreateMSAction(ms) + f.expectUpdateDeploymentStatusAction(d) + + f.run(d) +} + +func TestSyncDeploymentDontDoAnythingDuringDeletion(t *testing.T) { + f := newFixture(t) + + d := newMachineDeployment("foo", 1, nil, nil, nil, map[string]string{"foo": "bar"}) + now := metav1.Now() + d.DeletionTimestamp = &now + f.dLister = append(f.dLister, d) + f.objects = 
append(f.objects, d) + + f.run(d) +} + +func TestGetMachineSetsForDeployment(t *testing.T) { + tests := []struct { + name string + noDeploymentSelector bool + diffCtrlRef bool + diffLabels bool + noCtrlRef bool + expectedMachineSets int + }{ + { + name: "scenario 1. machine set returned.", + expectedMachineSets: 1, + }, + { + name: "scenario 2. machine set with diff controller ref, machine set not returned.", + diffCtrlRef: true, + expectedMachineSets: 0, + }, + { + name: "scenario 3. deployment with no selector, machine set not returned.", + noDeploymentSelector: true, + expectedMachineSets: 0, + }, + { + name: "scenario 4. machine set with non-matching labels not returned.", + diffLabels: true, + expectedMachineSets: 0, + }, + { + name: "scenario 5. machine set with no controller ref, machine set not returned.", + noCtrlRef: true, + expectedMachineSets: 0, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + t.Log(test.name) + d1 := newMachineDeployment("d1", 1, nil, nil, nil, map[string]string{"foo": "bar"}) + d2 := newMachineDeployment("d2", 1, nil, nil, nil, map[string]string{"foo": "bar2"}) + ms := newMachineSet(d1, "ms", 1) + + if test.diffCtrlRef { + ms.ObjectMeta.OwnerReferences = []metav1.OwnerReference{*metav1.NewControllerRef(d2, controllerKind)} + } + if test.noDeploymentSelector { + d1.Spec.Selector = metav1.LabelSelector{} + } + if test.diffLabels { + ms.ObjectMeta.Labels = map[string]string{"foo": "bar2"} + } + if test.noCtrlRef { + ms.ObjectMeta.OwnerReferences = nil + } + + f := newFixture(t) + + f.dLister = append(f.dLister, d1, d2) + f.msLister = append(f.msLister, ms) + f.objects = append(f.objects, d1, d2, ms) + + c := f.newController() + + msList, err := c.getMachineSetsForDeployment(d1) + if err != nil { + t.Fatalf("unexpected err calling getMachineSetsForDeployment, %v", err) + } + if test.expectedMachineSets != len(msList) { + t.Fatalf("got %v machine sets, expected %v machine sets, %v", len(msList), test.expectedMachineSets, msList) + } + }) + } +} + +func hasExpectedMachineNames(t *testing.T, mList *v1alpha1.MachineList, msName string, numMSMachines int) { + var names, expectedNames []string + for _, m := range mList.Items { + names = append(names, m.Name) + } + sort.Strings(names) + + for i := 0; i < numMSMachines; i++ { + expectedNames = append(expectedNames, fmt.Sprintf("%v-machine-%v", msName, i)) + } + if !reflect.DeepEqual(names, expectedNames) { + t.Fatalf("got %v machine names, expected %v machine names for %v", names, expectedNames, msName) + } +} + +func TestGetMachineMapForMachineSets(t *testing.T) { + tests := []struct { + name string + numMS1Machines int + numMS2Machines int + addMachineWithNoCtrlRef bool + expectedUIDs int + }{ + { + name: "scenario 1. multiple machine sets, one populated, one empty", + numMS1Machines: 3, + numMS2Machines: 0, + expectedUIDs: 2, + }, + { + name: "scenario 2. multiple machine sets, two populated", + numMS1Machines: 3, + numMS2Machines: 2, + expectedUIDs: 2, + }, + { + name: "scenario 3. multiple machine sets, both empty", + numMS1Machines: 0, + numMS2Machines: 0, + expectedUIDs: 2, + }, + { + name: "scenario 4. 
skip machine with no controller ref.", + numMS1Machines: 3, + numMS2Machines: 2, + addMachineWithNoCtrlRef: true, + expectedUIDs: 2, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + t.Log(test.name) + d1 := newMachineDeployment("foo", 1, nil, nil, nil, map[string]string{"foo": "bar"}) + ms1 := newMachineSet(d1, "ms1", 1) + ms2 := newMachineSet(d1, "ms2", 1) + + f := newFixture(t) + + for i := 0; i < test.numMS1Machines; i++ { + m := generateMachineFromMS(ms1, i) + f.machineLister = append(f.machineLister, m) + f.objects = append(f.objects, m) + } + + for i := 0; i < test.numMS2Machines; i++ { + m := generateMachineFromMS(ms2, i) + f.machineLister = append(f.machineLister, m) + f.objects = append(f.objects, m) + } + + f.dLister = append(f.dLister, d1) + f.msLister = append(f.msLister, ms1, ms2) + f.objects = append(f.objects, d1, ms1, ms2) + + if test.addMachineWithNoCtrlRef { + m := generateMachineFromMS(ms1, 99) + m.ObjectMeta.OwnerReferences = nil + f.machineLister = append(f.machineLister, m) + f.objects = append(f.objects, m) + } + + c := f.newController() + + machineMap, err := c.getMachineMapForDeployment(d1, f.msLister) + if err != nil { + t.Fatalf("getMachineMapForDeployment() error: %v", err) + } + + if test.expectedUIDs != len(machineMap) { + t.Fatalf("got %v machine set UIDs, expected %v machine set UIDs", test.expectedUIDs, len(machineMap)) + } + + if test.numMS1Machines != len(machineMap[ms1.UID].Items) { + t.Fatalf("got %v machines, expected %v machines for ms1", test.numMS1Machines, len(machineMap[ms1.UID].Items)) + } + if test.numMS2Machines != len(machineMap[ms2.UID].Items) { + t.Fatalf("got %v machines, expected %v machines for ms2", test.numMS2Machines, len(machineMap[ms2.UID].Items)) + } + + hasExpectedMachineNames(t, machineMap[ms1.UID], ms1.Name, test.numMS1Machines) + hasExpectedMachineNames(t, machineMap[ms2.UID], ms2.Name, test.numMS2Machines) + }) + } +} + +func getMachineSetActions(actions []core.Action) []core.Action { + var filteredActions []core.Action + for _, action := range actions { + if action.GetResource().Resource == "machinesets" { + filteredActions = append(filteredActions, action) + } + } + return filteredActions +} + +func TestAddMachineSet(t *testing.T) { + tests := []struct { + name string + stripOwnerRef bool + ownerDoesntExist bool + isDeleting bool + diffLabel bool + expectCreation bool + }{ + { + name: "scenario 1. machine set with controller ref.", + expectCreation: true, + }, + { + name: "scenario 2. machine set with no controller ref.", + stripOwnerRef: true, + expectCreation: true, + }, + { + name: "scenario 3. machine set that is being deleted.", + stripOwnerRef: true, + isDeleting: true, + expectCreation: false, + }, + { + name: "scenario 4. machine set with controller ref that controller doesn't exist.", + ownerDoesntExist: true, + expectCreation: false, + }, + { + name: "scenario 5. 
machine set with no controller ref, no matching deployment.", + stripOwnerRef: true, + diffLabel: true, + expectCreation: false, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + t.Log(test.name) + d := newMachineDeployment("d", 1, nil, nil, nil, map[string]string{"foo": "bar"}) + ms := newMachineSet(d, "ms", 1) + + if test.stripOwnerRef { + ms.ObjectMeta.OwnerReferences = nil + } + if test.isDeleting { + now := metav1.Now() + ms.DeletionTimestamp = &now + } + if test.diffLabel { + ms.ObjectMeta.Labels = map[string]string{"foo": "bar2"} + } + + f := newFixture(t) + + if !test.ownerDoesntExist { + f.dLister = append(f.dLister, d) + } + f.objects = append(f.objects, d, ms) + + c := f.newController() + + c.addMachineSet(ms) + + queue := c.informers.WorkerQueues["MachineDeployment"].Queue + + if !test.expectCreation { + if queue.Len() != 0 { + t.Fatalf("got %d queued items, expected %d queued items", queue.Len(), 0) + } + } + if test.expectCreation { + if queue.Len() != 1 { + t.Fatalf("got %d queued items, expected %d queued items", queue.Len(), 1) + } + verifyQueuedKey(t, queue, d) + } + if test.isDeleting { + if queue.Len() != 0 { + t.Fatalf("got %d queued items, expected %d queued items", queue.Len(), 0) + } + + } + + }) + } +} + +func TestUpdateMachineSet(t *testing.T) { + tests := []struct { + name string + sameResourceVersion bool + noOldCtrlRef bool + noNewCtrlRef bool + diffNewCtrlRef bool + diffNewCtrlExists bool + diffNewLabels bool + expectOldReconcile bool + expectNewReconcile bool + }{ + { + name: "scenario 1. same resource version, no-op", + sameResourceVersion: true, + }, + { + name: "scenario 2. no change to controller ref, queue old controller ref", + expectOldReconcile: true, + }, + { + name: "scenario 3. old controller ref to new different controller ref that exists, queue old controller ref and new controller ref", + diffNewCtrlRef: true, + diffNewCtrlExists: true, + expectOldReconcile: true, + expectNewReconcile: true, + }, + { + name: "scenario 4. old controller ref to new different controller ref that doesn't exist, queue old controller ref", + diffNewCtrlRef: true, + diffNewCtrlExists: false, + expectOldReconcile: true, + }, + { + name: "scenario 5. no old controller ref, to new controller ref exists, queue new controller ref", + noOldCtrlRef: true, + diffNewCtrlRef: true, + diffNewCtrlExists: true, + expectNewReconcile: true, + }, + { + name: "scenario 6. no old controller ref, to new controller ref doesn't exist, no controller ref to queue", + noOldCtrlRef: true, + diffNewCtrlRef: true, + diffNewCtrlExists: false, + }, + { + name: "scenario 7. old controller ref, to no new controller ref, orphaned, queue old controller ref", + noNewCtrlRef: true, + expectOldReconcile: true, + }, + { + name: "scenario 8. no controller ref, to no new controller ref, no controller ref to queue", + noOldCtrlRef: true, + noNewCtrlRef: true, + }, + { + name: "scenario 9. no controller ref, to no new controller ref, label change, found deployment by label, queue found deployment", + noOldCtrlRef: true, + noNewCtrlRef: true, + diffNewCtrlExists: true, + diffNewLabels: true, + expectNewReconcile: true, + }, + { + name: "scenario 10. 
no controller ref, to no new controller ref, label change, deployment not found, no deployment to queue", + noOldCtrlRef: true, + noNewCtrlRef: true, + diffNewCtrlExists: false, + diffNewLabels: true, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + t.Log(test.name) + + d1 := newMachineDeployment("d1", 1, nil, nil, nil, map[string]string{"foo": "bar"}) + d2 := newMachineDeployment("d2", 1, nil, nil, nil, map[string]string{"foo": "bar2"}) + oriMS := newMachineSet(d1, "ms", 1) + + oldMS := *oriMS + newMS := *oriMS + + if !test.sameResourceVersion { + bumpResourceVersion(&newMS) + } + if test.noOldCtrlRef { + oldMS.ObjectMeta.OwnerReferences = nil + } + if test.noNewCtrlRef { + newMS.ObjectMeta.OwnerReferences = nil + } + if test.diffNewCtrlRef { + newMS.ObjectMeta.OwnerReferences = []metav1.OwnerReference{*metav1.NewControllerRef(d2, controllerKind)} + } + if test.diffNewLabels { + newMS.ObjectMeta.Labels = map[string]string{"foo": "bar2"} + } + + f := newFixture(t) + + f.dLister = append(f.dLister, d1) + if test.diffNewCtrlExists { + f.dLister = append(f.dLister, d2) + } + f.msLister = append(f.msLister, &oldMS) + f.objects = append(f.objects, d1, &oldMS) + + c := f.newController() + + c.updateMachineSet(&oldMS, &newMS) + + expectedReconcileCount := 0 + if test.expectOldReconcile { + expectedReconcileCount++ + } + if test.expectNewReconcile { + expectedReconcileCount++ + } + + queue := c.informers.WorkerQueues["MachineDeployment"].Queue + + if queue.Len() != expectedReconcileCount { + t.Fatalf("got %d queued items, expected %d queued items", queue.Len(), expectedReconcileCount) + } + if test.expectOldReconcile { + verifyQueuedKey(t, queue, d1) + } + if test.expectNewReconcile { + verifyQueuedKey(t, queue, d2) + } + }) + } +} + +func TestDeleteMachineSet(t *testing.T) { + tests := []struct { + name string + stripOwnerRef bool + ownerDoesntExist bool + expectDelete bool + }{ + { + name: "scenario 1. has controller ref that exists", + expectDelete: true, + }, + { + name: "scenario 2. has controller ref that does not exist", + ownerDoesntExist: true, + expectDelete: false, + }, + { + name: "scenario 3. 
no controller ref", + stripOwnerRef: true, + expectDelete: false, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + t.Log(test.name) + d := newMachineDeployment("foo", 1, nil, nil, nil, map[string]string{"foo": "bar"}) + ms := newMachineSet(d, "ms", 1) + + if test.stripOwnerRef { + ms.ObjectMeta.OwnerReferences = nil + } + + f := newFixture(t) + + if !test.ownerDoesntExist { + f.dLister = append(f.dLister, d) + } + f.objects = append(f.objects, d, ms) + + c := f.newController() + + c.deleteMachineSet(ms) + + queue := c.informers.WorkerQueues["MachineDeployment"].Queue + + if !test.expectDelete { + if queue.Len() != 0 { + t.Fatalf("got %d queued items, expected %d queued items", queue.Len(), 0) + } + } + if test.expectDelete { + if queue.Len() != 1 { + t.Fatalf("got %d queued items, expected %d queued items", queue.Len(), 1) + } + verifyQueuedKey(t, queue, d) + } + }) + } +} + +func bumpResourceVersion(obj metav1.Object) { + ver, _ := strconv.ParseInt(obj.GetResourceVersion(), 10, 32) + obj.SetResourceVersion(strconv.FormatInt(ver+1, 10)) +} + +// generateMachineFromMS creates a machine, with the input MachineSet's selector and its template +func generateMachineFromMS(ms *v1alpha1.MachineSet, count int) *v1alpha1.Machine { + trueVar := true + return &v1alpha1.Machine{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%v-machine-%v", ms.Name, count), + Namespace: ms.Namespace, + Labels: ms.Spec.Selector.MatchLabels, + OwnerReferences: []metav1.OwnerReference{ + {UID: ms.UID, APIVersion: "v1alpha1", Kind: "MachineSet", Name: ms.Name, Controller: &trueVar}, + }, + }, + Spec: ms.Spec.Template.Spec, + } +} + +func verifyQueuedKey(t *testing.T, queue workqueue.RateLimitingInterface, d *v1alpha1.MachineDeployment) { + key, done := queue.Get() + if key == nil || done { + t.Fatalf("failed to enqueue controller.") + } + expectedKey, err := cache.MetaNamespaceKeyFunc(d) + if err != nil { + t.Fatalf("failed to get key for deployment.") + } + if expectedKey != key { + t.Fatalf("got %v key, expected %v key", key, expectedKey) + } +} diff --git a/pkg/controller/machinedeployment/machinedeployment_suite_test.go b/pkg/controller/machinedeployment/machinedeployment_suite_test.go new file mode 100644 index 000000000000..9699d6f3cac4 --- /dev/null +++ b/pkg/controller/machinedeployment/machinedeployment_suite_test.go @@ -0,0 +1,46 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package machinedeployment + +import ( + "testing" + + "github.com/kubernetes-incubator/apiserver-builder/pkg/test" + + "sigs.k8s.io/cluster-api/pkg/apis" + "sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset" + "sigs.k8s.io/cluster-api/pkg/controller/sharedinformers" + "sigs.k8s.io/cluster-api/pkg/openapi" +) + +func TestMachineDeploymentSet(t *testing.T) { + testenv := test.NewTestEnvironment() + config := testenv.Start(apis.GetAllApiBuilders(), openapi.GetOpenAPIDefinitions) + cs := clientset.NewForConfigOrDie(config) + + shutdown := make(chan struct{}) + si := sharedinformers.NewSharedInformers(config, shutdown) + controller := NewMachineDeploymentController(config, si) + controller.Run(shutdown) + + t.Run("machineDeploymentControllerReconcile", func(t *testing.T) { + machineDeploymentControllerReconcile(t, cs, controller) + }) + + close(shutdown) + testenv.Stop() +} diff --git a/pkg/controller/machinedeployment/rolling.go b/pkg/controller/machinedeployment/rolling.go new file mode 100644 index 000000000000..429150613225 --- /dev/null +++ b/pkg/controller/machinedeployment/rolling.go @@ -0,0 +1,266 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package machinedeployment + +import ( + "fmt" + "sort" + + "github.com/golang/glog" + + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/util/integer" + + "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" + dutil "sigs.k8s.io/cluster-api/pkg/controller/machinedeployment/util" +) + +// rolloutRolling implements the logic for rolling a new machine set. +func (dc *MachineDeploymentControllerImpl) rolloutRolling(d *v1alpha1.MachineDeployment, msList []*v1alpha1.MachineSet, machineMap map[types.UID]*v1alpha1.MachineList) error { + newMS, oldMSs, err := dc.getAllMachineSetsAndSyncRevision(d, msList, machineMap, true) + if err != nil { + return err + } + allMSs := append(oldMSs, newMS) + + // Scale up, if we can. + scaledUp, err := dc.reconcileNewMachineSet(allMSs, newMS, d) + if err != nil { + return err + } + if scaledUp { + // TODO: update deployment status for deployment progress + return nil + } + + // Scale down, if we can. 
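+	// Only old MachineSets that still have replicas are considered, and
+	// reconcileOldMachineSets enforces the maxUnavailable budget below.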
+	scaledDown, err := dc.reconcileOldMachineSets(allMSs, dutil.FilterActiveMachineSets(oldMSs), newMS, d)
+	if err != nil {
+		return err
+	}
+	if scaledDown {
+		// TODO: update deployment status for deployment progress
+		return nil
+	}
+
+	if dutil.DeploymentComplete(d, &d.Status) {
+		if err := dc.cleanupDeployment(oldMSs, d); err != nil {
+			return err
+		}
+	}
+
+	// TODO: update deployment status for deployment progress
+	return nil
+}
+
+func (dc *MachineDeploymentControllerImpl) reconcileNewMachineSet(allMSs []*v1alpha1.MachineSet, newMS *v1alpha1.MachineSet, deployment *v1alpha1.MachineDeployment) (bool, error) {
+	if deployment.Spec.Replicas == nil {
+		return false, fmt.Errorf("spec replicas for deployment %v is nil, this is unexpected", deployment.Name)
+	}
+	if newMS.Spec.Replicas == nil {
+		return false, fmt.Errorf("spec replicas for machine set %v is nil, this is unexpected", newMS.Name)
+	}
+
+	if *(newMS.Spec.Replicas) == *(deployment.Spec.Replicas) {
+		// Scaling not required.
+		return false, nil
+	}
+	if *(newMS.Spec.Replicas) > *(deployment.Spec.Replicas) {
+		// Scale down.
+		scaled, _, err := dc.scaleMachineSet(newMS, *(deployment.Spec.Replicas), deployment)
+		return scaled, err
+	}
+	newReplicasCount, err := dutil.NewMSNewReplicas(deployment, allMSs, newMS)
+	if err != nil {
+		return false, err
+	}
+	scaled, _, err := dc.scaleMachineSet(newMS, newReplicasCount, deployment)
+	return scaled, err
+}
+
+func (dc *MachineDeploymentControllerImpl) reconcileOldMachineSets(allMSs []*v1alpha1.MachineSet, oldMSs []*v1alpha1.MachineSet, newMS *v1alpha1.MachineSet, deployment *v1alpha1.MachineDeployment) (bool, error) {
+	if deployment.Spec.Replicas == nil {
+		return false, fmt.Errorf("spec replicas for deployment %v is nil, this is unexpected", deployment.Name)
+	}
+	if newMS.Spec.Replicas == nil {
+		return false, fmt.Errorf("spec replicas for machine set %v is nil, this is unexpected", newMS.Name)
+	}
+
+	oldMachinesCount := dutil.GetReplicaCountForMachineSets(oldMSs)
+	if oldMachinesCount == 0 {
+		// Can't scale down further
+		return false, nil
+	}
+
+	allMachinesCount := dutil.GetReplicaCountForMachineSets(allMSs)
+	glog.V(4).Infof("New machine set %s/%s has %d available machines.", newMS.Namespace, newMS.Name, newMS.Status.AvailableReplicas)
+	maxUnavailable := dutil.MaxUnavailable(*deployment)
+
+	// Check if we can scale down. We can scale down in the following 2 cases:
+	// * Some old machine sets have unhealthy replicas, we could safely scale down those unhealthy replicas since that won't further
+	//  increase unavailability.
+	// * The new machine set has scaled up and its replicas have become ready, then we can scale down old machine sets in a further step.
+	//
+	// maxScaledDown := allMachinesCount - minAvailable - newMachineSetMachinesUnavailable
+	// take into account not only maxUnavailable and any surge machines that have been created, but also unavailable machines from
+	// the newMS, so that the unavailable machines from the newMS would not make us scale down old machine sets in a further
+	// step (that would increase unavailability).
+	//
+	// Concrete example:
+	//
+	// * 10 replicas
+	// * 2 maxUnavailable (absolute number, not percent)
+	// * 3 maxSurge (absolute number, not percent)
+	//
+	// case 1:
+	// * Deployment is updated, newMS is created with 3 replicas, oldMS is scaled down to 8, and newMS is scaled up to 5.
+	// * The new machine set machines crashloop and never become available.
+	// * allMachinesCount is 13. minAvailable is 8. newMSMachinesUnavailable is 5.
+	// * A node fails and causes one of the oldMS machines to become unavailable. However, 13 - 8 - 5 = 0, so the oldMS won't be scaled down.
+	// * The user notices the crashloop and does kubectl rollout undo to rollback.
+	// * newMSMachinesUnavailable is 1, since we rolled back to the good machine set, so maxScaledDown = 13 - 8 - 1 = 4. 4 of the crashlooping machines will be scaled down.
+	// * The total number of machines will then be 9 and the newMS can be scaled up to 10.
+	//
+	// case 2:
+	// Same example, but pushing a new machine template instead of rolling back (aka "roll over"):
+	// * The new machine set created must start with 0 replicas because allMachinesCount is already at 13.
+	// * However, newMSMachinesUnavailable would also be 0, so the 2 old machine sets could be scaled down by 5 (13 - 8 - 0), which would then
+	// allow the new machine set to be scaled up by 5.
+	minAvailable := *(deployment.Spec.Replicas) - maxUnavailable
+	newMSUnavailableMachineCount := *(newMS.Spec.Replicas) - newMS.Status.AvailableReplicas
+	maxScaledDown := allMachinesCount - minAvailable - newMSUnavailableMachineCount
+	if maxScaledDown <= 0 {
+		return false, nil
+	}
+
+	// Clean up unhealthy replicas first, otherwise unhealthy replicas will block deployment
+	// and cause timeout. See https://github.com/kubernetes/kubernetes/issues/16737
+	oldMSs, cleanupCount, err := dc.cleanupUnhealthyReplicas(oldMSs, deployment, maxScaledDown)
+	if err != nil {
+		return false, nil
+	}
+	glog.V(4).Infof("Cleaned up unhealthy replicas from old MSes by %d", cleanupCount)
+
+	// Scale down old machine sets; we need to check maxUnavailable to ensure we can scale down.
+	allMSs = append(oldMSs, newMS)
+	scaledDownCount, err := dc.scaleDownOldMachineSetsForRollingUpdate(allMSs, oldMSs, deployment)
+	if err != nil {
+		return false, nil
+	}
+	glog.V(4).Infof("Scaled down old MSes of deployment %s by %d", deployment.Name, scaledDownCount)
+
+	totalScaledDown := cleanupCount + scaledDownCount
+	return totalScaledDown > 0, nil
+}
+
+// cleanupUnhealthyReplicas will scale down old machine sets with unhealthy replicas, so that all unhealthy replicas will be deleted.
+func (dc *MachineDeploymentControllerImpl) cleanupUnhealthyReplicas(oldMSs []*v1alpha1.MachineSet, deployment *v1alpha1.MachineDeployment, maxCleanupCount int32) ([]*v1alpha1.MachineSet, int32, error) {
+	sort.Sort(dutil.MachineSetsByCreationTimestamp(oldMSs))
+	// Safely scale down all old machine sets with unhealthy replicas. The machine set controller will sort the machines in the order
+	// such that not-ready < ready, unscheduled < scheduled, and pending < running. This ensures that unhealthy replicas will
+	// be deleted first and won't increase unavailability.
+	totalScaledDown := int32(0)
+	for i, targetMS := range oldMSs {
+		if targetMS.Spec.Replicas == nil {
+			return nil, 0, fmt.Errorf("spec replicas for machine set %v is nil, this is unexpected", targetMS.Name)
+		}
+
+		if totalScaledDown >= maxCleanupCount {
+			break
+		}
+		oldMSReplicas := *(targetMS.Spec.Replicas)
+		if oldMSReplicas == 0 {
+			// cannot scale down this machine set.
+			continue
+		}
+		oldMSAvailableReplicas := targetMS.Status.AvailableReplicas
+		glog.V(4).Infof("Found %d available machines in old MS %s/%s", oldMSAvailableReplicas, targetMS.Namespace, targetMS.Name)
+		if oldMSReplicas == oldMSAvailableReplicas {
+			// no unhealthy replicas found, no scaling required.
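+			// (spec.Replicas == status.AvailableReplicas for this machine set)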
+			continue
+		}
+
+		remainingCleanupCount := maxCleanupCount - totalScaledDown
+		unhealthyCount := oldMSReplicas - oldMSAvailableReplicas
+		scaledDownCount := integer.Int32Min(remainingCleanupCount, unhealthyCount)
+		newReplicasCount := oldMSReplicas - scaledDownCount
+
+		if newReplicasCount > oldMSReplicas {
+			return nil, 0, fmt.Errorf("when cleaning up unhealthy replicas, got invalid request to scale down %s/%s %d -> %d", targetMS.Namespace, targetMS.Name, oldMSReplicas, newReplicasCount)
+		}
+		_, updatedOldMS, err := dc.scaleMachineSet(targetMS, newReplicasCount, deployment)
+		if err != nil {
+			return nil, totalScaledDown, err
+		}
+		totalScaledDown += scaledDownCount
+		oldMSs[i] = updatedOldMS
+	}
+	return oldMSs, totalScaledDown, nil
+}
+
+// scaleDownOldMachineSetsForRollingUpdate scales down old machine sets when the deployment strategy is "RollingUpdate".
+// We need to check maxUnavailable to ensure availability.
+func (dc *MachineDeploymentControllerImpl) scaleDownOldMachineSetsForRollingUpdate(allMSs []*v1alpha1.MachineSet, oldMSs []*v1alpha1.MachineSet, deployment *v1alpha1.MachineDeployment) (int32, error) {
+	if deployment.Spec.Replicas == nil {
+		return 0, fmt.Errorf("spec replicas for deployment %v is nil, this is unexpected", deployment.Name)
+	}
+
+	maxUnavailable := dutil.MaxUnavailable(*deployment)
+
+	// Check if we can scale down.
+	minAvailable := *(deployment.Spec.Replicas) - maxUnavailable
+	// Find the number of available machines.
+	availableMachineCount := dutil.GetAvailableReplicaCountForMachineSets(allMSs)
+	if availableMachineCount <= minAvailable {
+		// Cannot scale down.
+		return 0, nil
+	}
+	glog.V(4).Infof("Found %d available machines in deployment %s, scaling down old MSes", availableMachineCount, deployment.Name)
+
+	sort.Sort(dutil.MachineSetsByCreationTimestamp(oldMSs))
+
+	totalScaledDown := int32(0)
+	totalScaleDownCount := availableMachineCount - minAvailable
+	for _, targetMS := range oldMSs {
+		if targetMS.Spec.Replicas == nil {
+			return 0, fmt.Errorf("spec replicas for machine set %v is nil, this is unexpected", targetMS.Name)
+		}
+
+		if totalScaledDown >= totalScaleDownCount {
+			// No further scaling required.
+			break
+		}
+		if *(targetMS.Spec.Replicas) == 0 {
+			// cannot scale down this MachineSet.
+			continue
+		}
+		// Scale down.
+		scaleDownCount := integer.Int32Min(*(targetMS.Spec.Replicas), totalScaleDownCount-totalScaledDown)
+		newReplicasCount := *(targetMS.Spec.Replicas) - scaleDownCount
+		if newReplicasCount > *(targetMS.Spec.Replicas) {
+			return 0, fmt.Errorf("when scaling down old MS, got invalid request to scale down %s/%s %d -> %d", targetMS.Namespace, targetMS.Name, *(targetMS.Spec.Replicas), newReplicasCount)
+		}
+		_, _, err := dc.scaleMachineSet(targetMS, newReplicasCount, deployment)
+		if err != nil {
+			return totalScaledDown, err
+		}
+
+		totalScaledDown += scaleDownCount
+	}
+
+	return totalScaledDown, nil
+}
diff --git a/pkg/controller/machinedeployment/rolling_test.go b/pkg/controller/machinedeployment/rolling_test.go
new file mode 100644
index 000000000000..ef4578ab097c
--- /dev/null
+++ b/pkg/controller/machinedeployment/rolling_test.go
@@ -0,0 +1,433 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package machinedeployment + +import ( + "testing" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/intstr" + core "k8s.io/client-go/testing" + "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" + "sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/fake" +) + +func TestMachineDeploymentController_reconcileNewMachineSet(t *testing.T) { + // expectedNewReplicas = deploymentReplicas + maxSurge - oldReplicas - newReplicas + tests := []struct { + name string + deploymentReplicas int + maxSurge intstr.IntOrString + oldReplicas int + newReplicas int + scaleExpected bool + expectedNewReplicas int + }{ + { + name: "scenario 1. new replicas for surge only, scale up.", + deploymentReplicas: 10, + maxSurge: intstr.FromInt(2), + oldReplicas: 10, + newReplicas: 0, + scaleExpected: true, + expectedNewReplicas: 2, + }, + { + name: "scenario 2. scale up old replicas to meet desired and surge, scale up.", + deploymentReplicas: 10, + maxSurge: intstr.FromInt(2), + oldReplicas: 5, + newReplicas: 0, + scaleExpected: true, + expectedNewReplicas: 7, + }, + { + name: "scenario 3. old replica meet desired and new replica meet surge, no change", + deploymentReplicas: 10, + maxSurge: intstr.FromInt(2), + oldReplicas: 10, + newReplicas: 2, + scaleExpected: false, + }, + { + name: "scenario 4. old replica lower than desired, new replica exceed desired and surge, scale down", + deploymentReplicas: 10, + maxSurge: intstr.FromInt(2), + oldReplicas: 2, + newReplicas: 11, + scaleExpected: true, + expectedNewReplicas: 10, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + newMS := ms("foo-v2", test.newReplicas, nil, noTimestamp) + oldMS := ms("foo-v2", test.oldReplicas, nil, noTimestamp) + allMSs := []*v1alpha1.MachineSet{newMS, oldMS} + maxUnavailable := intstr.FromInt(0) + deployment := newMachineDeployment("foo", test.deploymentReplicas, nil, &test.maxSurge, &maxUnavailable, map[string]string{"foo": "bar"}) + + rObjects := []runtime.Object{} + rObjects = append(rObjects, oldMS) + + fakeClient := fake.NewSimpleClientset(rObjects...) 
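+			// reconcileNewMachineSet only needs the clientset for the scale
+			// update; the controller's listers can stay nil in this test.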
+			controller := &MachineDeploymentControllerImpl{}
+			controller.machineClient = fakeClient
+
+			scaled, err := controller.reconcileNewMachineSet(allMSs, newMS, deployment)
+			if err != nil {
+				t.Fatalf("unexpected error: %v", err)
+			}
+
+			if !test.scaleExpected {
+				if scaled || len(fakeClient.Actions()) > 0 {
+					t.Fatalf("unexpected scaling: %v", fakeClient.Actions())
+				}
+			}
+			if test.scaleExpected {
+				if !scaled {
+					t.Fatalf("expected scaling to occur")
+				}
+				if len(fakeClient.Actions()) != 1 {
+					t.Fatalf("expected 1 action during scale, got: %v", fakeClient.Actions())
+				}
+				updated := fakeClient.Actions()[0].(core.UpdateAction).GetObject().(*v1alpha1.MachineSet)
+				if e, a := test.expectedNewReplicas, int(*(updated.Spec.Replicas)); e != a {
+					t.Fatalf("expected update to %d replicas, got %d", e, a)
+				}
+			}
+		})
+	}
+}
+
+func TestMachineDeploymentController_reconcileOldMachineSets(t *testing.T) {
+	tests := []struct {
+		name                   string
+		deploymentReplicas     int
+		maxUnavailable         intstr.IntOrString
+		oldReplicas            int
+		newReplicas            int
+		readyMachinesFromOldMS int
+		readyMachinesFromNewMS int
+		scaleExpected          bool
+		expectedOldReplicas    int
+		expectedActions        int
+	}{
+		{
+			name:                   "scenario 1: 10 desired, oldMS at 10, 10 ready, 1 max surge, 0 max unavailable => oldMS at 10, no scaling.",
+			deploymentReplicas:     10,
+			maxUnavailable:         intstr.FromInt(0),
+			oldReplicas:            10,
+			newReplicas:            0,
+			readyMachinesFromOldMS: 10,
+			readyMachinesFromNewMS: 0,
+			scaleExpected:          false,
+		},
+		{
+			name:                   "scenario 2: 10 desired, oldMS at 10, 10 ready, 1 max surge, 2 max unavailable => oldMS at 8, scale down by 2.",
+			deploymentReplicas:     10,
+			maxUnavailable:         intstr.FromInt(2),
+			oldReplicas:            10,
+			newReplicas:            0,
+			readyMachinesFromOldMS: 10,
+			readyMachinesFromNewMS: 0,
+			scaleExpected:          true,
+			expectedOldReplicas:    8,
+			expectedActions:        1,
+		},
+		{ // expect unhealthy replicas from old machine sets to be cleaned up
+			name:                   "scenario 3: 10 desired, oldMS at 10, 8 ready, 1 max surge, 2 max unavailable => oldMS at 8, scale down by 0.",
+			deploymentReplicas:     10,
+			maxUnavailable:         intstr.FromInt(2),
+			oldReplicas:            10,
+			newReplicas:            0,
+			readyMachinesFromOldMS: 8,
+			readyMachinesFromNewMS: 0,
+			scaleExpected:          true,
+			expectedOldReplicas:    8,
+			expectedActions:        1,
+		},
+		{ // expect 1 unhealthy replica from old machine sets to be cleaned up, and 1 ready machine to be scaled down
+			name:                   "scenario 4: 10 desired, oldMS at 10, 9 ready, 1 max surge, 2 max unavailable => oldMS at 8, scale down by 1.",
+			deploymentReplicas:     10,
+			maxUnavailable:         intstr.FromInt(2),
+			oldReplicas:            10,
+			newReplicas:            0,
+			readyMachinesFromOldMS: 9,
+			readyMachinesFromNewMS: 0,
+			scaleExpected:          true,
+			expectedOldReplicas:    8,
+			expectedActions:        2,
+		},
+		{ // the unavailable machines from the newMS would not make us scale down old MSs in a further step
+			name:                   "scenario 5: 10 desired, oldMS at 8, newMS at 2, 1 max surge, 8 oldMS ready, 0 newMS ready, 2 max unavailable => no scale.",
+			deploymentReplicas:     10,
+			maxUnavailable:         intstr.FromInt(2),
+			oldReplicas:            8,
+			newReplicas:            2,
+			readyMachinesFromOldMS: 8,
+			readyMachinesFromNewMS: 0,
+			scaleExpected:          false,
+		},
+		{
+			name:                   "scenario 6: 10 desired, oldMS at 10, newMS at 1, 1 max surge, 0 max unavailable => oldMS at 9, scale down by 1.",
+			deploymentReplicas:     10,
+			maxUnavailable:         intstr.FromInt(0),
+			oldReplicas:            10,
+			newReplicas:            1,
+			readyMachinesFromOldMS: 10,
+			readyMachinesFromNewMS: 1,
+			scaleExpected:          true,
+			expectedOldReplicas:    9,
+			expectedActions:        1,
+		},
+	}
+	for _, test := range tests {
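+		// Each scenario exercises reconcileOldMachineSets, which first cleans up unhealthy
+		// replicas and then performs the rolling scale-down; expectedActions counts the
+		// update calls issued by both phases.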
t.Run(test.name, func(t *testing.T) { + t.Logf(test.name) + + newSelector := map[string]string{"foo": "new"} + oldSelector := map[string]string{"foo": "old"} + newMS := ms("foo-new", test.newReplicas, newSelector, noTimestamp) + newMS.Status.AvailableReplicas = int32(test.readyMachinesFromNewMS) + oldMS := ms("foo-old", test.oldReplicas, oldSelector, noTimestamp) + oldMS.Status.AvailableReplicas = int32(test.readyMachinesFromOldMS) + oldMSs := []*v1alpha1.MachineSet{oldMS} + allMSs := []*v1alpha1.MachineSet{oldMS, newMS} + maxSurge := intstr.FromInt(1) + deployment := newMachineDeployment("foo", test.deploymentReplicas, nil, &maxSurge, &test.maxUnavailable, newSelector) + + rObjects := []runtime.Object{} + rObjects = append(rObjects, oldMS, newMS) + + fakeClient := fake.NewSimpleClientset(rObjects...) + controller := &MachineDeploymentControllerImpl{} + controller.machineClient = fakeClient + + scaled, err := controller.reconcileOldMachineSets(allMSs, oldMSs, newMS, deployment) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if !test.scaleExpected { + if scaled || len(fakeClient.Actions()) > 0 { + t.Fatalf("unexpected scaling: %v", fakeClient.Actions()) + } + } + if test.scaleExpected { + if !scaled { + t.Errorf("expected scaling to occur") + } + if test.expectedActions != len(fakeClient.Actions()) { + t.Fatalf("got %d actions, expected %d; %v", len(fakeClient.Actions()), test.expectedActions, fakeClient.Actions()) + } + updated := fakeClient.Actions()[len(fakeClient.Actions())-1].(core.UpdateAction).GetObject().(*v1alpha1.MachineSet) + if e, a := test.expectedOldReplicas, int(*(updated.Spec.Replicas)); e != a { + t.Fatalf("expected update to %d replicas, got %d", e, a) + } + } + }) + } +} + +func TestMachineDeploymentController_cleanupUnhealthyReplicas(t *testing.T) { + tests := []struct { + name string + oldReplicas int + readyMachines int + unHealthyMachines int + maxCleanupCount int + cleanupCountExpected int + }{ + { + name: "scenario 1. 2 unhealthy, max 1 cleanup => 1 cleanup.", + oldReplicas: 10, + readyMachines: 8, + unHealthyMachines: 2, + maxCleanupCount: 1, + cleanupCountExpected: 1, + }, + { + name: "scenario 2. 2 unhealthy, max 3 cleanup => 2 cleanup.", + oldReplicas: 10, + readyMachines: 8, + unHealthyMachines: 2, + maxCleanupCount: 3, + cleanupCountExpected: 2, + }, + { + name: "scenario 3. 2 unhealthy, max 0 cleanup => 0 cleanup.", + oldReplicas: 10, + readyMachines: 8, + unHealthyMachines: 2, + maxCleanupCount: 0, + cleanupCountExpected: 0, + }, + { + name: "scenario 4. 0 unhealthy, max 3 cleanup => 0 cleanup.", + oldReplicas: 10, + readyMachines: 10, + unHealthyMachines: 0, + maxCleanupCount: 3, + cleanupCountExpected: 0, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + t.Log(test.name) + oldMS := ms("foo-v2", test.oldReplicas, nil, noTimestamp) + oldMS.Status.AvailableReplicas = int32(test.readyMachines) + oldMSs := []*v1alpha1.MachineSet{oldMS} + maxSurge := intstr.FromInt(2) + maxUnavailable := intstr.FromInt(2) + deployment := newMachineDeployment("foo", 10, nil, &maxSurge, &maxUnavailable, nil) + + rObjects := []runtime.Object{} + rObjects = append(rObjects, oldMS) + + fakeClient := fake.NewSimpleClientset(rObjects...) 
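+			// cleanupUnhealthyReplicas scales an old machine set down by
+			// min(maxCleanupCount, spec.replicas - status.availableReplicas), which is what
+			// cleanupCountExpected encodes for each scenario.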
+ controller := &MachineDeploymentControllerImpl{} + controller.machineClient = fakeClient + + _, cleanupCount, err := controller.cleanupUnhealthyReplicas(oldMSs, deployment, int32(test.maxCleanupCount)) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if int(cleanupCount) != test.cleanupCountExpected { + t.Fatalf("got %d clean up count, expected %d clean up count", cleanupCount, test.cleanupCountExpected) + } + }) + } +} + +func TestMachineDeploymentController_scaleDownOldMachineSetsForRollingUpdate(t *testing.T) { + tests := []struct { + name string + deploymentReplicas int + maxUnavailable intstr.IntOrString + readyMachines int + oldReplicas int + scaleExpected bool + expectedOldReplicas int + }{ + { + name: "scenario 1. 10 desired, oldMS at 10, 10 ready, max unavailable 0 => oldMS at 10, no scaling.", + deploymentReplicas: 10, + maxUnavailable: intstr.FromInt(0), + readyMachines: 10, + oldReplicas: 10, + scaleExpected: false, + }, + { + name: "scenario 2. 10 desired, oldMS at 10, 10 ready, max unavailable 2 => oldMS at 8, scale down by 2.", + deploymentReplicas: 10, + maxUnavailable: intstr.FromInt(2), + readyMachines: 10, + oldReplicas: 10, + scaleExpected: true, + expectedOldReplicas: 8, + }, + { + name: "scenario 3. 10 desired, oldMS at 8, 8 ready, max unavailable 2 => oldMS at 8, no scaling.", + deploymentReplicas: 10, + maxUnavailable: intstr.FromInt(2), + readyMachines: 8, + oldReplicas: 10, + scaleExpected: false, + }, + { + name: "scenario 4. 10 desired, oldMS at 0, 10 ready, max unavailable 2 => oldMS at 0, no scaling.", + deploymentReplicas: 10, + maxUnavailable: intstr.FromInt(2), + readyMachines: 10, + oldReplicas: 0, + scaleExpected: false, + }, + { + name: "scenario 5. 10 desired, oldMS at 10, 1 ready, max unavailable 2 => oldMS at 10, no scaling.", + deploymentReplicas: 10, + maxUnavailable: intstr.FromInt(2), + readyMachines: 1, + oldReplicas: 10, + scaleExpected: false, + }, + { + name: "scenario 6. 10 desired, oldMS at 11, 11 ready, max unavailable 0 => oldMS at 10, scale down by 1.", + deploymentReplicas: 10, + maxUnavailable: intstr.FromInt(0), + readyMachines: 11, + oldReplicas: 11, + scaleExpected: true, + expectedOldReplicas: 10, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + t.Logf(test.name) + oldMS := ms("foo-v2", test.oldReplicas, nil, noTimestamp) + oldMS.Status.AvailableReplicas = int32(test.readyMachines) + allMSs := []*v1alpha1.MachineSet{oldMS} + oldMSs := []*v1alpha1.MachineSet{oldMS} + maxSurge := intstr.FromInt(1) + deployment := newMachineDeployment("foo", test.deploymentReplicas, nil, &maxSurge, &test.maxUnavailable, map[string]string{"foo": "bar"}) + + rObjects := []runtime.Object{} + rObjects = append(rObjects, oldMS) + + fakeClient := fake.NewSimpleClientset(rObjects...) + controller := &MachineDeploymentControllerImpl{} + controller.machineClient = fakeClient + + scaled, err := controller.scaleDownOldMachineSetsForRollingUpdate(allMSs, oldMSs, deployment) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if !test.scaleExpected { + if scaled != 0 { + t.Fatalf("unexpected scaling: %v", fakeClient.Actions()) + } + } + if test.scaleExpected { + if scaled == 0 { + t.Fatalf("expected scaling to occur; actions: %v", fakeClient.Actions()) + } + // There are both list and update actions logged, so extract the update + // action for verification. 
+ var updateAction core.UpdateAction + for _, action := range fakeClient.Actions() { + switch a := action.(type) { + case core.UpdateAction: + if updateAction != nil { + t.Errorf("expected only 1 update action; had %v and found %v", updateAction, a) + } else { + updateAction = a + } + } + } + if updateAction == nil { + t.Fatalf("expected an update action") + } + updated := updateAction.GetObject().(*v1alpha1.MachineSet) + if e, a := test.expectedOldReplicas, int(*(updated.Spec.Replicas)); e != a { + t.Fatalf("got %d replicas, expected %d replicas updated", a, e) + } + } + }) + } +} diff --git a/pkg/controller/machinedeployment/sync.go b/pkg/controller/machinedeployment/sync.go new file mode 100644 index 000000000000..108f40a737f2 --- /dev/null +++ b/pkg/controller/machinedeployment/sync.go @@ -0,0 +1,458 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package machinedeployment + +import ( + "fmt" + "reflect" + "sort" + "strconv" + + "github.com/golang/glog" + + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + apirand "k8s.io/apimachinery/pkg/util/rand" + + "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" + dutil "sigs.k8s.io/cluster-api/pkg/controller/machinedeployment/util" +) + +// sync is responsible for reconciling deployments on scaling events or when they +// are paused. +func (dc *MachineDeploymentControllerImpl) sync(d *v1alpha1.MachineDeployment, msList []*v1alpha1.MachineSet, machineMap map[types.UID]*v1alpha1.MachineList) error { + newMS, oldMSs, err := dc.getAllMachineSetsAndSyncRevision(d, msList, machineMap, false) + if err != nil { + return err + } + if err := dc.scale(d, newMS, oldMSs); err != nil { + // If we get an error while trying to scale, the deployment will be requeued + // so we can abort this resync + return err + } + + // TODO: Clean up the deployment when it's paused and no rollback is in flight. + + allMSs := append(oldMSs, newMS) + return dc.syncDeploymentStatus(allMSs, newMS, d) +} + +// getAllMachineSetsAndSyncRevision returns all the machine sets for the provided deployment (new and all old), with new MS's and deployment's revision updated. +// +// msList should come from getMachineSetsForDeployment(d). +// machineMap should come from getMachineMapForDeployment(d, msList). +// +// 1. Get all old MSes this deployment targets, and calculate the max revision number among them (maxOldV). +// 2. Get new MS this deployment targets (whose machine template matches deployment's), and update new MS's revision number to (maxOldV + 1), +// only if its revision number is smaller than (maxOldV + 1). If this step failed, we'll update it in the next deployment sync loop. +// 3. Copy new MS's revision number to deployment (update deployment's revision). If this step failed, we'll update it in the next deployment sync loop. +// +// Note that currently the deployment controller is using caches to avoid querying the server for reads. 
+// This may lead to stale reads of machine sets, thus incorrect deployment status.
+func (dc *MachineDeploymentControllerImpl) getAllMachineSetsAndSyncRevision(d *v1alpha1.MachineDeployment, msList []*v1alpha1.MachineSet, machineMap map[types.UID]*v1alpha1.MachineList, createIfNotExisted bool) (*v1alpha1.MachineSet, []*v1alpha1.MachineSet, error) {
+	_, allOldMSs := dutil.FindOldMachineSets(d, msList)
+
+	// Get new machine set with the updated revision number
+	newMS, err := dc.getNewMachineSet(d, msList, allOldMSs, createIfNotExisted)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	return newMS, allOldMSs, nil
+}
+
+// Returns a machine set that matches the intent of the given deployment. Returns nil if the new machine set doesn't exist yet.
+// 1. Get existing new MS (the MS that the given deployment targets, whose machine template is the same as deployment's).
+// 2. If there's an existing new MS, update its revision number if it's smaller than (maxOldRevision + 1), where maxOldRevision is the max revision number among all old MSes.
+// 3. If there's no existing new MS and createIfNotExisted is true, create one with the appropriate revision number (maxOldRevision + 1) and replicas.
+// Note that the machine-template-hash will be added to adopted MSes and machines.
+func (dc *MachineDeploymentControllerImpl) getNewMachineSet(d *v1alpha1.MachineDeployment, msList, oldMSs []*v1alpha1.MachineSet, createIfNotExisted bool) (*v1alpha1.MachineSet, error) {
+	existingNewMS := dutil.FindNewMachineSet(d, msList)
+
+	// Calculate the max revision number among all old MSes
+	maxOldRevision := dutil.MaxRevision(oldMSs)
+	// Calculate revision number for this new machine set
+	newRevision := strconv.FormatInt(maxOldRevision+1, 10)
+
+	// Latest machine set exists. We need to sync its annotations (this includes copying all but
+	// annotationsToSkip from the parent deployment, and updating revision, desiredReplicas,
+	// and maxReplicas) and also update the revision annotation in the deployment with the
+	// latest revision.
+	if existingNewMS != nil {
+		msCopy := existingNewMS.DeepCopy()
+
+		// Set existing new machine set's annotation
+		annotationsUpdated := dutil.SetNewMachineSetAnnotations(d, msCopy, newRevision, true)
+
+		minReadySecondsNeedsUpdate := msCopy.Spec.MinReadySeconds != *d.Spec.MinReadySeconds
+		if annotationsUpdated || minReadySecondsNeedsUpdate {
+			msCopy.Spec.MinReadySeconds = *d.Spec.MinReadySeconds
+			return dc.machineClient.ClusterV1alpha1().MachineSets(msCopy.ObjectMeta.Namespace).Update(msCopy)
+		}
+
+		// Apply the revision annotation from existingNewMS if it is missing from the deployment.
+		// needsUpdate is false if there is no annotation or it is already present.
+		needsUpdate := dutil.SetDeploymentRevision(d, msCopy.Annotations[dutil.RevisionAnnotation])
+
+		if needsUpdate {
+			var err error
+			if d, err = dc.machineClient.ClusterV1alpha1().MachineDeployments(d.Namespace).UpdateStatus(d); err != nil {
+				return nil, err
+			}
+		}
+		return msCopy, nil
+	}
+
+	if !createIfNotExisted {
+		return nil, nil
+	}
+
+	// new MachineSet does not exist, create one.
+	newMSTemplate := *d.Spec.Template.DeepCopy()
+	machineTemplateSpecHash := fmt.Sprintf("%d", dutil.ComputeHash(&newMSTemplate))
+	newMSTemplate.Labels = dutil.CloneAndAddLabel(d.Spec.Template.Labels, dutil.DefaultMachineDeploymentUniqueLabelKey, machineTemplateSpecHash)
+	// Add machineTemplateHash label to selector.
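+	// For example (hash value illustrative): a deployment selector of {foo: bar} becomes
+	// {foo: bar, machine-template-hash: "3789413501"}, so the new MachineSet only selects
+	// machines created from this exact template revision.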
+ newMSSelector := dutil.CloneSelectorAndAddLabel(&d.Spec.Selector, dutil.DefaultMachineDeploymentUniqueLabelKey, machineTemplateSpecHash) + + // Create new MachineSet + newMS := v1alpha1.MachineSet{ + ObjectMeta: metav1.ObjectMeta{ + // Make the name deterministic, to ensure idempotence + Name: d.Name + "-" + apirand.SafeEncodeString(machineTemplateSpecHash), + Namespace: d.Namespace, + Labels: newMSTemplate.Labels, + OwnerReferences: []metav1.OwnerReference{*metav1.NewControllerRef(d, controllerKind)}, + }, + Spec: v1alpha1.MachineSetSpec{ + Replicas: new(int32), + MinReadySeconds: *d.Spec.MinReadySeconds, + Selector: *newMSSelector, + Template: newMSTemplate, + }, + } + allMSs := append(oldMSs, &newMS) + newReplicasCount, err := dutil.NewMSNewReplicas(d, allMSs, &newMS) + if err != nil { + return nil, err + } + + *(newMS.Spec.Replicas) = newReplicasCount + // Set new machine set's annotation + dutil.SetNewMachineSetAnnotations(d, &newMS, newRevision, false) + // Create the new MachineSet. If it already exists, then we need to check for possible + // hash collisions. If there is any other error, we need to report it in the status of + // the Deployment. + alreadyExists := false + createdMS, err := dc.machineClient.ClusterV1alpha1().MachineSets(d.Namespace).Create(&newMS) + switch { + // We may end up hitting this due to a slow cache or a fast resync of the Deployment. + case errors.IsAlreadyExists(err): + alreadyExists = true + + // Fetch a copy of the MachineSet. + ms, msErr := dc.msLister.MachineSets(newMS.Namespace).Get(newMS.Name) + if msErr != nil { + return nil, msErr + } + + // If the Deployment owns the MachineSet and the MachineSet's MachineTemplateSpec is semantically + // deep equal to the MachineTemplateSpec of the Deployment, it's the Deployment's new MachineSet. + // Otherwise, this is a hash collision and we need to increment the collisionCount field in + // the status of the Deployment and requeue to try the creation in the next sync. + controllerRef := metav1.GetControllerOf(ms) + if controllerRef != nil && controllerRef.UID == d.UID && dutil.EqualIgnoreHash(&d.Spec.Template, &ms.Spec.Template) { + createdMS = ms + err = nil + break + } + + return nil, err + case err != nil: + glog.V(4).Infof("Failed to create new machine set %q: %v", newMS.Name, err) + return nil, err + } + + needsUpdate := dutil.SetDeploymentRevision(d, newRevision) + if !alreadyExists { + glog.V(4).Infof("Created new machine set %q", createdMS.Name) + } + if needsUpdate { + _, err = dc.machineClient.ClusterV1alpha1().MachineDeployments(d.Namespace).UpdateStatus(d) + } + return createdMS, err +} + +// scale scales proportionally in order to mitigate risk. Otherwise, scaling up can increase the size +// of the new machine set and scaling down can decrease the sizes of the old ones, both of which would +// have the effect of hastening the rollout progress, which could produce a higher proportion of unavailable +// replicas in the event of a problem with the rolled out template. Should run only on scaling events or +// when a deployment is paused and not during the normal rollout process. +func (dc *MachineDeploymentControllerImpl) scale(deployment *v1alpha1.MachineDeployment, newMS *v1alpha1.MachineSet, oldMSs []*v1alpha1.MachineSet) error { + if deployment.Spec.Replicas == nil { + return fmt.Errorf("spec replicas for deployment %v is nil, this is unexpected", deployment.Name) + } + // If there is only one active machine set then we should scale that up to the full count of the + // deployment. 
If there is no active machine set, then we should scale up the newest machine set. + if activeOrLatest := dutil.FindOneActiveOrLatest(newMS, oldMSs); activeOrLatest != nil { + if activeOrLatest.Spec.Replicas == nil { + return fmt.Errorf("spec replicas for machine set %v is nil, this is unexpected", activeOrLatest.Name) + } + if *(activeOrLatest.Spec.Replicas) == *(deployment.Spec.Replicas) { + return nil + } + _, _, err := dc.scaleMachineSet(activeOrLatest, *(deployment.Spec.Replicas), deployment) + return err + } + + // If the new machine set is saturated, old machine sets should be fully scaled down. + // This case handles machine set adoption during a saturated new machine set. + if dutil.IsSaturated(deployment, newMS) { + for _, old := range dutil.FilterActiveMachineSets(oldMSs) { + if _, _, err := dc.scaleMachineSet(old, 0, deployment); err != nil { + return err + } + } + return nil + } + + // There are old machine sets with machines and the new machine set is not saturated. + // We need to proportionally scale all machine sets (new and old) in case of a + // rolling deployment. + if dutil.IsRollingUpdate(deployment) { + allMSs := dutil.FilterActiveMachineSets(append(oldMSs, newMS)) + totalMSReplicas := dutil.GetReplicaCountForMachineSets(allMSs) + + allowedSize := int32(0) + if *(deployment.Spec.Replicas) > 0 { + allowedSize = *(deployment.Spec.Replicas) + dutil.MaxSurge(*deployment) + } + + // Number of additional replicas that can be either added or removed from the total + // replicas count. These replicas should be distributed proportionally to the active + // machine sets. + deploymentReplicasToAdd := allowedSize - totalMSReplicas + + // The additional replicas should be distributed proportionally amongst the active + // machine sets from the larger to the smaller in size machine set. Scaling direction + // drives what happens in case we are trying to scale machine sets of the same size. + // In such a case when scaling up, we should scale up newer machine sets first, and + // when scaling down, we should scale down older machine sets first. + var scalingOperation string + switch { + case deploymentReplicasToAdd > 0: + sort.Sort(dutil.MachineSetsBySizeNewer(allMSs)) + scalingOperation = "up" + + case deploymentReplicasToAdd < 0: + sort.Sort(dutil.MachineSetsBySizeOlder(allMSs)) + scalingOperation = "down" + } + + // Iterate over all active machine sets and estimate proportions for each of them. + // The absolute value of deploymentReplicasAdded should never exceed the absolute + // value of deploymentReplicasToAdd. + deploymentReplicasAdded := int32(0) + nameToSize := make(map[string]int32) + for i := range allMSs { + ms := allMSs[i] + if ms.Spec.Replicas == nil { + glog.Errorf("spec replicas for machine set %v is nil, this is unexpected.", ms.Name) + continue + } + + // Estimate proportions if we have replicas to add, otherwise simply populate + // nameToSize with the current sizes for each machine set. + if deploymentReplicasToAdd != 0 { + proportion := dutil.GetProportion(ms, *deployment, deploymentReplicasToAdd, deploymentReplicasAdded) + + nameToSize[ms.Name] = *(ms.Spec.Replicas) + proportion + deploymentReplicasAdded += proportion + } else { + nameToSize[ms.Name] = *(ms.Spec.Replicas) + } + } + + // Update all machine sets + for i := range allMSs { + ms := allMSs[i] + + // Add/remove any leftovers to the largest machine set. 
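+			// For example (sizes illustrative): scaling a deployment from 5 to 10 replicas over
+			// machine sets of size 3 and 2 yields proportions 3 and 2 (new sizes 6 and 4); when the
+			// rounded proportions do not add up to deploymentReplicasToAdd, the remainder lands here
+			// on the first, i.e. largest, machine set and is clamped at zero.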
+			if i == 0 && deploymentReplicasToAdd != 0 {
+				leftover := deploymentReplicasToAdd - deploymentReplicasAdded
+				nameToSize[ms.Name] = nameToSize[ms.Name] + leftover
+				if nameToSize[ms.Name] < 0 {
+					nameToSize[ms.Name] = 0
+				}
+			}
+
+			// TODO: Use transactions when we have them.
+			if _, _, err := dc.scaleMachineSetOperation(ms, nameToSize[ms.Name], deployment, scalingOperation); err != nil {
+				// Return as soon as we fail, the deployment is requeued
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+func (dc *MachineDeploymentControllerImpl) scaleMachineSet(ms *v1alpha1.MachineSet, newScale int32, deployment *v1alpha1.MachineDeployment) (bool, *v1alpha1.MachineSet, error) {
+	if ms.Spec.Replicas == nil {
+		return false, nil, fmt.Errorf("spec replicas for machine set %v is nil, this is unexpected", ms.Name)
+	}
+	// No need to scale
+	if *(ms.Spec.Replicas) == newScale {
+		return false, ms, nil
+	}
+	var scalingOperation string
+	if *(ms.Spec.Replicas) < newScale {
+		scalingOperation = "up"
+	} else {
+		scalingOperation = "down"
+	}
+
+	scaled, newMS, err := dc.scaleMachineSetOperation(ms, newScale, deployment, scalingOperation)
+	return scaled, newMS, err
+}
+
+func (dc *MachineDeploymentControllerImpl) scaleMachineSetOperation(ms *v1alpha1.MachineSet, newScale int32, deployment *v1alpha1.MachineDeployment, scaleOperation string) (bool, *v1alpha1.MachineSet, error) {
+	if ms.Spec.Replicas == nil {
+		return false, nil, fmt.Errorf("spec replicas for machine set %v is nil, this is unexpected", ms.Name)
+	}
+	sizeNeedsUpdate := *(ms.Spec.Replicas) != newScale
+
+	annotationsNeedUpdate := dutil.ReplicasAnnotationsNeedUpdate(ms, *(deployment.Spec.Replicas), *(deployment.Spec.Replicas)+dutil.MaxSurge(*deployment))
+
+	scaled := false
+	var err error
+	if sizeNeedsUpdate || annotationsNeedUpdate {
+		msCopy := ms.DeepCopy()
+		*(msCopy.Spec.Replicas) = newScale
+		dutil.SetReplicasAnnotations(msCopy, *(deployment.Spec.Replicas), *(deployment.Spec.Replicas)+dutil.MaxSurge(*deployment))
+		ms, err = dc.machineClient.ClusterV1alpha1().MachineSets(msCopy.Namespace).Update(msCopy)
+		if err == nil && sizeNeedsUpdate {
+			scaled = true
+		}
+	}
+	return scaled, ms, err
+}
+
+// cleanupDeployment is responsible for cleaning up a deployment, i.e. it retains all but the latest N old machine sets,
+// where N=d.Spec.RevisionHistoryLimit. Old machine sets are older versions of the machine template of a deployment, kept
+// around by default 1) for historical reasons and 2) for the ability to roll back a deployment.
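+// For example (illustrative): with RevisionHistoryLimit=1 and three old machine sets that are fully
+// scaled down, the two oldest are deleted and the most recent one is kept.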
+func (dc *MachineDeploymentControllerImpl) cleanupDeployment(oldMSs []*v1alpha1.MachineSet, deployment *v1alpha1.MachineDeployment) error {
+	if deployment.Spec.RevisionHistoryLimit == nil {
+		return nil
+	}
+
+	// Avoid deleting machine sets with a deletion timestamp set
+	aliveFilter := func(ms *v1alpha1.MachineSet) bool {
+		return ms != nil && ms.ObjectMeta.DeletionTimestamp == nil
+	}
+	cleanableMSes := dutil.FilterMachineSets(oldMSs, aliveFilter)
+
+	diff := int32(len(cleanableMSes)) - *deployment.Spec.RevisionHistoryLimit
+	if diff <= 0 {
+		return nil
+	}
+
+	sort.Sort(dutil.MachineSetsByCreationTimestamp(cleanableMSes))
+	glog.V(4).Infof("Looking to clean up old machine sets for deployment %q", deployment.Name)
+
+	for i := int32(0); i < diff; i++ {
+		ms := cleanableMSes[i]
+		if ms.Spec.Replicas == nil {
+			return fmt.Errorf("spec replicas for machine set %v is nil, this is unexpected", ms.Name)
+		}
+		// Avoid deleting machine sets with non-zero replica counts
+		if ms.Status.Replicas != 0 || *(ms.Spec.Replicas) != 0 || ms.Generation > ms.Status.ObservedGeneration || ms.DeletionTimestamp != nil {
+			continue
+		}
+		glog.V(4).Infof("Trying to clean up machine set %q for deployment %q", ms.Name, deployment.Name)
+		if err := dc.machineClient.ClusterV1alpha1().MachineSets(ms.Namespace).Delete(ms.Name, nil); err != nil && !errors.IsNotFound(err) {
+			// Return error instead of aggregating and continuing DELETEs on the theory
+			// that we may be overloading the api server.
+			return err
+		}
+	}
+
+	return nil
+}
+
+// syncDeploymentStatus checks if the status is up-to-date and syncs it if necessary
+func (dc *MachineDeploymentControllerImpl) syncDeploymentStatus(allMSs []*v1alpha1.MachineSet, newMS *v1alpha1.MachineSet, d *v1alpha1.MachineDeployment) error {
+	newStatus := calculateStatus(allMSs, newMS, d)
+
+	if reflect.DeepEqual(d.Status, newStatus) {
+		return nil
+	}
+
+	newDeployment := d
+	newDeployment.Status = newStatus
+	_, err := dc.machineClient.ClusterV1alpha1().MachineDeployments(newDeployment.Namespace).UpdateStatus(newDeployment)
+	return err
+}
+
+// calculateStatus calculates the latest status for the provided deployment by looking into the provided machine sets.
+func calculateStatus(allMSs []*v1alpha1.MachineSet, newMS *v1alpha1.MachineSet, deployment *v1alpha1.MachineDeployment) v1alpha1.MachineDeploymentStatus {
+	availableReplicas := dutil.GetAvailableReplicaCountForMachineSets(allMSs)
+	totalReplicas := dutil.GetReplicaCountForMachineSets(allMSs)
+	unavailableReplicas := totalReplicas - availableReplicas
+	// If unavailableReplicas is negative, then that means the Deployment has more available replicas running than
+	// desired, e.g. whenever it scales down. In such a case we should simply default unavailableReplicas to zero.
+	if unavailableReplicas < 0 {
+		unavailableReplicas = 0
+	}
+
+	status := v1alpha1.MachineDeploymentStatus{
+		// TODO: Ensure that if we start retrying status updates, we won't pick up a new Generation value.
+ ObservedGeneration: deployment.Generation, + Replicas: dutil.GetActualReplicaCountForMachineSets(allMSs), + UpdatedReplicas: dutil.GetActualReplicaCountForMachineSets([]*v1alpha1.MachineSet{newMS}), + ReadyReplicas: dutil.GetReadyReplicaCountForMachineSets(allMSs), + AvailableReplicas: availableReplicas, + UnavailableReplicas: unavailableReplicas, + } + + return status +} + +// isScalingEvent checks whether the provided deployment has been updated with a scaling event +// by looking at the desired-replicas annotation in the active machine sets of the deployment. +// +// msList should come from getMachineSetsForDeployment(d). +// machineMap should come from getMachineMapForDeployment(d, msList). +func (dc *MachineDeploymentControllerImpl) isScalingEvent(d *v1alpha1.MachineDeployment, msList []*v1alpha1.MachineSet, machineMap map[types.UID]*v1alpha1.MachineList) (bool, error) { + if d.Spec.Replicas == nil { + return false, fmt.Errorf("spec replicas for deployment %v is nil, this is unexpected", d.Name) + } + newMS, oldMSs, err := dc.getAllMachineSetsAndSyncRevision(d, msList, machineMap, false) + if err != nil { + return false, err + } + allMSs := append(oldMSs, newMS) + for _, ms := range dutil.FilterActiveMachineSets(allMSs) { + desired, ok := dutil.GetDesiredReplicasAnnotation(ms) + if !ok { + continue + } + if desired != *(d.Spec.Replicas) { + return true, nil + } + } + return false, nil +} diff --git a/pkg/controller/machinedeployment/sync_test.go b/pkg/controller/machinedeployment/sync_test.go new file mode 100644 index 000000000000..50d5d72af0d7 --- /dev/null +++ b/pkg/controller/machinedeployment/sync_test.go @@ -0,0 +1,435 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package machinedeployment + +import ( + "testing" + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/intstr" + testclient "k8s.io/client-go/testing" + "k8s.io/client-go/tools/cache" + "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" + "sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/fake" + v1alpha1listers "sigs.k8s.io/cluster-api/pkg/client/listers_generated/cluster/v1alpha1" + dutil "sigs.k8s.io/cluster-api/pkg/controller/machinedeployment/util" +) + +func intOrStrP(val int) *intstr.IntOrString { + intOrStr := intstr.FromInt(val) + return &intOrStr +} + +func TestScale(t *testing.T) { + newTimestamp := metav1.Date(2016, 5, 20, 2, 0, 0, 0, time.UTC) + oldTimestamp := metav1.Date(2016, 5, 20, 1, 0, 0, 0, time.UTC) + olderTimestamp := metav1.Date(2016, 5, 20, 0, 0, 0, 0, time.UTC) + + var updatedTemplate = func(replicas int) *v1alpha1.MachineDeployment { + d := newMachineDeployment("foo", replicas, nil, nil, nil, map[string]string{"foo": "bar"}) + d.Spec.Template.Labels["another"] = "label" + return d + } + + tests := []struct { + name string + deployment *v1alpha1.MachineDeployment + oldDeployment *v1alpha1.MachineDeployment + + newMS *v1alpha1.MachineSet + oldMSs []*v1alpha1.MachineSet + + expectedNew *v1alpha1.MachineSet + expectedOld []*v1alpha1.MachineSet + wasntUpdated map[string]bool + + desiredReplicasAnnotations map[string]int32 + }{ + { + name: "normal scaling event: 10 -> 12", + deployment: newMachineDeployment("foo", 12, nil, nil, nil, nil), + oldDeployment: newMachineDeployment("foo", 10, nil, nil, nil, nil), + + newMS: ms("foo-v1", 10, nil, newTimestamp), + oldMSs: []*v1alpha1.MachineSet{}, + + expectedNew: ms("foo-v1", 12, nil, newTimestamp), + expectedOld: []*v1alpha1.MachineSet{}, + }, + { + name: "normal scaling event: 10 -> 5", + deployment: newMachineDeployment("foo", 5, nil, nil, nil, nil), + oldDeployment: newMachineDeployment("foo", 10, nil, nil, nil, nil), + + newMS: ms("foo-v1", 10, nil, newTimestamp), + oldMSs: []*v1alpha1.MachineSet{}, + + expectedNew: ms("foo-v1", 5, nil, newTimestamp), + expectedOld: []*v1alpha1.MachineSet{}, + }, + { + name: "proportional scaling: 5 -> 10", + deployment: newMachineDeployment("foo", 10, nil, nil, nil, nil), + oldDeployment: newMachineDeployment("foo", 5, nil, nil, nil, nil), + + newMS: ms("foo-v2", 2, nil, newTimestamp), + oldMSs: []*v1alpha1.MachineSet{ms("foo-v1", 3, nil, oldTimestamp)}, + + expectedNew: ms("foo-v2", 4, nil, newTimestamp), + expectedOld: []*v1alpha1.MachineSet{ms("foo-v1", 6, nil, oldTimestamp)}, + }, + { + name: "proportional scaling: 5 -> 3", + deployment: newMachineDeployment("foo", 3, nil, nil, nil, nil), + oldDeployment: newMachineDeployment("foo", 5, nil, nil, nil, nil), + + newMS: ms("foo-v2", 2, nil, newTimestamp), + oldMSs: []*v1alpha1.MachineSet{ms("foo-v1", 3, nil, oldTimestamp)}, + + expectedNew: ms("foo-v2", 1, nil, newTimestamp), + expectedOld: []*v1alpha1.MachineSet{ms("foo-v1", 2, nil, oldTimestamp)}, + }, + { + name: "proportional scaling: 9 -> 4", + deployment: newMachineDeployment("foo", 4, nil, nil, nil, nil), + oldDeployment: newMachineDeployment("foo", 9, nil, nil, nil, nil), + + newMS: ms("foo-v2", 8, nil, newTimestamp), + oldMSs: []*v1alpha1.MachineSet{ms("foo-v1", 1, nil, oldTimestamp)}, + + expectedNew: ms("foo-v2", 4, nil, newTimestamp), + expectedOld: []*v1alpha1.MachineSet{ms("foo-v1", 0, nil, oldTimestamp)}, + }, + { + name: "proportional scaling: 7 -> 10", + deployment: 
newMachineDeployment("foo", 10, nil, nil, nil, nil), + oldDeployment: newMachineDeployment("foo", 7, nil, nil, nil, nil), + + newMS: ms("foo-v3", 2, nil, newTimestamp), + oldMSs: []*v1alpha1.MachineSet{ms("foo-v2", 3, nil, oldTimestamp), ms("foo-v1", 2, nil, olderTimestamp)}, + + expectedNew: ms("foo-v3", 3, nil, newTimestamp), + expectedOld: []*v1alpha1.MachineSet{ms("foo-v2", 4, nil, oldTimestamp), ms("foo-v1", 3, nil, olderTimestamp)}, + }, + { + name: "proportional scaling: 13 -> 8", + deployment: newMachineDeployment("foo", 8, nil, nil, nil, nil), + oldDeployment: newMachineDeployment("foo", 13, nil, nil, nil, nil), + + newMS: ms("foo-v3", 2, nil, newTimestamp), + oldMSs: []*v1alpha1.MachineSet{ms("foo-v2", 8, nil, oldTimestamp), ms("foo-v1", 3, nil, olderTimestamp)}, + + expectedNew: ms("foo-v3", 1, nil, newTimestamp), + expectedOld: []*v1alpha1.MachineSet{ms("foo-v2", 5, nil, oldTimestamp), ms("foo-v1", 2, nil, olderTimestamp)}, + }, + // Scales up the new machine set. + { + name: "leftover distribution: 3 -> 4", + deployment: newMachineDeployment("foo", 4, nil, nil, nil, nil), + oldDeployment: newMachineDeployment("foo", 3, nil, nil, nil, nil), + + newMS: ms("foo-v3", 1, nil, newTimestamp), + oldMSs: []*v1alpha1.MachineSet{ms("foo-v2", 1, nil, oldTimestamp), ms("foo-v1", 1, nil, olderTimestamp)}, + + expectedNew: ms("foo-v3", 2, nil, newTimestamp), + expectedOld: []*v1alpha1.MachineSet{ms("foo-v2", 1, nil, oldTimestamp), ms("foo-v1", 1, nil, olderTimestamp)}, + }, + // Scales down the older machine set. + { + name: "leftover distribution: 3 -> 2", + deployment: newMachineDeployment("foo", 2, nil, nil, nil, nil), + oldDeployment: newMachineDeployment("foo", 3, nil, nil, nil, nil), + + newMS: ms("foo-v3", 1, nil, newTimestamp), + oldMSs: []*v1alpha1.MachineSet{ms("foo-v2", 1, nil, oldTimestamp), ms("foo-v1", 1, nil, olderTimestamp)}, + + expectedNew: ms("foo-v3", 1, nil, newTimestamp), + expectedOld: []*v1alpha1.MachineSet{ms("foo-v2", 1, nil, oldTimestamp), ms("foo-v1", 0, nil, olderTimestamp)}, + }, + // Scales up the latest machine set first. 
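+		// (When sizes tie, MachineSetsBySizeNewer orders the newer machine set first, so it
+		// receives the extra replica.)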
+ { + name: "proportional scaling (no new rs): 4 -> 5", + deployment: newMachineDeployment("foo", 5, nil, nil, nil, nil), + oldDeployment: newMachineDeployment("foo", 4, nil, nil, nil, nil), + + newMS: nil, + oldMSs: []*v1alpha1.MachineSet{ms("foo-v2", 2, nil, oldTimestamp), ms("foo-v1", 2, nil, olderTimestamp)}, + + expectedNew: nil, + expectedOld: []*v1alpha1.MachineSet{ms("foo-v2", 3, nil, oldTimestamp), ms("foo-v1", 2, nil, olderTimestamp)}, + }, + // Scales down to zero + { + name: "proportional scaling: 6 -> 0", + deployment: newMachineDeployment("foo", 0, nil, nil, nil, nil), + oldDeployment: newMachineDeployment("foo", 6, nil, nil, nil, nil), + + newMS: ms("foo-v3", 3, nil, newTimestamp), + oldMSs: []*v1alpha1.MachineSet{ms("foo-v2", 2, nil, oldTimestamp), ms("foo-v1", 1, nil, olderTimestamp)}, + + expectedNew: ms("foo-v3", 0, nil, newTimestamp), + expectedOld: []*v1alpha1.MachineSet{ms("foo-v2", 0, nil, oldTimestamp), ms("foo-v1", 0, nil, olderTimestamp)}, + }, + // Scales up from zero + { + name: "proportional scaling: 0 -> 6", + deployment: newMachineDeployment("foo", 6, nil, nil, nil, nil), + oldDeployment: newMachineDeployment("foo", 6, nil, nil, nil, nil), + + newMS: ms("foo-v3", 0, nil, newTimestamp), + oldMSs: []*v1alpha1.MachineSet{ms("foo-v2", 0, nil, oldTimestamp), ms("foo-v1", 0, nil, olderTimestamp)}, + + expectedNew: ms("foo-v3", 6, nil, newTimestamp), + expectedOld: []*v1alpha1.MachineSet{ms("foo-v2", 0, nil, oldTimestamp), ms("foo-v1", 0, nil, olderTimestamp)}, + wasntUpdated: map[string]bool{"foo-v2": true, "foo-v1": true}, + }, + // Scenario: deployment.spec.replicas == 3 ( foo-v1.spec.replicas == foo-v2.spec.replicas == foo-v3.spec.replicas == 1 ) + // Deployment is scaled to 5. foo-v3.spec.replicas and foo-v2.spec.replicas should increment by 1 but foo-v2 fails to + // update. 
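+		// The failed update is simulated via desiredReplicasAnnotations below: foo-v2 keeps its
+		// stale pre-scale annotations, so the proportional math assigns the outstanding replica
+		// to it on this retry.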
+ { + name: "failed ms update", + deployment: newMachineDeployment("foo", 5, nil, nil, nil, nil), + oldDeployment: newMachineDeployment("foo", 5, nil, nil, nil, nil), + + newMS: ms("foo-v3", 2, nil, newTimestamp), + oldMSs: []*v1alpha1.MachineSet{ms("foo-v2", 1, nil, oldTimestamp), ms("foo-v1", 1, nil, olderTimestamp)}, + + expectedNew: ms("foo-v3", 2, nil, newTimestamp), + expectedOld: []*v1alpha1.MachineSet{ms("foo-v2", 2, nil, oldTimestamp), ms("foo-v1", 1, nil, olderTimestamp)}, + wasntUpdated: map[string]bool{"foo-v3": true, "foo-v1": true}, + + desiredReplicasAnnotations: map[string]int32{"foo-v2": int32(3)}, + }, + { + name: "deployment with surge machines", + deployment: newMachineDeployment("foo", 20, nil, intOrStrP(2), nil, nil), + oldDeployment: newMachineDeployment("foo", 10, nil, intOrStrP(2), nil, nil), + + newMS: ms("foo-v2", 6, nil, newTimestamp), + oldMSs: []*v1alpha1.MachineSet{ms("foo-v1", 6, nil, oldTimestamp)}, + + expectedNew: ms("foo-v2", 11, nil, newTimestamp), + expectedOld: []*v1alpha1.MachineSet{ms("foo-v1", 11, nil, oldTimestamp)}, + }, + { + name: "change both surge and size", + deployment: newMachineDeployment("foo", 50, nil, intOrStrP(6), nil, nil), + oldDeployment: newMachineDeployment("foo", 10, nil, intOrStrP(3), nil, nil), + + newMS: ms("foo-v2", 5, nil, newTimestamp), + oldMSs: []*v1alpha1.MachineSet{ms("foo-v1", 8, nil, oldTimestamp)}, + + expectedNew: ms("foo-v2", 22, nil, newTimestamp), + expectedOld: []*v1alpha1.MachineSet{ms("foo-v1", 34, nil, oldTimestamp)}, + }, + { + name: "change both size and template", + deployment: updatedTemplate(14), + oldDeployment: newMachineDeployment("foo", 10, nil, nil, nil, map[string]string{"foo": "bar"}), + + newMS: nil, + oldMSs: []*v1alpha1.MachineSet{ms("foo-v2", 7, nil, newTimestamp), ms("foo-v1", 3, nil, oldTimestamp)}, + + expectedNew: nil, + expectedOld: []*v1alpha1.MachineSet{ms("foo-v2", 10, nil, newTimestamp), ms("foo-v1", 4, nil, oldTimestamp)}, + }, + { + name: "saturated but broken new machine set does not affect old machines", + deployment: newMachineDeployment("foo", 2, nil, intOrStrP(1), intOrStrP(1), nil), + oldDeployment: newMachineDeployment("foo", 2, nil, intOrStrP(1), intOrStrP(1), nil), + + newMS: func() *v1alpha1.MachineSet { + ms := ms("foo-v2", 2, nil, newTimestamp) + ms.Status.AvailableReplicas = 0 + return ms + }(), + oldMSs: []*v1alpha1.MachineSet{ms("foo-v1", 1, nil, oldTimestamp)}, + + expectedNew: ms("foo-v2", 2, nil, newTimestamp), + expectedOld: []*v1alpha1.MachineSet{ms("foo-v1", 1, nil, oldTimestamp)}, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + _ = olderTimestamp + t.Log(test.name) + fakeClient := fake.Clientset{} + controller := &MachineDeploymentControllerImpl{} + controller.machineClient = &fakeClient + + if test.newMS != nil { + desiredReplicas := *(test.oldDeployment.Spec.Replicas) + if desired, ok := test.desiredReplicasAnnotations[test.newMS.Name]; ok { + desiredReplicas = desired + } + dutil.SetReplicasAnnotations(test.newMS, desiredReplicas, desiredReplicas+dutil.MaxSurge(*test.oldDeployment)) + } + for i := range test.oldMSs { + ms := test.oldMSs[i] + if ms == nil { + continue + } + desiredReplicas := *(test.oldDeployment.Spec.Replicas) + if desired, ok := test.desiredReplicasAnnotations[ms.Name]; ok { + desiredReplicas = desired + } + dutil.SetReplicasAnnotations(ms, desiredReplicas, desiredReplicas+dutil.MaxSurge(*test.oldDeployment)) + } + + if err := controller.scale(test.deployment, test.newMS, test.oldMSs); err != nil { + 
t.Errorf("%s: unexpected error: %v", test.name, err)
+				return
+			}
+
+			// Construct the nameToSize map that will hold all the sizes we got out of the tests.
+			// Skip updating the map if the machine set wasn't updated since there will be
+			// no update action for it.
+			nameToSize := make(map[string]int32)
+			if test.newMS != nil {
+				nameToSize[test.newMS.Name] = *(test.newMS.Spec.Replicas)
+			}
+			for i := range test.oldMSs {
+				ms := test.oldMSs[i]
+				nameToSize[ms.Name] = *(ms.Spec.Replicas)
+			}
+			// Get all the UPDATE actions and update nameToSize with all the updated sizes.
+			for _, action := range fakeClient.Actions() {
+				ms := action.(testclient.UpdateAction).GetObject().(*v1alpha1.MachineSet)
+				if !test.wasntUpdated[ms.Name] {
+					nameToSize[ms.Name] = *(ms.Spec.Replicas)
+				}
+			}
+
+			if test.expectedNew != nil && test.newMS != nil && *(test.expectedNew.Spec.Replicas) != nameToSize[test.newMS.Name] {
+				t.Errorf("%s: expected new replicas: %d, got: %d", test.name, *(test.expectedNew.Spec.Replicas), nameToSize[test.newMS.Name])
+				return
+			}
+			if len(test.expectedOld) != len(test.oldMSs) {
+				t.Errorf("%s: expected %d old machine sets, got %d", test.name, len(test.expectedOld), len(test.oldMSs))
+				return
+			}
+			for n := range test.oldMSs {
+				ms := test.oldMSs[n]
+				expected := test.expectedOld[n]
+				if *(expected.Spec.Replicas) != nameToSize[ms.Name] {
+					t.Errorf("%s: expected old (%s) replicas: %d, got: %d", test.name, ms.Name, *(expected.Spec.Replicas), nameToSize[ms.Name])
+				}
+			}
+		})
+	}
+}
+
+func TestDeploymentController_cleanupDeployment(t *testing.T) {
+	selector := map[string]string{"foo": "bar"}
+	alreadyDeleted := newMSWithStatus("foo-1", 0, 0, selector)
+	now := metav1.Now()
+	alreadyDeleted.DeletionTimestamp = &now
+
+	tests := []struct {
+		name                 string
+		oldMSs               []*v1alpha1.MachineSet
+		revisionHistoryLimit int32
+		expectedDeletions    int
+	}{
+		{
+			name: "3 machine sets qualify for deletion, limit to keep 1, delete 2.",
+			oldMSs: []*v1alpha1.MachineSet{
+				newMSWithStatus("foo-1", 0, 0, selector),
+				newMSWithStatus("foo-2", 0, 0, selector),
+				newMSWithStatus("foo-3", 0, 0, selector),
+			},
+			revisionHistoryLimit: 1,
+			expectedDeletions:    2,
+		},
+		{
+			// Only delete the machine set with Spec.Replicas = Status.Replicas = 0.
+			name: "1 machine set qualifies for deletion, limit 0, delete 1.",
+			oldMSs: []*v1alpha1.MachineSet{
+				newMSWithStatus("foo-1", 0, 0, selector),
+				newMSWithStatus("foo-2", 0, 1, selector),
+				newMSWithStatus("foo-3", 1, 0, selector),
+				newMSWithStatus("foo-4", 1, 1, selector),
+			},
+			revisionHistoryLimit: 0,
+			expectedDeletions:    1,
+		},
+		{
+			name: "2 machine sets qualify for deletion, limit 0, delete 2.",
+			oldMSs: []*v1alpha1.MachineSet{
+				newMSWithStatus("foo-1", 0, 0, selector),
+				newMSWithStatus("foo-2", 0, 0, selector),
+			},
+			revisionHistoryLimit: 0,
+			expectedDeletions:    2,
+		},
+		{
+			name: "0 machine sets qualify for deletion, limit 0, delete 0.",
+			oldMSs: []*v1alpha1.MachineSet{
+				newMSWithStatus("foo-1", 1, 1, selector),
+				newMSWithStatus("foo-2", 1, 1, selector),
+			},
+			revisionHistoryLimit: 0,
+			expectedDeletions:    0,
+		},
+		{
+			name: "1 machine set qualifies for deletion, already deleting, limit 0, delete 0.",
+			oldMSs: []*v1alpha1.MachineSet{
+				alreadyDeleted,
+			},
+			revisionHistoryLimit: 0,
+			expectedDeletions:    0,
+		},
+	}
+
+	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
+			t.Log(test.name)
+
+			rObjects := []runtime.Object{}
+			machineSetIndexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{})
+			for _, ms := range test.oldMSs {
+				err := machineSetIndexer.Add(ms)
+				if err != nil {
+					t.Fatal(err)
+				}
+				rObjects = append(rObjects, ms)
+			}
+			machineSetLister := v1alpha1listers.NewMachineSetLister(machineSetIndexer)
+
+			fakeClient := fake.NewSimpleClientset(rObjects...)
+			controller := &MachineDeploymentControllerImpl{}
+			controller.machineClient = fakeClient
+			controller.msLister = machineSetLister
+
+			d := newMachineDeployment("foo", 1, &test.revisionHistoryLimit, nil, nil, map[string]string{"foo": "bar"})
+			if err := controller.cleanupDeployment(test.oldMSs, d); err != nil {
+				t.Fatalf("unexpected error: %v", err)
+			}
+
+			gotDeletions := 0
+			for _, action := range fakeClient.Actions() {
+				if "delete" == action.GetVerb() {
+					gotDeletions++
+				}
+			}
+			if gotDeletions != test.expectedDeletions {
+				t.Errorf("expected %v old machine sets to be deleted, got %v", test.expectedDeletions, gotDeletions)
+			}
+		})
+	}
+}
diff --git a/pkg/controller/machinedeployment/util/util.go b/pkg/controller/machinedeployment/util/util.go
new file mode 100644
index 000000000000..ed080d294eca
--- /dev/null
+++ b/pkg/controller/machinedeployment/util/util.go
@@ -0,0 +1,696 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package util + +import ( + "fmt" + "hash" + "hash/fnv" + "sort" + "strconv" + "strings" + + "github.com/davecgh/go-spew/spew" + "github.com/golang/glog" + + "k8s.io/api/core/v1" + apiequality "k8s.io/apimachinery/pkg/api/equality" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + intstrutil "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/client-go/util/integer" + + "sigs.k8s.io/cluster-api/pkg/apis/cluster/common" + "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" +) + +const ( + DefaultMachineDeploymentUniqueLabelKey = "machine-template-hash" + + // RevisionAnnotation is the revision annotation of a machine deployment's machine sets which records its rollout sequence + RevisionAnnotation = "machinedeployment.clusters.k8s.io/revision" + // RevisionHistoryAnnotation maintains the history of all old revisions that a machine set has served for a machine deployment. + RevisionHistoryAnnotation = "machinedeployment.clusters.k8s.io/revision-history" + // DesiredReplicasAnnotation is the desired replicas for a machine deployment recorded as an annotation + // in its machine sets. Helps in separating scaling events from the rollout process and for + // determining if the new machine set for a deployment is really saturated. + DesiredReplicasAnnotation = "machinedeployment.clusters.k8s.io/desired-replicas" + // MaxReplicasAnnotation is the maximum replicas a deployment can have at a given point, which + // is machinedeployment.spec.replicas + maxSurge. Used by the underlying machine sets to estimate their + // proportions in case the deployment has surge replicas. + MaxReplicasAnnotation = "machinedeployment.clusters.k8s.io/max-replicas" + + // FailedMSCreateReason is added in a machine deployment when it cannot create a new machine set. + FailedMSCreateReason = "MachineSetCreateError" + // FoundNewMSReason is added in a machine deployment when it adopts an existing machine set. + FoundNewMSReason = "FoundNewMachineSet" + // PausedDeployReason is added in a deployment when it is paused. Lack of progress shouldn't be + // estimated once a deployment is paused. + PausedDeployReason = "DeploymentPaused" + + // + // Available: + // + // MinimumReplicasAvailable is added in a deployment when it has its minimum replicas required available. + MinimumReplicasAvailable = "MinimumReplicasAvailable" + // MinimumReplicasUnavailable is added in a deployment when it doesn't have the minimum required replicas + // available. + MinimumReplicasUnavailable = "MinimumReplicasUnavailable" +) + +// MachineSetsByCreationTimestamp sorts a list of MachineSet by creation timestamp, using their names as a tie breaker. +type MachineSetsByCreationTimestamp []*v1alpha1.MachineSet + +func (o MachineSetsByCreationTimestamp) Len() int { return len(o) } +func (o MachineSetsByCreationTimestamp) Swap(i, j int) { o[i], o[j] = o[j], o[i] } +func (o MachineSetsByCreationTimestamp) Less(i, j int) bool { + if o[i].CreationTimestamp.Equal(&o[j].CreationTimestamp) { + return o[i].Name < o[j].Name + } + return o[i].CreationTimestamp.Before(&o[j].CreationTimestamp) +} + +// MachineSetsBySizeOlder sorts a list of MachineSet by size in descending order, using their creation timestamp or name as a tie breaker. +// By using the creation timestamp, this sorts from old to new machine sets. 
+type MachineSetsBySizeOlder []*v1alpha1.MachineSet + +func (o MachineSetsBySizeOlder) Len() int { return len(o) } +func (o MachineSetsBySizeOlder) Swap(i, j int) { o[i], o[j] = o[j], o[i] } +func (o MachineSetsBySizeOlder) Less(i, j int) bool { + if *(o[i].Spec.Replicas) == *(o[j].Spec.Replicas) { + return o[i].CreationTimestamp.Before(&o[j].CreationTimestamp) + } + return *(o[i].Spec.Replicas) > *(o[j].Spec.Replicas) +} + +// MachineSetsBySizeNewer sorts a list of MachineSet by size in descending order, using their creation timestamp or name as a tie breaker. +// By using the creation timestamp, this sorts from new to old machine sets. +type MachineSetsBySizeNewer []*v1alpha1.MachineSet + +func (o MachineSetsBySizeNewer) Len() int { return len(o) } +func (o MachineSetsBySizeNewer) Swap(i, j int) { o[i], o[j] = o[j], o[i] } +func (o MachineSetsBySizeNewer) Less(i, j int) bool { + if *(o[i].Spec.Replicas) == *(o[j].Spec.Replicas) { + return o[j].CreationTimestamp.Before(&o[i].CreationTimestamp) + } + return *(o[i].Spec.Replicas) > *(o[j].Spec.Replicas) +} + +// SetDeploymentRevision updates the revision for a deployment. +func SetDeploymentRevision(deployment *v1alpha1.MachineDeployment, revision string) bool { + updated := false + + if deployment.Annotations == nil { + deployment.Annotations = make(map[string]string) + } + if deployment.Annotations[RevisionAnnotation] != revision { + deployment.Annotations[RevisionAnnotation] = revision + updated = true + } + + return updated +} + +// MaxRevision finds the highest revision in the machine sets +func MaxRevision(allMSs []*v1alpha1.MachineSet) int64 { + max := int64(0) + for _, ms := range allMSs { + if v, err := Revision(ms); err != nil { + // Skip the machine sets when it failed to parse their revision information + glog.V(4).Infof("Error: %v. Couldn't parse revision for machine set %#v, deployment controller will skip it when reconciling revisions.", err, ms) + } else if v > max { + max = v + } + } + return max +} + +// Revision returns the revision number of the input object. +func Revision(obj runtime.Object) (int64, error) { + acc, err := meta.Accessor(obj) + if err != nil { + return 0, err + } + v, ok := acc.GetAnnotations()[RevisionAnnotation] + if !ok { + return 0, nil + } + return strconv.ParseInt(v, 10, 64) +} + +var annotationsToSkip = map[string]bool{ + v1.LastAppliedConfigAnnotation: true, + RevisionAnnotation: true, + RevisionHistoryAnnotation: true, + DesiredReplicasAnnotation: true, + MaxReplicasAnnotation: true, +} + +// skipCopyAnnotation returns true if we should skip copying the annotation with the given annotation key +// TODO: How to decide which annotations should / should not be copied? +// See https://github.com/kubernetes/kubernetes/pull/20035#issuecomment-179558615 +func skipCopyAnnotation(key string) bool { + return annotationsToSkip[key] +} + +// copyDeploymentAnnotationsToMachineSet copies deployment's annotations to machine set's annotations, +// and returns true if machine set's annotation is changed. +// Note that apply and revision annotations are not copied. +func copyDeploymentAnnotationsToMachineSet(deployment *v1alpha1.MachineDeployment, ms *v1alpha1.MachineSet) bool { + msAnnotationsChanged := false + if ms.Annotations == nil { + ms.Annotations = make(map[string]string) + } + for k, v := range deployment.Annotations { + // newMS revision is updated automatically in getNewMachineSet, and the deployment's revision number is then updated + // by copying its newMS revision number. 
We should not copy deployment's revision to its newMS, since the update of + // deployment revision number may fail (revision becomes stale) and the revision number in newMS is more reliable. + if skipCopyAnnotation(k) || ms.Annotations[k] == v { + continue + } + ms.Annotations[k] = v + msAnnotationsChanged = true + } + return msAnnotationsChanged +} + +// GetDesiredReplicasAnnotation returns the number of desired replicas +func GetDesiredReplicasAnnotation(ms *v1alpha1.MachineSet) (int32, bool) { + return getIntFromAnnotation(ms, DesiredReplicasAnnotation) +} + +func getMaxReplicasAnnotation(ms *v1alpha1.MachineSet) (int32, bool) { + return getIntFromAnnotation(ms, MaxReplicasAnnotation) +} + +func getIntFromAnnotation(ms *v1alpha1.MachineSet, annotationKey string) (int32, bool) { + annotationValue, ok := ms.Annotations[annotationKey] + if !ok { + return int32(0), false + } + intValue, err := strconv.Atoi(annotationValue) + if err != nil { + glog.V(2).Infof("Cannot convert the value %q with annotation key %q for the machine set %q", annotationValue, annotationKey, ms.Name) + return int32(0), false + } + return int32(intValue), true +} + +// SetNewMachineSetAnnotations sets new machine set's annotations appropriately by updating its revision and +// copying required deployment annotations to it; it returns true if machine set's annotation is changed. +func SetNewMachineSetAnnotations(deployment *v1alpha1.MachineDeployment, newMS *v1alpha1.MachineSet, newRevision string, exists bool) bool { + // First, copy deployment's annotations (except for apply and revision annotations) + annotationChanged := copyDeploymentAnnotationsToMachineSet(deployment, newMS) + // Then, update machine set's revision annotation + if newMS.Annotations == nil { + newMS.Annotations = make(map[string]string) + } + oldRevision, ok := newMS.Annotations[RevisionAnnotation] + // The newMS's revision should be the greatest among all MSes. Usually, its revision number is newRevision (the max revision number + // of all old MSes + 1). However, it's possible that some of the old MSes are deleted after the newMS revision being updated, and + // newRevision becomes smaller than newMS's revision. We should only update newMS revision when it's smaller than newRevision. + + oldRevisionInt, err := strconv.ParseInt(oldRevision, 10, 64) + if err != nil { + if oldRevision != "" { + glog.Warningf("Updating machine set revision OldRevision not int %s", err) + return false + } + //If the MS annotation is empty then initialise it to 0 + oldRevisionInt = 0 + } + newRevisionInt, err := strconv.ParseInt(newRevision, 10, 64) + if err != nil { + glog.Warningf("Updating machine set revision NewRevision not int %s", err) + return false + } + if oldRevisionInt < newRevisionInt { + newMS.Annotations[RevisionAnnotation] = newRevision + annotationChanged = true + glog.V(4).Infof("Updating machine set %q revision to %s", newMS.Name, newRevision) + } + // If a revision annotation already existed and this machine set was updated with a new revision + // then that means we are rolling back to this machine set. We need to preserve the old revisions + // for historical information. 
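+	// For example (revision values illustrative): rolling back to a machine set whose revision
+	// annotation is "1" while the max old revision is "3" sets its revision annotation to "4"
+	// and appends "1" to its revision-history annotation.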
+	if ok && annotationChanged {
+		revisionHistoryAnnotation := newMS.Annotations[RevisionHistoryAnnotation]
+		oldRevisions := strings.Split(revisionHistoryAnnotation, ",")
+		if len(oldRevisions[0]) == 0 {
+			newMS.Annotations[RevisionHistoryAnnotation] = oldRevision
+		} else {
+			oldRevisions = append(oldRevisions, oldRevision)
+			newMS.Annotations[RevisionHistoryAnnotation] = strings.Join(oldRevisions, ",")
+		}
+	}
+	// If the new machine set is about to be created, we need to add replica annotations to it.
+	if !exists && SetReplicasAnnotations(newMS, *(deployment.Spec.Replicas), *(deployment.Spec.Replicas)+MaxSurge(*deployment)) {
+		annotationChanged = true
+	}
+	return annotationChanged
+}
+
+// FindOneActiveOrLatest returns the only active or the latest machine set in case there is at most one active
+// machine set. If there is more than one active machine set, return nil so machine sets can be scaled down
+// to the point where there is only one active machine set.
+func FindOneActiveOrLatest(newMS *v1alpha1.MachineSet, oldMSs []*v1alpha1.MachineSet) *v1alpha1.MachineSet {
+	if newMS == nil && len(oldMSs) == 0 {
+		return nil
+	}
+
+	sort.Sort(sort.Reverse(MachineSetsByCreationTimestamp(oldMSs)))
+	allMSs := FilterActiveMachineSets(append(oldMSs, newMS))
+
+	switch len(allMSs) {
+	case 0:
+		// If there is no active machine set then we should return the newest.
+		if newMS != nil {
+			return newMS
+		}
+		return oldMSs[0]
+	case 1:
+		return allMSs[0]
+	default:
+		return nil
+	}
+}
+
+// SetReplicasAnnotations sets the desiredReplicas and maxReplicas into the annotations
+func SetReplicasAnnotations(ms *v1alpha1.MachineSet, desiredReplicas, maxReplicas int32) bool {
+	updated := false
+	if ms.Annotations == nil {
+		ms.Annotations = make(map[string]string)
+	}
+	desiredString := fmt.Sprintf("%d", desiredReplicas)
+	if hasString := ms.Annotations[DesiredReplicasAnnotation]; hasString != desiredString {
+		ms.Annotations[DesiredReplicasAnnotation] = desiredString
+		updated = true
+	}
+	maxString := fmt.Sprintf("%d", maxReplicas)
+	if hasString := ms.Annotations[MaxReplicasAnnotation]; hasString != maxString {
+		ms.Annotations[MaxReplicasAnnotation] = maxString
+		updated = true
+	}
+	return updated
+}
+
+// ReplicasAnnotationsNeedUpdate returns true if the replicas annotations need to be updated
+func ReplicasAnnotationsNeedUpdate(ms *v1alpha1.MachineSet, desiredReplicas, maxReplicas int32) bool {
+	if ms.Annotations == nil {
+		return true
+	}
+	desiredString := fmt.Sprintf("%d", desiredReplicas)
+	if hasString := ms.Annotations[DesiredReplicasAnnotation]; hasString != desiredString {
+		return true
+	}
+	maxString := fmt.Sprintf("%d", maxReplicas)
+	if hasString := ms.Annotations[MaxReplicasAnnotation]; hasString != maxString {
+		return true
+	}
+	return false
+}
+
+// MaxUnavailable returns the maximum unavailable machines a rolling deployment can take.
+func MaxUnavailable(deployment v1alpha1.MachineDeployment) int32 {
+	if !IsRollingUpdate(&deployment) || *(deployment.Spec.Replicas) == 0 {
+		return int32(0)
+	}
+	// Error caught by validation
+	_, maxUnavailable, _ := ResolveFenceposts(deployment.Spec.Strategy.RollingUpdate.MaxSurge, deployment.Spec.Strategy.RollingUpdate.MaxUnavailable, *(deployment.Spec.Replicas))
+	if maxUnavailable > *deployment.Spec.Replicas {
+		return *deployment.Spec.Replicas
+	}
+	return maxUnavailable
+}
+
+// MaxSurge returns the maximum surge machines a rolling deployment can take.
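+// For example, a rolling deployment of 10 replicas with maxSurge "25%" may run up to
+// 13 machines at once, since percentages are rounded up when resolving maxSurge.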
+func MaxSurge(deployment v1alpha1.MachineDeployment) int32 {
+	if !IsRollingUpdate(&deployment) {
+		return int32(0)
+	}
+	// Error caught by validation
+	maxSurge, _, _ := ResolveFenceposts(deployment.Spec.Strategy.RollingUpdate.MaxSurge, deployment.Spec.Strategy.RollingUpdate.MaxUnavailable, *(deployment.Spec.Replicas))
+	return maxSurge
+}
+
+// GetProportion will estimate the proportion for the provided machine set using 1. the current size
+// of the parent deployment, 2. the replica count that needs to be added on the machine sets of the
+// deployment, and 3. the total replicas added in the machine sets of the deployment so far.
+func GetProportion(ms *v1alpha1.MachineSet, d v1alpha1.MachineDeployment, deploymentReplicasToAdd, deploymentReplicasAdded int32) int32 {
+	if ms == nil || *(ms.Spec.Replicas) == 0 || deploymentReplicasToAdd == 0 || deploymentReplicasToAdd == deploymentReplicasAdded {
+		return int32(0)
+	}
+
+	msFraction := getMachineSetFraction(*ms, d)
+	allowed := deploymentReplicasToAdd - deploymentReplicasAdded
+
+	if deploymentReplicasToAdd > 0 {
+		// Use the minimum between the machine set fraction and the maximum allowed replicas
+		// when scaling up. This way we ensure we will not scale up more than the allowed
+		// replicas we can add.
+		return integer.Int32Min(msFraction, allowed)
+	}
+	// Use the maximum between the machine set fraction and the maximum allowed replicas
+	// when scaling down. This way we ensure we will not scale down more than the allowed
+	// replicas we can remove.
+	return integer.Int32Max(msFraction, allowed)
+}
+
+// getMachineSetFraction estimates the fraction of replicas a machine set can have in
+// 1. a scaling event during a rollout or 2. when scaling a paused deployment.
+func getMachineSetFraction(ms v1alpha1.MachineSet, d v1alpha1.MachineDeployment) int32 {
+	// If we are scaling down to zero then the fraction of this machine set is its whole size (negative)
+	if *(d.Spec.Replicas) == int32(0) {
+		return -*(ms.Spec.Replicas)
+	}
+
+	deploymentReplicas := *(d.Spec.Replicas) + MaxSurge(d)
+	annotatedReplicas, ok := getMaxReplicasAnnotation(&ms)
+	if !ok {
+		// If we cannot find the annotation then fallback to the current deployment size. Note that this
+		// will not be an accurate proportion estimation in case other machine sets have different values
+		// which means that the deployment was scaled at some point but we at least will stay in limits
+		// due to the min-max comparisons in GetProportion.
+		annotatedReplicas = d.Status.Replicas
+	}
+
+	// We should never proportionally scale up from zero which means ms.spec.replicas and annotatedReplicas
+	// will never be zero here.
+	newMSsize := (float64(*(ms.Spec.Replicas) * deploymentReplicas)) / float64(annotatedReplicas)
+	return integer.RoundToInt32(newMSsize) - *(ms.Spec.Replicas)
+}
+
+// EqualIgnoreHash returns true if two given MachineTemplateSpecs are equal, ignoring the diff in value of Labels[machine-template-hash]
+// We ignore machine-template-hash because:
+// 1. The hash result would be different upon machineTemplateSpec API changes
+//    (e.g. the addition of a new field will cause the hash code to change)
+// 2. The deployment template won't have hash labels
+func EqualIgnoreHash(template1, template2 *v1alpha1.MachineTemplateSpec) bool {
+	t1Copy := template1.DeepCopy()
+	t2Copy := template2.DeepCopy()
+	// Remove hash labels from template.Labels before comparing
+	delete(t1Copy.Labels, DefaultMachineDeploymentUniqueLabelKey)
+	delete(t2Copy.Labels, DefaultMachineDeploymentUniqueLabelKey)
+	return apiequality.Semantic.DeepEqual(t1Copy, t2Copy)
+}
+
+// FindNewMachineSet returns the new MS this given deployment targets (the one with the same machine template).
+func FindNewMachineSet(deployment *v1alpha1.MachineDeployment, msList []*v1alpha1.MachineSet) *v1alpha1.MachineSet {
+	sort.Sort(MachineSetsByCreationTimestamp(msList))
+	for i := range msList {
+		if EqualIgnoreHash(&msList[i].Spec.Template, &deployment.Spec.Template) {
+			// In rare cases, such as after cluster upgrades, a Deployment may end up with
+			// more than one new MachineSet that has the same template,
+			// see https://github.com/kubernetes/kubernetes/issues/40415
+			// We deterministically choose the oldest new MachineSet with matching template hash.
+			return msList[i]
+		}
+	}
+	// new MachineSet does not exist.
+	return nil
+}
+
+// FindOldMachineSets returns the old machine sets targeted by the given Deployment, with the given slice of MSes.
+// Returns two lists of machine sets
+// - the first contains all old machine sets with non-zero replicas
+// - the second contains all old machine sets
+func FindOldMachineSets(deployment *v1alpha1.MachineDeployment, msList []*v1alpha1.MachineSet) ([]*v1alpha1.MachineSet, []*v1alpha1.MachineSet) {
+	var requiredMSs []*v1alpha1.MachineSet
+	var allMSs []*v1alpha1.MachineSet
+	newMS := FindNewMachineSet(deployment, msList)
+	for _, ms := range msList {
+		// Filter out new machine set
+		if newMS != nil && ms.UID == newMS.UID {
+			continue
+		}
+		allMSs = append(allMSs, ms)
+		if *(ms.Spec.Replicas) != 0 {
+			requiredMSs = append(requiredMSs, ms)
+		}
+	}
+	return requiredMSs, allMSs
+}
+
+// GetReplicaCountForMachineSets returns the sum of Replicas of the given machine sets.
+func GetReplicaCountForMachineSets(machineSets []*v1alpha1.MachineSet) int32 {
+	totalReplicas := int32(0)
+	for _, ms := range machineSets {
+		if ms != nil {
+			totalReplicas += *(ms.Spec.Replicas)
+		}
+	}
+	return totalReplicas
+}
+
+// GetActualReplicaCountForMachineSets returns the sum of actual replicas of the given machine sets.
+func GetActualReplicaCountForMachineSets(machineSets []*v1alpha1.MachineSet) int32 {
+	totalActualReplicas := int32(0)
+	for _, ms := range machineSets {
+		if ms != nil {
+			totalActualReplicas += ms.Status.Replicas
+		}
+	}
+	return totalActualReplicas
+}
+
+// GetReadyReplicaCountForMachineSets returns the number of ready machines corresponding to the given machine sets.
+func GetReadyReplicaCountForMachineSets(machineSets []*v1alpha1.MachineSet) int32 {
+	totalReadyReplicas := int32(0)
+	for _, ms := range machineSets {
+		if ms != nil {
+			totalReadyReplicas += ms.Status.ReadyReplicas
+		}
+	}
+	return totalReadyReplicas
+}
+
+// GetAvailableReplicaCountForMachineSets returns the number of available machines corresponding to the given machine sets.
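+// Nil entries in machineSets are skipped.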
+func GetAvailableReplicaCountForMachineSets(machineSets []*v1alpha1.MachineSet) int32 {
+	totalAvailableReplicas := int32(0)
+	for _, ms := range machineSets {
+		if ms != nil {
+			totalAvailableReplicas += ms.Status.AvailableReplicas
+		}
+	}
+	return totalAvailableReplicas
+}
+
+// IsRollingUpdate returns true if the strategy type is a rolling update.
+func IsRollingUpdate(deployment *v1alpha1.MachineDeployment) bool {
+	return deployment.Spec.Strategy.Type == common.RollingUpdateMachineDeploymentStrategyType
+}
+
+// DeploymentComplete considers a deployment to be complete once all of its desired replicas
+// are updated and available, and no old machines are running.
+func DeploymentComplete(deployment *v1alpha1.MachineDeployment, newStatus *v1alpha1.MachineDeploymentStatus) bool {
+	return newStatus.UpdatedReplicas == *(deployment.Spec.Replicas) &&
+		newStatus.Replicas == *(deployment.Spec.Replicas) &&
+		newStatus.AvailableReplicas == *(deployment.Spec.Replicas) &&
+		newStatus.ObservedGeneration >= deployment.Generation
+}
+
+// NewMSNewReplicas calculates the number of replicas a deployment's new MS should have.
+// When one of the following is true, we're rolling out the deployment; otherwise, we're scaling it.
+// 1) The new MS is saturated: newMS's replicas == deployment's replicas
+// 2) Max number of machines allowed is reached: deployment's replicas + maxSurge == all MSs' replicas
+func NewMSNewReplicas(deployment *v1alpha1.MachineDeployment, allMSs []*v1alpha1.MachineSet, newMS *v1alpha1.MachineSet) (int32, error) {
+	// Every strategy type currently shares the rolling-update scale-up logic below.
+	// TODO: return an error for unsupported strategy types once validation enforces them, e.g.
+	// return 0, fmt.Errorf("deployment type %v isn't supported", deployment.Spec.Strategy.Type)
+
+	// Check if we can scale up.
+	maxSurge, err := intstrutil.GetValueFromIntOrPercent(deployment.Spec.Strategy.RollingUpdate.MaxSurge, int(*(deployment.Spec.Replicas)), true)
+	if err != nil {
+		return 0, err
+	}
+	// Find the total number of machines
+	currentMachineCount := GetReplicaCountForMachineSets(allMSs)
+	maxTotalMachines := *(deployment.Spec.Replicas) + int32(maxSurge)
+	if currentMachineCount >= maxTotalMachines {
+		// Cannot scale up.
+		return *(newMS.Spec.Replicas), nil
+	}
+	// Scale up.
+	scaleUpCount := maxTotalMachines - currentMachineCount
+	// Do not exceed the number of desired replicas.
+	scaleUpCount = int32(integer.Int32Min(scaleUpCount, *(deployment.Spec.Replicas)-*(newMS.Spec.Replicas)))
+	return *(newMS.Spec.Replicas) + scaleUpCount, nil
+}
+
+// IsSaturated checks if the new machine set is saturated by comparing its size with its deployment size.
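+// Saturated here means the machine set's spec.replicas, its desired-replicas annotation,
+// and its available replicas all equal the deployment's desired replica count.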
+// Both the deployment and the machine set have to believe this machine set can own all of the desired
+// replicas in the deployment and the annotation helps in achieving that. All machines of the MachineSet
+// need to be available.
+func IsSaturated(deployment *v1alpha1.MachineDeployment, ms *v1alpha1.MachineSet) bool {
+	if ms == nil {
+		return false
+	}
+	desiredString := ms.Annotations[DesiredReplicasAnnotation]
+	desired, err := strconv.Atoi(desiredString)
+	if err != nil {
+		return false
+	}
+	return *(ms.Spec.Replicas) == *(deployment.Spec.Replicas) &&
+		int32(desired) == *(deployment.Spec.Replicas) &&
+		ms.Status.AvailableReplicas == *(deployment.Spec.Replicas)
+}
+
+// ResolveFenceposts resolves both maxSurge and maxUnavailable. This needs to happen in one
+// step. For example:
+//
+// 2 desired, max unavailable 1%, surge 0% - should scale old(-1), then new(+1), then old(-1), then new(+1)
+// 1 desired, max unavailable 1%, surge 0% - should scale old(-1), then new(+1)
+// 2 desired, max unavailable 25%, surge 1% - should scale new(+1), then old(-1), then new(+1), then old(-1)
+// 1 desired, max unavailable 25%, surge 1% - should scale new(+1), then old(-1)
+// 2 desired, max unavailable 0%, surge 1% - should scale new(+1), then old(-1), then new(+1), then old(-1)
+// 1 desired, max unavailable 0%, surge 1% - should scale new(+1), then old(-1)
+func ResolveFenceposts(maxSurge, maxUnavailable *intstrutil.IntOrString, desired int32) (int32, int32, error) {
+	surge, err := intstrutil.GetValueFromIntOrPercent(maxSurge, int(desired), true)
+	if err != nil {
+		return 0, 0, err
+	}
+	unavailable, err := intstrutil.GetValueFromIntOrPercent(maxUnavailable, int(desired), false)
+	if err != nil {
+		return 0, 0, err
+	}
+
+	if surge == 0 && unavailable == 0 {
+		// Validation should never allow the user to explicitly use zero values for both maxSurge and
+		// maxUnavailable. Due to rounding down maxUnavailable though, it may resolve to zero.
+		// If both fenceposts resolve to zero, then we should set maxUnavailable to 1 on the
+		// theory that surge might not work due to quota.
+		unavailable = 1
+	}
+
+	return int32(surge), int32(unavailable), nil
+}
+
+// FilterActiveMachineSets returns machine sets that have (or at least ought to have) machines.
+func FilterActiveMachineSets(machineSets []*v1alpha1.MachineSet) []*v1alpha1.MachineSet {
+	activeFilter := func(ms *v1alpha1.MachineSet) bool {
+		return ms != nil && ms.Spec.Replicas != nil && *(ms.Spec.Replicas) > 0
+	}
+	return FilterMachineSets(machineSets, activeFilter)
+}
+
+type filterMS func(ms *v1alpha1.MachineSet) bool
+
+// FilterMachineSets returns machine sets that are filtered by filterFn (all returned ones should match filterFn).
+func FilterMachineSets(MSes []*v1alpha1.MachineSet, filterFn filterMS) []*v1alpha1.MachineSet {
+	var filtered []*v1alpha1.MachineSet
+	for i := range MSes {
+		if filterFn(MSes[i]) {
+			filtered = append(filtered, MSes[i])
+		}
+	}
+	return filtered
+}
+
+// CloneAndAddLabel clones the given map and returns a new map with the given key and value added.
+// Returns the given map, if labelKey is empty.
+func CloneAndAddLabel(labels map[string]string, labelKey, labelValue string) map[string]string {
+	if labelKey == "" {
+		// Don't need to add a label.
+		return labels
+	}
+	// Clone.
+	newLabels := map[string]string{}
+	for key, value := range labels {
+		newLabels[key] = value
+	}
+	newLabels[labelKey] = labelValue
+	return newLabels
+}
+
+// CloneSelectorAndAddLabel clones the given selector and returns a new selector with the given key and value added.
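+// When a label is added, the returned selector is a deep copy and the input selector is not mutated.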
+// Returns the given selector, if labelKey is empty.
+func CloneSelectorAndAddLabel(selector *metav1.LabelSelector, labelKey, labelValue string) *metav1.LabelSelector {
+	if labelKey == "" {
+		// Don't need to add a label.
+		return selector
+	}
+
+	// Clone.
+	newSelector := new(metav1.LabelSelector)
+
+	// TODO(madhusudancs): Check if you can use deepCopy_extensions_LabelSelector here.
+	newSelector.MatchLabels = make(map[string]string)
+	if selector.MatchLabels != nil {
+		for key, val := range selector.MatchLabels {
+			newSelector.MatchLabels[key] = val
+		}
+	}
+	newSelector.MatchLabels[labelKey] = labelValue
+
+	if selector.MatchExpressions != nil {
+		newMExps := make([]metav1.LabelSelectorRequirement, len(selector.MatchExpressions))
+		for i, me := range selector.MatchExpressions {
+			newMExps[i].Key = me.Key
+			newMExps[i].Operator = me.Operator
+			if me.Values != nil {
+				newMExps[i].Values = make([]string, len(me.Values))
+				copy(newMExps[i].Values, me.Values)
+			} else {
+				newMExps[i].Values = nil
+			}
+		}
+		newSelector.MatchExpressions = newMExps
+	} else {
+		newSelector.MatchExpressions = nil
+	}
+
+	return newSelector
+}
+
+// DeepHashObject writes specified object to hash using the spew library
+// which follows pointers and prints actual values of the nested objects
+// ensuring the hash does not change when a pointer changes.
+func DeepHashObject(hasher hash.Hash, objectToWrite interface{}) {
+	hasher.Reset()
+	printer := spew.ConfigState{
+		Indent:         " ",
+		SortKeys:       true,
+		DisableMethods: true,
+		SpewKeys:       true,
+	}
+	printer.Fprintf(hasher, "%#v", objectToWrite)
+}
+
+// ComputeHash returns the FNV-1a hash of the given machine template spec,
+// computed over a deep dump of its actual values via DeepHashObject.
+func ComputeHash(template *v1alpha1.MachineTemplateSpec) uint32 {
+	machineTemplateSpecHasher := fnv.New32a()
+	DeepHashObject(machineTemplateSpecHasher, *template)
+
+	return machineTemplateSpecHasher.Sum32()
+}
diff --git a/pkg/controller/machinedeployment/util/util_test.go b/pkg/controller/machinedeployment/util/util_test.go
new file mode 100644
index 000000000000..d68d76478a99
--- /dev/null
+++ b/pkg/controller/machinedeployment/util/util_test.go
@@ -0,0 +1,841 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package util + +import ( + "fmt" + "math/rand" + "reflect" + "sort" + "strconv" + "testing" + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/apiserver/pkg/storage/names" + core "k8s.io/client-go/testing" + "sigs.k8s.io/cluster-api/pkg/apis/cluster/common" + "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1" + "sigs.k8s.io/cluster-api/pkg/client/clientset_generated/clientset/fake" +) + +func addListMSReactor(fakeClient *fake.Clientset, obj runtime.Object) *fake.Clientset { + fakeClient.AddReactor("list", "machinesets", func(action core.Action) (handled bool, ret runtime.Object, err error) { + return true, obj, nil + }) + return fakeClient +} + +func addListMachinesReactor(fakeClient *fake.Clientset, obj runtime.Object) *fake.Clientset { + fakeClient.AddReactor("list", "machines", func(action core.Action) (handled bool, ret runtime.Object, err error) { + return true, obj, nil + }) + return fakeClient +} + +func addGetMSReactor(fakeClient *fake.Clientset, obj runtime.Object) *fake.Clientset { + msList, ok := obj.(*v1alpha1.MachineSetList) + fakeClient.AddReactor("get", "machinesets", func(action core.Action) (handled bool, ret runtime.Object, err error) { + name := action.(core.GetAction).GetName() + if ok { + for _, ms := range msList.Items { + if ms.Name == name { + return true, &ms, nil + } + } + } + return false, nil, fmt.Errorf("could not find the requested machine set: %s", name) + + }) + return fakeClient +} + +func addUpdateMSReactor(fakeClient *fake.Clientset) *fake.Clientset { + fakeClient.AddReactor("update", "machinesets", func(action core.Action) (handled bool, ret runtime.Object, err error) { + obj := action.(core.UpdateAction).GetObject().(*v1alpha1.MachineSet) + return true, obj, nil + }) + return fakeClient +} + +func addUpdateMachinesReactor(fakeClient *fake.Clientset) *fake.Clientset { + fakeClient.AddReactor("update", "machines", func(action core.Action) (handled bool, ret runtime.Object, err error) { + obj := action.(core.UpdateAction).GetObject().(*v1alpha1.Machine) + return true, obj, nil + }) + return fakeClient +} + +func generateMSWithLabel(labels map[string]string, image string) v1alpha1.MachineSet { + return v1alpha1.MachineSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: names.SimpleNameGenerator.GenerateName("machineset"), + Labels: labels, + }, + Spec: v1alpha1.MachineSetSpec{ + Replicas: func(i int32) *int32 { return &i }(1), + Selector: metav1.LabelSelector{MatchLabels: labels}, + Template: v1alpha1.MachineTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: labels, + }, + Spec: v1alpha1.MachineSpec{}, + }, + }, + } +} + +func newDControllerRef(d *v1alpha1.MachineDeployment) *metav1.OwnerReference { + isController := true + return &metav1.OwnerReference{ + APIVersion: "clusters/v1alpha", + Kind: "MachineDeployment", + Name: d.GetName(), + UID: d.GetUID(), + Controller: &isController, + } +} + +// generateMS creates a machine set, with the input deployment's template as its template +func generateMS(deployment v1alpha1.MachineDeployment) v1alpha1.MachineSet { + template := deployment.Spec.Template.DeepCopy() + return v1alpha1.MachineSet{ + ObjectMeta: metav1.ObjectMeta{ + UID: randomUID(), + Name: names.SimpleNameGenerator.GenerateName("machineset"), + Labels: template.Labels, + OwnerReferences: []metav1.OwnerReference{*newDControllerRef(&deployment)}, + }, + Spec: v1alpha1.MachineSetSpec{ + Replicas: new(int32), + Template: 
*template, + Selector: metav1.LabelSelector{MatchLabels: template.Labels}, + }, + } +} + +func randomUID() types.UID { + return types.UID(strconv.FormatInt(rand.Int63(), 10)) +} + +// generateDeployment creates a deployment, with the input image as its template +func generateDeployment(image string) v1alpha1.MachineDeployment { + machineLabels := map[string]string{"name": image} + return v1alpha1.MachineDeployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: image, + Annotations: make(map[string]string), + }, + Spec: v1alpha1.MachineDeploymentSpec{ + Replicas: func(i int32) *int32 { return &i }(1), + Selector: metav1.LabelSelector{MatchLabels: machineLabels}, + Template: v1alpha1.MachineTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: machineLabels, + }, + Spec: v1alpha1.MachineSpec{}, + }, + }, + } +} + +func generateMachineTemplateSpec(name, nodeName string, annotations, labels map[string]string) v1alpha1.MachineTemplateSpec { + return v1alpha1.MachineTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Annotations: annotations, + Labels: labels, + }, + Spec: v1alpha1.MachineSpec{}, + } +} + +func TestEqualIgnoreHash(t *testing.T) { + tests := []struct { + Name string + former, latter v1alpha1.MachineTemplateSpec + expected bool + }{ + { + "Same spec, same labels", + generateMachineTemplateSpec("foo", "foo-node", map[string]string{}, map[string]string{DefaultMachineDeploymentUniqueLabelKey: "value-1", "something": "else"}), + generateMachineTemplateSpec("foo", "foo-node", map[string]string{}, map[string]string{DefaultMachineDeploymentUniqueLabelKey: "value-1", "something": "else"}), + true, + }, + { + "Same spec, only machine-template-hash label value is different", + generateMachineTemplateSpec("foo", "foo-node", map[string]string{}, map[string]string{DefaultMachineDeploymentUniqueLabelKey: "value-1", "something": "else"}), + generateMachineTemplateSpec("foo", "foo-node", map[string]string{}, map[string]string{DefaultMachineDeploymentUniqueLabelKey: "value-2", "something": "else"}), + true, + }, + { + "Same spec, the former doesn't have machine-template-hash label", + generateMachineTemplateSpec("foo", "foo-node", map[string]string{}, map[string]string{"something": "else"}), + generateMachineTemplateSpec("foo", "foo-node", map[string]string{}, map[string]string{DefaultMachineDeploymentUniqueLabelKey: "value-2", "something": "else"}), + true, + }, + { + "Same spec, the label is different, the former doesn't have machine-template-hash label, same number of labels", + generateMachineTemplateSpec("foo", "foo-node", map[string]string{}, map[string]string{"something": "else"}), + generateMachineTemplateSpec("foo", "foo-node", map[string]string{}, map[string]string{DefaultMachineDeploymentUniqueLabelKey: "value-2"}), + false, + }, + { + "Same spec, the label is different, the latter doesn't have machine-template-hash label, same number of labels", + generateMachineTemplateSpec("foo", "foo-node", map[string]string{}, map[string]string{DefaultMachineDeploymentUniqueLabelKey: "value-1"}), + generateMachineTemplateSpec("foo", "foo-node", map[string]string{}, map[string]string{"something": "else"}), + false, + }, + { + "Same spec, the label is different, and the machine-template-hash label value is the same", + generateMachineTemplateSpec("foo", "foo-node", map[string]string{}, map[string]string{DefaultMachineDeploymentUniqueLabelKey: "value-1"}), + generateMachineTemplateSpec("foo", "foo-node", map[string]string{}, map[string]string{DefaultMachineDeploymentUniqueLabelKey: "value-1", 
"something": "else"}), + false, + }, + { + "Different spec, same labels", + generateMachineTemplateSpec("foo", "foo-node", map[string]string{"former": "value"}, map[string]string{DefaultMachineDeploymentUniqueLabelKey: "value-1", "something": "else"}), + generateMachineTemplateSpec("foo", "foo-node", map[string]string{"latter": "value"}, map[string]string{DefaultMachineDeploymentUniqueLabelKey: "value-1", "something": "else"}), + false, + }, + { + "Different spec, different machine-template-hash label value", + generateMachineTemplateSpec("foo-1", "foo-node", map[string]string{}, map[string]string{DefaultMachineDeploymentUniqueLabelKey: "value-1", "something": "else"}), + generateMachineTemplateSpec("foo-2", "foo-node", map[string]string{}, map[string]string{DefaultMachineDeploymentUniqueLabelKey: "value-2", "something": "else"}), + false, + }, + { + "Different spec, the former doesn't have machine-template-hash label", + generateMachineTemplateSpec("foo-1", "foo-node-1", map[string]string{}, map[string]string{"something": "else"}), + generateMachineTemplateSpec("foo-2", "foo-node-2", map[string]string{}, map[string]string{DefaultMachineDeploymentUniqueLabelKey: "value-2", "something": "else"}), + false, + }, + { + "Different spec, different labels", + generateMachineTemplateSpec("foo", "foo-node-1", map[string]string{}, map[string]string{"something": "else"}), + generateMachineTemplateSpec("foo", "foo-node-2", map[string]string{}, map[string]string{"nothing": "else"}), + false, + }, + } + + for _, test := range tests { + t.Run(test.Name, func(t *testing.T) { + runTest := func(t1, t2 *v1alpha1.MachineTemplateSpec, reversed bool) { + reverseString := "" + if reversed { + reverseString = " (reverse order)" + } + // Run + equal := EqualIgnoreHash(t1, t2) + if equal != test.expected { + t.Errorf("%q%s: expected %v", test.Name, reverseString, test.expected) + return + } + if t1.Labels == nil || t2.Labels == nil { + t.Errorf("%q%s: unexpected labels becomes nil", test.Name, reverseString) + } + } + + runTest(&test.former, &test.latter, false) + // Test the same case in reverse order + runTest(&test.latter, &test.former, true) + }) + } +} + +func TestFindNewMachineSet(t *testing.T) { + now := metav1.Now() + later := metav1.Time{Time: now.Add(time.Minute)} + + deployment := generateDeployment("nginx") + newMS := generateMS(deployment) + newMS.Labels[DefaultMachineDeploymentUniqueLabelKey] = "hash" + newMS.CreationTimestamp = later + + newMSDup := generateMS(deployment) + newMSDup.Labels[DefaultMachineDeploymentUniqueLabelKey] = "different-hash" + newMSDup.CreationTimestamp = now + + oldDeployment := generateDeployment("nginx") + oldDeployment.Spec.Template.Spec.Name = "nginx-old-1" + oldMS := generateMS(oldDeployment) + oldMS.Status.FullyLabeledReplicas = *(oldMS.Spec.Replicas) + + tests := []struct { + Name string + deployment v1alpha1.MachineDeployment + msList []*v1alpha1.MachineSet + expected *v1alpha1.MachineSet + }{ + { + Name: "Get new MachineSet with the same template as Deployment spec but different machine-template-hash value", + deployment: deployment, + msList: []*v1alpha1.MachineSet{&newMS, &oldMS}, + expected: &newMS, + }, + { + Name: "Get the oldest new MachineSet when there are more than one MachineSet with the same template", + deployment: deployment, + msList: []*v1alpha1.MachineSet{&newMS, &oldMS, &newMSDup}, + expected: &newMSDup, + }, + { + Name: "Get nil new MachineSet", + deployment: deployment, + msList: []*v1alpha1.MachineSet{&oldMS}, + expected: nil, + }, + } + + for _, 
test := range tests { + t.Run(test.Name, func(t *testing.T) { + if ms := FindNewMachineSet(&test.deployment, test.msList); !reflect.DeepEqual(ms, test.expected) { + t.Errorf("In test case %q, expected %#v, got %#v", test.Name, test.expected, ms) + } + }) + } +} + +func TestFindOldMachineSets(t *testing.T) { + now := metav1.Now() + later := metav1.Time{Time: now.Add(time.Minute)} + before := metav1.Time{Time: now.Add(-time.Minute)} + + deployment := generateDeployment("nginx") + newMS := generateMS(deployment) + *(newMS.Spec.Replicas) = 1 + newMS.Labels[DefaultMachineDeploymentUniqueLabelKey] = "hash" + newMS.CreationTimestamp = later + + newMSDup := generateMS(deployment) + newMSDup.Labels[DefaultMachineDeploymentUniqueLabelKey] = "different-hash" + newMSDup.CreationTimestamp = now + + oldDeployment := generateDeployment("nginx") + oldDeployment.Spec.Template.Spec.Name = "nginx-old-1" + oldMS := generateMS(oldDeployment) + oldMS.Status.FullyLabeledReplicas = *(oldMS.Spec.Replicas) + oldMS.CreationTimestamp = before + + tests := []struct { + Name string + deployment v1alpha1.MachineDeployment + msList []*v1alpha1.MachineSet + machineList *v1alpha1.MachineList + expected []*v1alpha1.MachineSet + expectedRequire []*v1alpha1.MachineSet + }{ + { + Name: "Get old MachineSets", + deployment: deployment, + msList: []*v1alpha1.MachineSet{&newMS, &oldMS}, + expected: []*v1alpha1.MachineSet{&oldMS}, + expectedRequire: nil, + }, + { + Name: "Get old MachineSets with no new MachineSet", + deployment: deployment, + msList: []*v1alpha1.MachineSet{&oldMS}, + expected: []*v1alpha1.MachineSet{&oldMS}, + expectedRequire: nil, + }, + { + Name: "Get old MachineSets with two new MachineSets, only the oldest new MachineSet is seen as new MachineSet", + deployment: deployment, + msList: []*v1alpha1.MachineSet{&oldMS, &newMS, &newMSDup}, + expected: []*v1alpha1.MachineSet{&oldMS, &newMS}, + expectedRequire: []*v1alpha1.MachineSet{&newMS}, + }, + { + Name: "Get empty old MachineSets", + deployment: deployment, + msList: []*v1alpha1.MachineSet{&newMS}, + expected: nil, + expectedRequire: nil, + }, + } + + for _, test := range tests { + t.Run(test.Name, func(t *testing.T) { + requireMS, allMS := FindOldMachineSets(&test.deployment, test.msList) + sort.Sort(MachineSetsByCreationTimestamp(allMS)) + sort.Sort(MachineSetsByCreationTimestamp(test.expected)) + if !reflect.DeepEqual(allMS, test.expected) { + t.Errorf("In test case %q, expected %#v, got %#v", test.Name, test.expected, allMS) + } + // MSs are getting filtered correctly by ms.spec.replicas + if !reflect.DeepEqual(requireMS, test.expectedRequire) { + t.Errorf("In test case %q, expected %#v, got %#v", test.Name, test.expectedRequire, requireMS) + } + }) + } +} + +// equal compares the equality of two MachineSet slices regardless of their ordering +func equal(mss1, mss2 []*v1alpha1.MachineSet) bool { + if reflect.DeepEqual(mss1, mss2) { + return true + } + if mss1 == nil || mss2 == nil || len(mss1) != len(mss2) { + return false + } + count := 0 + for _, ms1 := range mss1 { + for _, ms2 := range mss2 { + if reflect.DeepEqual(ms1, ms2) { + count++ + break + } + } + } + return count == len(mss1) +} + +func TestGetReplicaCountForMachineSets(t *testing.T) { + ms1 := generateMS(generateDeployment("foo")) + *(ms1.Spec.Replicas) = 1 + ms1.Status.Replicas = 2 + ms2 := generateMS(generateDeployment("bar")) + *(ms2.Spec.Replicas) = 2 + ms2.Status.Replicas = 3 + + tests := []struct { + Name string + sets []*v1alpha1.MachineSet + expectedCount int32 + expectedActual int32 + 
}{ + { + "1:2 Replicas", + []*v1alpha1.MachineSet{&ms1}, + 1, + 2, + }, + { + "3:5 Replicas", + []*v1alpha1.MachineSet{&ms1, &ms2}, + 3, + 5, + }, + } + + for _, test := range tests { + t.Run(test.Name, func(t *testing.T) { + ms := GetReplicaCountForMachineSets(test.sets) + if ms != test.expectedCount { + t.Errorf("In test case %s, expectedCount %+v, got %+v", test.Name, test.expectedCount, ms) + } + ms = GetActualReplicaCountForMachineSets(test.sets) + if ms != test.expectedActual { + t.Errorf("In test case %s, expectedActual %+v, got %+v", test.Name, test.expectedActual, ms) + } + }) + } +} + +func TestResolveFenceposts(t *testing.T) { + tests := []struct { + maxSurge string + maxUnavailable string + desired int32 + expectSurge int32 + expectUnavailable int32 + expectError bool + }{ + { + maxSurge: "0%", + maxUnavailable: "0%", + desired: 0, + expectSurge: 0, + expectUnavailable: 1, + expectError: false, + }, + { + maxSurge: "39%", + maxUnavailable: "39%", + desired: 10, + expectSurge: 4, + expectUnavailable: 3, + expectError: false, + }, + { + maxSurge: "oops", + maxUnavailable: "39%", + desired: 10, + expectSurge: 0, + expectUnavailable: 0, + expectError: true, + }, + { + maxSurge: "55%", + maxUnavailable: "urg", + desired: 10, + expectSurge: 0, + expectUnavailable: 0, + expectError: true, + }, + } + + for num, test := range tests { + t.Run("maxSurge="+test.maxSurge, func(t *testing.T) { + maxSurge := intstr.FromString(test.maxSurge) + maxUnavail := intstr.FromString(test.maxUnavailable) + surge, unavail, err := ResolveFenceposts(&maxSurge, &maxUnavail, test.desired) + if err != nil && !test.expectError { + t.Errorf("unexpected error %v", err) + } + if err == nil && test.expectError { + t.Error("expected error") + } + if surge != test.expectSurge || unavail != test.expectUnavailable { + t.Errorf("#%v got %v:%v, want %v:%v", num, surge, unavail, test.expectSurge, test.expectUnavailable) + } + }) + } +} + +func TestNewMSNewReplicas(t *testing.T) { + tests := []struct { + Name string + strategyType common.MachineDeploymentStrategyType + depReplicas int32 + newMSReplicas int32 + maxSurge int + expected int32 + }{ + { + "can not scale up - to newMSReplicas", + common.RollingUpdateMachineDeploymentStrategyType, + 1, 5, 1, 5, + }, + { + "scale up - to depReplicas", + common.RollingUpdateMachineDeploymentStrategyType, + 6, 2, 10, 6, + }, + } + newDeployment := generateDeployment("nginx") + newRC := generateMS(newDeployment) + rs5 := generateMS(newDeployment) + *(rs5.Spec.Replicas) = 5 + + for _, test := range tests { + t.Run(test.Name, func(t *testing.T) { + *(newDeployment.Spec.Replicas) = test.depReplicas + newDeployment.Spec.Strategy = v1alpha1.MachineDeploymentStrategy{Type: test.strategyType} + newDeployment.Spec.Strategy.RollingUpdate = &v1alpha1.MachineRollingUpdateDeployment{ + MaxUnavailable: func(i int) *intstr.IntOrString { + x := intstr.FromInt(i) + return &x + }(1), + MaxSurge: func(i int) *intstr.IntOrString { + x := intstr.FromInt(i) + return &x + }(test.maxSurge), + } + *(newRC.Spec.Replicas) = test.newMSReplicas + ms, err := NewMSNewReplicas(&newDeployment, []*v1alpha1.MachineSet{&rs5}, &newRC) + if err != nil { + t.Errorf("In test case %s, got unexpected error %v", test.Name, err) + } + if ms != test.expected { + t.Errorf("In test case %s, expected %+v, got %+v", test.Name, test.expected, ms) + } + }) + } +} + +func TestDeploymentComplete(t *testing.T) { + deployment := func(desired, current, updated, available, maxUnavailable, maxSurge int32) *v1alpha1.MachineDeployment { + 
return &v1alpha1.MachineDeployment{ + Spec: v1alpha1.MachineDeploymentSpec{ + Replicas: &desired, + Strategy: v1alpha1.MachineDeploymentStrategy{ + RollingUpdate: &v1alpha1.MachineRollingUpdateDeployment{ + MaxUnavailable: func(i int) *intstr.IntOrString { x := intstr.FromInt(i); return &x }(int(maxUnavailable)), + MaxSurge: func(i int) *intstr.IntOrString { x := intstr.FromInt(i); return &x }(int(maxSurge)), + }, + Type: common.RollingUpdateMachineDeploymentStrategyType, + }, + }, + Status: v1alpha1.MachineDeploymentStatus{ + Replicas: current, + UpdatedReplicas: updated, + AvailableReplicas: available, + }, + } + } + + tests := []struct { + name string + + d *v1alpha1.MachineDeployment + + expected bool + }{ + { + name: "not complete: min but not all machines become available", + + d: deployment(5, 5, 5, 4, 1, 0), + expected: false, + }, + { + name: "not complete: min availability is not honored", + + d: deployment(5, 5, 5, 3, 1, 0), + expected: false, + }, + { + name: "complete", + + d: deployment(5, 5, 5, 5, 0, 0), + expected: true, + }, + { + name: "not complete: all machines are available but not updated", + + d: deployment(5, 5, 4, 5, 0, 0), + expected: false, + }, + { + name: "not complete: still running old machines", + + // old machine set: spec.replicas=1, status.replicas=1, status.availableReplicas=1 + // new machine set: spec.replicas=1, status.replicas=1, status.availableReplicas=0 + d: deployment(1, 2, 1, 1, 0, 1), + expected: false, + }, + { + name: "not complete: one replica deployment never comes up", + + d: deployment(1, 1, 1, 0, 1, 1), + expected: false, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + if got, exp := DeploymentComplete(test.d, &test.d.Status), test.expected; got != exp { + t.Errorf("expected complete: %t, got: %t", exp, got) + } + }) + } +} + +func TestMaxUnavailable(t *testing.T) { + deployment := func(replicas int32, maxUnavailable intstr.IntOrString) v1alpha1.MachineDeployment { + return v1alpha1.MachineDeployment{ + Spec: v1alpha1.MachineDeploymentSpec{ + Replicas: func(i int32) *int32 { return &i }(replicas), + Strategy: v1alpha1.MachineDeploymentStrategy{ + RollingUpdate: &v1alpha1.MachineRollingUpdateDeployment{ + MaxSurge: func(i int) *intstr.IntOrString { x := intstr.FromInt(i); return &x }(int(1)), + MaxUnavailable: &maxUnavailable, + }, + Type: common.RollingUpdateMachineDeploymentStrategyType, + }, + }, + } + } + tests := []struct { + name string + deployment v1alpha1.MachineDeployment + expected int32 + }{ + { + name: "maxUnavailable less than replicas", + deployment: deployment(10, intstr.FromInt(5)), + expected: int32(5), + }, + { + name: "maxUnavailable equal replicas", + deployment: deployment(10, intstr.FromInt(10)), + expected: int32(10), + }, + { + name: "maxUnavailable greater than replicas", + deployment: deployment(5, intstr.FromInt(10)), + expected: int32(5), + }, + { + name: "maxUnavailable with replicas is 0", + deployment: deployment(0, intstr.FromInt(10)), + expected: int32(0), + }, + { + name: "maxUnavailable less than replicas with percents", + deployment: deployment(10, intstr.FromString("50%")), + expected: int32(5), + }, + { + name: "maxUnavailable equal replicas with percents", + deployment: deployment(10, intstr.FromString("100%")), + expected: int32(10), + }, + { + name: "maxUnavailable greater than replicas with percents", + deployment: deployment(5, intstr.FromString("100%")), + expected: int32(5), + }, + } + + for _, test := range tests { + t.Log(test.name) + t.Run(test.name, 
func(t *testing.T) {
+			maxUnavailable := MaxUnavailable(test.deployment)
+			if test.expected != maxUnavailable {
+				t.Fatalf("expected:%v, got:%v", test.expected, maxUnavailable)
+			}
+		})
+	}
+}
+
+// Set of simple tests for annotation-related util functions
+func TestAnnotationUtils(t *testing.T) {
+
+	// Setup
+	tDeployment := generateDeployment("nginx")
+	tMS := generateMS(tDeployment)
+	tDeployment.Annotations[RevisionAnnotation] = "1"
+
+	// Test Case 1: check that annotations are copied properly from deployment to MS
+	t.Run("SetNewMachineSetAnnotations", func(t *testing.T) {
+		// Try to increment the revision from 1 through 20
+		for i := 0; i < 20; i++ {
+
+			nextRevision := fmt.Sprintf("%d", i+1)
+			SetNewMachineSetAnnotations(&tDeployment, &tMS, nextRevision, true)
+			// Now the MachineSet's revision annotation should be i+1
+
+			if tMS.Annotations[RevisionAnnotation] != nextRevision {
+				t.Errorf("Revision Expected=%s Obtained=%s", nextRevision, tMS.Annotations[RevisionAnnotation])
+			}
+		}
+	})
+
+	// Test Case 2: check that annotations are set properly
+	t.Run("SetReplicasAnnotations", func(t *testing.T) {
+		updated := SetReplicasAnnotations(&tMS, 10, 11)
+		if !updated {
+			t.Errorf("SetReplicasAnnotations() failed")
+		}
+		value, ok := tMS.Annotations[DesiredReplicasAnnotation]
+		if !ok {
+			t.Errorf("SetReplicasAnnotations did not set DesiredReplicasAnnotation")
+		}
+		if value != "10" {
+			t.Errorf("SetReplicasAnnotations did not set DesiredReplicasAnnotation correctly value=%s", value)
+		}
+		if value, ok = tMS.Annotations[MaxReplicasAnnotation]; !ok {
+			t.Errorf("SetReplicasAnnotations did not set MaxReplicasAnnotation")
+		}
+		if value != "11" {
+			t.Errorf("SetReplicasAnnotations did not set MaxReplicasAnnotation correctly value=%s", value)
+		}
+	})
+
+	// Test Case 3: check that annotations reflect the deployment's state
+	tMS.Annotations[DesiredReplicasAnnotation] = "1"
+	tMS.Status.AvailableReplicas = 1
+	tMS.Spec.Replicas = new(int32)
+	*tMS.Spec.Replicas = 1
+
+	t.Run("IsSaturated", func(t *testing.T) {
+		saturated := IsSaturated(&tDeployment, &tMS)
+		if !saturated {
+			t.Errorf("IsSaturated Expected=true Obtained=false")
+		}
+	})
+	// Tear Down
+}
+
+func TestReplicasAnnotationsNeedUpdate(t *testing.T) {
+
+	desiredReplicas := fmt.Sprintf("%d", int32(10))
+	maxReplicas := fmt.Sprintf("%d", int32(20))
+
+	tests := []struct {
+		name       string
+		machineSet *v1alpha1.MachineSet
+		expected   bool
+	}{
+		{
+			name: "test Annotations nil",
+			machineSet: &v1alpha1.MachineSet{
+				ObjectMeta: metav1.ObjectMeta{Name: "hello", Namespace: "test"},
+				Spec: v1alpha1.MachineSetSpec{
+					Selector: metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}},
+				},
+			},
+			expected: true,
+		},
+		{
+			name: "test desiredReplicas update",
+			machineSet: &v1alpha1.MachineSet{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:        "hello",
+					Namespace:   "test",
+					Annotations: map[string]string{DesiredReplicasAnnotation: "8", MaxReplicasAnnotation: maxReplicas},
+				},
+				Spec: v1alpha1.MachineSetSpec{
+					Selector: metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}},
+				},
+			},
+			expected: true,
+		},
+		{
+			name: "test maxReplicas update",
+			machineSet: &v1alpha1.MachineSet{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:        "hello",
+					Namespace:   "test",
+					Annotations: map[string]string{DesiredReplicasAnnotation: desiredReplicas, MaxReplicasAnnotation: "16"},
+				},
+				Spec: v1alpha1.MachineSetSpec{
+					Selector: metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}},
+				},
+			},
+			expected: true,
+		},
+		{
+			name: "test needn't update",
needn't update", + machineSet: &v1alpha1.MachineSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "hello", + Namespace: "test", + Annotations: map[string]string{DesiredReplicasAnnotation: desiredReplicas, MaxReplicasAnnotation: maxReplicas}, + }, + Spec: v1alpha1.MachineSetSpec{ + Selector: metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}, + }, + }, + expected: false, + }, + } + + for i, test := range tests { + t.Run(test.name, func(t *testing.T) { + result := ReplicasAnnotationsNeedUpdate(test.machineSet, 10, 20) + if result != test.expected { + t.Errorf("case[%d]:%s Expected %v, Got: %v", i, test.name, test.expected, result) + } + }) + } +} diff --git a/sample/machinedeployment.yaml b/sample/machinedeployment.yaml index 9a688d46cbd4..31f02141f9b4 100644 --- a/sample/machinedeployment.yaml +++ b/sample/machinedeployment.yaml @@ -1,5 +1,29 @@ -apiVersion: cluster.k8s.io/v1alpha1 +apiVersion: "cluster.k8s.io/v1alpha1" kind: MachineDeployment metadata: - name: machinedeployment-example + name: sample-machinedeployment spec: + replicas: 3 + selector: + matchLabels: + foo: bar + template: + metadata: + labels: + foo: bar + spec: + providerConfig: + value: + apiVersion: "gceproviderconfig/v1alpha1" + kind: "GCEProviderConfig" + project: "${GCLOUD_PROJECT}" + zone: "us-central1-c" + machineType: "n1-standard-2" + os: "ubuntu-1604-lts" + versions: + kubelet: 1.9.4 + containerRuntime: + name: docker + version: 1.12.0 + roles: + - Node