From 89f1680109cc0220af681175987c7f42fd828ac8 Mon Sep 17 00:00:00 2001 From: Enxebre Date: Thu, 16 Sep 2021 15:29:43 +0200 Subject: [PATCH] Move mdutil into an internal package Creates controllers/internal/machinedeployment Deprecate all exported funcs and consts in controllers/mdutil Fixes https://github.com/kubernetes-sigs/cluster-api/issues/5244 --- api/v1alpha4/machinedeployment_types.go | 4 + .../client/alpha/machinedeployment.go | 19 +- .../client/alpha/rollout_rollbacker.go | 3 +- controllers/internal/mdutil/doc.go | 19 + controllers/internal/mdutil/util.go | 708 +++++++++++++++ controllers/internal/mdutil/util_test.go | 828 ++++++++++++++++++ controllers/machinedeployment_rolling.go | 2 +- controllers/machinedeployment_rolling_test.go | 2 +- .../machinedeployment_rollout_ondelete.go | 2 +- controllers/machinedeployment_sync.go | 6 +- controllers/machinedeployment_sync_test.go | 2 +- controllers/mdutil/util.go | 36 + controllers/topology/internal/scope/state.go | 2 +- 13 files changed, 1621 insertions(+), 12 deletions(-) create mode 100644 controllers/internal/mdutil/doc.go create mode 100644 controllers/internal/mdutil/util.go create mode 100644 controllers/internal/mdutil/util_test.go diff --git a/api/v1alpha4/machinedeployment_types.go b/api/v1alpha4/machinedeployment_types.go index 3389aa0b26eb..4d99917a74e2 100644 --- a/api/v1alpha4/machinedeployment_types.go +++ b/api/v1alpha4/machinedeployment_types.go @@ -53,6 +53,10 @@ const ( // is machinedeployment.spec.replicas + maxSurge. Used by the underlying machine sets to estimate their // proportions in case the deployment has surge replicas. MaxReplicasAnnotation = "machinedeployment.clusters.x-k8s.io/max-replicas" + + // MachineDeploymentUniqueLabel is the label applied to Machines + // in a MachineDeployment containing the hash of the template. + MachineDeploymentUniqueLabel = "machine-template-hash" ) // ANCHOR: MachineDeploymentSpec diff --git a/cmd/clusterctl/client/alpha/machinedeployment.go b/cmd/clusterctl/client/alpha/machinedeployment.go index 5fadcce50231..f64b8f80c107 100644 --- a/cmd/clusterctl/client/alpha/machinedeployment.go +++ b/cmd/clusterctl/client/alpha/machinedeployment.go @@ -17,13 +17,16 @@ limitations under the License. 
package alpha import ( + "strconv" + "github.com/pkg/errors" + "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha4" "sigs.k8s.io/cluster-api/cmd/clusterctl/client/cluster" logf "sigs.k8s.io/cluster-api/cmd/clusterctl/log" - "sigs.k8s.io/cluster-api/controllers/mdutil" "sigs.k8s.io/controller-runtime/pkg/client" ) @@ -75,7 +78,7 @@ func findMachineDeploymentRevision(toRevision int64, allMSs []*clusterv1.Machine previousRevision = int64(-1) ) for _, ms := range allMSs { - if v, err := mdutil.Revision(ms); err == nil { + if v, err := revision(ms); err == nil { if toRevision == 0 { if latestRevision < v { // newest one we've seen so far @@ -147,3 +150,15 @@ func getMachineSetsForDeployment(proxy cluster.Proxy, d *clusterv1.MachineDeploy return filtered, nil } + +func revision(obj runtime.Object) (int64, error) { + acc, err := meta.Accessor(obj) + if err != nil { + return 0, err + } + v, ok := acc.GetAnnotations()[clusterv1.RevisionAnnotation] + if !ok { + return 0, nil + } + return strconv.ParseInt(v, 10, 64) +} diff --git a/cmd/clusterctl/client/alpha/rollout_rollbacker.go b/cmd/clusterctl/client/alpha/rollout_rollbacker.go index 0f0b8a56f725..43b4958e2eba 100644 --- a/cmd/clusterctl/client/alpha/rollout_rollbacker.go +++ b/cmd/clusterctl/client/alpha/rollout_rollbacker.go @@ -22,7 +22,6 @@ import ( clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha4" "sigs.k8s.io/cluster-api/cmd/clusterctl/client/cluster" logf "sigs.k8s.io/cluster-api/cmd/clusterctl/log" - "sigs.k8s.io/cluster-api/controllers/mdutil" "sigs.k8s.io/cluster-api/util/patch" ) @@ -73,7 +72,7 @@ func rollbackMachineDeployment(proxy cluster.Proxy, d *clusterv1.MachineDeployme } // Copy template into the machinedeployment (excluding the hash) revMSTemplate := *msForRevision.Spec.Template.DeepCopy() - delete(revMSTemplate.Labels, mdutil.DefaultMachineDeploymentUniqueLabelKey) + delete(revMSTemplate.Labels, clusterv1.MachineDeploymentUniqueLabel) d.Spec.Template = revMSTemplate return patchHelper.Patch(ctx, d) diff --git a/controllers/internal/mdutil/doc.go b/controllers/internal/mdutil/doc.go new file mode 100644 index 000000000000..b7f3f3282870 --- /dev/null +++ b/controllers/internal/mdutil/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package machinedeployment implements MachineDeployment utilities +// meant to be consumed internally by the controller. +package mdutil diff --git a/controllers/internal/mdutil/util.go b/controllers/internal/mdutil/util.go new file mode 100644 index 000000000000..108217c7e640 --- /dev/null +++ b/controllers/internal/mdutil/util.go @@ -0,0 +1,708 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package mdutil implements MachineDeployment utilities. +package mdutil + +import ( + "fmt" + "hash" + "hash/fnv" + "sort" + "strconv" + "strings" + + "github.com/davecgh/go-spew/spew" + "github.com/go-logr/logr" + corev1 "k8s.io/api/core/v1" + apiequality "k8s.io/apimachinery/pkg/api/equality" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + intstrutil "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/utils/integer" + clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha4" + "sigs.k8s.io/cluster-api/util/conversion" +) + +// MachineSetsByCreationTimestamp sorts a list of MachineSet by creation timestamp, using their names as a tie breaker. +type MachineSetsByCreationTimestamp []*clusterv1.MachineSet + +func (o MachineSetsByCreationTimestamp) Len() int { return len(o) } +func (o MachineSetsByCreationTimestamp) Swap(i, j int) { o[i], o[j] = o[j], o[i] } +func (o MachineSetsByCreationTimestamp) Less(i, j int) bool { + if o[i].CreationTimestamp.Equal(&o[j].CreationTimestamp) { + return o[i].Name < o[j].Name + } + return o[i].CreationTimestamp.Before(&o[j].CreationTimestamp) +} + +// MachineSetsBySizeOlder sorts a list of MachineSet by size in descending order, using their creation timestamp or name as a tie breaker. +// By using the creation timestamp, this sorts from old to new machine sets. +type MachineSetsBySizeOlder []*clusterv1.MachineSet + +func (o MachineSetsBySizeOlder) Len() int { return len(o) } +func (o MachineSetsBySizeOlder) Swap(i, j int) { o[i], o[j] = o[j], o[i] } +func (o MachineSetsBySizeOlder) Less(i, j int) bool { + if *(o[i].Spec.Replicas) == *(o[j].Spec.Replicas) { + return o[i].CreationTimestamp.Before(&o[j].CreationTimestamp) + } + return *(o[i].Spec.Replicas) > *(o[j].Spec.Replicas) +} + +// MachineSetsBySizeNewer sorts a list of MachineSet by size in descending order, using their creation timestamp or name as a tie breaker. +// By using the creation timestamp, this sorts from new to old machine sets. +type MachineSetsBySizeNewer []*clusterv1.MachineSet + +func (o MachineSetsBySizeNewer) Len() int { return len(o) } +func (o MachineSetsBySizeNewer) Swap(i, j int) { o[i], o[j] = o[j], o[i] } +func (o MachineSetsBySizeNewer) Less(i, j int) bool { + if *(o[i].Spec.Replicas) == *(o[j].Spec.Replicas) { + return o[j].CreationTimestamp.Before(&o[i].CreationTimestamp) + } + return *(o[i].Spec.Replicas) > *(o[j].Spec.Replicas) +} + +// SetDeploymentRevision updates the revision for a deployment. +func SetDeploymentRevision(deployment *clusterv1.MachineDeployment, revision string) bool { + updated := false + + if deployment.Annotations == nil { + deployment.Annotations = make(map[string]string) + } + if deployment.Annotations[clusterv1.RevisionAnnotation] != revision { + deployment.Annotations[clusterv1.RevisionAnnotation] = revision + updated = true + } + + return updated +} + +// MaxRevision finds the highest revision in the machine sets. 
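+// Revisions are read from the clusterv1.RevisionAnnotation on each MachineSet; machine sets whose
+// annotation cannot be parsed as an integer are skipped (and logged) rather than failing the whole
+// computation.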
+func MaxRevision(allMSs []*clusterv1.MachineSet, logger logr.Logger) int64 { + max := int64(0) + for _, ms := range allMSs { + if v, err := Revision(ms); err != nil { + // Skip the machine sets when it failed to parse their revision information + logger.Error(err, "Couldn't parse revision for machine set, deployment controller will skip it when reconciling revisions", + "machineset", ms.Name) + } else if v > max { + max = v + } + } + return max +} + +// Revision returns the revision number of the input object. +func Revision(obj runtime.Object) (int64, error) { + acc, err := meta.Accessor(obj) + if err != nil { + return 0, err + } + v, ok := acc.GetAnnotations()[clusterv1.RevisionAnnotation] + if !ok { + return 0, nil + } + return strconv.ParseInt(v, 10, 64) +} + +var annotationsToSkip = map[string]bool{ + corev1.LastAppliedConfigAnnotation: true, + clusterv1.RevisionAnnotation: true, + clusterv1.RevisionHistoryAnnotation: true, + clusterv1.DesiredReplicasAnnotation: true, + clusterv1.MaxReplicasAnnotation: true, + + // Exclude the conversion annotation, to avoid infinite loops between the conversion webhook + // and the MachineDeployment controller syncing the annotations between a MachineDeployment + // and its linked MachineSets. + // + // See https://github.com/kubernetes-sigs/cluster-api/pull/3010#issue-413767831 for more details. + conversion.DataAnnotation: true, +} + +// skipCopyAnnotation returns true if we should skip copying the annotation with the given annotation key +// TODO(tbd): How to decide which annotations should / should not be copied? +// See https://github.com/kubernetes/kubernetes/pull/20035#issuecomment-179558615 +func skipCopyAnnotation(key string) bool { + return annotationsToSkip[key] +} + +// copyDeploymentAnnotationsToMachineSet copies deployment's annotations to machine set's annotations, +// and returns true if machine set's annotation is changed. +// Note that apply and revision annotations are not copied. +func copyDeploymentAnnotationsToMachineSet(deployment *clusterv1.MachineDeployment, ms *clusterv1.MachineSet) bool { + msAnnotationsChanged := false + if ms.Annotations == nil { + ms.Annotations = make(map[string]string) + } + for k, v := range deployment.Annotations { + // newMS revision is updated automatically in getNewMachineSet, and the deployment's revision number is then updated + // by copying its newMS revision number. We should not copy deployment's revision to its newMS, since the update of + // deployment revision number may fail (revision becomes stale) and the revision number in newMS is more reliable. 
+ if skipCopyAnnotation(k) || ms.Annotations[k] == v { + continue + } + ms.Annotations[k] = v + msAnnotationsChanged = true + } + return msAnnotationsChanged +} + +func getMaxReplicasAnnotation(ms *clusterv1.MachineSet, logger logr.Logger) (int32, bool) { + return getIntFromAnnotation(ms, clusterv1.MaxReplicasAnnotation, logger) +} + +func getIntFromAnnotation(ms *clusterv1.MachineSet, annotationKey string, logger logr.Logger) (int32, bool) { + logger = logger.WithValues("machineset", ms.Name, "annotationKey", annotationKey) + + annotationValue, ok := ms.Annotations[annotationKey] + if !ok { + return int32(0), false + } + intValue, err := strconv.ParseInt(annotationValue, 10, 32) + if err != nil { + logger.V(2).Info("Cannot convert the value to integer", "annotationValue", annotationValue) + return int32(0), false + } + return int32(intValue), true +} + +// SetNewMachineSetAnnotations sets new machine set's annotations appropriately by updating its revision and +// copying required deployment annotations to it; it returns true if machine set's annotation is changed. +func SetNewMachineSetAnnotations(deployment *clusterv1.MachineDeployment, newMS *clusterv1.MachineSet, newRevision string, exists bool, logger logr.Logger) bool { + logger = logger.WithValues("machineset", newMS.Name) + + // First, copy deployment's annotations (except for apply and revision annotations) + annotationChanged := copyDeploymentAnnotationsToMachineSet(deployment, newMS) + // Then, update machine set's revision annotation + if newMS.Annotations == nil { + newMS.Annotations = make(map[string]string) + } + oldRevision, ok := newMS.Annotations[clusterv1.RevisionAnnotation] + // The newMS's revision should be the greatest among all MSes. Usually, its revision number is newRevision (the max revision number + // of all old MSes + 1). However, it's possible that some of the old MSes are deleted after the newMS revision being updated, and + // newRevision becomes smaller than newMS's revision. We should only update newMS revision when it's smaller than newRevision. + + oldRevisionInt, err := strconv.ParseInt(oldRevision, 10, 64) + if err != nil { + if oldRevision != "" { + logger.Error(err, "Updating machine set revision OldRevision not int") + return false + } + // If the MS annotation is empty then initialise it to 0 + oldRevisionInt = 0 + } + newRevisionInt, err := strconv.ParseInt(newRevision, 10, 64) + if err != nil { + logger.Error(err, "Updating machine set revision NewRevision not int") + return false + } + if oldRevisionInt < newRevisionInt { + newMS.Annotations[clusterv1.RevisionAnnotation] = newRevision + annotationChanged = true + logger.V(4).Info("Updating machine set revision", "revision", newRevision) + } + // If a revision annotation already existed and this machine set was updated with a new revision + // then that means we are rolling back to this machine set. We need to preserve the old revisions + // for historical information. + if ok && annotationChanged { + revisionHistoryAnnotation := newMS.Annotations[clusterv1.RevisionHistoryAnnotation] + oldRevisions := strings.Split(revisionHistoryAnnotation, ",") + if len(oldRevisions[0]) == 0 { + newMS.Annotations[clusterv1.RevisionHistoryAnnotation] = oldRevision + } else { + oldRevisions = append(oldRevisions, oldRevision) + newMS.Annotations[clusterv1.RevisionHistoryAnnotation] = strings.Join(oldRevisions, ",") + } + } + // If the new machine set is about to be created, we need to add replica annotations to it. 
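+	// The desired-replicas annotation records the deployment's replica count and the max-replicas
+	// annotation records replicas + maxSurge, which the machine sets later use to estimate their
+	// proportions during a rollout.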
+ if !exists && SetReplicasAnnotations(newMS, *(deployment.Spec.Replicas), *(deployment.Spec.Replicas)+MaxSurge(*deployment)) { + annotationChanged = true + } + return annotationChanged +} + +// FindOneActiveOrLatest returns the only active or the latest machine set in case there is at most one active +// machine set. If there are more than one active machine sets, return nil so machine sets can be scaled down +// to the point where there is only one active machine set. +func FindOneActiveOrLatest(newMS *clusterv1.MachineSet, oldMSs []*clusterv1.MachineSet) *clusterv1.MachineSet { + if newMS == nil && len(oldMSs) == 0 { + return nil + } + + sort.Sort(sort.Reverse(MachineSetsByCreationTimestamp(oldMSs))) + allMSs := FilterActiveMachineSets(append(oldMSs, newMS)) + + switch len(allMSs) { + case 0: + // If there is no active machine set then we should return the newest. + if newMS != nil { + return newMS + } + return oldMSs[0] + case 1: + return allMSs[0] + default: + return nil + } +} + +// SetReplicasAnnotations sets the desiredReplicas and maxReplicas into the annotations. +func SetReplicasAnnotations(ms *clusterv1.MachineSet, desiredReplicas, maxReplicas int32) bool { + updated := false + if ms.Annotations == nil { + ms.Annotations = make(map[string]string) + } + desiredString := fmt.Sprintf("%d", desiredReplicas) + if hasString := ms.Annotations[clusterv1.DesiredReplicasAnnotation]; hasString != desiredString { + ms.Annotations[clusterv1.DesiredReplicasAnnotation] = desiredString + updated = true + } + if hasString := ms.Annotations[clusterv1.MaxReplicasAnnotation]; hasString != fmt.Sprintf("%d", maxReplicas) { + ms.Annotations[clusterv1.MaxReplicasAnnotation] = fmt.Sprintf("%d", maxReplicas) + updated = true + } + return updated +} + +// ReplicasAnnotationsNeedUpdate return true if the replicas annotation needs to be updated. +func ReplicasAnnotationsNeedUpdate(ms *clusterv1.MachineSet, desiredReplicas, maxReplicas int32) bool { + if ms.Annotations == nil { + return true + } + desiredString := fmt.Sprintf("%d", desiredReplicas) + if hasString := ms.Annotations[clusterv1.DesiredReplicasAnnotation]; hasString != desiredString { + return true + } + if hasString := ms.Annotations[clusterv1.MaxReplicasAnnotation]; hasString != fmt.Sprintf("%d", maxReplicas) { + return true + } + return false +} + +// MaxUnavailable returns the maximum unavailable machines a rolling deployment can take. +func MaxUnavailable(deployment clusterv1.MachineDeployment) int32 { + if !IsRollingUpdate(&deployment) || *(deployment.Spec.Replicas) == 0 { + return int32(0) + } + // Error caught by validation + _, maxUnavailable, _ := ResolveFenceposts(deployment.Spec.Strategy.RollingUpdate.MaxSurge, deployment.Spec.Strategy.RollingUpdate.MaxUnavailable, *(deployment.Spec.Replicas)) + if maxUnavailable > *deployment.Spec.Replicas { + return *deployment.Spec.Replicas + } + return maxUnavailable +} + +// MaxSurge returns the maximum surge machines a rolling deployment can take. +func MaxSurge(deployment clusterv1.MachineDeployment) int32 { + if !IsRollingUpdate(&deployment) { + return int32(0) + } + // Error caught by validation + maxSurge, _, _ := ResolveFenceposts(deployment.Spec.Strategy.RollingUpdate.MaxSurge, deployment.Spec.Strategy.RollingUpdate.MaxUnavailable, *(deployment.Spec.Replicas)) + return maxSurge +} + +// GetProportion will estimate the proportion for the provided machine set using 1. the current size +// of the parent deployment, 2. 
the replica count that needs be added on the machine sets of the +// deployment, and 3. the total replicas added in the machine sets of the deployment so far. +func GetProportion(ms *clusterv1.MachineSet, d clusterv1.MachineDeployment, deploymentReplicasToAdd, deploymentReplicasAdded int32, logger logr.Logger) int32 { + if ms == nil || *(ms.Spec.Replicas) == 0 || deploymentReplicasToAdd == 0 || deploymentReplicasToAdd == deploymentReplicasAdded { + return int32(0) + } + + msFraction := getMachineSetFraction(*ms, d, logger) + allowed := deploymentReplicasToAdd - deploymentReplicasAdded + + if deploymentReplicasToAdd > 0 { + // Use the minimum between the machine set fraction and the maximum allowed replicas + // when scaling up. This way we ensure we will not scale up more than the allowed + // replicas we can add. + return integer.Int32Min(msFraction, allowed) + } + // Use the maximum between the machine set fraction and the maximum allowed replicas + // when scaling down. This way we ensure we will not scale down more than the allowed + // replicas we can remove. + return integer.Int32Max(msFraction, allowed) +} + +// getMachineSetFraction estimates the fraction of replicas a machine set can have in +// 1. a scaling event during a rollout or 2. when scaling a paused deployment. +func getMachineSetFraction(ms clusterv1.MachineSet, d clusterv1.MachineDeployment, logger logr.Logger) int32 { + // If we are scaling down to zero then the fraction of this machine set is its whole size (negative) + if *(d.Spec.Replicas) == int32(0) { + return -*(ms.Spec.Replicas) + } + + deploymentReplicas := *(d.Spec.Replicas) + MaxSurge(d) + annotatedReplicas, ok := getMaxReplicasAnnotation(&ms, logger) + if !ok { + // If we cannot find the annotation then fallback to the current deployment size. Note that this + // will not be an accurate proportion estimation in case other machine sets have different values + // which means that the deployment was scaled at some point but we at least will stay in limits + // due to the min-max comparisons in getProportion. + annotatedReplicas = d.Status.Replicas + } + + // We should never proportionally scale up from zero which means ms.spec.replicas and annotatedReplicas + // will never be zero here. + newMSsize := (float64(*(ms.Spec.Replicas) * deploymentReplicas)) / float64(annotatedReplicas) + return integer.RoundToInt32(newMSsize) - *(ms.Spec.Replicas) +} + +// EqualMachineTemplate returns true if two given machineTemplateSpec are equal, +// ignoring the diff in value of Labels["machine-template-hash"], and the version from external references. +func EqualMachineTemplate(template1, template2 *clusterv1.MachineTemplateSpec) bool { + t1Copy := template1.DeepCopy() + t2Copy := template2.DeepCopy() + + // Remove `machine-template-hash` from the comparison: + // 1. The hash result would be different upon machineTemplateSpec API changes + // (e.g. the addition of a new field will cause the hash code to change) + // 2. The deployment template won't have hash labels + delete(t1Copy.Labels, clusterv1.MachineDeploymentUniqueLabel) + delete(t2Copy.Labels, clusterv1.MachineDeploymentUniqueLabel) + + // Remove the version part from the references APIVersion field, + // for more details see issue #2183 and #2140. 
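+	// e.g. an InfrastructureRef with APIVersion "infrastructure.cluster.x-k8s.io/v1alpha2" and one
+	// with "infrastructure.cluster.x-k8s.io/v1alpha4" both reduce to the group
+	// "infrastructure.cluster.x-k8s.io" and therefore compare as equal.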
+ t1Copy.Spec.InfrastructureRef.APIVersion = t1Copy.Spec.InfrastructureRef.GroupVersionKind().Group + if t1Copy.Spec.Bootstrap.ConfigRef != nil { + t1Copy.Spec.Bootstrap.ConfigRef.APIVersion = t1Copy.Spec.Bootstrap.ConfigRef.GroupVersionKind().Group + } + t2Copy.Spec.InfrastructureRef.APIVersion = t2Copy.Spec.InfrastructureRef.GroupVersionKind().Group + if t2Copy.Spec.Bootstrap.ConfigRef != nil { + t2Copy.Spec.Bootstrap.ConfigRef.APIVersion = t2Copy.Spec.Bootstrap.ConfigRef.GroupVersionKind().Group + } + + return apiequality.Semantic.DeepEqual(t1Copy, t2Copy) +} + +// FindNewMachineSet returns the new MS this given deployment targets (the one with the same machine template). +func FindNewMachineSet(deployment *clusterv1.MachineDeployment, msList []*clusterv1.MachineSet) *clusterv1.MachineSet { + sort.Sort(MachineSetsByCreationTimestamp(msList)) + for i := range msList { + if EqualMachineTemplate(&msList[i].Spec.Template, &deployment.Spec.Template) { + // In rare cases, such as after cluster upgrades, Deployment may end up with + // having more than one new MachineSets that have the same template, + // see https://github.com/kubernetes/kubernetes/issues/40415 + // We deterministically choose the oldest new MachineSet with matching template hash. + return msList[i] + } + } + // new MachineSet does not exist. + return nil +} + +// FindOldMachineSets returns the old machine sets targeted by the given Deployment, with the given slice of MSes. +// Returns two list of machine sets +// - the first contains all old machine sets with all non-zero replicas +// - the second contains all old machine sets +func FindOldMachineSets(deployment *clusterv1.MachineDeployment, msList []*clusterv1.MachineSet) ([]*clusterv1.MachineSet, []*clusterv1.MachineSet) { + var requiredMSs []*clusterv1.MachineSet + allMSs := make([]*clusterv1.MachineSet, 0, len(msList)) + newMS := FindNewMachineSet(deployment, msList) + for _, ms := range msList { + // Filter out new machine set + if newMS != nil && ms.UID == newMS.UID { + continue + } + allMSs = append(allMSs, ms) + if *(ms.Spec.Replicas) != 0 { + requiredMSs = append(requiredMSs, ms) + } + } + return requiredMSs, allMSs +} + +// GetReplicaCountForMachineSets returns the sum of Replicas of the given machine sets. +func GetReplicaCountForMachineSets(machineSets []*clusterv1.MachineSet) int32 { + totalReplicas := int32(0) + for _, ms := range machineSets { + if ms != nil { + totalReplicas += *(ms.Spec.Replicas) + } + } + return totalReplicas +} + +// GetActualReplicaCountForMachineSets returns the sum of actual replicas of the given machine sets. +func GetActualReplicaCountForMachineSets(machineSets []*clusterv1.MachineSet) int32 { + totalActualReplicas := int32(0) + for _, ms := range machineSets { + if ms != nil { + totalActualReplicas += ms.Status.Replicas + } + } + return totalActualReplicas +} + +// TotalMachineSetsReplicaSum returns sum of max(ms.Spec.Replicas, ms.Status.Replicas) across all the machine sets. +// +// This is used to guarantee that the total number of machines will not exceed md.Spec.Replicas + maxSurge. +// Use max(spec.Replicas,status.Replicas) to cover the cases that: +// 1. Scale up, where spec.Replicas increased but no machine created yet, so spec.Replicas > status.Replicas +// 2. Scale down, where spec.Replicas decreased but machine not deleted yet, so spec.Replicas < status.Replicas. 
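+// For example, a machine set scaling up from 3 to 5 replicas contributes 5 (its spec.Replicas),
+// while one scaling down from 5 to 3 also contributes 5 (its status.Replicas).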
+func TotalMachineSetsReplicaSum(machineSets []*clusterv1.MachineSet) int32 { + totalReplicas := int32(0) + for _, ms := range machineSets { + if ms != nil { + totalReplicas += integer.Int32Max(*(ms.Spec.Replicas), ms.Status.Replicas) + } + } + return totalReplicas +} + +// GetReadyReplicaCountForMachineSets returns the number of ready machines corresponding to the given machine sets. +func GetReadyReplicaCountForMachineSets(machineSets []*clusterv1.MachineSet) int32 { + totalReadyReplicas := int32(0) + for _, ms := range machineSets { + if ms != nil { + totalReadyReplicas += ms.Status.ReadyReplicas + } + } + return totalReadyReplicas +} + +// GetAvailableReplicaCountForMachineSets returns the number of available machines corresponding to the given machine sets. +func GetAvailableReplicaCountForMachineSets(machineSets []*clusterv1.MachineSet) int32 { + totalAvailableReplicas := int32(0) + for _, ms := range machineSets { + if ms != nil { + totalAvailableReplicas += ms.Status.AvailableReplicas + } + } + return totalAvailableReplicas +} + +// IsRollingUpdate returns true if the strategy type is a rolling update. +func IsRollingUpdate(deployment *clusterv1.MachineDeployment) bool { + return deployment.Spec.Strategy.Type == clusterv1.RollingUpdateMachineDeploymentStrategyType +} + +// DeploymentComplete considers a deployment to be complete once all of its desired replicas +// are updated and available, and no old machines are running. +func DeploymentComplete(deployment *clusterv1.MachineDeployment, newStatus *clusterv1.MachineDeploymentStatus) bool { + return newStatus.UpdatedReplicas == *(deployment.Spec.Replicas) && + newStatus.Replicas == *(deployment.Spec.Replicas) && + newStatus.AvailableReplicas == *(deployment.Spec.Replicas) && + newStatus.ObservedGeneration >= deployment.Generation +} + +// NewMSNewReplicas calculates the number of replicas a deployment's new MS should have. +// When one of the following is true, we're rolling out the deployment; otherwise, we're scaling it. +// 1) The new MS is saturated: newMS's replicas == deployment's replicas +// 2) For RollingUpdateStrategy: Max number of machines allowed is reached: deployment's replicas + maxSurge == all MSs' replicas. +// 3) For OnDeleteStrategy: Max number of machines allowed is reached: deployment's replicas == all MSs' replicas. +func NewMSNewReplicas(deployment *clusterv1.MachineDeployment, allMSs []*clusterv1.MachineSet, newMS *clusterv1.MachineSet) (int32, error) { + switch deployment.Spec.Strategy.Type { + case clusterv1.RollingUpdateMachineDeploymentStrategyType: + // Check if we can scale up. + maxSurge, err := intstrutil.GetScaledValueFromIntOrPercent(deployment.Spec.Strategy.RollingUpdate.MaxSurge, int(*(deployment.Spec.Replicas)), true) + if err != nil { + return 0, err + } + // Find the total number of machines + currentMachineCount := TotalMachineSetsReplicaSum(allMSs) + maxTotalMachines := *(deployment.Spec.Replicas) + int32(maxSurge) + if currentMachineCount >= maxTotalMachines { + // Cannot scale up. + return *(newMS.Spec.Replicas), nil + } + // Scale up. + scaleUpCount := maxTotalMachines - currentMachineCount + // Do not exceed the number of desired replicas. 
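+		// For example, with 3 desired replicas, maxSurge 1, 3 machines across all machine sets and
+		// the new machine set at 1 replica: scaleUpCount = min(4-3, 3-1) = 1, so the new machine
+		// set is scaled to 2 replicas.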
+ scaleUpCount = integer.Int32Min(scaleUpCount, *(deployment.Spec.Replicas)-*(newMS.Spec.Replicas)) + return *(newMS.Spec.Replicas) + scaleUpCount, nil + case clusterv1.OnDeleteMachineDeploymentStrategyType: + // Find the total number of machines + currentMachineCount := TotalMachineSetsReplicaSum(allMSs) + if currentMachineCount >= *(deployment.Spec.Replicas) { + // Cannot scale up as more replicas exist than desired number of replicas in the MachineDeployment. + return *(newMS.Spec.Replicas), nil + } + // Scale up the latest MachineSet so the total amount of replicas across all MachineSets match + // the desired number of replicas in the MachineDeployment + scaleUpCount := *(deployment.Spec.Replicas) - currentMachineCount + return *(newMS.Spec.Replicas) + scaleUpCount, nil + default: + return 0, fmt.Errorf("deployment strategy %v isn't supported", deployment.Spec.Strategy.Type) + } +} + +// IsSaturated checks if the new machine set is saturated by comparing its size with its deployment size. +// Both the deployment and the machine set have to believe this machine set can own all of the desired +// replicas in the deployment and the annotation helps in achieving that. All machines of the MachineSet +// need to be available. +func IsSaturated(deployment *clusterv1.MachineDeployment, ms *clusterv1.MachineSet) bool { + if ms == nil { + return false + } + desiredString := ms.Annotations[clusterv1.DesiredReplicasAnnotation] + desired, err := strconv.ParseInt(desiredString, 10, 32) + if err != nil { + return false + } + return *(ms.Spec.Replicas) == *(deployment.Spec.Replicas) && + int32(desired) == *(deployment.Spec.Replicas) && + ms.Status.AvailableReplicas == *(deployment.Spec.Replicas) +} + +// ResolveFenceposts resolves both maxSurge and maxUnavailable. This needs to happen in one +// step. For example: +// +// 2 desired, max unavailable 1%, surge 0% - should scale old(-1), then new(+1), then old(-1), then new(+1) +// 1 desired, max unavailable 1%, surge 0% - should scale old(-1), then new(+1) +// 2 desired, max unavailable 25%, surge 1% - should scale new(+1), then old(-1), then new(+1), then old(-1) +// 1 desired, max unavailable 25%, surge 1% - should scale new(+1), then old(-1) +// 2 desired, max unavailable 0%, surge 1% - should scale new(+1), then old(-1), then new(+1), then old(-1) +// 1 desired, max unavailable 0%, surge 1% - should scale new(+1), then old(-1). +func ResolveFenceposts(maxSurge, maxUnavailable *intstrutil.IntOrString, desired int32) (int32, int32, error) { + surge, err := intstrutil.GetScaledValueFromIntOrPercent(maxSurge, int(desired), true) + if err != nil { + return 0, 0, err + } + unavailable, err := intstrutil.GetScaledValueFromIntOrPercent(maxUnavailable, int(desired), false) + if err != nil { + return 0, 0, err + } + + if surge == 0 && unavailable == 0 { + // Validation should never allow the user to explicitly use zero values for both maxSurge + // maxUnavailable. Due to rounding down maxUnavailable though, it may resolve to zero. + // If both fenceposts resolve to zero, then we should set maxUnavailable to 1 on the + // theory that surge might not work due to quota. + unavailable = 1 + } + + return int32(surge), int32(unavailable), nil +} + +// FilterActiveMachineSets returns machine sets that have (or at least ought to have) machines. 
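+// A machine set is considered active when its spec.Replicas is set and greater than zero.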
+func FilterActiveMachineSets(machineSets []*clusterv1.MachineSet) []*clusterv1.MachineSet { + activeFilter := func(ms *clusterv1.MachineSet) bool { + return ms != nil && ms.Spec.Replicas != nil && *(ms.Spec.Replicas) > 0 + } + return FilterMachineSets(machineSets, activeFilter) +} + +type filterMS func(ms *clusterv1.MachineSet) bool + +// FilterMachineSets returns machine sets that are filtered by filterFn (all returned ones should match filterFn). +func FilterMachineSets(mSes []*clusterv1.MachineSet, filterFn filterMS) []*clusterv1.MachineSet { + var filtered []*clusterv1.MachineSet + for i := range mSes { + if filterFn(mSes[i]) { + filtered = append(filtered, mSes[i]) + } + } + return filtered +} + +// CloneAndAddLabel clones the given map and returns a new map with the given key and value added. +// Returns the given map, if labelKey is empty. +func CloneAndAddLabel(labels map[string]string, labelKey, labelValue string) map[string]string { + if labelKey == "" { + // Don't need to add a label. + return labels + } + // Clone. + newLabels := map[string]string{} + for key, value := range labels { + newLabels[key] = value + } + newLabels[labelKey] = labelValue + return newLabels +} + +// CloneSelectorAndAddLabel clones the given selector and returns a new selector with the given key and value added. +// Returns the given selector, if labelKey is empty. +func CloneSelectorAndAddLabel(selector *metav1.LabelSelector, labelKey, labelValue string) *metav1.LabelSelector { + if labelKey == "" { + // Don't need to add a label. + return selector + } + + // Clone. + newSelector := new(metav1.LabelSelector) + + // TODO(madhusudancs): Check if you can use deepCopy_extensions_LabelSelector here. + newSelector.MatchLabels = make(map[string]string) + if selector.MatchLabels != nil { + for key, val := range selector.MatchLabels { + newSelector.MatchLabels[key] = val + } + } + newSelector.MatchLabels[labelKey] = labelValue + + if selector.MatchExpressions != nil { + newMExps := make([]metav1.LabelSelectorRequirement, len(selector.MatchExpressions)) + for i, me := range selector.MatchExpressions { + newMExps[i].Key = me.Key + newMExps[i].Operator = me.Operator + if me.Values != nil { + newMExps[i].Values = make([]string, len(me.Values)) + copy(newMExps[i].Values, me.Values) + } else { + newMExps[i].Values = nil + } + } + newSelector.MatchExpressions = newMExps + } else { + newSelector.MatchExpressions = nil + } + + return newSelector +} + +// SpewHashObject writes specified object to hash using the spew library +// which follows pointers and prints actual values of the nested objects +// ensuring the hash does not change when a pointer changes. +func SpewHashObject(hasher hash.Hash, objectToWrite interface{}) error { + hasher.Reset() + printer := spew.ConfigState{ + Indent: " ", + SortKeys: true, + DisableMethods: true, + SpewKeys: true, + } + + if _, err := printer.Fprintf(hasher, "%#v", objectToWrite); err != nil { + return fmt.Errorf("failed to write object to hasher") + } + return nil +} + +// ComputeSpewHash computes the hash of a MachineTemplateSpec using the spew library. +func ComputeSpewHash(objectToWrite interface{}) (uint32, error) { + machineTemplateSpecHasher := fnv.New32a() + if err := SpewHashObject(machineTemplateSpecHasher, objectToWrite); err != nil { + return 0, err + } + return machineTemplateSpecHasher.Sum32(), nil +} + +// GetDeletingMachineCount gets the number of machines that are in the process of being deleted +// in a machineList. 
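+// A machine counts as deleting when its deletionTimestamp is set (non-zero).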
+func GetDeletingMachineCount(machineList *clusterv1.MachineList) int32 { + var deletingMachineCount int32 + for _, machine := range machineList.Items { + if !machine.GetDeletionTimestamp().IsZero() { + deletingMachineCount++ + } + } + return deletingMachineCount +} diff --git a/controllers/internal/mdutil/util_test.go b/controllers/internal/mdutil/util_test.go new file mode 100644 index 000000000000..3a7b8eaac80f --- /dev/null +++ b/controllers/internal/mdutil/util_test.go @@ -0,0 +1,828 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package mdutil + +import ( + "fmt" + "math/rand" + "strconv" + "testing" + "time" + + . "github.com/onsi/gomega" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/apiserver/pkg/storage/names" + "k8s.io/klog/v2/klogr" + clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha4" +) + +func newDControllerRef(d *clusterv1.MachineDeployment) *metav1.OwnerReference { + isController := true + return &metav1.OwnerReference{ + APIVersion: "clusters/v1alpha", + Kind: "MachineDeployment", + Name: d.GetName(), + UID: d.GetUID(), + Controller: &isController, + } +} + +// generateMS creates a machine set, with the input deployment's template as its template. +func generateMS(deployment clusterv1.MachineDeployment) clusterv1.MachineSet { + template := deployment.Spec.Template.DeepCopy() + return clusterv1.MachineSet{ + ObjectMeta: metav1.ObjectMeta{ + UID: randomUID(), + Name: names.SimpleNameGenerator.GenerateName("machineset"), + Labels: template.Labels, + OwnerReferences: []metav1.OwnerReference{*newDControllerRef(&deployment)}, + }, + Spec: clusterv1.MachineSetSpec{ + Replicas: new(int32), + Template: *template, + Selector: metav1.LabelSelector{MatchLabels: template.Labels}, + }, + } +} + +func randomUID() types.UID { + return types.UID(strconv.FormatInt(rand.Int63(), 10)) //nolint:gosec +} + +// generateDeployment creates a deployment, with the input image as its template. 
+func generateDeployment(image string) clusterv1.MachineDeployment { + machineLabels := map[string]string{"name": image} + return clusterv1.MachineDeployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: image, + Annotations: make(map[string]string), + }, + Spec: clusterv1.MachineDeploymentSpec{ + Replicas: func(i int32) *int32 { return &i }(1), + Selector: metav1.LabelSelector{MatchLabels: machineLabels}, + Template: clusterv1.MachineTemplateSpec{ + ObjectMeta: clusterv1.ObjectMeta{ + Labels: machineLabels, + }, + Spec: clusterv1.MachineSpec{}, + }, + }, + } +} + +func generateMachineTemplateSpec(annotations, labels map[string]string) clusterv1.MachineTemplateSpec { + return clusterv1.MachineTemplateSpec{ + ObjectMeta: clusterv1.ObjectMeta{ + Annotations: annotations, + Labels: labels, + }, + Spec: clusterv1.MachineSpec{}, + } +} + +func TestEqualMachineTemplate(t *testing.T) { + tests := []struct { + Name string + Former, Latter clusterv1.MachineTemplateSpec + Expected bool + }{ + { + Name: "Same spec, same labels", + Former: generateMachineTemplateSpec(map[string]string{}, map[string]string{clusterv1.MachineDeploymentUniqueLabel: "value-1", "something": "else"}), + Latter: generateMachineTemplateSpec(map[string]string{}, map[string]string{clusterv1.MachineDeploymentUniqueLabel: "value-1", "something": "else"}), + Expected: true, + }, + { + Name: "Same spec, only machine-template-hash label value is different", + Former: generateMachineTemplateSpec(map[string]string{}, map[string]string{clusterv1.MachineDeploymentUniqueLabel: "value-1", "something": "else"}), + Latter: generateMachineTemplateSpec(map[string]string{}, map[string]string{clusterv1.MachineDeploymentUniqueLabel: "value-2", "something": "else"}), + Expected: true, + }, + { + Name: "Same spec, the former doesn't have machine-template-hash label", + Former: generateMachineTemplateSpec(map[string]string{}, map[string]string{"something": "else"}), + Latter: generateMachineTemplateSpec(map[string]string{}, map[string]string{clusterv1.MachineDeploymentUniqueLabel: "value-2", "something": "else"}), + Expected: true, + }, + { + Name: "Same spec, the former doesn't have machine-template-hash label", + Former: generateMachineTemplateSpec(map[string]string{}, map[string]string{"something": "else"}), + Latter: generateMachineTemplateSpec(map[string]string{}, map[string]string{clusterv1.MachineDeploymentUniqueLabel: "value-2", "something": "else"}), + Expected: true, + }, + { + Name: "Same spec, the label is different, the former doesn't have machine-template-hash label, same number of labels", + Former: generateMachineTemplateSpec(map[string]string{}, map[string]string{"something": "else"}), + Latter: generateMachineTemplateSpec(map[string]string{}, map[string]string{clusterv1.MachineDeploymentUniqueLabel: "value-2"}), + Expected: false, + }, + { + Name: "Same spec, the label is different, the latter doesn't have machine-template-hash label, same number of labels", + Former: generateMachineTemplateSpec(map[string]string{}, map[string]string{clusterv1.MachineDeploymentUniqueLabel: "value-1"}), + Latter: generateMachineTemplateSpec(map[string]string{}, map[string]string{"something": "else"}), + Expected: false, + }, + { + Name: "Same spec, the label is different, and the machine-template-hash label value is the same", + Former: generateMachineTemplateSpec(map[string]string{}, map[string]string{clusterv1.MachineDeploymentUniqueLabel: "value-1"}), + Latter: generateMachineTemplateSpec(map[string]string{}, 
map[string]string{clusterv1.MachineDeploymentUniqueLabel: "value-1", "something": "else"}), + Expected: false, + }, + { + Name: "Different spec, same labels", + Former: generateMachineTemplateSpec(map[string]string{"former": "value"}, map[string]string{clusterv1.MachineDeploymentUniqueLabel: "value-1", "something": "else"}), + Latter: generateMachineTemplateSpec(map[string]string{"latter": "value"}, map[string]string{clusterv1.MachineDeploymentUniqueLabel: "value-1", "something": "else"}), + Expected: false, + }, + { + Name: "Different spec, different machine-template-hash label value", + Former: generateMachineTemplateSpec(map[string]string{"x": ""}, map[string]string{clusterv1.MachineDeploymentUniqueLabel: "value-1", "something": "else"}), + Latter: generateMachineTemplateSpec(map[string]string{"x": "1"}, map[string]string{clusterv1.MachineDeploymentUniqueLabel: "value-2", "something": "else"}), + Expected: false, + }, + { + Name: "Different spec, the former doesn't have machine-template-hash label", + Former: generateMachineTemplateSpec(map[string]string{"x": ""}, map[string]string{"something": "else"}), + Latter: generateMachineTemplateSpec(map[string]string{"x": "1"}, map[string]string{clusterv1.MachineDeploymentUniqueLabel: "value-2", "something": "else"}), + Expected: false, + }, + { + Name: "Different spec, different labels", + Former: generateMachineTemplateSpec(map[string]string{}, map[string]string{"something": "else"}), + Latter: generateMachineTemplateSpec(map[string]string{}, map[string]string{"nothing": "else"}), + Expected: false, + }, + { + Name: "Same spec, except for references versions", + Former: clusterv1.MachineTemplateSpec{ + ObjectMeta: clusterv1.ObjectMeta{ + Labels: map[string]string{}, + }, + Spec: clusterv1.MachineSpec{ + Bootstrap: clusterv1.Bootstrap{ + ConfigRef: &corev1.ObjectReference{ + APIVersion: "bootstrap.cluster.x-k8s.io/v1alpha2", + Kind: "MachineBootstrap", + }, + }, + InfrastructureRef: corev1.ObjectReference{ + APIVersion: "infrastructure.cluster.x-k8s.io/v1alpha2", + Kind: "MachineInfrastructure", + }, + }, + }, + Latter: clusterv1.MachineTemplateSpec{ + ObjectMeta: clusterv1.ObjectMeta{ + Labels: map[string]string{}, + }, + Spec: clusterv1.MachineSpec{ + Bootstrap: clusterv1.Bootstrap{ + ConfigRef: &corev1.ObjectReference{ + APIVersion: "bootstrap.cluster.x-k8s.io/v1alpha4", + Kind: "MachineBootstrap", + }, + }, + InfrastructureRef: corev1.ObjectReference{ + APIVersion: "infrastructure.cluster.x-k8s.io/v1alpha4", + Kind: "MachineInfrastructure", + }, + }, + }, + Expected: true, + }, + { + Name: "Same spec, bootstrap references are different kinds", + Former: clusterv1.MachineTemplateSpec{ + ObjectMeta: clusterv1.ObjectMeta{ + Labels: map[string]string{}, + }, + Spec: clusterv1.MachineSpec{ + Bootstrap: clusterv1.Bootstrap{ + ConfigRef: &corev1.ObjectReference{ + APIVersion: "bootstrap.cluster.x-k8s.io/v1alpha2", + Kind: "MachineBootstrap1", + }, + }, + InfrastructureRef: corev1.ObjectReference{ + APIVersion: "infrastructure.cluster.x-k8s.io/v1alpha2", + Kind: "MachineInfrastructure", + }, + }, + }, + Latter: clusterv1.MachineTemplateSpec{ + ObjectMeta: clusterv1.ObjectMeta{ + Labels: map[string]string{}, + }, + Spec: clusterv1.MachineSpec{ + Bootstrap: clusterv1.Bootstrap{ + ConfigRef: &corev1.ObjectReference{ + APIVersion: "bootstrap.cluster.x-k8s.io/v1alpha4", + Kind: "MachineBootstrap2", + }, + }, + InfrastructureRef: corev1.ObjectReference{ + APIVersion: "infrastructure.cluster.x-k8s.io/v1alpha4", + Kind: "MachineInfrastructure", + }, + }, 
+ }, + Expected: false, + }, + } + + for _, test := range tests { + t.Run(test.Name, func(t *testing.T) { + g := NewWithT(t) + + runTest := func(t1, t2 *clusterv1.MachineTemplateSpec) { + // Run + equal := EqualMachineTemplate(t1, t2) + g.Expect(equal).To(Equal(test.Expected)) + g.Expect(t1.Labels).NotTo(BeNil()) + g.Expect(t2.Labels).NotTo(BeNil()) + } + + runTest(&test.Former, &test.Latter) + // Test the same case in reverse order + runTest(&test.Latter, &test.Former) + }) + } +} + +func TestFindNewMachineSet(t *testing.T) { + now := metav1.Now() + later := metav1.Time{Time: now.Add(time.Minute)} + + deployment := generateDeployment("nginx") + newMS := generateMS(deployment) + newMS.Labels[clusterv1.MachineDeploymentUniqueLabel] = "hash" + newMS.CreationTimestamp = later + + newMSDup := generateMS(deployment) + newMSDup.Labels[clusterv1.MachineDeploymentUniqueLabel] = "different-hash" + newMSDup.CreationTimestamp = now + + oldDeployment := generateDeployment("nginx") + oldMS := generateMS(oldDeployment) + oldMS.Spec.Template.Annotations = map[string]string{ + "old": "true", + } + oldMS.Status.FullyLabeledReplicas = *(oldMS.Spec.Replicas) + + tests := []struct { + Name string + deployment clusterv1.MachineDeployment + msList []*clusterv1.MachineSet + expected *clusterv1.MachineSet + }{ + { + Name: "Get new MachineSet with the same template as Deployment spec but different machine-template-hash value", + deployment: deployment, + msList: []*clusterv1.MachineSet{&newMS, &oldMS}, + expected: &newMS, + }, + { + Name: "Get the oldest new MachineSet when there are more than one MachineSet with the same template", + deployment: deployment, + msList: []*clusterv1.MachineSet{&newMS, &oldMS, &newMSDup}, + expected: &newMSDup, + }, + { + Name: "Get nil new MachineSet", + deployment: deployment, + msList: []*clusterv1.MachineSet{&oldMS}, + expected: nil, + }, + } + + for _, test := range tests { + t.Run(test.Name, func(t *testing.T) { + g := NewWithT(t) + + ms := FindNewMachineSet(&test.deployment, test.msList) + g.Expect(ms).To(Equal(test.expected)) + }) + } +} + +func TestFindOldMachineSets(t *testing.T) { + now := metav1.Now() + later := metav1.Time{Time: now.Add(time.Minute)} + before := metav1.Time{Time: now.Add(-time.Minute)} + + deployment := generateDeployment("nginx") + newMS := generateMS(deployment) + *(newMS.Spec.Replicas) = 1 + newMS.Labels[clusterv1.MachineDeploymentUniqueLabel] = "hash" + newMS.CreationTimestamp = later + + newMSDup := generateMS(deployment) + newMSDup.Labels[clusterv1.MachineDeploymentUniqueLabel] = "different-hash" + newMSDup.CreationTimestamp = now + + oldDeployment := generateDeployment("nginx") + oldMS := generateMS(oldDeployment) + oldMS.Spec.Template.Annotations = map[string]string{ + "old": "true", + } + oldMS.Status.FullyLabeledReplicas = *(oldMS.Spec.Replicas) + oldMS.CreationTimestamp = before + + oldDeployment = generateDeployment("nginx") + oldDeployment.Spec.Selector.MatchLabels["old-label"] = "old-value" + oldDeployment.Spec.Template.Labels["old-label"] = "old-value" + oldMSwithOldLabel := generateMS(oldDeployment) + oldMSwithOldLabel.Status.FullyLabeledReplicas = *(oldMSwithOldLabel.Spec.Replicas) + oldMSwithOldLabel.CreationTimestamp = before + + tests := []struct { + Name string + deployment clusterv1.MachineDeployment + msList []*clusterv1.MachineSet + expected []*clusterv1.MachineSet + expectedRequire []*clusterv1.MachineSet + }{ + { + Name: "Get old MachineSets", + deployment: deployment, + msList: []*clusterv1.MachineSet{&newMS, &oldMS}, + 
expected: []*clusterv1.MachineSet{&oldMS}, + expectedRequire: nil, + }, + { + Name: "Get old MachineSets with no new MachineSet", + deployment: deployment, + msList: []*clusterv1.MachineSet{&oldMS}, + expected: []*clusterv1.MachineSet{&oldMS}, + expectedRequire: nil, + }, + { + Name: "Get old MachineSets with two new MachineSets, only the oldest new MachineSet is seen as new MachineSet", + deployment: deployment, + msList: []*clusterv1.MachineSet{&oldMS, &newMS, &newMSDup}, + expected: []*clusterv1.MachineSet{&oldMS, &newMS}, + expectedRequire: []*clusterv1.MachineSet{&newMS}, + }, + { + Name: "Get empty old MachineSets", + deployment: deployment, + msList: []*clusterv1.MachineSet{&newMS}, + expected: []*clusterv1.MachineSet{}, + expectedRequire: nil, + }, + { + Name: "Get old MachineSets after label changed in MachineDeployments", + deployment: deployment, + msList: []*clusterv1.MachineSet{&newMS, &oldMSwithOldLabel}, + expected: []*clusterv1.MachineSet{&oldMSwithOldLabel}, + expectedRequire: nil, + }, + } + + for _, test := range tests { + t.Run(test.Name, func(t *testing.T) { + g := NewWithT(t) + + requireMS, allMS := FindOldMachineSets(&test.deployment, test.msList) + g.Expect(allMS).To(ConsistOf(test.expected)) + // MSs are getting filtered correctly by ms.spec.replicas + g.Expect(requireMS).To(ConsistOf(test.expectedRequire)) + }) + } +} + +func TestGetReplicaCountForMachineSets(t *testing.T) { + ms1 := generateMS(generateDeployment("foo")) + *(ms1.Spec.Replicas) = 1 + ms1.Status.Replicas = 2 + ms2 := generateMS(generateDeployment("bar")) + *(ms2.Spec.Replicas) = 5 + ms2.Status.Replicas = 3 + + tests := []struct { + Name string + Sets []*clusterv1.MachineSet + ExpectedCount int32 + ExpectedActual int32 + ExpectedTotal int32 + }{ + { + Name: "1:2 Replicas", + Sets: []*clusterv1.MachineSet{&ms1}, + ExpectedCount: 1, + ExpectedActual: 2, + ExpectedTotal: 2, + }, + { + Name: "6:5 Replicas", + Sets: []*clusterv1.MachineSet{&ms1, &ms2}, + ExpectedCount: 6, + ExpectedActual: 5, + ExpectedTotal: 7, + }, + } + + for _, test := range tests { + t.Run(test.Name, func(t *testing.T) { + g := NewWithT(t) + + g.Expect(GetReplicaCountForMachineSets(test.Sets)).To(Equal(test.ExpectedCount)) + g.Expect(GetActualReplicaCountForMachineSets(test.Sets)).To(Equal(test.ExpectedActual)) + g.Expect(TotalMachineSetsReplicaSum(test.Sets)).To(Equal(test.ExpectedTotal)) + }) + } +} + +func TestResolveFenceposts(t *testing.T) { + tests := []struct { + maxSurge string + maxUnavailable string + desired int32 + expectSurge int32 + expectUnavailable int32 + expectError bool + }{ + { + maxSurge: "0%", + maxUnavailable: "0%", + desired: 0, + expectSurge: 0, + expectUnavailable: 1, + expectError: false, + }, + { + maxSurge: "39%", + maxUnavailable: "39%", + desired: 10, + expectSurge: 4, + expectUnavailable: 3, + expectError: false, + }, + { + maxSurge: "oops", + maxUnavailable: "39%", + desired: 10, + expectSurge: 0, + expectUnavailable: 0, + expectError: true, + }, + { + maxSurge: "55%", + maxUnavailable: "urg", + desired: 10, + expectSurge: 0, + expectUnavailable: 0, + expectError: true, + }, + { + maxSurge: "5", + maxUnavailable: "1", + desired: 7, + expectSurge: 0, + expectUnavailable: 0, + expectError: true, + }, + } + + for _, test := range tests { + t.Run("maxSurge="+test.maxSurge, func(t *testing.T) { + g := NewWithT(t) + + maxSurge := intstr.FromString(test.maxSurge) + maxUnavail := intstr.FromString(test.maxUnavailable) + surge, unavail, err := ResolveFenceposts(&maxSurge, &maxUnavail, test.desired) + if 
test.expectError { + g.Expect(err).To(HaveOccurred()) + } else { + g.Expect(err).NotTo(HaveOccurred()) + } + g.Expect(surge).To(Equal(test.expectSurge)) + g.Expect(unavail).To(Equal(test.expectUnavailable)) + }) + } +} + +func TestNewMSNewReplicas(t *testing.T) { + tests := []struct { + Name string + strategyType clusterv1.MachineDeploymentStrategyType + depReplicas int32 + newMSReplicas int32 + maxSurge int + expected int32 + }{ + { + "can not scale up - to newMSReplicas", + clusterv1.RollingUpdateMachineDeploymentStrategyType, + 1, 5, 1, 5, + }, + { + "scale up - to depReplicas", + clusterv1.RollingUpdateMachineDeploymentStrategyType, + 6, 2, 10, 6, + }, + } + newDeployment := generateDeployment("nginx") + newRC := generateMS(newDeployment) + rs5 := generateMS(newDeployment) + *(rs5.Spec.Replicas) = 5 + + for _, test := range tests { + t.Run(test.Name, func(t *testing.T) { + g := NewWithT(t) + + *(newDeployment.Spec.Replicas) = test.depReplicas + newDeployment.Spec.Strategy = &clusterv1.MachineDeploymentStrategy{Type: test.strategyType} + newDeployment.Spec.Strategy.RollingUpdate = &clusterv1.MachineRollingUpdateDeployment{ + MaxUnavailable: func(i int) *intstr.IntOrString { + x := intstr.FromInt(i) + return &x + }(1), + MaxSurge: func(i int) *intstr.IntOrString { + x := intstr.FromInt(i) + return &x + }(test.maxSurge), + } + *(newRC.Spec.Replicas) = test.newMSReplicas + ms, err := NewMSNewReplicas(&newDeployment, []*clusterv1.MachineSet{&rs5}, &newRC) + g.Expect(err).NotTo(HaveOccurred()) + g.Expect(ms).To(Equal(test.expected)) + }) + } +} + +func TestDeploymentComplete(t *testing.T) { + deployment := func(desired, current, updated, available, maxUnavailable, maxSurge int32) *clusterv1.MachineDeployment { + return &clusterv1.MachineDeployment{ + Spec: clusterv1.MachineDeploymentSpec{ + Replicas: &desired, + Strategy: &clusterv1.MachineDeploymentStrategy{ + RollingUpdate: &clusterv1.MachineRollingUpdateDeployment{ + MaxUnavailable: func(i int) *intstr.IntOrString { x := intstr.FromInt(i); return &x }(int(maxUnavailable)), + MaxSurge: func(i int) *intstr.IntOrString { x := intstr.FromInt(i); return &x }(int(maxSurge)), + }, + Type: clusterv1.RollingUpdateMachineDeploymentStrategyType, + }, + }, + Status: clusterv1.MachineDeploymentStatus{ + Replicas: current, + UpdatedReplicas: updated, + AvailableReplicas: available, + }, + } + } + + tests := []struct { + name string + + d *clusterv1.MachineDeployment + + expected bool + }{ + { + name: "not complete: min but not all machines become available", + + d: deployment(5, 5, 5, 4, 1, 0), + expected: false, + }, + { + name: "not complete: min availability is not honored", + + d: deployment(5, 5, 5, 3, 1, 0), + expected: false, + }, + { + name: "complete", + + d: deployment(5, 5, 5, 5, 0, 0), + expected: true, + }, + { + name: "not complete: all machines are available but not updated", + + d: deployment(5, 5, 4, 5, 0, 0), + expected: false, + }, + { + name: "not complete: still running old machines", + + // old machine set: spec.replicas=1, status.replicas=1, status.availableReplicas=1 + // new machine set: spec.replicas=1, status.replicas=1, status.availableReplicas=0 + d: deployment(1, 2, 1, 1, 0, 1), + expected: false, + }, + { + name: "not complete: one replica deployment never comes up", + + d: deployment(1, 1, 1, 0, 1, 1), + expected: false, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + g := NewWithT(t) + + g.Expect(DeploymentComplete(test.d, &test.d.Status)).To(Equal(test.expected)) + }) + } +} + +func 
TestMaxUnavailable(t *testing.T) { + deployment := func(replicas int32, maxUnavailable intstr.IntOrString) clusterv1.MachineDeployment { + return clusterv1.MachineDeployment{ + Spec: clusterv1.MachineDeploymentSpec{ + Replicas: func(i int32) *int32 { return &i }(replicas), + Strategy: &clusterv1.MachineDeploymentStrategy{ + RollingUpdate: &clusterv1.MachineRollingUpdateDeployment{ + MaxSurge: func(i int) *intstr.IntOrString { x := intstr.FromInt(i); return &x }(int(1)), + MaxUnavailable: &maxUnavailable, + }, + Type: clusterv1.RollingUpdateMachineDeploymentStrategyType, + }, + }, + } + } + tests := []struct { + name string + deployment clusterv1.MachineDeployment + expected int32 + }{ + { + name: "maxUnavailable less than replicas", + deployment: deployment(10, intstr.FromInt(5)), + expected: int32(5), + }, + { + name: "maxUnavailable equal replicas", + deployment: deployment(10, intstr.FromInt(10)), + expected: int32(10), + }, + { + name: "maxUnavailable greater than replicas", + deployment: deployment(5, intstr.FromInt(10)), + expected: int32(5), + }, + { + name: "maxUnavailable with replicas is 0", + deployment: deployment(0, intstr.FromInt(10)), + expected: int32(0), + }, + { + name: "maxUnavailable less than replicas with percents", + deployment: deployment(10, intstr.FromString("50%")), + expected: int32(5), + }, + { + name: "maxUnavailable equal replicas with percents", + deployment: deployment(10, intstr.FromString("100%")), + expected: int32(10), + }, + { + name: "maxUnavailable greater than replicas with percents", + deployment: deployment(5, intstr.FromString("100%")), + expected: int32(5), + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + g := NewWithT(t) + + g.Expect(MaxUnavailable(test.deployment)).To(Equal(test.expected)) + }) + } +} + +// TestAnnotationUtils is a set of simple tests for annotation related util functions. 
+func TestAnnotationUtils(t *testing.T) { + // Setup + tDeployment := generateDeployment("nginx") + tMS := generateMS(tDeployment) + tDeployment.Annotations[clusterv1.RevisionAnnotation] = "999" + logger := klogr.New() + + // Test Case 1: Check if annotations are copied properly from deployment to MS + t.Run("SetNewMachineSetAnnotations", func(t *testing.T) { + g := NewWithT(t) + + // Set the revision incrementally from 1 through 20 + for i := 0; i < 20; i++ { + nextRevision := fmt.Sprintf("%d", i+1) + SetNewMachineSetAnnotations(&tDeployment, &tMS, nextRevision, true, logger) + // Now the MachineSet's Revision Annotation should be i+1 + g.Expect(tMS.Annotations).To(HaveKeyWithValue(clusterv1.RevisionAnnotation, nextRevision)) + } + }) + + // Test Case 2: Check if annotations are set properly + t.Run("SetReplicasAnnotations", func(t *testing.T) { + g := NewWithT(t) + + g.Expect(SetReplicasAnnotations(&tMS, 10, 11)).To(BeTrue()) + g.Expect(tMS.Annotations).To(HaveKeyWithValue(clusterv1.DesiredReplicasAnnotation, "10")) + g.Expect(tMS.Annotations).To(HaveKeyWithValue(clusterv1.MaxReplicasAnnotation, "11")) + }) + + // Test Case 3: Check if annotations reflect the deployment's state + tMS.Annotations[clusterv1.DesiredReplicasAnnotation] = "1" + tMS.Status.AvailableReplicas = 1 + tMS.Spec.Replicas = new(int32) + *tMS.Spec.Replicas = 1 + + t.Run("IsSaturated", func(t *testing.T) { + g := NewWithT(t) + + g.Expect(IsSaturated(&tDeployment, &tMS)).To(BeTrue()) + }) +} + +func TestReplicasAnnotationsNeedUpdate(t *testing.T) { + desiredReplicas := fmt.Sprintf("%d", int32(10)) + maxReplicas := fmt.Sprintf("%d", int32(20)) + + tests := []struct { + name string + machineSet *clusterv1.MachineSet + expected bool + }{ + { + name: "test Annotations nil", + machineSet: &clusterv1.MachineSet{ + ObjectMeta: metav1.ObjectMeta{Name: "hello", Namespace: metav1.NamespaceDefault}, + Spec: clusterv1.MachineSetSpec{ + Selector: metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}, + }, + }, + expected: true, + }, + { + name: "test desiredReplicas update", + machineSet: &clusterv1.MachineSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "hello", + Namespace: metav1.NamespaceDefault, + Annotations: map[string]string{clusterv1.DesiredReplicasAnnotation: "8", clusterv1.MaxReplicasAnnotation: maxReplicas}, + }, + Spec: clusterv1.MachineSetSpec{ + Selector: metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}, + }, + }, + expected: true, + }, + { + name: "test maxReplicas update", + machineSet: &clusterv1.MachineSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "hello", + Namespace: metav1.NamespaceDefault, + Annotations: map[string]string{clusterv1.DesiredReplicasAnnotation: desiredReplicas, clusterv1.MaxReplicasAnnotation: "16"}, + }, + Spec: clusterv1.MachineSetSpec{ + Selector: metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}, + }, + }, + expected: true, + }, + { + name: "test needn't update", + machineSet: &clusterv1.MachineSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "hello", + Namespace: metav1.NamespaceDefault, + Annotations: map[string]string{clusterv1.DesiredReplicasAnnotation: desiredReplicas, clusterv1.MaxReplicasAnnotation: maxReplicas}, + }, + Spec: clusterv1.MachineSetSpec{ + Selector: metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}, + }, + }, + expected: false, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + g := NewWithT(t) + + g.Expect(ReplicasAnnotationsNeedUpdate(test.machineSet, 10, 20)).To(Equal(test.expected))
+ }) + } +} diff --git a/controllers/machinedeployment_rolling.go b/controllers/machinedeployment_rolling.go index a86bd01af91e..f83287ce6355 100644 --- a/controllers/machinedeployment_rolling.go +++ b/controllers/machinedeployment_rolling.go @@ -23,7 +23,7 @@ import ( "github.com/pkg/errors" "k8s.io/utils/integer" clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha4" - "sigs.k8s.io/cluster-api/controllers/mdutil" + "sigs.k8s.io/cluster-api/controllers/internal/mdutil" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" ) diff --git a/controllers/machinedeployment_rolling_test.go b/controllers/machinedeployment_rolling_test.go index 6aa0332cc2db..cd8238fab815 100644 --- a/controllers/machinedeployment_rolling_test.go +++ b/controllers/machinedeployment_rolling_test.go @@ -26,7 +26,7 @@ import ( "k8s.io/client-go/tools/record" "k8s.io/utils/pointer" clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha4" - "sigs.k8s.io/cluster-api/controllers/mdutil" + "sigs.k8s.io/cluster-api/controllers/internal/mdutil" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" ) diff --git a/controllers/machinedeployment_rollout_ondelete.go b/controllers/machinedeployment_rollout_ondelete.go index 3363414453b1..a1ad2a469c8d 100644 --- a/controllers/machinedeployment_rollout_ondelete.go +++ b/controllers/machinedeployment_rollout_ondelete.go @@ -23,7 +23,7 @@ import ( "github.com/pkg/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha4" - "sigs.k8s.io/cluster-api/controllers/mdutil" + "sigs.k8s.io/cluster-api/controllers/internal/mdutil" "sigs.k8s.io/cluster-api/util/patch" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" diff --git a/controllers/machinedeployment_sync.go b/controllers/machinedeployment_sync.go index c3264ff7a00f..b6ccd91fa3e4 100644 --- a/controllers/machinedeployment_sync.go +++ b/controllers/machinedeployment_sync.go @@ -30,7 +30,7 @@ import ( "k8s.io/apimachinery/pkg/util/sets" "k8s.io/client-go/util/retry" clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha4" - "sigs.k8s.io/cluster-api/controllers/mdutil" + "sigs.k8s.io/cluster-api/controllers/internal/mdutil" "sigs.k8s.io/cluster-api/feature" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/conditions" @@ -147,11 +147,11 @@ func (r *MachineDeploymentReconciler) getNewMachineSet(ctx context.Context, d *c } machineTemplateSpecHash := fmt.Sprintf("%d", hash) newMSTemplate.Labels = mdutil.CloneAndAddLabel(d.Spec.Template.Labels, - mdutil.DefaultMachineDeploymentUniqueLabelKey, machineTemplateSpecHash) + clusterv1.MachineDeploymentUniqueLabel, machineTemplateSpecHash) // Add machineTemplateHash label to selector. 
newMSSelector := mdutil.CloneSelectorAndAddLabel(&d.Spec.Selector, - mdutil.DefaultMachineDeploymentUniqueLabelKey, machineTemplateSpecHash) + clusterv1.MachineDeploymentUniqueLabel, machineTemplateSpecHash) minReadySeconds := int32(0) if d.Spec.MinReadySeconds != nil { diff --git a/controllers/machinedeployment_sync_test.go b/controllers/machinedeployment_sync_test.go index 7f97e355d1ed..ed23fdd13d82 100644 --- a/controllers/machinedeployment_sync_test.go +++ b/controllers/machinedeployment_sync_test.go @@ -28,7 +28,7 @@ import ( "k8s.io/client-go/tools/record" "k8s.io/utils/pointer" clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha4" - "sigs.k8s.io/cluster-api/controllers/mdutil" + "sigs.k8s.io/cluster-api/controllers/internal/mdutil" capierrors "sigs.k8s.io/cluster-api/errors" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" diff --git a/controllers/mdutil/util.go b/controllers/mdutil/util.go index 0388989dc5d3..b6b6d15bd1fc 100644 --- a/controllers/mdutil/util.go +++ b/controllers/mdutil/util.go @@ -15,6 +15,7 @@ limitations under the License. */ // Package mdutil implements MachineDeployment utilities. +// Deprecated: This package is becoming internal and it will be removed in a next release. package mdutil import ( @@ -41,6 +42,7 @@ import ( const ( // DefaultMachineDeploymentUniqueLabelKey is the label applied to Machines // in a MachineDeployment containing the hash of the template. + // Deprecated: This constant will be removed, please use the one in the API package instead. DefaultMachineDeploymentUniqueLabelKey = "machine-template-hash" // FailedMSCreateReason is added in a machine deployment when it cannot create a new machine set. @@ -66,6 +68,7 @@ const ( ) // MachineSetsByCreationTimestamp sorts a list of MachineSet by creation timestamp, using their names as a tie breaker. +// Deprecated: This package is becoming internal and it will be removed in a next release. type MachineSetsByCreationTimestamp []*clusterv1.MachineSet func (o MachineSetsByCreationTimestamp) Len() int { return len(o) } @@ -79,6 +82,7 @@ func (o MachineSetsByCreationTimestamp) Less(i, j int) bool { // MachineSetsBySizeOlder sorts a list of MachineSet by size in descending order, using their creation timestamp or name as a tie breaker. // By using the creation timestamp, this sorts from old to new machine sets. +// Deprecated: This package is becoming internal and it will be removed in a next release. type MachineSetsBySizeOlder []*clusterv1.MachineSet func (o MachineSetsBySizeOlder) Len() int { return len(o) } @@ -92,6 +96,7 @@ func (o MachineSetsBySizeOlder) Less(i, j int) bool { // MachineSetsBySizeNewer sorts a list of MachineSet by size in descending order, using their creation timestamp or name as a tie breaker. // By using the creation timestamp, this sorts from new to old machine sets. +// Deprecated: This package is becoming internal and it will be removed in a next release. type MachineSetsBySizeNewer []*clusterv1.MachineSet func (o MachineSetsBySizeNewer) Len() int { return len(o) } @@ -104,6 +109,7 @@ func (o MachineSetsBySizeNewer) Less(i, j int) bool { } // SetDeploymentRevision updates the revision for a deployment. +// Deprecated: This package is becoming internal and it will be removed in a next release.
func SetDeploymentRevision(deployment *clusterv1.MachineDeployment, revision string) bool { updated := false @@ -119,6 +125,7 @@ func SetDeploymentRevision(deployment *clusterv1.MachineDeployment, revision str } // MaxRevision finds the highest revision in the machine sets. +// Deprecated: This package is becoming internal and it will be removed in a next release. func MaxRevision(allMSs []*clusterv1.MachineSet, logger logr.Logger) int64 { max := int64(0) for _, ms := range allMSs { @@ -134,6 +141,7 @@ func MaxRevision(allMSs []*clusterv1.MachineSet, logger logr.Logger) int64 { } // Revision returns the revision number of the input object. +// Deprecated: This package is becoming internal and it will be removed in a next release. func Revision(obj runtime.Object) (int64, error) { acc, err := meta.Accessor(obj) if err != nil { @@ -210,6 +218,7 @@ func getIntFromAnnotation(ms *clusterv1.MachineSet, annotationKey string, logger // SetNewMachineSetAnnotations sets new machine set's annotations appropriately by updating its revision and // copying required deployment annotations to it; it returns true if machine set's annotation is changed. +// Deprecated: This package is becoming internal and it will be removed in a next release. func SetNewMachineSetAnnotations(deployment *clusterv1.MachineDeployment, newMS *clusterv1.MachineSet, newRevision string, exists bool, logger logr.Logger) bool { logger = logger.WithValues("machineset", newMS.Name) @@ -266,6 +275,7 @@ func SetNewMachineSetAnnotations(deployment *clusterv1.MachineDeployment, newMS // FindOneActiveOrLatest returns the only active or the latest machine set in case there is at most one active // machine set. If there are more than one active machine sets, return nil so machine sets can be scaled down // to the point where there is only one active machine set. +// Deprecated: This package is becoming internal and it will be removed in a next release. func FindOneActiveOrLatest(newMS *clusterv1.MachineSet, oldMSs []*clusterv1.MachineSet) *clusterv1.MachineSet { if newMS == nil && len(oldMSs) == 0 { return nil @@ -289,6 +299,7 @@ func FindOneActiveOrLatest(newMS *clusterv1.MachineSet, oldMSs []*clusterv1.Mach } // SetReplicasAnnotations sets the desiredReplicas and maxReplicas into the annotations. +// Deprecated: This package is becoming internal and it will be removed in a next release. func SetReplicasAnnotations(ms *clusterv1.MachineSet, desiredReplicas, maxReplicas int32) bool { updated := false if ms.Annotations == nil { @@ -308,6 +319,7 @@ func SetReplicasAnnotations(ms *clusterv1.MachineSet, desiredReplicas, maxReplic } // ReplicasAnnotationsNeedUpdate return true if the replicas annotation needs to be updated. +// Deprecated: This package is becoming internal and it will be removed in a next release. func ReplicasAnnotationsNeedUpdate(ms *clusterv1.MachineSet, desiredReplicas, maxReplicas int32) bool { if ms.Annotations == nil { return true @@ -324,6 +336,7 @@ func ReplicasAnnotationsNeedUpdate(ms *clusterv1.MachineSet, desiredReplicas, ma } // MaxUnavailable returns the maximum unavailable machines a rolling deployment can take. +// Deprecated: This package is becoming internal and it will be removed in a next release. 
func MaxUnavailable(deployment clusterv1.MachineDeployment) int32 { if !IsRollingUpdate(&deployment) || *(deployment.Spec.Replicas) == 0 { return int32(0) @@ -337,6 +350,7 @@ func MaxUnavailable(deployment clusterv1.MachineDeployment) int32 { } // MaxSurge returns the maximum surge machines a rolling deployment can take. +// Deprecated: This package is becoming internal and it will be removed in a next release. func MaxSurge(deployment clusterv1.MachineDeployment) int32 { if !IsRollingUpdate(&deployment) { return int32(0) @@ -349,6 +363,7 @@ func MaxSurge(deployment clusterv1.MachineDeployment) int32 { // GetProportion will estimate the proportion for the provided machine set using 1. the current size // of the parent deployment, 2. the replica count that needs be added on the machine sets of the // deployment, and 3. the total replicas added in the machine sets of the deployment so far. +// Deprecated: This package is becoming internal and it will be removed in a next release. func GetProportion(ms *clusterv1.MachineSet, d clusterv1.MachineDeployment, deploymentReplicasToAdd, deploymentReplicasAdded int32, logger logr.Logger) int32 { if ms == nil || *(ms.Spec.Replicas) == 0 || deploymentReplicasToAdd == 0 || deploymentReplicasToAdd == deploymentReplicasAdded { return int32(0) @@ -371,6 +386,7 @@ func GetProportion(ms *clusterv1.MachineSet, d clusterv1.MachineDeployment, depl // getMachineSetFraction estimates the fraction of replicas a machine set can have in // 1. a scaling event during a rollout or 2. when scaling a paused deployment. +// Deprecated: This package is becoming internal and it will be removed in a next release. func getMachineSetFraction(ms clusterv1.MachineSet, d clusterv1.MachineDeployment, logger logr.Logger) int32 { // If we are scaling down to zero then the fraction of this machine set is its whole size (negative) if *(d.Spec.Replicas) == int32(0) { @@ -395,6 +411,7 @@ func getMachineSetFraction(ms clusterv1.MachineSet, d clusterv1.MachineDeploymen // EqualMachineTemplate returns true if two given machineTemplateSpec are equal, // ignoring the diff in value of Labels["machine-template-hash"], and the version from external references. +// Deprecated: This package is becoming internal and it will be removed in a next release. func EqualMachineTemplate(template1, template2 *clusterv1.MachineTemplateSpec) bool { t1Copy := template1.DeepCopy() t2Copy := template2.DeepCopy() @@ -421,6 +438,7 @@ func EqualMachineTemplate(template1, template2 *clusterv1.MachineTemplateSpec) b } // FindNewMachineSet returns the new MS this given deployment targets (the one with the same machine template). +// Deprecated: This package is becoming internal and it will be removed in a next release. func FindNewMachineSet(deployment *clusterv1.MachineDeployment, msList []*clusterv1.MachineSet) *clusterv1.MachineSet { sort.Sort(MachineSetsByCreationTimestamp(msList)) for i := range msList { @@ -440,6 +458,7 @@ func FindNewMachineSet(deployment *clusterv1.MachineDeployment, msList []*cluste // Returns two list of machine sets // - the first contains all old machine sets with all non-zero replicas // - the second contains all old machine sets +// Deprecated: This package is becoming internal and it will be removed in a next release. 
func FindOldMachineSets(deployment *clusterv1.MachineDeployment, msList []*clusterv1.MachineSet) ([]*clusterv1.MachineSet, []*clusterv1.MachineSet) { var requiredMSs []*clusterv1.MachineSet allMSs := make([]*clusterv1.MachineSet, 0, len(msList)) @@ -458,6 +477,7 @@ func FindOldMachineSets(deployment *clusterv1.MachineDeployment, msList []*clust } // GetReplicaCountForMachineSets returns the sum of Replicas of the given machine sets. +// Deprecated: This package is becoming internal and it will be removed in a next release. func GetReplicaCountForMachineSets(machineSets []*clusterv1.MachineSet) int32 { totalReplicas := int32(0) for _, ms := range machineSets { @@ -469,6 +489,7 @@ func GetReplicaCountForMachineSets(machineSets []*clusterv1.MachineSet) int32 { } // GetActualReplicaCountForMachineSets returns the sum of actual replicas of the given machine sets. +// Deprecated: This package is becoming internal and it will be removed in a next release. func GetActualReplicaCountForMachineSets(machineSets []*clusterv1.MachineSet) int32 { totalActualReplicas := int32(0) for _, ms := range machineSets { @@ -485,6 +506,7 @@ func GetActualReplicaCountForMachineSets(machineSets []*clusterv1.MachineSet) in // Use max(spec.Replicas,status.Replicas) to cover the cases that: // 1. Scale up, where spec.Replicas increased but no machine created yet, so spec.Replicas > status.Replicas // 2. Scale down, where spec.Replicas decreased but machine not deleted yet, so spec.Replicas < status.Replicas. +// Deprecated: This package is becoming internal and it will be removed in a next release. func TotalMachineSetsReplicaSum(machineSets []*clusterv1.MachineSet) int32 { totalReplicas := int32(0) for _, ms := range machineSets { @@ -496,6 +518,7 @@ func TotalMachineSetsReplicaSum(machineSets []*clusterv1.MachineSet) int32 { } // GetReadyReplicaCountForMachineSets returns the number of ready machines corresponding to the given machine sets. +// Deprecated: This package is becoming internal and it will be removed in a next release. func GetReadyReplicaCountForMachineSets(machineSets []*clusterv1.MachineSet) int32 { totalReadyReplicas := int32(0) for _, ms := range machineSets { @@ -507,6 +530,7 @@ func GetReadyReplicaCountForMachineSets(machineSets []*clusterv1.MachineSet) int } // GetAvailableReplicaCountForMachineSets returns the number of available machines corresponding to the given machine sets. +// Deprecated: This package is becoming internal and it will be removed in a next release. func GetAvailableReplicaCountForMachineSets(machineSets []*clusterv1.MachineSet) int32 { totalAvailableReplicas := int32(0) for _, ms := range machineSets { @@ -518,12 +542,14 @@ func GetAvailableReplicaCountForMachineSets(machineSets []*clusterv1.MachineSet) } // IsRollingUpdate returns true if the strategy type is a rolling update. +// Deprecated: This package is becoming internal and it will be removed in a next release. func IsRollingUpdate(deployment *clusterv1.MachineDeployment) bool { return deployment.Spec.Strategy.Type == clusterv1.RollingUpdateMachineDeploymentStrategyType } // DeploymentComplete considers a deployment to be complete once all of its desired replicas // are updated and available, and no old machines are running. +// Deprecated: This package is becoming internal and it will be removed in a next release. 
func DeploymentComplete(deployment *clusterv1.MachineDeployment, newStatus *clusterv1.MachineDeploymentStatus) bool { return newStatus.UpdatedReplicas == *(deployment.Spec.Replicas) && newStatus.Replicas == *(deployment.Spec.Replicas) && @@ -536,6 +562,7 @@ func DeploymentComplete(deployment *clusterv1.MachineDeployment, newStatus *clus // 1) The new MS is saturated: newMS's replicas == deployment's replicas // 2) For RollingUpdateStrategy: Max number of machines allowed is reached: deployment's replicas + maxSurge == all MSs' replicas. // 3) For OnDeleteStrategy: Max number of machines allowed is reached: deployment's replicas == all MSs' replicas. +// Deprecated: This package is becoming internal and it will be removed in a next release. func NewMSNewReplicas(deployment *clusterv1.MachineDeployment, allMSs []*clusterv1.MachineSet, newMS *clusterv1.MachineSet) (int32, error) { switch deployment.Spec.Strategy.Type { case clusterv1.RollingUpdateMachineDeploymentStrategyType: @@ -576,6 +603,7 @@ func NewMSNewReplicas(deployment *clusterv1.MachineDeployment, allMSs []*cluster // Both the deployment and the machine set have to believe this machine set can own all of the desired // replicas in the deployment and the annotation helps in achieving that. All machines of the MachineSet // need to be available. +// Deprecated: This package is becoming internal and it will be removed in a next release. func IsSaturated(deployment *clusterv1.MachineDeployment, ms *clusterv1.MachineSet) bool { if ms == nil { return false @@ -599,6 +627,7 @@ func IsSaturated(deployment *clusterv1.MachineDeployment, ms *clusterv1.MachineS // 1 desired, max unavailable 25%, surge 1% - should scale new(+1), then old(-1) // 2 desired, max unavailable 0%, surge 1% - should scale new(+1), then old(-1), then new(+1), then old(-1) // 1 desired, max unavailable 0%, surge 1% - should scale new(+1), then old(-1). +// Deprecated: This package is becoming internal and it will be removed in a next release. func ResolveFenceposts(maxSurge, maxUnavailable *intstrutil.IntOrString, desired int32) (int32, int32, error) { surge, err := intstrutil.GetScaledValueFromIntOrPercent(maxSurge, int(desired), true) if err != nil { @@ -621,6 +650,7 @@ func ResolveFenceposts(maxSurge, maxUnavailable *intstrutil.IntOrString, desired } // FilterActiveMachineSets returns machine sets that have (or at least ought to have) machines. +// Deprecated: This package is becoming internal and it will be removed in a next release. func FilterActiveMachineSets(machineSets []*clusterv1.MachineSet) []*clusterv1.MachineSet { activeFilter := func(ms *clusterv1.MachineSet) bool { return ms != nil && ms.Spec.Replicas != nil && *(ms.Spec.Replicas) > 0 @@ -631,6 +661,7 @@ func FilterActiveMachineSets(machineSets []*clusterv1.MachineSet) []*clusterv1.M type filterMS func(ms *clusterv1.MachineSet) bool // FilterMachineSets returns machine sets that are filtered by filterFn (all returned ones should match filterFn). +// Deprecated: This package is becoming internal and it will be removed in a next release. func FilterMachineSets(mSes []*clusterv1.MachineSet, filterFn filterMS) []*clusterv1.MachineSet { var filtered []*clusterv1.MachineSet for i := range mSes { @@ -643,6 +674,7 @@ func FilterMachineSets(mSes []*clusterv1.MachineSet, filterFn filterMS) []*clust // CloneAndAddLabel clones the given map and returns a new map with the given key and value added. // Returns the given map, if labelKey is empty. 
+// Deprecated: This package is becoming internal and it will be removed in a next release. func CloneAndAddLabel(labels map[string]string, labelKey, labelValue string) map[string]string { if labelKey == "" { // Don't need to add a label. @@ -659,6 +691,7 @@ func CloneAndAddLabel(labels map[string]string, labelKey, labelValue string) map // CloneSelectorAndAddLabel clones the given selector and returns a new selector with the given key and value added. // Returns the given selector, if labelKey is empty. +// Deprecated: This package is becoming internal and it will be removed in a next release. func CloneSelectorAndAddLabel(selector *metav1.LabelSelector, labelKey, labelValue string) *metav1.LabelSelector { if labelKey == "" { // Don't need to add a label. @@ -717,6 +750,7 @@ func DeepHashObject(hasher hash.Hash, objectToWrite interface{}) { // SpewHashObject writes specified object to hash using the spew library // which follows pointers and prints actual values of the nested objects // ensuring the hash does not change when a pointer changes. +// Deprecated: This package is becoming internal and it will be removed in a next release. func SpewHashObject(hasher hash.Hash, objectToWrite interface{}) error { hasher.Reset() printer := spew.ConfigState{ @@ -741,6 +775,7 @@ func ComputeHash(template *clusterv1.MachineTemplateSpec) uint32 { } // ComputeSpewHash computes the hash of a MachineTemplateSpec using the spew library. +// Deprecated: This package is becoming internal and it will be removed in a next release. func ComputeSpewHash(template *clusterv1.MachineTemplateSpec) (uint32, error) { machineTemplateSpecHasher := fnv.New32a() if err := SpewHashObject(machineTemplateSpecHasher, *template); err != nil { @@ -751,6 +786,7 @@ func ComputeSpewHash(template *clusterv1.MachineTemplateSpec) (uint32, error) { // GetDeletingMachineCount gets the number of machines that are in the process of being deleted // in a machineList. +// Deprecated: This package is becoming internal and it will be removed in a next release. func GetDeletingMachineCount(machineList *clusterv1.MachineList) int32 { var deletingMachineCount int32 for _, machine := range machineList.Items { diff --git a/controllers/topology/internal/scope/state.go b/controllers/topology/internal/scope/state.go index 123b9539a6e8..fe9978cce252 100644 --- a/controllers/topology/internal/scope/state.go +++ b/controllers/topology/internal/scope/state.go @@ -19,7 +19,7 @@ package scope import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha4" - "sigs.k8s.io/cluster-api/controllers/mdutil" + "sigs.k8s.io/cluster-api/controllers/internal/mdutil" ) // ClusterState holds all the objects representing the state of a managed Cluster topology.