From 1fb525fcdbcc04d25b944eda6884bd49d9404dad Mon Sep 17 00:00:00 2001 From: Enxebre Date: Fri, 19 Oct 2018 09:36:16 +0200 Subject: [PATCH] revendor for cluster api 2d88aef --- Gopkg.lock | 5 +- vendor/sigs.k8s.io/cluster-api/Makefile | 4 +- .../clusterdeployer/clusterdeployer.go | 14 +-- .../cluster_v1alpha1_machinedeployment.yaml | 4 +- .../pkg/controller/machine/controller.go | 6 ++ .../controller/machinedeployment/rolling.go | 61 ++++++------- .../pkg/controller/machinedeployment/sync.go | 87 +++++++++++-------- 7 files changed, 96 insertions(+), 85 deletions(-) diff --git a/Gopkg.lock b/Gopkg.lock index c73fbcc6f6..9f5de65f8f 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -947,7 +947,7 @@ [[projects]] branch = "master" - digest = "1:5e1957c10fbc2d3663ad0c9069c1daf31b077447b2d72ac5376ac854507b6d42" + digest = "1:83227f5bf2052c6d0e58b550a33e4bbd83acd78fb5ea772f30f3a2af4e3a31fd" name = "sigs.k8s.io/cluster-api" packages = [ "pkg/apis", @@ -961,7 +961,7 @@ "pkg/util", ] pruneopts = "T" - revision = "19551f1e342dfef3c73657de8cd7db54594e95c3" + revision = "f80969d1a60cc19ecce34bb9f8db4acda369a11d" [[projects]] digest = "1:4c3be496e0e0977b54e265fea184b57e9928fed12a1a14687c04b5d42e044abc" @@ -1050,7 +1050,6 @@ "github.com/prometheus/common/log", "github.com/sirupsen/logrus", "github.com/spf13/cobra", - "github.com/spf13/pflag", "github.com/stretchr/testify/assert", "golang.org/x/net/context", "k8s.io/api/core/v1", diff --git a/vendor/sigs.k8s.io/cluster-api/Makefile b/vendor/sigs.k8s.io/cluster-api/Makefile index 34f654204f..257bc62d4c 100644 --- a/vendor/sigs.k8s.io/cluster-api/Makefile +++ b/vendor/sigs.k8s.io/cluster-api/Makefile @@ -43,8 +43,8 @@ manifests: go run vendor/sigs.k8s.io/controller-tools/cmd/controller-gen/main.go all @# Kubebuilder CRD generation can't handle intstr.IntOrString properly: @# https://github.com/kubernetes-sigs/kubebuilder/issues/442 - sed -i '/maxSurge:/{n;d}' config/crds/cluster_v1alpha1_machinedeployment.yaml - sed -i '/maxUnavailable:/{n;d}' config/crds/cluster_v1alpha1_machinedeployment.yaml + sed -i -e 's/maxSurge:/maxSurge: {}/g' -e '/maxSurge:/{n;d};' config/crds/cluster_v1alpha1_machinedeployment.yaml + sed -i -e 's/maxUnavailable:/maxUnavailable: {}/g' -e '/maxUnavailable:/{n;d};' config/crds/cluster_v1alpha1_machinedeployment.yaml # Run go fmt against code fmt: diff --git a/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/clusterdeployer/clusterdeployer.go b/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/clusterdeployer/clusterdeployer.go index f4cd4c3df5..cdea9083f6 100644 --- a/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/clusterdeployer/clusterdeployer.go +++ b/vendor/sigs.k8s.io/cluster-api/cmd/clusterctl/clusterdeployer/clusterdeployer.go @@ -137,6 +137,13 @@ func (d *ClusterDeployer) Create(cluster *clusterv1.Cluster, machines []*cluster } defer closeClient(targetClient, "target") + if d.addonComponents != "" { + glog.Info("Creating addons in target cluster.") + if err := targetClient.Apply(d.addonComponents); err != nil { + return fmt.Errorf("unable to apply addons: %v", err) + } + } + glog.Info("Applying Cluster API stack to target cluster") if err := d.applyClusterAPIStackWithPivoting(targetClient, bootstrapClient, cluster.Namespace); err != nil { return fmt.Errorf("unable to apply cluster api stack to target cluster: %v", err) @@ -165,13 +172,6 @@ func (d *ClusterDeployer) Create(cluster *clusterv1.Cluster, machines []*cluster return fmt.Errorf("unable to create node machines: %v", err) } - if d.addonComponents != "" { - glog.Info("Creating addons 
in target cluster.") - if err := targetClient.Apply(d.addonComponents); err != nil { - return fmt.Errorf("unable to apply addons: %v", err) - } - } - glog.Infof("Done provisioning cluster. You can now access your cluster with kubectl --kubeconfig %v", kubeconfigOutput) return nil diff --git a/vendor/sigs.k8s.io/cluster-api/config/crds/cluster_v1alpha1_machinedeployment.yaml b/vendor/sigs.k8s.io/cluster-api/config/crds/cluster_v1alpha1_machinedeployment.yaml index 36f6e52361..bc960441de 100644 --- a/vendor/sigs.k8s.io/cluster-api/config/crds/cluster_v1alpha1_machinedeployment.yaml +++ b/vendor/sigs.k8s.io/cluster-api/config/crds/cluster_v1alpha1_machinedeployment.yaml @@ -44,8 +44,8 @@ spec: properties: rollingUpdate: properties: - maxSurge: - maxUnavailable: + maxSurge: {} + maxUnavailable: {} type: object type: type: string diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/controller/machine/controller.go b/vendor/sigs.k8s.io/cluster-api/pkg/controller/machine/controller.go index c8dd6b31b6..0c9d3527e9 100644 --- a/vendor/sigs.k8s.io/cluster-api/pkg/controller/machine/controller.go +++ b/vendor/sigs.k8s.io/cluster-api/pkg/controller/machine/controller.go @@ -167,12 +167,18 @@ func (r *ReconcileMachine) Reconcile(request reconcile.Request) (reconcile.Resul glog.Infof("Actuator returned requeue-after error: %v", requeueErr) return reconcile.Result{Requeue: true, RequeueAfter: requeueErr.RequeueAfter}, nil } + return reconcile.Result{}, err } + return reconcile.Result{}, nil } // Machine resource created. Machine does not yet exist. glog.Infof("Reconciling machine object %v triggers idempotent create.", m.ObjectMeta.Name) if err := r.create(m); err != nil { glog.Warningf("unable to create machine %v: %v", name, err) + if requeueErr, ok := err.(*controllerError.RequeueAfterError); ok { + glog.Infof("Actuator returned requeue-after error: %v", requeueErr) + return reconcile.Result{Requeue: true, RequeueAfter: requeueErr.RequeueAfter}, nil + } return reconcile.Result{}, err } return reconcile.Result{}, nil diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/controller/machinedeployment/rolling.go b/vendor/sigs.k8s.io/cluster-api/pkg/controller/machinedeployment/rolling.go index bad88729ed..417f298643 100644 --- a/vendor/sigs.k8s.io/cluster-api/pkg/controller/machinedeployment/rolling.go +++ b/vendor/sigs.k8s.io/cluster-api/pkg/controller/machinedeployment/rolling.go @@ -38,23 +38,19 @@ func (r *ReconcileMachineDeployment) rolloutRolling(d *v1alpha1.MachineDeploymen allMSs := append(oldMSs, newMS) // Scale up, if we can. - scaledUp, err := r.reconcileNewMachineSet(allMSs, newMS, d) - if err != nil { + if err := r.reconcileNewMachineSet(allMSs, newMS, d); err != nil { return err } - if scaledUp { - // TODO: update deployment status for deployment progress - return nil + if err := r.syncDeploymentStatus(allMSs, newMS, d); err != nil { + return err } // Scale down, if we can. 
- scaledDown, err := r.reconcileOldMachineSets(allMSs, dutil.FilterActiveMachineSets(oldMSs), newMS, d) - if err != nil { + if err := r.reconcileOldMachineSets(allMSs, oldMSs, newMS, d); err != nil { return err } - if scaledDown { - // TODO: update deployment status for deployment progress - return nil + if err := r.syncDeploymentStatus(allMSs, newMS, d); err != nil { + return err } if dutil.DeploymentComplete(d, &d.Status) { @@ -63,47 +59,46 @@ func (r *ReconcileMachineDeployment) rolloutRolling(d *v1alpha1.MachineDeploymen } } - // TODO: update deployment status for deployment progress return nil } -func (r *ReconcileMachineDeployment) reconcileNewMachineSet(allMSs []*v1alpha1.MachineSet, newMS *v1alpha1.MachineSet, deployment *v1alpha1.MachineDeployment) (bool, error) { +func (r *ReconcileMachineDeployment) reconcileNewMachineSet(allMSs []*v1alpha1.MachineSet, newMS *v1alpha1.MachineSet, deployment *v1alpha1.MachineDeployment) error { if deployment.Spec.Replicas == nil { - return false, fmt.Errorf("spec replicas for deployment set %v is nil, this is unexpected", deployment.Name) + return fmt.Errorf("spec replicas for deployment set %v is nil, this is unexpected", deployment.Name) } if newMS.Spec.Replicas == nil { - return false, fmt.Errorf("spec replicas for machine set %v is nil, this is unexpected", newMS.Name) + return fmt.Errorf("spec replicas for machine set %v is nil, this is unexpected", newMS.Name) } if *(newMS.Spec.Replicas) == *(deployment.Spec.Replicas) { // Scaling not required. - return false, nil + return nil } if *(newMS.Spec.Replicas) > *(deployment.Spec.Replicas) { // Scale down. - scaled, _, err := r.scaleMachineSet(newMS, *(deployment.Spec.Replicas), deployment) - return scaled, err + _, err := r.scaleMachineSet(newMS, *(deployment.Spec.Replicas), deployment) + return err } newReplicasCount, err := dutil.NewMSNewReplicas(deployment, allMSs, newMS) if err != nil { - return false, err + return err } - scaled, _, err := r.scaleMachineSet(newMS, newReplicasCount, deployment) - return scaled, err + _, err = r.scaleMachineSet(newMS, newReplicasCount, deployment) + return err } -func (r *ReconcileMachineDeployment) reconcileOldMachineSets(allMSs []*v1alpha1.MachineSet, oldMSs []*v1alpha1.MachineSet, newMS *v1alpha1.MachineSet, deployment *v1alpha1.MachineDeployment) (bool, error) { +func (r *ReconcileMachineDeployment) reconcileOldMachineSets(allMSs []*v1alpha1.MachineSet, oldMSs []*v1alpha1.MachineSet, newMS *v1alpha1.MachineSet, deployment *v1alpha1.MachineDeployment) error { if deployment.Spec.Replicas == nil { - return false, fmt.Errorf("spec replicas for deployment set %v is nil, this is unexpected", deployment.Name) + return fmt.Errorf("spec replicas for deployment set %v is nil, this is unexpected", deployment.Name) } if newMS.Spec.Replicas == nil { - return false, fmt.Errorf("spec replicas for machine set %v is nil, this is unexpected", newMS.Name) + return fmt.Errorf("spec replicas for machine set %v is nil, this is unexpected", newMS.Name) } oldMachinesCount := dutil.GetReplicaCountForMachineSets(oldMSs) if oldMachinesCount == 0 { // Can't scale down further - return false, nil + return nil } allMachinesCount := dutil.GetReplicaCountForMachineSets(allMSs) @@ -144,14 +139,14 @@ func (r *ReconcileMachineDeployment) reconcileOldMachineSets(allMSs []*v1alpha1. 
newMSUnavailableMachineCount := *(newMS.Spec.Replicas) - newMS.Status.AvailableReplicas
 	maxScaledDown := allMachinesCount - minAvailable - newMSUnavailableMachineCount
 	if maxScaledDown <= 0 {
-		return false, nil
+		return nil
 	}
 
 	// Clean up unhealthy replicas first, otherwise unhealthy replicas will block deployment
 	// and cause timeout. See https://github.com/kubernetes/kubernetes/issues/16737
 	oldMSs, cleanupCount, err := r.cleanupUnhealthyReplicas(oldMSs, deployment, maxScaledDown)
 	if err != nil {
-		return false, nil
+		return err
 	}
 	glog.V(4).Infof("Cleaned up unhealthy replicas from old MSes by %d", cleanupCount)
 
@@ -159,12 +154,11 @@ func (r *ReconcileMachineDeployment) reconcileOldMachineSets(allMSs []*v1alpha1.
 	allMSs = append(oldMSs, newMS)
 	scaledDownCount, err := r.scaleDownOldMachineSetsForRollingUpdate(allMSs, oldMSs, deployment)
 	if err != nil {
-		return false, nil
+		return err
 	}
 	glog.V(4).Infof("Scaled down old MSes of deployment %s by %d", deployment.Name, scaledDownCount)
 
-	totalScaledDown := cleanupCount + scaledDownCount
-	return totalScaledDown > 0, nil
+	return nil
 }
 
 // cleanupUnhealthyReplicas will scale down old machine sets with unhealthy replicas, so that all unhealthy replicas will be deleted.
@@ -174,7 +168,7 @@ func (r *ReconcileMachineDeployment) cleanupUnhealthyReplicas(oldMSs []*v1alpha1
 	// such that not-ready < ready, unscheduled < scheduled, and pending < running. This ensures that unhealthy replicas will
 	// been deleted first and won't increase unavailability.
 	totalScaledDown := int32(0)
-	for i, targetMS := range oldMSs {
+	for _, targetMS := range oldMSs {
 		if targetMS.Spec.Replicas == nil {
 			return nil, 0, fmt.Errorf("spec replicas for machine set %v is nil, this is unexpected", targetMS.Name)
 		}
@@ -202,12 +196,11 @@ func (r *ReconcileMachineDeployment) cleanupUnhealthyReplicas(oldMSs []*v1alpha1
 		if newReplicasCount > oldMSReplicas {
 			return nil, 0, fmt.Errorf("when cleaning up unhealthy replicas, got invalid request to scale down %s/%s %d -> %d", targetMS.Namespace, targetMS.Name, oldMSReplicas, newReplicasCount)
 		}
-		_, updatedOldMS, err := r.scaleMachineSet(targetMS, newReplicasCount, deployment)
+		_, err := r.scaleMachineSet(targetMS, newReplicasCount, deployment)
 		if err != nil {
 			return nil, totalScaledDown, err
 		}
 		totalScaledDown += scaledDownCount
-		oldMSs[i] = updatedOldMS
 	}
 	return oldMSs, totalScaledDown, nil
 }
@@ -252,9 +245,9 @@ func (r *ReconcileMachineDeployment) scaleDownOldMachineSetsForRollingUpdate(all
 		scaleDownCount := int32(integer.Int32Min(*(targetMS.Spec.Replicas), totalScaleDownCount-totalScaledDown))
 		newReplicasCount := *(targetMS.Spec.Replicas) - scaleDownCount
 		if newReplicasCount > *(targetMS.Spec.Replicas) {
-			return 0, fmt.Errorf("when scaling down old MS, got invalid request to scale down %s/%s %d -> %d", targetMS.Namespace, targetMS.Name, *(targetMS.Spec.Replicas), newReplicasCount)
+			return totalScaledDown, fmt.Errorf("when scaling down old MS, got invalid request to scale down %s/%s %d -> %d", targetMS.Namespace, targetMS.Name, *(targetMS.Spec.Replicas), newReplicasCount)
 		}
-		_, _, err := r.scaleMachineSet(targetMS, newReplicasCount, deployment)
+		_, err := r.scaleMachineSet(targetMS, newReplicasCount, deployment)
 		if err != nil {
 			return totalScaledDown, err
 		}
diff --git a/vendor/sigs.k8s.io/cluster-api/pkg/controller/machinedeployment/sync.go b/vendor/sigs.k8s.io/cluster-api/pkg/controller/machinedeployment/sync.go
index fb35978f5c..515e8f9863 100644
--- a/vendor/sigs.k8s.io/cluster-api/pkg/controller/machinedeployment/sync.go
+++ b/vendor/sigs.k8s.io/cluster-api/pkg/controller/machinedeployment/sync.go
@@ -24,10 +24,13 @@ import (
 	"strconv"
 
 	"github.com/golang/glog"
+
+	"k8s.io/apimachinery/pkg/api/equality"
 	"k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
 	apirand "k8s.io/apimachinery/pkg/util/rand"
+	"k8s.io/client-go/util/retry"
 
 	clusterv1alpha1 "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1"
 	"sigs.k8s.io/controller-runtime/pkg/client"
@@ -108,16 +111,10 @@ func (r *ReconcileMachineDeployment) getNewMachineSet(d *clusterv1alpha1.Machine
 		}
 
 		// Apply revision annotation from existingNewMS if it is missing from the deployment.
-		// needsUpdate is false if there is not annotation or is already present.
-		needsUpdate := dutil.SetDeploymentRevision(d, msCopy.Annotations[dutil.RevisionAnnotation])
-
-		if needsUpdate {
-			err := r.Status().Update(context.Background(), d)
-			if err != nil {
-				return nil, err
-			}
-		}
-		return msCopy, nil
+		err := r.updateMachineDeployment(d, func(innerDeployment *clusterv1alpha1.MachineDeployment) {
+			dutil.SetDeploymentRevision(innerDeployment, msCopy.Annotations[dutil.RevisionAnnotation])
+		})
+		return msCopy, err
 	}
 
 	if !createIfNotExisted {
@@ -190,13 +187,12 @@ func (r *ReconcileMachineDeployment) getNewMachineSet(d *clusterv1alpha1.Machine
 		return nil, err
 	}
 
-	needsUpdate := dutil.SetDeploymentRevision(d, newRevision)
 	if !alreadyExists {
 		glog.V(4).Infof("Created new machine set %q", createdMS.Name)
 	}
-	if needsUpdate {
-		err = r.Status().Update(context.Background(), d)
-	}
+	err = r.updateMachineDeployment(d, func(innerDeployment *clusterv1alpha1.MachineDeployment) {
+		dutil.SetDeploymentRevision(innerDeployment, newRevision)
+	})
 	return createdMS, err
 }
 
@@ -218,7 +214,7 @@ func (r *ReconcileMachineDeployment) scale(deployment *clusterv1alpha1.MachineDe
 		if *(activeOrLatest.Spec.Replicas) == *(deployment.Spec.Replicas) {
 			return nil
 		}
-		_, _, err := r.scaleMachineSet(activeOrLatest, *(deployment.Spec.Replicas), deployment)
+		_, err := r.scaleMachineSet(activeOrLatest, *(deployment.Spec.Replicas), deployment)
 		return err
 	}
 
@@ -226,7 +222,7 @@ func (r *ReconcileMachineDeployment) scale(deployment *clusterv1alpha1.MachineDe
 	// This case handles machine set adoption during a saturated new machine set.
 	if dutil.IsSaturated(deployment, newMS) {
 		for _, old := range dutil.FilterActiveMachineSets(oldMSs) {
-			if _, _, err := r.scaleMachineSet(old, 0, deployment); err != nil {
+			if _, err := r.scaleMachineSet(old, 0, deployment); err != nil {
 				return err
 			}
 		}
@@ -304,7 +300,7 @@ func (r *ReconcileMachineDeployment) scale(deployment *clusterv1alpha1.MachineDe
 		}
 
 		// TODO: Use transactions when we have them.
-		if _, _, err := r.scaleMachineSetOperation(ms, nameToSize[ms.Name], deployment, scalingOperation); err != nil {
+		if _, err := r.scaleMachineSetOperation(ms, nameToSize[ms.Name], deployment, scalingOperation); err != nil {
 			// Return as soon as we fail, the deployment is requeued
 			return err
 		}
@@ -321,10 +317,8 @@ func (r *ReconcileMachineDeployment) syncDeploymentStatus(allMSs []*clusterv1alp
 		return nil
 	}
 
-	newDeployment := d
-	newDeployment.Status = newStatus
-	err := r.Status().Update(context.Background(), newDeployment)
-	return err
+	d.Status = newStatus
+	return r.Status().Update(context.Background(), d)
 }
 
 // calculateStatus calculates the latest status for the provided deployment by looking into the provided machine sets.
@@ -351,13 +345,13 @@ func calculateStatus(allMSs []*clusterv1alpha1.MachineSet, newMS *clusterv1alpha
 	return status
 }
 
-func (r *ReconcileMachineDeployment) scaleMachineSet(ms *clusterv1alpha1.MachineSet, newScale int32, deployment *clusterv1alpha1.MachineDeployment) (bool, *clusterv1alpha1.MachineSet, error) {
+func (r *ReconcileMachineDeployment) scaleMachineSet(ms *clusterv1alpha1.MachineSet, newScale int32, deployment *clusterv1alpha1.MachineDeployment) (bool, error) {
 	if ms.Spec.Replicas == nil {
-		return false, nil, fmt.Errorf("spec replicas for machine set %v is nil, this is unexpected", ms.Name)
+		return false, fmt.Errorf("spec replicas for machine set %v is nil, this is unexpected", ms.Name)
 	}
 	// No need to scale
 	if *(ms.Spec.Replicas) == newScale {
-		return false, ms, nil
+		return false, nil
 	}
 	var scalingOperation string
 	if *(ms.Spec.Replicas) < newScale {
@@ -366,13 +360,13 @@ func (r *ReconcileMachineDeployment) scaleMachineSet(ms *clusterv1alpha1.Machine
 		scalingOperation = "down"
 	}
 
-	scaled, newMS, err := r.scaleMachineSetOperation(ms, newScale, deployment, scalingOperation)
-	return scaled, newMS, err
+	scaled, err := r.scaleMachineSetOperation(ms, newScale, deployment, scalingOperation)
+	return scaled, err
 }
 
-func (r *ReconcileMachineDeployment) scaleMachineSetOperation(ms *clusterv1alpha1.MachineSet, newScale int32, deployment *clusterv1alpha1.MachineDeployment, scaleOperation string) (bool, *clusterv1alpha1.MachineSet, error) {
+func (r *ReconcileMachineDeployment) scaleMachineSetOperation(ms *clusterv1alpha1.MachineSet, newScale int32, deployment *clusterv1alpha1.MachineDeployment, scaleOperation string) (bool, error) {
 	if ms.Spec.Replicas == nil {
-		return false, nil, fmt.Errorf("spec replicas for machine set %v is nil, this is unexpected", ms.Name)
+		return false, fmt.Errorf("spec replicas for machine set %v is nil, this is unexpected", ms.Name)
 	}
 
 	sizeNeedsUpdate := *(ms.Spec.Replicas) != newScale
@@ -381,19 +375,14 @@ func (r *ReconcileMachineDeployment) scaleMachineSetOperation(ms *clusterv1alpha
 	scaled := false
 	var err error
 	if sizeNeedsUpdate || annotationsNeedUpdate {
-		// TODO(droot): we don't need deepcopy with new client, so revisit this
-		// to clean up
-		msCopy := ms.DeepCopy()
-		*(msCopy.Spec.Replicas) = newScale
-		dutil.SetReplicasAnnotations(msCopy, *(deployment.Spec.Replicas), *(deployment.Spec.Replicas)+dutil.MaxSurge(*deployment))
-		// ms, err = dc.machineClient.ClusterV1alpha1().MachineSets(msCopy.Namespace).Update(msCopy)
-		err = r.Update(context.Background(), msCopy)
-		ms = msCopy
+		*(ms.Spec.Replicas) = newScale
+		dutil.SetReplicasAnnotations(ms, *(deployment.Spec.Replicas), *(deployment.Spec.Replicas)+dutil.MaxSurge(*deployment))
+		err = r.Update(context.Background(), ms)
 		if err == nil && sizeNeedsUpdate {
 			scaled = true
 		}
 	}
-	return scaled, ms, err
+	return scaled, err
 }
 
 // cleanupDeployment is responsible for cleaning up a deployment ie. retains all but the latest N old machine sets
@@ -463,3 +452,27 @@ func (r *ReconcileMachineDeployment) isScalingEvent(d *clusterv1alpha1.MachineDe
 	}
 	return false, nil
 }
+
+func (r *ReconcileMachineDeployment) updateMachineDeployment(d *clusterv1alpha1.MachineDeployment, modify func(*clusterv1alpha1.MachineDeployment)) error {
+	return updateMachineDeployment(r.Client, d, modify)
+}
+
+// We keep this as a standalone variant so that it can also be used from the tests.
+func updateMachineDeployment(c client.Client, d *clusterv1alpha1.MachineDeployment, modify func(*clusterv1alpha1.MachineDeployment)) error {
+	dCopy := d.DeepCopy()
+	modify(dCopy)
+	if equality.Semantic.DeepEqual(dCopy, d) {
+		return nil
+	}
+	err := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
+		// Get the latest version from the API.
+		if err := c.Get(context.Background(), types.NamespacedName{Namespace: d.Namespace, Name: d.Name}, d); err != nil {
+			return err
+		}
+		// Apply the modifications.
+		modify(d)
+		// Update the MachineDeployment.
+		return c.Update(context.Background(), d)
+	})
+	return err
+}
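
For reference, a minimal sketch of how the standalone updateMachineDeployment variant can be exercised from a test in the same package. This sketch is illustrative only and is not part of the vendored patch: it assumes the controller-runtime fake client (fake.NewFakeClient) and that the cluster-api v1alpha1 AddToScheme helper can register the types into the client-go scheme; the test name, object name/namespace, and annotation key are made-up placeholders.

package machinedeployment

import (
	"testing"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/scheme"
	clusterv1alpha1 "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1"
	"sigs.k8s.io/controller-runtime/pkg/client/fake"
)

func TestUpdateMachineDeployment(t *testing.T) {
	// Register the cluster-api types so the fake client can round-trip them.
	if err := clusterv1alpha1.AddToScheme(scheme.Scheme); err != nil {
		t.Fatal(err)
	}

	d := &clusterv1alpha1.MachineDeployment{
		ObjectMeta: metav1.ObjectMeta{Namespace: "default", Name: "test-md"},
	}
	c := fake.NewFakeClient(d)

	// A modification that changes nothing is detected by the DeepEqual check
	// and must return before any Update call is made.
	if err := updateMachineDeployment(c, d, func(md *clusterv1alpha1.MachineDeployment) {}); err != nil {
		t.Fatalf("no-op modify returned error: %v", err)
	}

	// A real modification goes through the RetryOnConflict path:
	// Get the latest object, re-apply the modify func, then Update.
	err := updateMachineDeployment(c, d, func(md *clusterv1alpha1.MachineDeployment) {
		if md.Annotations == nil {
			md.Annotations = map[string]string{}
		}
		md.Annotations["example.com/touched"] = "true" // placeholder annotation key
	})
	if err != nil {
		t.Fatalf("modify returned error: %v", err)
	}
}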