diff --git a/internal/controllers/cluster/cluster_controller.go b/internal/controllers/cluster/cluster_controller.go index 78c5dc5d90e5..c4db5be3b2c3 100644 --- a/internal/controllers/cluster/cluster_controller.go +++ b/internal/controllers/cluster/cluster_controller.go @@ -19,7 +19,6 @@ package cluster import ( "context" "fmt" - "path" "strings" "time" @@ -28,6 +27,7 @@ import ( apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" kerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/client-go/tools/record" @@ -118,7 +118,9 @@ func (r *Reconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, opt return nil } -func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Result, reterr error) { +func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (retRes ctrl.Result, reterr error) { + log := ctrl.LoggerFrom(ctx) + // Fetch the Cluster instance. cluster := &clusterv1.Cluster{} if err := r.Client.Get(ctx, req.NamespacedName, cluster); err != nil { @@ -141,6 +143,10 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Re return ctrl.Result{}, err } + s := &scope{ + cluster: cluster, + } + // Initialize the patch helper. patchHelper, err := patch.NewHelper(cluster, r.Client) if err != nil { @@ -160,6 +166,10 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Re if err := patchCluster(ctx, patchHelper, cluster, patchOpts...); err != nil { reterr = kerrors.NewAggregate([]error{reterr, err}) } + + if reterr != nil { + retRes = ctrl.Result{} + } }() lastProbeSuccessTime := r.ClusterCache.GetLastProbeSuccessTimestamp(ctx, client.ObjectKeyFromObject(cluster)) @@ -185,13 +195,37 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Re }) } + alwaysReconcile := []clusterReconcileFunc{ + r.reconcileInfrastructure, + r.reconcileControlPlane, + r.getDescendants, + } + // Handle deletion reconciliation loop. if !cluster.ObjectMeta.DeletionTimestamp.IsZero() { - return r.reconcileDelete(ctx, cluster) + reconcileDelete := append( + alwaysReconcile, + r.reconcileDelete, + ) + + return doReconcile(ctx, reconcileDelete, s) } // Handle normal reconciliation loop. - return r.reconcile(ctx, cluster) + if cluster.Spec.Topology != nil { + if cluster.Spec.ControlPlaneRef == nil || cluster.Spec.InfrastructureRef == nil { + // TODO: add a condition to surface this scenario + log.Info("Waiting for the topology to be generated") + return ctrl.Result{}, nil + } + } + + reconcileNormal := append( + alwaysReconcile, + r.reconcileKubeconfig, + r.reconcileControlPlaneInitialized, + ) + return doReconcile(ctx, reconcileNormal, s) } func patchCluster(ctx context.Context, patchHelper *patch.Helper, cluster *clusterv1.Cluster, options ...patch.Option) error { @@ -216,30 +250,14 @@ func patchCluster(ctx context.Context, patchHelper *patch.Helper, cluster *clust return patchHelper.Patch(ctx, cluster, options...) } -// reconcile handles cluster reconciliation. 
-func (r *Reconciler) reconcile(ctx context.Context, cluster *clusterv1.Cluster) (ctrl.Result, error) {
-	log := ctrl.LoggerFrom(ctx)
-
-	if cluster.Spec.Topology != nil {
-		if cluster.Spec.ControlPlaneRef == nil || cluster.Spec.InfrastructureRef == nil {
-			// TODO: add a condition to surface this scenario
-			log.Info("Waiting for the topology to be generated")
-			return ctrl.Result{}, nil
-		}
-	}
-
-	phases := []func(context.Context, *clusterv1.Cluster) (ctrl.Result, error){
-		r.reconcileInfrastructure,
-		r.reconcileControlPlane,
-		r.reconcileKubeconfig,
-		r.reconcileControlPlaneInitialized,
-	}
+type clusterReconcileFunc func(context.Context, *scope) (ctrl.Result, error)
 
+func doReconcile(ctx context.Context, phases []clusterReconcileFunc, s *scope) (ctrl.Result, error) {
 	res := ctrl.Result{}
 	errs := []error{}
 	for _, phase := range phases {
 		// Call the inner reconciliation methods.
-		phaseResult, err := phase(ctx, cluster)
+		phaseResult, err := phase(ctx, s)
 		if err != nil {
 			errs = append(errs, err)
 		}
@@ -248,12 +266,45 @@ func (r *Reconciler) reconcile(ctx context.Context, cluster *clusterv1.Cluster)
 		}
 		res = util.LowestNonZeroResult(res, phaseResult)
 	}
-	return res, kerrors.NewAggregate(errs)
+
+	if len(errs) > 0 {
+		return ctrl.Result{}, kerrors.NewAggregate(errs)
+	}
+
+	return res, nil
+}
+
+// scope holds the different objects that are read and used during the reconcile.
+type scope struct {
+	// cluster is the Cluster object being reconciled.
+	// It is set at the beginning of the reconcile function.
+	cluster *clusterv1.Cluster
+
+	// infraCluster is the Infrastructure Cluster object that is referenced by the
+	// Cluster. It is set after reconcileInfrastructure is called.
+	infraCluster *unstructured.Unstructured
+
+	// infraClusterIsNotFound is true if getting the infra cluster object failed with a NotFound error.
+	infraClusterIsNotFound bool
+
+	// controlPlane is the ControlPlane object that is referenced by the
+	// Cluster. It is set after reconcileControlPlane is called.
+	controlPlane *unstructured.Unstructured
+
+	// controlPlaneIsNotFound is true if getting the ControlPlane object failed with a NotFound error.
+	controlPlaneIsNotFound bool
+
+	// descendants is the list of objects related to this Cluster.
+	descendants clusterDescendants
+
+	// getDescendantsSucceeded documents if getDescendants succeeded.
+	getDescendantsSucceeded bool
 }
 
 // reconcileDelete handles cluster deletion.
-func (r *Reconciler) reconcileDelete(ctx context.Context, cluster *clusterv1.Cluster) (reconcile.Result, error) {
+func (r *Reconciler) reconcileDelete(ctx context.Context, s *scope) (reconcile.Result, error) {
 	log := ctrl.LoggerFrom(ctx)
+	cluster := s.cluster
 
 	// If the RuntimeSDK and ClusterTopology flags are enabled, for clusters with managed topologies
 	// only proceed with delete if the cluster is marked as `ok-to-delete`
@@ -263,13 +314,12 @@ func (r *Reconciler) reconcileDelete(ctx context.Context, cluster *clusterv1.Clu
 		}
 	}
 
-	descendants, err := r.listDescendants(ctx, cluster)
-	if err != nil {
-		log.Error(err, "Failed to list descendants")
-		return reconcile.Result{}, err
+	// If getting the descendants failed, no-op.
+	if !s.getDescendantsSucceeded {
+		return reconcile.Result{}, nil
 	}
 
-	children, err := descendants.filterOwnedDescendants(cluster)
+	children, err := s.descendants.filterOwnedDescendants(cluster)
 	if err != nil {
 		log.Error(err, "Failed to extract direct descendants")
 		return reconcile.Result{}, err
 	}
@@ -306,36 +356,35 @@ func (r *Reconciler) reconcileDelete(ctx context.Context, cluster *clusterv1.Clu
 		}
 	}
 
-	if descendantCount := descendants.length(); descendantCount > 0 {
+	if descendantCount := s.descendants.objectsPendingDeleteCount(cluster); descendantCount > 0 {
 		indirect := descendantCount - len(children)
-		log.Info("Cluster still has descendants - need to requeue", "descendants", descendants.descendantNames(), "indirect descendants count", indirect)
+		log.Info("Cluster still has descendants - need to requeue", "descendants", s.descendants.objectsPendingDeleteNames(cluster), "indirect descendants count", indirect)
 		// Requeue so we can check the next time to see if there are still any descendants left.
 		return ctrl.Result{RequeueAfter: deleteRequeueAfter}, nil
 	}
 
 	if cluster.Spec.ControlPlaneRef != nil {
-		obj, err := external.Get(ctx, r.Client, cluster.Spec.ControlPlaneRef, cluster.Namespace)
-		switch {
-		case apierrors.IsNotFound(errors.Cause(err)):
+		if s.controlPlane == nil {
+			if !s.controlPlaneIsNotFound {
+				// In case there was a generic error (different than IsNotFound) in reading the ControlPlane object, do not continue with deletion.
+				// Note: this error surfaces in reconcileControlPlane.
+				return ctrl.Result{}, nil
+			}
 			// All good - the control plane resource has been deleted
 			conditions.MarkFalse(cluster, clusterv1.ControlPlaneReadyCondition, clusterv1.DeletedReason, clusterv1.ConditionSeverityInfo, "")
-		case err != nil:
-			return reconcile.Result{}, errors.Wrapf(err, "failed to get %s %q for Cluster %s/%s",
-				path.Join(cluster.Spec.ControlPlaneRef.APIVersion, cluster.Spec.ControlPlaneRef.Kind),
-				cluster.Spec.ControlPlaneRef.Name, cluster.Namespace, cluster.Name)
-		default:
-			// Report a summary of current status of the control plane object defined for this cluster.
-			conditions.SetMirror(cluster, clusterv1.ControlPlaneReadyCondition,
-				conditions.UnstructuredGetter(obj),
-				conditions.WithFallbackValue(false, clusterv1.DeletingReason, clusterv1.ConditionSeverityInfo, ""),
-			)
-
-			// Issue a deletion request for the control plane object.
-			// Once it's been deleted, the cluster will get processed again.
-			if err := r.Client.Delete(ctx, obj); err != nil {
-				return ctrl.Result{}, errors.Wrapf(err,
-					"failed to delete %v %q for Cluster %q in namespace %q",
-					obj.GroupVersionKind(), obj.GetName(), cluster.Name, cluster.Namespace)
+		}
+
+		if s.controlPlane != nil {
+			if s.controlPlane.GetDeletionTimestamp().IsZero() {
+				// Issue a deletion request for the control plane object.
+				// Once it's been deleted, the cluster will get processed again.
+				if err := r.Client.Delete(ctx, s.controlPlane); err != nil {
+					if !apierrors.IsNotFound(err) {
+						return ctrl.Result{}, errors.Wrapf(err,
+							"failed to delete %v %q for Cluster %q in namespace %q",
+							s.controlPlane.GroupVersionKind().Kind, s.controlPlane.GetName(), cluster.Name, cluster.Namespace)
+					}
+				}
 			}
 
 			// Return here so we don't remove the finalizer yet.
@@ -345,28 +394,27 @@ func (r *Reconciler) reconcileDelete(ctx context.Context, cluster *clusterv1.Clu
 	}
 
 	if cluster.Spec.InfrastructureRef != nil {
-		obj, err := external.Get(ctx, r.Client, cluster.Spec.InfrastructureRef, cluster.Namespace)
-		switch {
-		case apierrors.IsNotFound(errors.Cause(err)):
+		if s.infraCluster == nil {
+			if !s.infraClusterIsNotFound {
+				// In case there was a generic error (different than IsNotFound) in reading the InfraCluster, do not continue with deletion.
+				// Note: this error surfaces in reconcileInfrastructure.
+				return ctrl.Result{}, nil
+			}
 			// All good - the infra resource has been deleted
 			conditions.MarkFalse(cluster, clusterv1.InfrastructureReadyCondition, clusterv1.DeletedReason, clusterv1.ConditionSeverityInfo, "")
-		case err != nil:
-			return ctrl.Result{}, errors.Wrapf(err, "failed to get %s %q for Cluster %s/%s",
-				path.Join(cluster.Spec.InfrastructureRef.APIVersion, cluster.Spec.InfrastructureRef.Kind),
-				cluster.Spec.InfrastructureRef.Name, cluster.Namespace, cluster.Name)
-		default:
-			// Report a summary of current status of the infrastructure object defined for this cluster.
-			conditions.SetMirror(cluster, clusterv1.InfrastructureReadyCondition,
-				conditions.UnstructuredGetter(obj),
-				conditions.WithFallbackValue(false, clusterv1.DeletingReason, clusterv1.ConditionSeverityInfo, ""),
-			)
-
-			// Issue a deletion request for the infrastructure object.
-			// Once it's been deleted, the cluster will get processed again.
-			if err := r.Client.Delete(ctx, obj); err != nil {
-				return ctrl.Result{}, errors.Wrapf(err,
-					"failed to delete %v %q for Cluster %q in namespace %q",
-					obj.GroupVersionKind(), obj.GetName(), cluster.Name, cluster.Namespace)
+		}
+
+		if s.infraCluster != nil {
+			if s.infraCluster.GetDeletionTimestamp().IsZero() {
+				// Issue a deletion request for the infrastructure object.
+				// Once it's been deleted, the cluster will get processed again.
+				if err := r.Client.Delete(ctx, s.infraCluster); err != nil {
+					if !apierrors.IsNotFound(err) {
+						return ctrl.Result{}, errors.Wrapf(err,
+							"failed to delete %v %q for Cluster %q in namespace %q",
+							s.infraCluster.GroupVersionKind().Kind, s.infraCluster.GetName(), cluster.Name, cluster.Namespace)
+					}
+				}
 			}
 
 			// Return here so we don't remove the finalizer yet.
@@ -388,23 +436,33 @@ type clusterDescendants struct {
 	machinePools expv1.MachinePoolList
 }
 
-// length returns the number of descendants.
-func (c *clusterDescendants) length() int {
-	return len(c.machineDeployments.Items) +
+
+// objectsPendingDeleteCount returns the number of descendants pending delete.
+// Note: infrastructure cluster, control plane object and its controlled machines are not included.
+func (c *clusterDescendants) objectsPendingDeleteCount(cluster *clusterv1.Cluster) int {
+	n := len(c.machineDeployments.Items) +
 		len(c.machineSets.Items) +
-		len(c.controlPlaneMachines.Items) +
 		len(c.workerMachines.Items) +
 		len(c.machinePools.Items)
+
+	if cluster.Spec.ControlPlaneRef == nil {
+		n += len(c.controlPlaneMachines.Items)
+	}
+
+	return n
 }
 
-func (c *clusterDescendants) descendantNames() string {
+// objectsPendingDeleteNames returns the names of descendants pending delete.
+// Note: infrastructure cluster, control plane object and its controlled machines are not included.
+func (c *clusterDescendants) objectsPendingDeleteNames(cluster *clusterv1.Cluster) string { descendants := make([]string, 0) - controlPlaneMachineNames := make([]string, len(c.controlPlaneMachines.Items)) - for i, controlPlaneMachine := range c.controlPlaneMachines.Items { - controlPlaneMachineNames[i] = controlPlaneMachine.Name - } - if len(controlPlaneMachineNames) > 0 { - descendants = append(descendants, "Control plane machines: "+strings.Join(controlPlaneMachineNames, ",")) + if cluster.Spec.ControlPlaneRef == nil { + controlPlaneMachineNames := make([]string, len(c.controlPlaneMachines.Items)) + for i, controlPlaneMachine := range c.controlPlaneMachines.Items { + controlPlaneMachineNames[i] = controlPlaneMachine.Name + } + if len(controlPlaneMachineNames) > 0 { + descendants = append(descendants, "Control plane machines: "+strings.Join(controlPlaneMachineNames, ",")) + } } machineDeploymentNames := make([]string, len(c.machineDeployments.Items)) for i, machineDeployment := range c.machineDeployments.Items { @@ -420,13 +478,6 @@ func (c *clusterDescendants) descendantNames() string { if len(machineSetNames) > 0 { descendants = append(descendants, "Machine sets: "+strings.Join(machineSetNames, ",")) } - workerMachineNames := make([]string, len(c.workerMachines.Items)) - for i, workerMachine := range c.workerMachines.Items { - workerMachineNames[i] = workerMachine.Name - } - if len(workerMachineNames) > 0 { - descendants = append(descendants, "Worker machines: "+strings.Join(workerMachineNames, ",")) - } if feature.Gates.Enabled(feature.MachinePool) { machinePoolNames := make([]string, len(c.machinePools.Items)) for i, machinePool := range c.machinePools.Items { @@ -436,12 +487,20 @@ func (c *clusterDescendants) descendantNames() string { descendants = append(descendants, "Machine pools: "+strings.Join(machinePoolNames, ",")) } } - return strings.Join(descendants, ";") + workerMachineNames := make([]string, len(c.workerMachines.Items)) + for i, workerMachine := range c.workerMachines.Items { + workerMachineNames[i] = workerMachine.Name + } + if len(workerMachineNames) > 0 { + descendants = append(descendants, "Worker machines: "+strings.Join(workerMachineNames, ",")) + } + return strings.Join(descendants, "; ") } -// listDescendants returns a list of all MachineDeployments, MachineSets, MachinePools and Machines for the cluster. -func (r *Reconciler) listDescendants(ctx context.Context, cluster *clusterv1.Cluster) (clusterDescendants, error) { +// getDescendants collects all MachineDeployments, MachineSets, MachinePools and Machines for the cluster. 
+func (r *Reconciler) getDescendants(ctx context.Context, s *scope) (reconcile.Result, error) { var descendants clusterDescendants + cluster := s.cluster listOptions := []client.ListOption{ client.InNamespace(cluster.Namespace), @@ -449,39 +508,42 @@ func (r *Reconciler) listDescendants(ctx context.Context, cluster *clusterv1.Clu } if err := r.Client.List(ctx, &descendants.machineDeployments, listOptions...); err != nil { - return descendants, errors.Wrapf(err, "failed to list MachineDeployments for cluster %s/%s", cluster.Namespace, cluster.Name) + return reconcile.Result{}, errors.Wrapf(err, "failed to list MachineDeployments for cluster %s/%s", cluster.Namespace, cluster.Name) } if err := r.Client.List(ctx, &descendants.machineSets, listOptions...); err != nil { - return descendants, errors.Wrapf(err, "failed to list MachineSets for cluster %s/%s", cluster.Namespace, cluster.Name) + return reconcile.Result{}, errors.Wrapf(err, "failed to list MachineSets for cluster %s/%s", cluster.Namespace, cluster.Name) } if feature.Gates.Enabled(feature.MachinePool) { if err := r.Client.List(ctx, &descendants.machinePools, listOptions...); err != nil { - return descendants, errors.Wrapf(err, "failed to list MachinePools for the cluster %s/%s", cluster.Namespace, cluster.Name) + return reconcile.Result{}, errors.Wrapf(err, "failed to list MachinePools for the cluster %s/%s", cluster.Namespace, cluster.Name) } } var machines clusterv1.MachineList if err := r.Client.List(ctx, &machines, listOptions...); err != nil { - return descendants, errors.Wrapf(err, "failed to list Machines for cluster %s/%s", cluster.Namespace, cluster.Name) + return reconcile.Result{}, errors.Wrapf(err, "failed to list Machines for cluster %s/%s", cluster.Namespace, cluster.Name) } - // Split machines into control plane and worker machines so we make sure we delete control plane machines last + // Split machines into control plane and worker machines machineCollection := collections.FromMachineList(&machines) controlPlaneMachines := machineCollection.Filter(collections.ControlPlaneMachines(cluster.Name)) workerMachines := machineCollection.Difference(controlPlaneMachines) + + descendants.controlPlaneMachines = collections.ToMachineList(controlPlaneMachines) descendants.workerMachines = collections.ToMachineList(workerMachines) - // Only count control plane machines as descendants if there is no control plane provider. - if cluster.Spec.ControlPlaneRef == nil { - descendants.controlPlaneMachines = collections.ToMachineList(controlPlaneMachines) - } - return descendants, nil + s.descendants = descendants + s.getDescendantsSucceeded = true + + return reconcile.Result{}, nil } // filterOwnedDescendants returns an array of runtime.Objects containing only those descendants that have the cluster // as an owner reference, with control plane machines sorted last. -func (c clusterDescendants) filterOwnedDescendants(cluster *clusterv1.Cluster) ([]client.Object, error) { +// Note: this list must include stand-alone MachineSets and stand-alone Machines; instead MachineSets or Machines controlled +// by higher level abstractions like e.g. MachineDeployment are not be included (if owner references are properly set on those machines). 
+func (c *clusterDescendants) filterOwnedDescendants(cluster *clusterv1.Cluster) ([]client.Object, error) {
 	var ownedDescendants []client.Object
 	eachFunc := func(o runtime.Object) error {
 		obj := o.(client.Object)
@@ -501,12 +563,24 @@ func (c clusterDescendants) filterOwnedDescendants(cluster *clusterv1.Cluster) (
 		&c.machineDeployments,
 		&c.machineSets,
 		&c.workerMachines,
-		&c.controlPlaneMachines,
 	}
 
 	if feature.Gates.Enabled(feature.MachinePool) {
 		lists = append([]client.ObjectList{&c.machinePools}, lists...)
 	}
 
+	// Make sure that control plane machines are included only if there is no control plane object
+	// responsible for managing them.
+	// Note: Excluding machines controlled by a control plane object is an additional safeguard to ensure
+	// that the control plane is deleted only after all the worker machines have been deleted.
+	// Note: Using stand-alone control plane machines is not yet officially deprecated, however this approach
+	// has well-known limitations that have been addressed by the introduction of control plane objects. One of those
+	// limitations concerns the deletion workflow, which is governed by this function.
+	// More specifically, when deleting a Cluster with stand-alone control plane machines, those machines
+	// will be decommissioned in parallel with the other machines, even though they are added last in this list.
+	if cluster.Spec.ControlPlaneRef == nil {
+		lists = append(lists, []client.ObjectList{&c.controlPlaneMachines}...)
+	}
+
 	for _, list := range lists {
 		if err := meta.EachListItem(list, eachFunc); err != nil {
 			return nil, errors.Wrapf(err, "error finding owned descendants of cluster %s/%s", cluster.Namespace, cluster.Name)
@@ -516,8 +590,9 @@ func (c clusterDescendants) filterOwnedDescendants(cluster *clusterv1.Cluster) (
 	return ownedDescendants, nil
 }
 
-func (r *Reconciler) reconcileControlPlaneInitialized(ctx context.Context, cluster *clusterv1.Cluster) (ctrl.Result, error) {
+func (r *Reconciler) reconcileControlPlaneInitialized(ctx context.Context, s *scope) (ctrl.Result, error) {
 	log := ctrl.LoggerFrom(ctx)
+	cluster := s.cluster
 
 	// Skip checking if the control plane is initialized when using a Control Plane Provider (this is reconciled in
 	// reconcileControlPlane instead).
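Aside on the pattern above (not part of the patch): a minimal, self-contained Go sketch of the phase-runner that doReconcile implements — every phase receives the shared scope, errors are aggregated, and the lowest non-zero requeue interval wins. The names result, scope, phaseFunc and runPhases are illustrative stand-ins for ctrl.Result, the controller's scope, clusterReconcileFunc and doReconcile; errors.Join stands in for kerrors.NewAggregate.

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// result mimics the part of ctrl.Result that matters for aggregation.
type result struct {
	RequeueAfter time.Duration
}

// scope mimics the shared state that every phase can read and update.
type scope struct {
	infraReady bool
}

type phaseFunc func(context.Context, *scope) (result, error)

// runPhases calls every phase, collects errors, and keeps the lowest
// non-zero RequeueAfter (LowestNonZeroResult semantics); any error
// discards the aggregated result, as in the patch.
func runPhases(ctx context.Context, phases []phaseFunc, s *scope) (result, error) {
	res := result{}
	var errs []error
	for _, phase := range phases {
		phaseRes, err := phase(ctx, s)
		if err != nil {
			errs = append(errs, err)
		}
		if phaseRes.RequeueAfter > 0 &&
			(res.RequeueAfter == 0 || phaseRes.RequeueAfter < res.RequeueAfter) {
			res = phaseRes
		}
	}
	if len(errs) > 0 {
		return result{}, errors.Join(errs...)
	}
	return res, nil
}

func main() {
	phases := []phaseFunc{
		func(_ context.Context, s *scope) (result, error) {
			s.infraReady = true
			return result{RequeueAfter: 30 * time.Second}, nil
		},
		func(_ context.Context, _ *scope) (result, error) {
			return result{RequeueAfter: 10 * time.Second}, nil
		},
	}
	s := &scope{}
	res, err := runPhases(context.Background(), phases, s)
	fmt.Println(s.infraReady, res.RequeueAfter, err) // true 10s <nil>
}
```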
diff --git a/internal/controllers/cluster/cluster_controller_phases.go b/internal/controllers/cluster/cluster_controller_phases.go index 4afa3976f5e9..39a957a8a144 100644 --- a/internal/controllers/cluster/cluster_controller_phases.go +++ b/internal/controllers/cluster/cluster_controller_phases.go @@ -24,6 +24,8 @@ import ( "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/klog/v2" "k8s.io/utils/ptr" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" @@ -33,7 +35,6 @@ import ( "sigs.k8s.io/cluster-api/controllers/external" capierrors "sigs.k8s.io/cluster-api/errors" "sigs.k8s.io/cluster-api/util" - "sigs.k8s.io/cluster-api/util/annotations" "sigs.k8s.io/cluster-api/util/conditions" utilconversion "sigs.k8s.io/cluster-api/util/conversion" "sigs.k8s.io/cluster-api/util/kubeconfig" @@ -41,6 +42,8 @@ import ( "sigs.k8s.io/cluster-api/util/secret" ) +var externalReadyWait = 30 * time.Second + func (r *Reconciler) reconcilePhase(_ context.Context, cluster *clusterv1.Cluster) { preReconcilePhase := cluster.Status.GetTypedPhase() @@ -76,42 +79,36 @@ func (r *Reconciler) reconcilePhase(_ context.Context, cluster *clusterv1.Cluste } // reconcileExternal handles generic unstructured objects referenced by a Cluster. -func (r *Reconciler) reconcileExternal(ctx context.Context, cluster *clusterv1.Cluster, ref *corev1.ObjectReference) (external.ReconcileOutput, error) { +func (r *Reconciler) reconcileExternal(ctx context.Context, cluster *clusterv1.Cluster, ref *corev1.ObjectReference) (*unstructured.Unstructured, error) { log := ctrl.LoggerFrom(ctx) if err := utilconversion.UpdateReferenceAPIContract(ctx, r.Client, ref); err != nil { - return external.ReconcileOutput{}, err + if apierrors.IsNotFound(err) { + // We want to surface the NotFound error only for the referenced object, so we use a generic error in case CRD is not found. + return nil, errors.New(err.Error()) + } + return nil, err } obj, err := external.Get(ctx, r.Client, ref, cluster.Namespace) if err != nil { - if apierrors.IsNotFound(errors.Cause(err)) { - log.Info("Could not find external object for cluster, requeuing", "refGroupVersionKind", ref.GroupVersionKind(), "refName", ref.Name) - return external.ReconcileOutput{RequeueAfter: 30 * time.Second}, nil - } - return external.ReconcileOutput{}, err + return nil, err } // Ensure we add a watcher to the external object. if err := r.externalTracker.Watch(log, obj, handler.EnqueueRequestForOwner(r.Client.Scheme(), r.Client.RESTMapper(), &clusterv1.Cluster{})); err != nil { - return external.ReconcileOutput{}, err - } - - // if external ref is paused, return error. - if annotations.IsPaused(cluster, obj) { - log.V(3).Info("External object referenced is paused") - return external.ReconcileOutput{Paused: true}, nil + return nil, err } // Initialize the patch helper. patchHelper, err := patch.NewHelper(obj, r.Client) if err != nil { - return external.ReconcileOutput{}, err + return nil, err } // Set external object ControllerReference to the Cluster. if err := controllerutil.SetControllerReference(cluster, obj, r.Client.Scheme()); err != nil { - return external.ReconcileOutput{}, err + return nil, err } // Set the Cluster label. @@ -124,13 +121,13 @@ func (r *Reconciler) reconcileExternal(ctx context.Context, cluster *clusterv1.C // Always attempt to Patch the external object. 
if err := patchHelper.Patch(ctx, obj); err != nil { - return external.ReconcileOutput{}, err + return nil, err } // Set failure reason and message, if any. failureReason, failureMessage, err := external.FailuresFrom(obj) if err != nil { - return external.ReconcileOutput{}, err + return nil, err } if failureReason != "" { clusterStatusError := capierrors.ClusterStatusError(failureReason) @@ -143,66 +140,79 @@ func (r *Reconciler) reconcileExternal(ctx context.Context, cluster *clusterv1.C ) } - return external.ReconcileOutput{Result: obj}, nil + return obj, nil } // reconcileInfrastructure reconciles the Spec.InfrastructureRef object on a Cluster. -func (r *Reconciler) reconcileInfrastructure(ctx context.Context, cluster *clusterv1.Cluster) (ctrl.Result, error) { +func (r *Reconciler) reconcileInfrastructure(ctx context.Context, s *scope) (ctrl.Result, error) { log := ctrl.LoggerFrom(ctx) - - if cluster.Spec.InfrastructureRef == nil { - // If the infrastructure ref is not set, marking the infrastructure as ready (no-op). - cluster.Status.InfrastructureReady = true - conditions.MarkTrue(cluster, clusterv1.InfrastructureReadyCondition) + cluster := s.cluster + + // If the infrastructure ref is not set, no-op. + if s.cluster.Spec.InfrastructureRef == nil { + // if the cluster is not deleted, mark the infrastructure as ready to unblock other provisioning workflows. + if s.cluster.DeletionTimestamp.IsZero() { + cluster.Status.InfrastructureReady = true + conditions.MarkTrue(cluster, clusterv1.InfrastructureReadyCondition) + } return ctrl.Result{}, nil } // Call generic external reconciler. - infraReconcileResult, err := r.reconcileExternal(ctx, cluster, cluster.Spec.InfrastructureRef) + obj, err := r.reconcileExternal(ctx, cluster, cluster.Spec.InfrastructureRef) if err != nil { - return ctrl.Result{}, err - } - // Return early if we need to requeue. - if infraReconcileResult.RequeueAfter > 0 { - return ctrl.Result{RequeueAfter: infraReconcileResult.RequeueAfter}, nil - } - // If the external object is paused, return without any further processing. - if infraReconcileResult.Paused { - return ctrl.Result{}, nil - } - infraConfig := infraReconcileResult.Result + if apierrors.IsNotFound(err) { + s.infraClusterIsNotFound = true - // There's no need to go any further if the Cluster is marked for deletion. - if !infraConfig.GetDeletionTimestamp().IsZero() { - return ctrl.Result{}, nil + if !cluster.DeletionTimestamp.IsZero() { + // Tolerate infra cluster not found when the cluster is being deleted. + return ctrl.Result{}, nil + } + + if cluster.Status.InfrastructureReady { + // Infra object went missing after the cluster was up and running + return ctrl.Result{}, errors.Errorf("%s has been deleted after being ready", cluster.Spec.InfrastructureRef.Kind) + } + log.Info(fmt.Sprintf("Could not find %s, requeuing", cluster.Spec.InfrastructureRef.Kind)) + return ctrl.Result{RequeueAfter: externalReadyWait}, nil + } + return ctrl.Result{}, err } + s.infraCluster = obj // Determine if the infrastructure provider is ready. 
- preReconcileInfrastructureReady := cluster.Status.InfrastructureReady - ready, err := external.IsReady(infraConfig) + ready, err := external.IsReady(s.infraCluster) if err != nil { return ctrl.Result{}, err } - cluster.Status.InfrastructureReady = ready - // Only record the event if the status has changed - if preReconcileInfrastructureReady != cluster.Status.InfrastructureReady { - r.recorder.Eventf(cluster, corev1.EventTypeNormal, "InfrastructureReady", "Cluster %s InfrastructureReady is now %t", cluster.Name, cluster.Status.InfrastructureReady) + if ready && !cluster.Status.InfrastructureReady { + log.Info("Infrastructure provider has completed cluster infrastructure provisioning and reports status.ready", cluster.Spec.InfrastructureRef.Kind, klog.KObj(s.infraCluster)) } // Report a summary of current status of the infrastructure object defined for this cluster. + fallBack := conditions.WithFallbackValue(ready, clusterv1.WaitingForInfrastructureFallbackReason, clusterv1.ConditionSeverityInfo, "") + if !s.cluster.DeletionTimestamp.IsZero() { + fallBack = conditions.WithFallbackValue(false, clusterv1.DeletingReason, clusterv1.ConditionSeverityInfo, "") + } conditions.SetMirror(cluster, clusterv1.InfrastructureReadyCondition, - conditions.UnstructuredGetter(infraConfig), - conditions.WithFallbackValue(ready, clusterv1.WaitingForInfrastructureFallbackReason, clusterv1.ConditionSeverityInfo, ""), + conditions.UnstructuredGetter(s.infraCluster), + fallBack, ) - if !ready { + // There's no need to go any further if the infrastructure object is marked for deletion. + if !s.infraCluster.GetDeletionTimestamp().IsZero() { + return ctrl.Result{}, nil + } + + // If the infrastructure provider is not ready (and it wasn't ready before), return early. + if !ready && !cluster.Status.InfrastructureReady { log.V(3).Info("Infrastructure provider is not ready yet") return ctrl.Result{}, nil } // Get and parse Spec.ControlPlaneEndpoint field from the infrastructure provider. if !cluster.Spec.ControlPlaneEndpoint.IsValid() { - if err := util.UnstructuredUnmarshalField(infraConfig, &cluster.Spec.ControlPlaneEndpoint, "spec", "controlPlaneEndpoint"); err != nil && err != util.ErrUnstructuredFieldNotFound { + if err := util.UnstructuredUnmarshalField(s.infraCluster, &cluster.Spec.ControlPlaneEndpoint, "spec", "controlPlaneEndpoint"); err != nil && err != util.ErrUnstructuredFieldNotFound { return ctrl.Result{}, errors.Wrapf(err, "failed to retrieve Spec.ControlPlaneEndpoint from infrastructure provider for Cluster %q in namespace %q", cluster.Name, cluster.Namespace) } @@ -210,65 +220,80 @@ func (r *Reconciler) reconcileInfrastructure(ctx context.Context, cluster *clust // Get and parse Status.FailureDomains from the infrastructure provider. 
failureDomains := clusterv1.FailureDomains{} - if err := util.UnstructuredUnmarshalField(infraConfig, &failureDomains, "status", "failureDomains"); err != nil && err != util.ErrUnstructuredFieldNotFound { + if err := util.UnstructuredUnmarshalField(s.infraCluster, &failureDomains, "status", "failureDomains"); err != nil && err != util.ErrUnstructuredFieldNotFound { return ctrl.Result{}, errors.Wrapf(err, "failed to retrieve Status.FailureDomains from infrastructure provider for Cluster %q in namespace %q", cluster.Name, cluster.Namespace) } cluster.Status.FailureDomains = failureDomains + // Only record the event if the status has changed + if !cluster.Status.InfrastructureReady { + r.recorder.Eventf(cluster, corev1.EventTypeNormal, "InfrastructureReady", "Cluster %s InfrastructureReady is now True", cluster.Name) + } + cluster.Status.InfrastructureReady = true + return ctrl.Result{}, nil } // reconcileControlPlane reconciles the Spec.ControlPlaneRef object on a Cluster. -func (r *Reconciler) reconcileControlPlane(ctx context.Context, cluster *clusterv1.Cluster) (ctrl.Result, error) { +func (r *Reconciler) reconcileControlPlane(ctx context.Context, s *scope) (ctrl.Result, error) { log := ctrl.LoggerFrom(ctx) + cluster := s.cluster if cluster.Spec.ControlPlaneRef == nil { return ctrl.Result{}, nil } // Call generic external reconciler. - controlPlaneReconcileResult, err := r.reconcileExternal(ctx, cluster, cluster.Spec.ControlPlaneRef) + obj, err := r.reconcileExternal(ctx, cluster, cluster.Spec.ControlPlaneRef) if err != nil { - return ctrl.Result{}, err - } - // Return early if we need to requeue. - if controlPlaneReconcileResult.RequeueAfter > 0 { - return ctrl.Result{RequeueAfter: controlPlaneReconcileResult.RequeueAfter}, nil - } - // If the external object is paused, return without any further processing. - if controlPlaneReconcileResult.Paused { - return ctrl.Result{}, nil - } - controlPlaneConfig := controlPlaneReconcileResult.Result + if apierrors.IsNotFound(err) { + s.controlPlaneIsNotFound = true - // There's no need to go any further if the control plane resource is marked for deletion. - if !controlPlaneConfig.GetDeletionTimestamp().IsZero() { - return ctrl.Result{}, nil + if !cluster.DeletionTimestamp.IsZero() { + // Tolerate control plane not found when the cluster is being deleted. + return ctrl.Result{}, nil + } + + if cluster.Status.ControlPlaneReady { + // Control plane went missing after the cluster was up and running + return ctrl.Result{}, errors.Errorf("%s has been deleted after being ready", cluster.Spec.ControlPlaneRef.Kind) + } + log.Info(fmt.Sprintf("Could not find %s, requeuing", cluster.Spec.ControlPlaneRef.Kind)) + return ctrl.Result{RequeueAfter: externalReadyWait}, nil + } + return ctrl.Result{}, err } + s.controlPlane = obj - preReconcileControlPlaneReady := cluster.Status.ControlPlaneReady // Determine if the control plane provider is ready. 
- ready, err := external.IsReady(controlPlaneConfig) + ready, err := external.IsReady(s.controlPlane) if err != nil { return ctrl.Result{}, err } - cluster.Status.ControlPlaneReady = ready - // Only record the event if the status has changed - if preReconcileControlPlaneReady != cluster.Status.ControlPlaneReady { - r.recorder.Eventf(cluster, corev1.EventTypeNormal, "ControlPlaneReady", "Cluster %s ControlPlaneReady is now %t", cluster.Name, cluster.Status.ControlPlaneReady) + if ready && !cluster.Status.ControlPlaneReady { + log.Info("ControlPlane provider has completed provisioning and reports status.ready", cluster.Spec.ControlPlaneRef.Kind, klog.KObj(s.controlPlane)) } // Report a summary of current status of the control plane object defined for this cluster. + fallBack := conditions.WithFallbackValue(ready, clusterv1.WaitingForControlPlaneFallbackReason, clusterv1.ConditionSeverityInfo, "") + if !s.cluster.DeletionTimestamp.IsZero() { + fallBack = conditions.WithFallbackValue(false, clusterv1.DeletingReason, clusterv1.ConditionSeverityInfo, "") + } conditions.SetMirror(cluster, clusterv1.ControlPlaneReadyCondition, - conditions.UnstructuredGetter(controlPlaneConfig), - conditions.WithFallbackValue(ready, clusterv1.WaitingForControlPlaneFallbackReason, clusterv1.ConditionSeverityInfo, ""), + conditions.UnstructuredGetter(s.controlPlane), + fallBack, ) + // There's no need to go any further if the control plane object is marked for deletion. + if !s.controlPlane.GetDeletionTimestamp().IsZero() { + return ctrl.Result{}, nil + } + // Update cluster.Status.ControlPlaneInitialized if it hasn't already been set // Determine if the control plane provider is initialized. if !conditions.IsTrue(cluster, clusterv1.ControlPlaneInitializedCondition) { - initialized, err := external.IsInitialized(controlPlaneConfig) + initialized, err := external.IsInitialized(s.controlPlane) if err != nil { return ctrl.Result{}, err } @@ -279,24 +304,32 @@ func (r *Reconciler) reconcileControlPlane(ctx context.Context, cluster *cluster } } - if !ready { + // If the control plane is not ready (and it wasn't ready before), return early. + if !ready && !cluster.Status.ControlPlaneReady { log.V(3).Info("Control Plane provider is not ready yet") return ctrl.Result{}, nil } // Get and parse Spec.ControlPlaneEndpoint field from the control plane provider. 
if !cluster.Spec.ControlPlaneEndpoint.IsValid() { - if err := util.UnstructuredUnmarshalField(controlPlaneConfig, &cluster.Spec.ControlPlaneEndpoint, "spec", "controlPlaneEndpoint"); err != nil && err != util.ErrUnstructuredFieldNotFound { + if err := util.UnstructuredUnmarshalField(s.controlPlane, &cluster.Spec.ControlPlaneEndpoint, "spec", "controlPlaneEndpoint"); err != nil && err != util.ErrUnstructuredFieldNotFound { return ctrl.Result{}, errors.Wrapf(err, "failed to retrieve Spec.ControlPlaneEndpoint from control plane provider for Cluster %q in namespace %q", cluster.Name, cluster.Namespace) } } + // Only record the event if the status has changed + if !cluster.Status.ControlPlaneReady { + r.recorder.Eventf(cluster, corev1.EventTypeNormal, "ControlPlaneReady", "Cluster %s ControlPlaneReady is now True", cluster.Name) + } + cluster.Status.ControlPlaneReady = true + return ctrl.Result{}, nil } -func (r *Reconciler) reconcileKubeconfig(ctx context.Context, cluster *clusterv1.Cluster) (ctrl.Result, error) { +func (r *Reconciler) reconcileKubeconfig(ctx context.Context, s *scope) (ctrl.Result, error) { log := ctrl.LoggerFrom(ctx) + cluster := s.cluster if !cluster.Spec.ControlPlaneEndpoint.IsValid() { return ctrl.Result{}, nil diff --git a/internal/controllers/cluster/cluster_controller_phases_test.go b/internal/controllers/cluster/cluster_controller_phases_test.go index e086ddb8b693..0f74c366b1a3 100644 --- a/internal/controllers/cluster/cluster_controller_phases_test.go +++ b/internal/controllers/cluster/cluster_controller_phases_test.go @@ -38,475 +38,617 @@ import ( "sigs.k8s.io/cluster-api/util/test/builder" ) -func TestClusterReconcilePhases(t *testing.T) { - t.Run("reconcile infrastructure", func(t *testing.T) { - cluster := &clusterv1.Cluster{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-cluster", - Namespace: "test-namespace", - }, - Status: clusterv1.ClusterStatus{ - InfrastructureReady: true, +func TestClusterReconcileInfrastructure(t *testing.T) { + cluster := &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster", + Namespace: "test-namespace", + }, + Status: clusterv1.ClusterStatus{ + InfrastructureReady: true, + }, + Spec: clusterv1.ClusterSpec{ + ControlPlaneEndpoint: clusterv1.APIEndpoint{ + Host: "1.2.3.4", + Port: 8443, }, - Spec: clusterv1.ClusterSpec{ - ControlPlaneEndpoint: clusterv1.APIEndpoint{ - Host: "1.2.3.4", - Port: 8443, - }, - InfrastructureRef: &corev1.ObjectReference{ - APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1", - Kind: "GenericInfrastructureMachine", - Name: "test", - }, + InfrastructureRef: &corev1.ObjectReference{ + APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1", + Kind: "GenericInfrastructureMachine", + Name: "test", }, - } - clusterNoEndpoint := &clusterv1.Cluster{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-cluster", - Namespace: "test-namespace", + }, + } + clusterNoEndpoint := &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster", + Namespace: "test-namespace", + }, + Status: clusterv1.ClusterStatus{ + InfrastructureReady: true, + }, + Spec: clusterv1.ClusterSpec{ + InfrastructureRef: &corev1.ObjectReference{ + APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1", + Kind: "GenericInfrastructureMachine", + Name: "test", }, - Status: clusterv1.ClusterStatus{ - InfrastructureReady: true, + }, + } + + tests := []struct { + name string + cluster *clusterv1.Cluster + infraRef map[string]interface{} + expectErr bool + expectResult ctrl.Result + check func(g *GomegaWithT, in 
*clusterv1.Cluster) + }{ + { + name: "returns no error if infrastructure ref is nil", + cluster: &clusterv1.Cluster{ObjectMeta: metav1.ObjectMeta{Name: "test-cluster", Namespace: "test-namespace"}}, + expectErr: false, + check: func(g *GomegaWithT, in *clusterv1.Cluster) { + g.Expect(in.Status.InfrastructureReady).To(BeTrue()) + g.Expect(conditions.IsTrue(in, clusterv1.InfrastructureReadyCondition)).To(BeTrue()) }, - Spec: clusterv1.ClusterSpec{ - InfrastructureRef: &corev1.ObjectReference{ - APIVersion: "infrastructure.cluster.x-k8s.io/v1beta1", - Kind: "GenericInfrastructureMachine", - Name: "test", + }, + { + name: "requeue if unable to get infrastructure ref and cluster did not yet reported infrastructure ready", + cluster: func() *clusterv1.Cluster { + c := cluster.DeepCopy() + c.Status.InfrastructureReady = false + return c + }(), + expectErr: false, + expectResult: ctrl.Result{RequeueAfter: externalReadyWait}, + }, + { + name: "returns error if unable to get infrastructure ref and cluster already reported infrastructure ready", + cluster: cluster.DeepCopy(), + expectErr: true, + }, + { + name: "tolerate if unable to get infrastructure ref and cluster is deleting", + cluster: func() *clusterv1.Cluster { + c := cluster.DeepCopy() + c.Finalizers = append(c.Finalizers, "test") // Note: this is required to get fake client to accept objects with DeletionTimestamp. + c.DeletionTimestamp = &metav1.Time{Time: time.Now().Add(-1 * time.Hour)} + return c + }(), + expectErr: false, + expectResult: ctrl.Result{}, + }, + { + name: "returns no error if infrastructure is marked ready on cluster", + cluster: cluster.DeepCopy(), + infraRef: map[string]interface{}{ + "kind": "GenericInfrastructureMachine", + "apiVersion": "infrastructure.cluster.x-k8s.io/v1beta1", + "metadata": map[string]interface{}{ + "name": "test", + "namespace": "test-namespace", + "deletionTimestamp": "sometime", + }, + }, + expectErr: false, + }, + { + name: "returns no error if the control plane endpoint is not yet set", + cluster: clusterNoEndpoint.DeepCopy(), + infraRef: map[string]interface{}{ + "kind": "GenericInfrastructureMachine", + "apiVersion": "infrastructure.cluster.x-k8s.io/v1beta1", + "metadata": map[string]interface{}{ + "name": "test", + "namespace": "test-namespace", + "deletionTimestamp": "sometime", + }, + "status": map[string]interface{}{ + "ready": true, }, }, - } - - tests := []struct { - name string - cluster *clusterv1.Cluster - infraRef map[string]interface{} - expectErr bool - expectResult ctrl.Result - check func(g *GomegaWithT, in *clusterv1.Cluster) - }{ - { - name: "returns no error if infrastructure ref is nil", - cluster: &clusterv1.Cluster{ObjectMeta: metav1.ObjectMeta{Name: "test-cluster", Namespace: "test-namespace"}}, - expectErr: false, - check: func(g *GomegaWithT, in *clusterv1.Cluster) { - g.Expect(in.Status.InfrastructureReady).To(BeTrue()) - g.Expect(conditions.IsTrue(in, clusterv1.InfrastructureReadyCondition)).To(BeTrue()) - }, - }, - { - name: "returns error if unable to reconcile infrastructure ref", - cluster: cluster, - expectErr: false, - expectResult: ctrl.Result{RequeueAfter: 30 * time.Second}, - }, - { - name: "returns no error if infra config is marked for deletion", - cluster: cluster, - infraRef: map[string]interface{}{ - "kind": "GenericInfrastructureMachine", - "apiVersion": "infrastructure.cluster.x-k8s.io/v1beta1", - "metadata": map[string]interface{}{ - "name": "test", - "namespace": "test-namespace", - "deletionTimestamp": "sometime", + expectErr: false, + }, + { + 
name: "should propagate the control plane endpoint once set", + cluster: clusterNoEndpoint.DeepCopy(), + infraRef: map[string]interface{}{ + "kind": "GenericInfrastructureMachine", + "apiVersion": "infrastructure.cluster.x-k8s.io/v1beta1", + "metadata": map[string]interface{}{ + "name": "test", + "namespace": "test-namespace", + "deletionTimestamp": "sometime", + }, + "spec": map[string]interface{}{ + "controlPlaneEndpoint": map[string]interface{}{ + "host": "example.com", + "port": int64(6443), }, }, - expectErr: false, - }, - { - name: "returns no error if infrastructure is marked ready on cluster", - cluster: cluster, - infraRef: map[string]interface{}{ - "kind": "GenericInfrastructureMachine", - "apiVersion": "infrastructure.cluster.x-k8s.io/v1beta1", - "metadata": map[string]interface{}{ - "name": "test", - "namespace": "test-namespace", - "deletionTimestamp": "sometime", - }, + "status": map[string]interface{}{ + "ready": true, }, - expectErr: false, - }, - { - name: "returns no error if infrastructure has the paused annotation", - cluster: cluster, - infraRef: map[string]interface{}{ - "kind": "GenericInfrastructureMachine", - "apiVersion": "infrastructure.cluster.x-k8s.io/v1beta1", - "metadata": map[string]interface{}{ - "name": "test", - "namespace": "test-namespace", - "annotations": map[string]interface{}{ - "cluster.x-k8s.io/paused": "true", - }, + }, + expectErr: false, + check: func(g *GomegaWithT, in *clusterv1.Cluster) { + g.Expect(in.Spec.ControlPlaneEndpoint.Host).To(Equal("example.com")) + g.Expect(in.Spec.ControlPlaneEndpoint.Port).To(BeEquivalentTo(6443)) + }, + }, + { + name: "do not allows to change infrastructure ready and control plane endpoint once set", + cluster: cluster.DeepCopy(), + infraRef: map[string]interface{}{ + "kind": "GenericInfrastructureMachine", + "apiVersion": "infrastructure.cluster.x-k8s.io/v1beta1", + "metadata": map[string]interface{}{ + "name": "test", + "namespace": "test-namespace", + "deletionTimestamp": "sometime", + }, + "spec": map[string]interface{}{ + "controlPlaneEndpoint": map[string]interface{}{ + "host": "example.com", + "port": int64(6443), }, }, - expectErr: false, - }, - { - name: "returns no error if the control plane endpoint is not yet set", - cluster: clusterNoEndpoint, - infraRef: map[string]interface{}{ - "kind": "GenericInfrastructureMachine", - "apiVersion": "infrastructure.cluster.x-k8s.io/v1beta1", - "metadata": map[string]interface{}{ - "name": "test", - "namespace": "test-namespace", - "deletionTimestamp": "sometime", - }, - "status": map[string]interface{}{ - "ready": true, - }, + "status": map[string]interface{}{ + "ready": false, }, - expectErr: false, - }, - { - name: "should propagate the control plane endpoint once set", - cluster: clusterNoEndpoint, - infraRef: map[string]interface{}{ - "kind": "GenericInfrastructureMachine", - "apiVersion": "infrastructure.cluster.x-k8s.io/v1beta1", - "metadata": map[string]interface{}{ - "name": "test", - "namespace": "test-namespace", - "deletionTimestamp": "sometime", - }, - "spec": map[string]interface{}{ - "controlPlaneEndpoint": map[string]interface{}{ - "host": "example.com", - "port": int64(6443), - }, - }, - "status": map[string]interface{}{ - "ready": true, + }, + expectErr: false, + check: func(g *GomegaWithT, in *clusterv1.Cluster) { + g.Expect(in.Spec.ControlPlaneEndpoint.Host).To(Equal("1.2.3.4")) + g.Expect(in.Spec.ControlPlaneEndpoint.Port).To(BeEquivalentTo(8443)) + g.Expect(in.Status.InfrastructureReady).To(BeTrue()) + }, + }, + { + name: "do not reconcile 
if infra config is marked for deletion", + cluster: func() *clusterv1.Cluster { + c := clusterNoEndpoint.DeepCopy() + c.Status.InfrastructureReady = false + return c + }(), + infraRef: map[string]interface{}{ + "kind": "GenericInfrastructureMachine", + "apiVersion": "infrastructure.cluster.x-k8s.io/v1beta1", + "metadata": map[string]interface{}{ + "name": "test", + "namespace": "test-namespace", + "finalizers": []interface{}{"test"}, + "deletionTimestamp": "2020-01-01T00:00:00Z", + }, + "spec": map[string]interface{}{ + "controlPlaneEndpoint": map[string]interface{}{ + "host": "example.com", + "port": int64(6443), }, }, - expectErr: false, - check: func(g *GomegaWithT, in *clusterv1.Cluster) { - g.Expect(in.Spec.ControlPlaneEndpoint.Host).To(Equal("example.com")) - g.Expect(in.Spec.ControlPlaneEndpoint.Port).To(BeEquivalentTo(6443)) + "status": map[string]interface{}{ + "ready": true, }, }, - } + expectErr: false, + check: func(g *GomegaWithT, in *clusterv1.Cluster) { + g.Expect(in.Spec.ControlPlaneEndpoint.Host).To(Equal("")) + g.Expect(in.Spec.ControlPlaneEndpoint.Port).To(BeEquivalentTo(0)) + g.Expect(in.Status.InfrastructureReady).To(BeFalse()) + }, + }, + } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - g := NewWithT(t) - - var c client.Client - if tt.infraRef != nil { - infraConfig := &unstructured.Unstructured{Object: tt.infraRef} - c = fake.NewClientBuilder(). - WithObjects(builder.GenericInfrastructureMachineCRD.DeepCopy(), tt.cluster, infraConfig). - Build() - } else { - c = fake.NewClientBuilder(). - WithObjects(builder.GenericInfrastructureMachineCRD.DeepCopy(), tt.cluster). - Build() - } - r := &Reconciler{ - Client: c, - recorder: record.NewFakeRecorder(32), - externalTracker: external.ObjectTracker{ - Controller: externalfake.Controller{}, - Cache: &informertest.FakeInformers{}, - Scheme: c.Scheme(), - }, - } - - res, err := r.reconcileInfrastructure(ctx, tt.cluster) - g.Expect(res).To(BeComparableTo(tt.expectResult)) - if tt.expectErr { - g.Expect(err).To(HaveOccurred()) - } else { - g.Expect(err).ToNot(HaveOccurred()) - } - - if tt.check != nil { - tt.check(g, tt.cluster) - } - }) - } - }) + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) - t.Run("reconcile control plane ref", func(t *testing.T) { - cluster := &clusterv1.Cluster{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-cluster", - Namespace: "test-namespace", - }, - Status: clusterv1.ClusterStatus{ - InfrastructureReady: true, - }, - Spec: clusterv1.ClusterSpec{ - ControlPlaneEndpoint: clusterv1.APIEndpoint{ - Host: "1.2.3.4", - Port: 8443, - }, - ControlPlaneRef: &corev1.ObjectReference{ - APIVersion: "controlplane.cluster.x-k8s.io/v1beta1", - Kind: "GenericControlPlane", - Name: "test", + var c client.Client + if tt.infraRef != nil { + infraConfig := &unstructured.Unstructured{Object: tt.infraRef} + c = fake.NewClientBuilder(). + WithObjects(builder.GenericInfrastructureMachineCRD.DeepCopy(), tt.cluster, infraConfig). + Build() + } else { + c = fake.NewClientBuilder(). + WithObjects(builder.GenericInfrastructureMachineCRD.DeepCopy(), tt.cluster). 
+ Build() + } + r := &Reconciler{ + Client: c, + recorder: record.NewFakeRecorder(32), + externalTracker: external.ObjectTracker{ + Controller: externalfake.Controller{}, + Cache: &informertest.FakeInformers{}, + Scheme: c.Scheme(), }, + } + + s := &scope{ + cluster: tt.cluster, + } + res, err := r.reconcileInfrastructure(ctx, s) + g.Expect(res).To(BeComparableTo(tt.expectResult)) + if tt.expectErr { + g.Expect(err).To(HaveOccurred()) + } else { + g.Expect(err).ToNot(HaveOccurred()) + } + + if tt.check != nil { + tt.check(g, tt.cluster) + } + }) + } +} + +func TestClusterReconcileControlPlane(t *testing.T) { + cluster := &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster", + Namespace: "test-namespace", + }, + Status: clusterv1.ClusterStatus{ + InfrastructureReady: true, + }, + Spec: clusterv1.ClusterSpec{ + ControlPlaneEndpoint: clusterv1.APIEndpoint{ + Host: "1.2.3.4", + Port: 8443, }, - } - clusterNoEndpoint := &clusterv1.Cluster{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-cluster", - Namespace: "test-namespace", - }, - Status: clusterv1.ClusterStatus{ - InfrastructureReady: true, + ControlPlaneRef: &corev1.ObjectReference{ + APIVersion: "controlplane.cluster.x-k8s.io/v1beta1", + Kind: "GenericControlPlane", + Name: "test", }, - Spec: clusterv1.ClusterSpec{ - ControlPlaneRef: &corev1.ObjectReference{ - APIVersion: "controlplane.cluster.x-k8s.io/v1beta1", - Kind: "GenericControlPlane", - Name: "test", - }, + }, + } + clusterNoEndpoint := &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster", + Namespace: "test-namespace", + }, + Status: clusterv1.ClusterStatus{ + InfrastructureReady: true, + }, + Spec: clusterv1.ClusterSpec{ + ControlPlaneRef: &corev1.ObjectReference{ + APIVersion: "controlplane.cluster.x-k8s.io/v1beta1", + Kind: "GenericControlPlane", + Name: "test", }, - } + }, + } - tests := []struct { - name string - cluster *clusterv1.Cluster - cpRef map[string]interface{} - expectErr bool - expectResult ctrl.Result - check func(g *GomegaWithT, in *clusterv1.Cluster) - }{ - { - name: "returns no error if control plane ref is nil", - cluster: &clusterv1.Cluster{ObjectMeta: metav1.ObjectMeta{Name: "test-cluster", Namespace: "test-namespace"}}, - expectErr: false, - }, - { - name: "requeues if unable to reconcile control plane ref", - cluster: cluster, - expectErr: false, - expectResult: ctrl.Result{RequeueAfter: 30 * time.Second}, - }, - { - name: "returns no error if control plane ref is marked for deletion", - cluster: cluster, - cpRef: map[string]interface{}{ - "kind": "GenericControlPlane", - "apiVersion": "controlplane.cluster.x-k8s.io/v1beta1", - "metadata": map[string]interface{}{ - "name": "test", - "namespace": "test-namespace", - "deletionTimestamp": "sometime", + tests := []struct { + name string + cluster *clusterv1.Cluster + cpRef map[string]interface{} + expectErr bool + expectResult ctrl.Result + check func(g *GomegaWithT, in *clusterv1.Cluster) + }{ + { + name: "returns no error if control plane ref is nil", + cluster: &clusterv1.Cluster{ObjectMeta: metav1.ObjectMeta{Name: "test-cluster", Namespace: "test-namespace"}}, + expectErr: false, + }, + { + name: "requeue if unable to get control plane ref and cluster did not yet reported control plane ready", + cluster: cluster.DeepCopy(), + expectErr: false, + expectResult: ctrl.Result{RequeueAfter: externalReadyWait}, + }, + { + name: "returns error if unable to get control plane ref and cluster already reported control plane ready", + cluster: func() *clusterv1.Cluster { + 
c := cluster.DeepCopy() + c.Status.ControlPlaneReady = true + return c + }(), + expectErr: true, + }, + { + name: "tolerate if unable to get control plane ref and cluster is deleting", + cluster: func() *clusterv1.Cluster { + c := cluster.DeepCopy() + c.Finalizers = append(c.Finalizers, "test") // Note: this is required to get fake client to accept objects with DeletionTimestamp. + c.DeletionTimestamp = &metav1.Time{Time: time.Now().Add(-1 * time.Hour)} + return c + }(), + expectErr: false, + expectResult: ctrl.Result{}, + }, + { + name: "returns no error if control plane ref is marked for deletion", + cluster: cluster.DeepCopy(), + cpRef: map[string]interface{}{ + "kind": "GenericControlPlane", + "apiVersion": "controlplane.cluster.x-k8s.io/v1beta1", + "metadata": map[string]interface{}{ + "name": "test", + "namespace": "test-namespace", + "deletionTimestamp": "sometime", + }, + }, + expectErr: false, + }, + { + name: "returns no error if control plane has the paused annotation", + cluster: cluster.DeepCopy(), + cpRef: map[string]interface{}{ + "kind": "GenericControlPlane", + "apiVersion": "controlplane.cluster.x-k8s.io/v1beta1", + "metadata": map[string]interface{}{ + "name": "test", + "namespace": "test-namespace", + "annotations": map[string]interface{}{ + "cluster.x-k8s.io/paused": "true", }, }, - expectErr: false, - }, - { - name: "returns no error if control plane has the paused annotation", - cluster: cluster, - cpRef: map[string]interface{}{ - "kind": "GenericControlPlane", - "apiVersion": "controlplane.cluster.x-k8s.io/v1beta1", - "metadata": map[string]interface{}{ - "name": "test", - "namespace": "test-namespace", - "annotations": map[string]interface{}{ - "cluster.x-k8s.io/paused": "true", - }, - }, + }, + expectErr: false, + }, + { + name: "returns no error if the control plane endpoint is not yet set", + cluster: clusterNoEndpoint.DeepCopy(), + cpRef: map[string]interface{}{ + "kind": "GenericControlPlane", + "apiVersion": "controlplane.cluster.x-k8s.io/v1beta1", + "metadata": map[string]interface{}{ + "name": "test", + "namespace": "test-namespace", + "deletionTimestamp": "sometime", }, - expectErr: false, - }, - { - name: "returns no error if the control plane endpoint is not yet set", - cluster: clusterNoEndpoint, - cpRef: map[string]interface{}{ - "kind": "GenericControlPlane", - "apiVersion": "controlplane.cluster.x-k8s.io/v1beta1", - "metadata": map[string]interface{}{ - "name": "test", - "namespace": "test-namespace", - "deletionTimestamp": "sometime", - }, - "status": map[string]interface{}{ - "ready": true, - }, + "status": map[string]interface{}{ + "ready": true, }, - expectErr: false, - }, - { - name: "should propagate the control plane endpoint if set", - cluster: clusterNoEndpoint, - cpRef: map[string]interface{}{ - "kind": "GenericControlPlane", - "apiVersion": "controlplane.cluster.x-k8s.io/v1beta1", - "metadata": map[string]interface{}{ - "name": "test", - "namespace": "test-namespace", - "deletionTimestamp": "sometime", - }, - "spec": map[string]interface{}{ - "controlPlaneEndpoint": map[string]interface{}{ - "host": "example.com", - "port": int64(6443), - }, - }, - "status": map[string]interface{}{ - "ready": true, + }, + expectErr: false, + }, + { + name: "should propagate the control plane endpoint if set", + cluster: clusterNoEndpoint.DeepCopy(), + cpRef: map[string]interface{}{ + "kind": "GenericControlPlane", + "apiVersion": "controlplane.cluster.x-k8s.io/v1beta1", + "metadata": map[string]interface{}{ + "name": "test", + "namespace": 
"test-namespace", + "deletionTimestamp": "sometime", + }, + "spec": map[string]interface{}{ + "controlPlaneEndpoint": map[string]interface{}{ + "host": "example.com", + "port": int64(6443), }, }, - expectErr: false, - check: func(g *GomegaWithT, in *clusterv1.Cluster) { - g.Expect(in.Spec.ControlPlaneEndpoint.Host).To(Equal("example.com")) - g.Expect(in.Spec.ControlPlaneEndpoint.Port).To(BeEquivalentTo(6443)) - }, - }, - { - name: "should propagate the initialized and ready conditions", - cluster: clusterNoEndpoint, - cpRef: map[string]interface{}{ - "kind": "GenericControlPlane", - "apiVersion": "controlplane.cluster.x-k8s.io/v1beta1", - "metadata": map[string]interface{}{ - "name": "test", - "namespace": "test-namespace", - "deletionTimestamp": "sometime", + "status": map[string]interface{}{ + "ready": true, + }, + }, + expectErr: false, + check: func(g *GomegaWithT, in *clusterv1.Cluster) { + g.Expect(in.Spec.ControlPlaneEndpoint.Host).To(Equal("example.com")) + g.Expect(in.Spec.ControlPlaneEndpoint.Port).To(BeEquivalentTo(6443)) + }, + }, + { + name: "should propagate the initialized and ready conditions", + cluster: clusterNoEndpoint.DeepCopy(), + cpRef: map[string]interface{}{ + "kind": "GenericControlPlane", + "apiVersion": "controlplane.cluster.x-k8s.io/v1beta1", + "metadata": map[string]interface{}{ + "name": "test", + "namespace": "test-namespace", + "deletionTimestamp": "sometime", + }, + "spec": map[string]interface{}{}, + "status": map[string]interface{}{ + "ready": true, + "initialized": true, + }, + }, + expectErr: false, + check: func(g *GomegaWithT, in *clusterv1.Cluster) { + g.Expect(conditions.IsTrue(in, clusterv1.ControlPlaneReadyCondition)).To(BeTrue()) + g.Expect(conditions.IsTrue(in, clusterv1.ControlPlaneInitializedCondition)).To(BeTrue()) + }, + }, + { + name: "do not allows to change control plane ready and control plane endpoint once set", + cluster: func() *clusterv1.Cluster { + c := cluster.DeepCopy() + c.Status.ControlPlaneReady = true + return c + }(), + cpRef: map[string]interface{}{ + "kind": "GenericControlPlane", + "apiVersion": "controlplane.cluster.x-k8s.io/v1beta1", + "metadata": map[string]interface{}{ + "name": "test", + "namespace": "test-namespace", + "deletionTimestamp": "sometime", + }, + "spec": map[string]interface{}{ + "controlPlaneEndpoint": map[string]interface{}{ + "host": "example.com", + "port": int64(6443), }, - "spec": map[string]interface{}{}, - "status": map[string]interface{}{ - "ready": true, - "initialized": true, + }, + "status": map[string]interface{}{ + "ready": false, + "initialized": false, + }, + }, + expectErr: false, + check: func(g *GomegaWithT, in *clusterv1.Cluster) { + g.Expect(in.Spec.ControlPlaneEndpoint.Host).To(Equal("1.2.3.4")) + g.Expect(in.Spec.ControlPlaneEndpoint.Port).To(BeEquivalentTo(8443)) + g.Expect(in.Status.ControlPlaneReady).To(BeTrue()) + }, + }, + { + name: "do not reconcile if control plane is marked for deletion", + cluster: clusterNoEndpoint.DeepCopy(), + cpRef: map[string]interface{}{ + "kind": "GenericControlPlane", + "apiVersion": "controlplane.cluster.x-k8s.io/v1beta1", + "metadata": map[string]interface{}{ + "name": "test", + "namespace": "test-namespace", + "finalizers": []interface{}{"test"}, + "deletionTimestamp": "2020-01-01T00:00:00Z", + }, + "spec": map[string]interface{}{ + "controlPlaneEndpoint": map[string]interface{}{ + "host": "example.com", + "port": int64(6443), }, }, - expectErr: false, - check: func(g *GomegaWithT, in *clusterv1.Cluster) { - g.Expect(conditions.IsTrue(in, 
clusterv1.ControlPlaneReadyCondition)).To(BeTrue()) - g.Expect(conditions.IsTrue(in, clusterv1.ControlPlaneInitializedCondition)).To(BeTrue()) + "status": map[string]interface{}{ + "ready": true, + "initialized": true, }, }, - } + expectErr: false, + check: func(g *GomegaWithT, in *clusterv1.Cluster) { + g.Expect(in.Spec.ControlPlaneEndpoint.Host).To(Equal("")) + g.Expect(in.Spec.ControlPlaneEndpoint.Port).To(BeEquivalentTo(0)) + g.Expect(in.Status.ControlPlaneReady).To(BeFalse()) + }, + }, + } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - g := NewWithT(t) - - var c client.Client - if tt.cpRef != nil { - cpConfig := &unstructured.Unstructured{Object: tt.cpRef} - c = fake.NewClientBuilder(). - WithObjects(builder.GenericControlPlaneCRD.DeepCopy(), tt.cluster, cpConfig). - Build() - } else { - c = fake.NewClientBuilder(). - WithObjects(builder.GenericControlPlaneCRD.DeepCopy(), tt.cluster). - Build() - } - r := &Reconciler{ - Client: c, - recorder: record.NewFakeRecorder(32), - externalTracker: external.ObjectTracker{ - Controller: externalfake.Controller{}, - Cache: &informertest.FakeInformers{}, - Scheme: c.Scheme(), - }, - } - - res, err := r.reconcileControlPlane(ctx, tt.cluster) - g.Expect(res).To(BeComparableTo(tt.expectResult)) - if tt.expectErr { - g.Expect(err).To(HaveOccurred()) - } else { - g.Expect(err).ToNot(HaveOccurred()) - } - - if tt.check != nil { - tt.check(g, tt.cluster) - } - }) - } - }) + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) - t.Run("reconcile kubeconfig", func(t *testing.T) { - cluster := &clusterv1.Cluster{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-cluster", - }, - Spec: clusterv1.ClusterSpec{ - ControlPlaneEndpoint: clusterv1.APIEndpoint{ - Host: "1.2.3.4", - Port: 8443, + var c client.Client + if tt.cpRef != nil { + cpConfig := &unstructured.Unstructured{Object: tt.cpRef} + c = fake.NewClientBuilder(). + WithObjects(builder.GenericControlPlaneCRD.DeepCopy(), tt.cluster, cpConfig). + Build() + } else { + c = fake.NewClientBuilder(). + WithObjects(builder.GenericControlPlaneCRD.DeepCopy(), tt.cluster). 
+ Build() + } + r := &Reconciler{ + Client: c, + recorder: record.NewFakeRecorder(32), + externalTracker: external.ObjectTracker{ + Controller: externalfake.Controller{}, + Cache: &informertest.FakeInformers{}, + Scheme: c.Scheme(), }, + } + + s := &scope{ + cluster: tt.cluster, + } + res, err := r.reconcileControlPlane(ctx, s) + g.Expect(res).To(BeComparableTo(tt.expectResult)) + if tt.expectErr { + g.Expect(err).To(HaveOccurred()) + } else { + g.Expect(err).ToNot(HaveOccurred()) + } + + if tt.check != nil { + tt.check(g, tt.cluster) + } + }) + } +} + +func TestClusterReconcileKubeConfig(t *testing.T) { + cluster := &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster", + }, + Spec: clusterv1.ClusterSpec{ + ControlPlaneEndpoint: clusterv1.APIEndpoint{ + Host: "1.2.3.4", + Port: 8443, }, - } + }, + } - tests := []struct { - name string - cluster *clusterv1.Cluster - secret *corev1.Secret - wantErr bool - wantRequeue bool - }{ - { - name: "cluster not provisioned, apiEndpoint is not set", - cluster: &clusterv1.Cluster{}, - wantErr: false, - }, - { - name: "kubeconfig secret found", - cluster: cluster, - secret: &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-cluster-kubeconfig", - }, + tests := []struct { + name string + cluster *clusterv1.Cluster + secret *corev1.Secret + wantErr bool + wantRequeue bool + }{ + { + name: "cluster not provisioned, apiEndpoint is not set", + cluster: &clusterv1.Cluster{}, + wantErr: false, + }, + { + name: "kubeconfig secret found", + cluster: cluster, + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster-kubeconfig", }, - wantErr: false, }, - { - name: "kubeconfig secret not found, should requeue", - cluster: cluster, - wantErr: false, - wantRequeue: true, - }, - { - name: "invalid ca secret, should return error", - cluster: cluster, - secret: &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-cluster-ca", - }, + wantErr: false, + }, + { + name: "kubeconfig secret not found, should requeue", + cluster: cluster, + wantErr: false, + wantRequeue: true, + }, + { + name: "invalid ca secret, should return error", + cluster: cluster, + secret: &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster-ca", }, - wantErr: true, }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - g := NewWithT(t) + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + g := NewWithT(t) - c := fake.NewClientBuilder(). - WithObjects(tt.cluster). + c := fake.NewClientBuilder(). + WithObjects(tt.cluster). + Build() + if tt.secret != nil { + c = fake.NewClientBuilder(). + WithObjects(tt.cluster, tt.secret). Build() - if tt.secret != nil { - c = fake.NewClientBuilder(). - WithObjects(tt.cluster, tt.secret). 
- Build() - } - r := &Reconciler{ - Client: c, - recorder: record.NewFakeRecorder(32), - } - res, err := r.reconcileKubeconfig(ctx, tt.cluster) - if tt.wantErr { - g.Expect(err).To(HaveOccurred()) - } else { - g.Expect(err).ToNot(HaveOccurred()) - } - - if tt.wantRequeue { - g.Expect(res.RequeueAfter).To(BeNumerically(">=", 0)) - } - }) - } - }) + } + r := &Reconciler{ + Client: c, + recorder: record.NewFakeRecorder(32), + } + + s := &scope{ + cluster: tt.cluster, + } + res, err := r.reconcileKubeconfig(ctx, s) + if tt.wantErr { + g.Expect(err).To(HaveOccurred()) + } else { + g.Expect(err).ToNot(HaveOccurred()) + } + + if tt.wantRequeue { + g.Expect(res.RequeueAfter).To(BeNumerically(">=", 0)) + } + }) + } } func TestClusterReconciler_reconcilePhase(t *testing.T) { @@ -788,7 +930,10 @@ func TestClusterReconcilePhases_reconcileFailureDomains(t *testing.T) { }, } - _, err := r.reconcileInfrastructure(ctx, tt.cluster) + s := &scope{ + cluster: tt.cluster, + } + _, err := r.reconcileInfrastructure(ctx, s) g.Expect(err).ToNot(HaveOccurred()) g.Expect(tt.cluster.Status.FailureDomains).To(BeEquivalentTo(tt.expectFailureDomains)) }) diff --git a/internal/controllers/cluster/cluster_controller_test.go b/internal/controllers/cluster/cluster_controller_test.go index b43903dd8ad4..02e02bfe2736 100644 --- a/internal/controllers/cluster/cluster_controller_test.go +++ b/internal/controllers/cluster/cluster_controller_test.go @@ -23,6 +23,7 @@ import ( corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/tools/record" utilfeature "k8s.io/component-base/featuregate/testing" "k8s.io/utils/ptr" ctrl "sigs.k8s.io/controller-runtime" @@ -458,9 +459,15 @@ func TestClusterReconciler_reconcileDelete(t *testing.T) { r := &Reconciler{ Client: fakeClient, APIReader: fakeClient, + recorder: record.NewFakeRecorder(1), } - _, _ = r.reconcileDelete(ctx, tt.cluster) + s := &scope{ + cluster: tt.cluster, + infraCluster: fakeInfraCluster, + getDescendantsSucceeded: true, + } + _, _ = r.reconcileDelete(ctx, s) infraCluster := builder.InfrastructureCluster("", "").Build() err := fakeClient.Get(ctx, client.ObjectKeyFromObject(fakeInfraCluster), infraCluster) g.Expect(apierrors.IsNotFound(err)).To(Equal(tt.wantDelete)) @@ -713,7 +720,6 @@ func (b *machinePoolBuilder) build() expv1.MachinePool { func TestFilterOwnedDescendants(t *testing.T) { _ = feature.MutableGates.Set("MachinePool=true") - g := NewWithT(t) c := clusterv1.Cluster{ TypeMeta: metav1.TypeMeta{ @@ -788,28 +794,55 @@ func TestFilterOwnedDescendants(t *testing.T) { }, } - actual, err := d.filterOwnedDescendants(&c) - g.Expect(err).ToNot(HaveOccurred()) + t.Run("Without a control plane object", func(t *testing.T) { + g := NewWithT(t) - expected := []client.Object{ - &mp2OwnedByCluster, - &mp4OwnedByCluster, - &md2OwnedByCluster, - &md4OwnedByCluster, - &ms2OwnedByCluster, - &ms4OwnedByCluster, - &m2OwnedByCluster, - &m5OwnedByCluster, - &m3ControlPlaneOwnedByCluster, - &m6ControlPlaneOwnedByCluster, - } + actual, err := d.filterOwnedDescendants(&c) + g.Expect(err).ToNot(HaveOccurred()) + + expected := []client.Object{ + &mp2OwnedByCluster, + &mp4OwnedByCluster, + &md2OwnedByCluster, + &md4OwnedByCluster, + &ms2OwnedByCluster, + &ms4OwnedByCluster, + &m2OwnedByCluster, + &m5OwnedByCluster, + &m3ControlPlaneOwnedByCluster, + &m6ControlPlaneOwnedByCluster, + } - g.Expect(actual).To(BeComparableTo(expected)) -} + g.Expect(actual).To(Equal(expected)) + }) -func TestDescendantsLength(t 
*testing.T) { - g := NewWithT(t) + t.Run("With a control plane object", func(t *testing.T) { + g := NewWithT(t) + + cWithCP := c.DeepCopy() + cWithCP.Spec.ControlPlaneRef = &corev1.ObjectReference{ + Kind: "SomeKind", + } + + actual, err := d.filterOwnedDescendants(cWithCP) + g.Expect(err).ToNot(HaveOccurred()) + + expected := []client.Object{ + &mp2OwnedByCluster, + &mp4OwnedByCluster, + &md2OwnedByCluster, + &md4OwnedByCluster, + &ms2OwnedByCluster, + &ms4OwnedByCluster, + &m2OwnedByCluster, + &m5OwnedByCluster, + } + + g.Expect(actual).To(Equal(expected)) + }) +} +func TestObjectsPendingDelete(t *testing.T) { d := clusterDescendants{ machineDeployments: clusterv1.MachineDeploymentList{ Items: []clusterv1.MachineDeployment{ @@ -848,7 +881,21 @@ func TestDescendantsLength(t *testing.T) { }, } - g.Expect(d.length()).To(Equal(15)) + t.Run("Without a control plane object", func(t *testing.T) { + g := NewWithT(t) + + c := &clusterv1.Cluster{} + g.Expect(d.objectsPendingDeleteCount(c)).To(Equal(15)) + g.Expect(d.objectsPendingDeleteNames(c)).To(Equal("Control plane machines: m1,m2,m3; Machine deployments: md1; Machine sets: ms1,ms2; Machine pools: mp1,mp2,mp3,mp4,mp5; Worker machines: m3,m4,m5,m6")) + }) + + t.Run("With a control plane object", func(t *testing.T) { + g := NewWithT(t) + + c := &clusterv1.Cluster{Spec: clusterv1.ClusterSpec{ControlPlaneRef: &corev1.ObjectReference{Kind: "SomeKind"}}} + g.Expect(d.objectsPendingDeleteCount(c)).To(Equal(12)) + g.Expect(d.objectsPendingDeleteNames(c)).To(Equal("Machine deployments: md1; Machine sets: ms1,ms2; Machine pools: mp1,mp2,mp3,mp4,mp5; Worker machines: m3,m4,m5,m6")) + }) } func TestReconcileControlPlaneInitializedControlPlaneRef(t *testing.T) { @@ -868,7 +915,11 @@ func TestReconcileControlPlaneInitializedControlPlaneRef(t *testing.T) { } r := &Reconciler{} - res, err := r.reconcileControlPlaneInitialized(ctx, c) + + s := &scope{ + cluster: c, + } + res, err := r.reconcileControlPlaneInitialized(ctx, s) g.Expect(res.IsZero()).To(BeTrue()) g.Expect(err).ToNot(HaveOccurred()) g.Expect(conditions.Has(c, clusterv1.ControlPlaneInitializedCondition)).To(BeFalse()) diff --git a/internal/controllers/machine/machine_controller_phases.go b/internal/controllers/machine/machine_controller_phases.go index ffe24b8f340b..f7e53d0ce232 100644 --- a/internal/controllers/machine/machine_controller_phases.go +++ b/internal/controllers/machine/machine_controller_phases.go @@ -32,7 +32,6 @@ import ( ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "sigs.k8s.io/controller-runtime/pkg/handler" - "sigs.k8s.io/controller-runtime/pkg/reconcile" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" "sigs.k8s.io/cluster-api/controllers/external" @@ -228,7 +227,7 @@ func (r *Reconciler) reconcileInfrastructure(ctx context.Context, s *scope) (ctr m.Status.FailureReason = ptr.To(capierrors.InvalidConfigurationMachineError) m.Status.FailureMessage = ptr.To(fmt.Sprintf("Machine infrastructure resource %v with name %q has been deleted after being ready", m.Spec.InfrastructureRef.GroupVersionKind(), m.Spec.InfrastructureRef.Name)) - return ctrl.Result{}, reconcile.TerminalError(errors.Errorf("could not find %v %q for Machine %q in namespace %q", m.Spec.InfrastructureRef.GroupVersionKind().String(), m.Spec.InfrastructureRef.Name, m.Name, m.Namespace)) + return ctrl.Result{}, errors.Errorf("could not find %v %q for Machine %q in namespace %q", m.Spec.InfrastructureRef.GroupVersionKind().String(), m.Spec.InfrastructureRef.Name, 
m.Name, m.Namespace) } log.Info("Could not find infrastructure machine, requeuing", m.Spec.InfrastructureRef.Kind, klog.KRef(m.Spec.InfrastructureRef.Namespace, m.Spec.InfrastructureRef.Name)) return ctrl.Result{RequeueAfter: externalReadyWait}, nil
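Illustrative sketch (not part of the diff): after this change the cluster phase funcs read their inputs from a *scope, so a test only populates the scope fields the phase under test consumes. Assuming the same test package, fake client, and builders already used in the tests above (the two helper names below are hypothetical):

	// scopeForNormalReconcile mirrors what the control-plane and kubeconfig tests do:
	// only the Cluster is needed for those phases.
	func scopeForNormalReconcile(cluster *clusterv1.Cluster) *scope {
		return &scope{cluster: cluster}
	}

	// scopeForDelete mirrors the reconcileDelete test: it also carries the infra cluster
	// object and records that getDescendants succeeded, as the delete test above sets
	// getDescendantsSucceeded to true before calling reconcileDelete.
	func scopeForDelete(cluster *clusterv1.Cluster, infra *unstructured.Unstructured) *scope {
		return &scope{
			cluster:                 cluster,
			infraCluster:            infra,
			getDescendantsSucceeded: true,
		}
	}

	// Usage inside a test body, for example:
	//   res, err := r.reconcileControlPlane(ctx, scopeForNormalReconcile(tt.cluster))
	//   _, _ = r.reconcileDelete(ctx, scopeForDelete(tt.cluster, fakeInfraCluster))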