diff --git a/internal/controllers/topology/cluster/cluster_controller.go b/internal/controllers/topology/cluster/cluster_controller.go
index b600e6e56dcb..5e8898ef92ae 100644
--- a/internal/controllers/topology/cluster/cluster_controller.go
+++ b/internal/controllers/topology/cluster/cluster_controller.go
@@ -164,12 +164,6 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Re
 		return ctrl.Result{}, nil
 	}
 
-	// In case the object is deleted, the managed topology stops to reconcile;
-	// (the other controllers will take care of deletion).
-	if !cluster.ObjectMeta.DeletionTimestamp.IsZero() {
-		return r.reconcileDelete(ctx, cluster)
-	}
-
 	patchHelper, err := patch.NewHelper(cluster, r.Client)
 	if err != nil {
 		return ctrl.Result{}, err
@@ -196,6 +190,12 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Re
 		}
 	}()
 
+	// In case the object is deleted, the managed topology stops to reconcile;
+	// (the other controllers will take care of deletion).
+	if !cluster.ObjectMeta.DeletionTimestamp.IsZero() {
+		return r.reconcileDelete(ctx, cluster)
+	}
+
 	// Handle normal reconciliation loop.
 	return r.reconcile(ctx, s)
 }
diff --git a/internal/controllers/topology/cluster/conditions.go b/internal/controllers/topology/cluster/conditions.go
index c32e7c2a8dae..033a82feba0a 100644
--- a/internal/controllers/topology/cluster/conditions.go
+++ b/internal/controllers/topology/cluster/conditions.go
@@ -43,6 +43,19 @@ func (r *Reconciler) reconcileConditions(s *scope.Scope, cluster *clusterv1.Clus
 // In such a case, since some of the component's spec would be adrift from the topology the
 // topology cannot be considered fully reconciled.
 func (r *Reconciler) reconcileTopologyReconciledCondition(s *scope.Scope, cluster *clusterv1.Cluster, reconcileErr error) error {
+	// Mark TopologyReconciled as false due to cluster deletion.
+	if !cluster.ObjectMeta.DeletionTimestamp.IsZero() {
+		conditions.Set(
+			cluster,
+			conditions.FalseCondition(
+				clusterv1.TopologyReconciledCondition,
+				clusterv1.DeletedReason,
+				clusterv1.ConditionSeverityInfo,
+				"",
+			),
+		)
+		return nil
+	}
 	// If an error occurred during reconciliation set the TopologyReconciled condition to false.
 	// Add the error message from the reconcile function to the message of the condition.
 	if reconcileErr != nil {
diff --git a/internal/controllers/topology/cluster/conditions_test.go b/internal/controllers/topology/cluster/conditions_test.go
index ffb5837e86f1..bd3776f23aba 100644
--- a/internal/controllers/topology/cluster/conditions_test.go
+++ b/internal/controllers/topology/cluster/conditions_test.go
@@ -39,6 +39,7 @@ func TestReconcileTopologyReconciledCondition(t *testing.T) {
 	scheme := runtime.NewScheme()
 	g.Expect(clusterv1.AddToScheme(scheme)).To(Succeed())
 
+	deletionTime := metav1.Unix(0, 0)
 	tests := []struct {
 		name                 string
 		reconcileErr         error
@@ -585,6 +586,17 @@ func TestReconcileTopologyReconciledCondition(t *testing.T) {
 			},
 			wantConditionStatus: corev1.ConditionTrue,
 		},
+		{
+			name: "should set the TopologyReconciledCondition to False if the cluster has been deleted",
+			cluster: &clusterv1.Cluster{
+				ObjectMeta: metav1.ObjectMeta{
+					DeletionTimestamp: &deletionTime,
+				},
+			},
+			wantConditionStatus:  corev1.ConditionFalse,
+			wantConditionReason:  clusterv1.DeletedReason,
+			wantConditionMessage: "",
+		},
 	}
 
 	for _, tt := range tests {
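
For reviewers, here is a minimal, self-contained sketch of what the conditions.go change produces on a deleting Cluster object. It is not part of the patch above; it assumes the v1beta1 API types and the `sigs.k8s.io/cluster-api/util/conditions` helpers already used in the diff, and the cluster name is hypothetical.

```go
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
	"sigs.k8s.io/cluster-api/util/conditions"
)

func main() {
	// A Cluster that is mid-deletion: DeletionTimestamp is non-zero,
	// mirroring the fixture added in conditions_test.go.
	deletionTime := metav1.Unix(0, 0)
	cluster := &clusterv1.Cluster{
		ObjectMeta: metav1.ObjectMeta{
			Name:              "example", // hypothetical name
			DeletionTimestamp: &deletionTime,
		},
	}

	// The new branch in reconcileTopologyReconciledCondition: a deleting
	// Cluster reports TopologyReconciled=False with reason Deleted.
	if !cluster.ObjectMeta.DeletionTimestamp.IsZero() {
		conditions.Set(cluster, conditions.FalseCondition(
			clusterv1.TopologyReconciledCondition,
			clusterv1.DeletedReason,
			clusterv1.ConditionSeverityInfo,
			"",
		))
	}

	c := conditions.Get(cluster, clusterv1.TopologyReconciledCondition)
	fmt.Println(c.Status, c.Reason) // False Deleted
}
```

The cluster_controller.go hunks are what make this observable: the deletion check now runs after the patch helper and its deferred Patch call are set up, so the condition written on the delete path is persisted to the API server instead of being skipped by the early return.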