diff --git a/api/v1beta1/condition_consts.go b/api/v1beta1/condition_consts.go index 5c100e779cfb..4a34f3c9bf37 100644 --- a/api/v1beta1/condition_consts.go +++ b/api/v1beta1/condition_consts.go @@ -276,6 +276,11 @@ const ( // TopologyReconciledHookBlockingReason (Severity=Info) documents reconciliation of a Cluster topology // not yet completed because at least one of the lifecycle hooks is blocking. TopologyReconciledHookBlockingReason = "LifecycleHookBlocking" + + // TopologyReconciledClusterClassNotReconciledReason (Severity=Info) documents reconciliation of a Cluster topology not + // yet completed because the ClusterClass has not reconciled yet. If this condition persists there may be an issue + // with the ClusterClass surfaced in the ClusterClass status or controller logs. + TopologyReconciledClusterClassNotReconciledReason = "ClusterClassNotReconciled" ) // Conditions and condition reasons for ClusterClass. diff --git a/internal/controllers/topology/cluster/cluster_controller.go b/internal/controllers/topology/cluster/cluster_controller.go index d49636d1257d..f47e3b5e7cdb 100644 --- a/internal/controllers/topology/cluster/cluster_controller.go +++ b/internal/controllers/topology/cluster/cluster_controller.go @@ -175,26 +175,9 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Re return ctrl.Result{}, err } - // Get ClusterClass. - clusterClass := &clusterv1.ClusterClass{} - key := client.ObjectKey{Name: cluster.Spec.Topology.Class, Namespace: cluster.Namespace} - if err := r.Client.Get(ctx, key, clusterClass); err != nil { - return ctrl.Result{}, errors.Wrapf(err, "failed to retrieve ClusterClass %s", cluster.Spec.Topology.Class) - } - // Default and Validate the Cluster based on information from the ClusterClass. - // This step is needed as if the ClusterClass does not exist at Cluster creation some fields may not be defaulted or - // validated in the webhook. 
- if errs := webhooks.DefaultVariables(cluster, clusterClass); len(errs) > 0 { - return ctrl.Result{}, apierrors.NewInvalid(clusterv1.GroupVersion.WithKind("Cluster").GroupKind(), cluster.Name, errs) - } - if errs := webhooks.ValidateClusterForClusterClass(cluster, clusterClass, field.NewPath("spec", "topology")); len(errs) > 0 { - return ctrl.Result{}, apierrors.NewInvalid(clusterv1.GroupVersion.WithKind("Cluster").GroupKind(), cluster.Name, errs) - } - // Create a scope initialized with only the cluster; during reconcile // additional information will be added about the Cluster blueprint, current state and desired state. s := scope.New(cluster) - s.Blueprint.ClusterClass = clusterClass defer func() { if err := r.reconcileConditions(s, cluster, reterr); err != nil { @@ -221,6 +204,31 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (_ ctrl.Re func (r *Reconciler) reconcile(ctx context.Context, s *scope.Scope) (ctrl.Result, error) { var err error + // Get ClusterClass. + clusterClass := &clusterv1.ClusterClass{} + key := client.ObjectKey{Name: s.Current.Cluster.Spec.Topology.Class, Namespace: s.Current.Cluster.Namespace} + if err := r.Client.Get(ctx, key, clusterClass); err != nil { + return ctrl.Result{}, errors.Wrapf(err, "failed to retrieve ClusterClass %s", s.Current.Cluster.Spec.Topology.Class) + } + + s.Blueprint.ClusterClass = clusterClass + // If the ClusterClass `metadata.Generation` doesn't match the `status.ObservedGeneration` return as the ClusterClass + // is not up to date. + // Note: This doesn't require requeue as a change to ClusterClass observedGeneration will cause an additional reconcile + // in the Cluster. + if clusterClass.GetGeneration() != clusterClass.Status.ObservedGeneration { + return ctrl.Result{}, nil + } + + // Default and Validate the Cluster based on information from the ClusterClass. 
+ // This step is needed as if the ClusterClass does not exist at Cluster creation some fields may not be defaulted or + // validated in the webhook. + if errs := webhooks.DefaultVariables(s.Current.Cluster, clusterClass); len(errs) > 0 { + return ctrl.Result{}, apierrors.NewInvalid(clusterv1.GroupVersion.WithKind("Cluster").GroupKind(), s.Current.Cluster.Name, errs) + } + if errs := webhooks.ValidateClusterForClusterClass(s.Current.Cluster, clusterClass, field.NewPath("spec", "topology")); len(errs) > 0 { + return ctrl.Result{}, apierrors.NewInvalid(clusterv1.GroupVersion.WithKind("Cluster").GroupKind(), s.Current.Cluster.Name, errs) + } // Gets the blueprint with the ClusterClass and the referenced templates // and store it in the request scope. s.Blueprint, err = r.getBlueprint(ctx, s.Current.Cluster, s.Blueprint.ClusterClass) diff --git a/internal/controllers/topology/cluster/conditions.go b/internal/controllers/topology/cluster/conditions.go index 43c7169aeeaf..98cc96f53f5c 100644 --- a/internal/controllers/topology/cluster/conditions.go +++ b/internal/controllers/topology/cluster/conditions.go @@ -37,6 +37,7 @@ func (r *Reconciler) reconcileConditions(s *scope.Scope, cluster *clusterv1.Clus // cluster are in sync with the topology defined in the cluster. // The condition is false under the following conditions: // - An error occurred during the reconcile process of the cluster topology. +// - The ClusterClass has not been successfully reconciled with its current spec. // - The cluster upgrade has not yet propagated to all the components of the cluster. // - For a managed topology cluster the version upgrade is propagated one component at a time. 
// In such a case, since some of the component's spec would be adrift from the topology the @@ -58,6 +59,22 @@ func (r *Reconciler) reconcileTopologyReconciledCondition(s *scope.Scope, cluste return nil } + // If the ClusterClass `metadata.Generation` doesn't match the `status.ObservedGeneration` the ClusterClass + // has not been reconciled with its current spec yet, so mark the condition false (no requeue is needed here). + if s.Blueprint != nil && s.Blueprint.ClusterClass != nil && + s.Blueprint.ClusterClass.GetGeneration() != s.Blueprint.ClusterClass.Status.ObservedGeneration { + conditions.Set( + cluster, + conditions.FalseCondition( + clusterv1.TopologyReconciledCondition, + clusterv1.TopologyReconciledClusterClassNotReconciledReason, + clusterv1.ConditionSeverityInfo, + "ClusterClass is outdated. If this condition persists please check ClusterClass status.", + ), + ) + return nil + } + // If any of the lifecycle hooks are blocking any part of the reconciliation then topology // is not considered as fully reconciled. if s.HookResponseTracker.AggregateRetryAfter() != 0 { diff --git a/internal/controllers/topology/cluster/conditions_test.go b/internal/controllers/topology/cluster/conditions_test.go index dbd6d9cce998..ec3a3489a726 100644 --- a/internal/controllers/topology/cluster/conditions_test.go +++ b/internal/controllers/topology/cluster/conditions_test.go @@ -22,6 +22,7 @@ import ( . 
"github.com/onsi/gomega" "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" runtimehooksv1 "sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1" @@ -48,6 +49,26 @@ func TestReconcileTopologyReconciledCondition(t *testing.T) { wantConditionReason: clusterv1.TopologyReconcileFailedReason, wantErr: false, }, + { + name: "should set the condition to false if the ClusterClass is out of date", + cluster: &clusterv1.Cluster{}, + s: &scope.Scope{ + Blueprint: &scope.ClusterBlueprint{ + ClusterClass: &clusterv1.ClusterClass{ + ObjectMeta: metav1.ObjectMeta{ + Name: "class1", + Generation: 10, + }, + Status: clusterv1.ClusterClassStatus{ + ObservedGeneration: 999, + }, + }, + }, + }, + wantConditionStatus: corev1.ConditionFalse, + wantConditionReason: clusterv1.TopologyReconciledClusterClassNotReconciledReason, + wantErr: false, + }, { name: "should set the condition to false if the there is a blocking hook", reconcileErr: nil,