diff --git a/api/v1alpha3/conversion.go b/api/v1alpha3/conversion.go index f8129ac1f299..3d890553c90c 100644 --- a/api/v1alpha3/conversion.go +++ b/api/v1alpha3/conversion.go @@ -19,6 +19,7 @@ package v1alpha3 import ( apiconversion "k8s.io/apimachinery/pkg/conversion" "sigs.k8s.io/cluster-api/api/v1alpha4" + "sigs.k8s.io/cluster-api/util/conditions" utilconversion "sigs.k8s.io/cluster-api/util/conversion" "sigs.k8s.io/controller-runtime/pkg/conversion" ) @@ -26,13 +27,32 @@ import ( func (src *Cluster) ConvertTo(dstRaw conversion.Hub) error { dst := dstRaw.(*v1alpha4.Cluster) - return Convert_v1alpha3_Cluster_To_v1alpha4_Cluster(src, dst, nil) + if err := Convert_v1alpha3_Cluster_To_v1alpha4_Cluster(src, dst, nil); err != nil { + return err + } + + // Given this is a bool and there is no timestamp associated with it, when this condition is set, its timestamp + // will be "now". See https://github.com/kubernetes-sigs/cluster-api/issues/3798#issuecomment-708619826 for more + // discussion. + if src.Status.ControlPlaneInitialized { + conditions.MarkTrue(dst, v1alpha4.ControlPlaneInitializedCondition) + } + + return nil } func (dst *Cluster) ConvertFrom(srcRaw conversion.Hub) error { src := srcRaw.(*v1alpha4.Cluster) - return Convert_v1alpha4_Cluster_To_v1alpha3_Cluster(src, dst, nil) + if err := Convert_v1alpha4_Cluster_To_v1alpha3_Cluster(src, dst, nil); err != nil { + return err + } + + if conditions.IsTrue(src, v1alpha4.ControlPlaneInitializedCondition) { + dst.Status.ControlPlaneInitialized = true + } + + return nil } func (src *ClusterList) ConvertTo(dstRaw conversion.Hub) error { @@ -174,10 +194,14 @@ func (dst *MachineHealthCheckList) ConvertFrom(srcRaw conversion.Hub) error { } // Convert_v1alpha3_Bootstrap_To_v1alpha4_Bootstrap is an autogenerated conversion function. 
-func Convert_v1alpha3_Bootstrap_To_v1alpha4_Bootstrap(in *Bootstrap, out *v1alpha4.Bootstrap, s apiconversion.Scope) error { //nolint +func Convert_v1alpha3_Bootstrap_To_v1alpha4_Bootstrap(in *Bootstrap, out *v1alpha4.Bootstrap, s apiconversion.Scope) error { //nolint return autoConvert_v1alpha3_Bootstrap_To_v1alpha4_Bootstrap(in, out, s) } func Convert_v1alpha4_MachineRollingUpdateDeployment_To_v1alpha3_MachineRollingUpdateDeployment(in *v1alpha4.MachineRollingUpdateDeployment, out *MachineRollingUpdateDeployment, s apiconversion.Scope) error { return autoConvert_v1alpha4_MachineRollingUpdateDeployment_To_v1alpha3_MachineRollingUpdateDeployment(in, out, s) } + +func Convert_v1alpha3_ClusterStatus_To_v1alpha4_ClusterStatus(in *ClusterStatus, out *v1alpha4.ClusterStatus, s apiconversion.Scope) error { + return autoConvert_v1alpha3_ClusterStatus_To_v1alpha4_ClusterStatus(in, out, s) +} diff --git a/api/v1alpha3/zz_generated.conversion.go b/api/v1alpha3/zz_generated.conversion.go index 0f00332f99b7..6243deb915c9 100644 --- a/api/v1alpha3/zz_generated.conversion.go +++ b/api/v1alpha3/zz_generated.conversion.go @@ -94,11 +94,6 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*ClusterStatus)(nil), (*v1alpha4.ClusterStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha3_ClusterStatus_To_v1alpha4_ClusterStatus(a.(*ClusterStatus), b.(*v1alpha4.ClusterStatus), scope) - }); err != nil { - return err - } if err := s.AddGeneratedConversionFunc((*v1alpha4.ClusterStatus)(nil), (*ClusterStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1alpha4_ClusterStatus_To_v1alpha3_ClusterStatus(a.(*v1alpha4.ClusterStatus), b.(*ClusterStatus), scope) }); err != nil { @@ -354,6 +349,11 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } + if err := s.AddConversionFunc((*ClusterStatus)(nil), 
(*v1alpha4.ClusterStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha3_ClusterStatus_To_v1alpha4_ClusterStatus(a.(*ClusterStatus), b.(*v1alpha4.ClusterStatus), scope) + }); err != nil { + return err + } if err := s.AddConversionFunc((*v1alpha4.MachineRollingUpdateDeployment)(nil), (*MachineRollingUpdateDeployment)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1alpha4_MachineRollingUpdateDeployment_To_v1alpha3_MachineRollingUpdateDeployment(a.(*v1alpha4.MachineRollingUpdateDeployment), b.(*MachineRollingUpdateDeployment), scope) }); err != nil { @@ -436,7 +436,17 @@ func Convert_v1alpha4_Cluster_To_v1alpha3_Cluster(in *v1alpha4.Cluster, out *Clu func autoConvert_v1alpha3_ClusterList_To_v1alpha4_ClusterList(in *ClusterList, out *v1alpha4.ClusterList, s conversion.Scope) error { out.ListMeta = in.ListMeta - out.Items = *(*[]v1alpha4.Cluster)(unsafe.Pointer(&in.Items)) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]v1alpha4.Cluster, len(*in)) + for i := range *in { + if err := Convert_v1alpha3_Cluster_To_v1alpha4_Cluster(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } return nil } @@ -447,7 +457,17 @@ func Convert_v1alpha3_ClusterList_To_v1alpha4_ClusterList(in *ClusterList, out * func autoConvert_v1alpha4_ClusterList_To_v1alpha3_ClusterList(in *v1alpha4.ClusterList, out *ClusterList, s conversion.Scope) error { out.ListMeta = in.ListMeta - out.Items = *(*[]Cluster)(unsafe.Pointer(&in.Items)) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Cluster, len(*in)) + for i := range *in { + if err := Convert_v1alpha4_Cluster_To_v1alpha3_Cluster(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } return nil } @@ -520,25 +540,19 @@ func autoConvert_v1alpha3_ClusterStatus_To_v1alpha4_ClusterStatus(in *ClusterSta out.FailureMessage = 
(*string)(unsafe.Pointer(in.FailureMessage)) out.Phase = in.Phase out.InfrastructureReady = in.InfrastructureReady - out.ControlPlaneInitialized = in.ControlPlaneInitialized + // WARNING: in.ControlPlaneInitialized requires manual conversion: does not exist in peer-type out.ControlPlaneReady = in.ControlPlaneReady out.Conditions = *(*v1alpha4.Conditions)(unsafe.Pointer(&in.Conditions)) out.ObservedGeneration = in.ObservedGeneration return nil } -// Convert_v1alpha3_ClusterStatus_To_v1alpha4_ClusterStatus is an autogenerated conversion function. -func Convert_v1alpha3_ClusterStatus_To_v1alpha4_ClusterStatus(in *ClusterStatus, out *v1alpha4.ClusterStatus, s conversion.Scope) error { - return autoConvert_v1alpha3_ClusterStatus_To_v1alpha4_ClusterStatus(in, out, s) -} - func autoConvert_v1alpha4_ClusterStatus_To_v1alpha3_ClusterStatus(in *v1alpha4.ClusterStatus, out *ClusterStatus, s conversion.Scope) error { out.FailureDomains = *(*FailureDomains)(unsafe.Pointer(&in.FailureDomains)) out.FailureReason = (*errors.ClusterStatusError)(unsafe.Pointer(in.FailureReason)) out.FailureMessage = (*string)(unsafe.Pointer(in.FailureMessage)) out.Phase = in.Phase out.InfrastructureReady = in.InfrastructureReady - out.ControlPlaneInitialized = in.ControlPlaneInitialized out.ControlPlaneReady = in.ControlPlaneReady out.Conditions = *(*Conditions)(unsafe.Pointer(&in.Conditions)) out.ObservedGeneration = in.ObservedGeneration diff --git a/api/v1alpha4/cluster_types.go b/api/v1alpha4/cluster_types.go index b41a77c1e30e..51d1893a2b06 100644 --- a/api/v1alpha4/cluster_types.go +++ b/api/v1alpha4/cluster_types.go @@ -128,10 +128,6 @@ type ClusterStatus struct { // +optional InfrastructureReady bool `json:"infrastructureReady"` - // ControlPlaneInitialized defines if the control plane has been initialized. - // +optional - ControlPlaneInitialized bool `json:"controlPlaneInitialized"` - // ControlPlaneReady defines if the control plane is ready. 
// +optional ControlPlaneReady bool `json:"controlPlaneReady,omitempty"` diff --git a/api/v1alpha4/condition_consts.go b/api/v1alpha4/condition_consts.go index 455d89893ff0..cd2f3898842e 100644 --- a/api/v1alpha4/condition_consts.go +++ b/api/v1alpha4/condition_consts.go @@ -65,6 +65,10 @@ const ( // its node reference populated. MissingNodeRefReason = "MissingNodeRef" + // WaitingForControlPlaneProviderInitializedReason (Severity=Info) documents a cluster waiting for the control plane + // provider to report successful control plane initialization. + WaitingForControlPlaneProviderInitializedReason = "WaitingForControlPlaneProviderInitialized" + // ControlPlaneReady reports the ready condition from the control plane object defined for this cluster. // This condition is mirrored from the Ready condition in the control plane ref object, and // the absence of this condition might signal problems in the reconcile external loops or the fact that diff --git a/bootstrap/kubeadm/controllers/kubeadmconfig_controller.go b/bootstrap/kubeadm/controllers/kubeadmconfig_controller.go index 781f38ac7d71..176384b1d8d7 100644 --- a/bootstrap/kubeadm/controllers/kubeadmconfig_controller.go +++ b/bootstrap/kubeadm/controllers/kubeadmconfig_controller.go @@ -242,7 +242,8 @@ func (r *KubeadmConfigReconciler) Reconcile(ctx context.Context, req ctrl.Reques return ctrl.Result{}, nil } - if !cluster.Status.ControlPlaneInitialized { + // Note: can't use IsFalse here because we need to handle the absence of the condition as well as false. 
+ if !conditions.IsTrue(cluster, clusterv1.ControlPlaneInitializedCondition) { return r.handleClusterNotInitialized(ctx, scope) } diff --git a/bootstrap/kubeadm/controllers/kubeadmconfig_controller_test.go b/bootstrap/kubeadm/controllers/kubeadmconfig_controller_test.go index 06135ceb2bc3..3a4bba31f21d 100644 --- a/bootstrap/kubeadm/controllers/kubeadmconfig_controller_test.go +++ b/bootstrap/kubeadm/controllers/kubeadmconfig_controller_test.go @@ -21,7 +21,6 @@ import ( "context" "fmt" "reflect" - "sigs.k8s.io/cluster-api/util/patch" "testing" "time" @@ -41,6 +40,7 @@ import ( "sigs.k8s.io/cluster-api/test/helpers" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/conditions" + "sigs.k8s.io/cluster-api/util/patch" "sigs.k8s.io/cluster-api/util/secret" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" @@ -423,7 +423,7 @@ func TestKubeadmConfigReconciler_Reconcile_ErrorIfJoiningControlPlaneHasInvalidC // TODO: extract this kind of code into a setup function that puts the state of objects into an initialized controlplane (implies secrets exist) cluster := newCluster("cluster") cluster.Status.InfrastructureReady = true - cluster.Status.ControlPlaneInitialized = true + conditions.MarkTrue(cluster, clusterv1.ControlPlaneInitializedCondition) cluster.Spec.ControlPlaneEndpoint = clusterv1.APIEndpoint{Host: "100.105.150.1", Port: 6443} controlPlaneInitMachine := newControlPlaneMachine(cluster, "control-plane-init-machine") controlPlaneInitConfig := newControlPlaneInitKubeadmConfig(controlPlaneInitMachine, "control-plane-init-cfg") @@ -462,7 +462,7 @@ func TestKubeadmConfigReconciler_Reconcile_RequeueIfControlPlaneIsMissingAPIEndp cluster := newCluster("cluster") cluster.Status.InfrastructureReady = true - cluster.Status.ControlPlaneInitialized = true + conditions.MarkTrue(cluster, clusterv1.ControlPlaneInitializedCondition) controlPlaneInitMachine := newControlPlaneMachine(cluster, "control-plane-init-machine") 
controlPlaneInitConfig := newControlPlaneInitKubeadmConfig(controlPlaneInitMachine, "control-plane-init-cfg") @@ -498,7 +498,7 @@ func TestKubeadmConfigReconciler_Reconcile_RequeueIfControlPlaneIsMissingAPIEndp func TestReconcileIfJoinNodesAndControlPlaneIsReady(t *testing.T) { cluster := newCluster("cluster") cluster.Status.InfrastructureReady = true - cluster.Status.ControlPlaneInitialized = true + conditions.MarkTrue(cluster, clusterv1.ControlPlaneInitializedCondition) cluster.Spec.ControlPlaneEndpoint = clusterv1.APIEndpoint{Host: "100.105.150.1", Port: 6443} var useCases = []struct { @@ -587,7 +587,7 @@ func TestReconcileIfJoinNodePoolsAndControlPlaneIsReady(t *testing.T) { cluster := newCluster("cluster") cluster.Status.InfrastructureReady = true - cluster.Status.ControlPlaneInitialized = true + conditions.MarkTrue(cluster, clusterv1.ControlPlaneInitializedCondition) cluster.Spec.ControlPlaneEndpoint = clusterv1.APIEndpoint{Host: "100.105.150.1", Port: 6443} var useCases = []struct { @@ -666,7 +666,7 @@ func TestKubeadmConfigSecretCreatedStatusNotPatched(t *testing.T) { cluster := newCluster("cluster") cluster.Status.InfrastructureReady = true - cluster.Status.ControlPlaneInitialized = true + conditions.MarkTrue(cluster, clusterv1.ControlPlaneInitializedCondition) cluster.Spec.ControlPlaneEndpoint = clusterv1.APIEndpoint{Host: "100.105.150.1", Port: 6443} controlPlaneInitMachine := newControlPlaneMachine(cluster, "control-plane-init-machine") @@ -735,7 +735,7 @@ func TestBootstrapTokenTTLExtension(t *testing.T) { cluster := newCluster("cluster") cluster.Status.InfrastructureReady = true - cluster.Status.ControlPlaneInitialized = true + conditions.MarkTrue(cluster, clusterv1.ControlPlaneInitializedCondition) cluster.Spec.ControlPlaneEndpoint = clusterv1.APIEndpoint{Host: "100.105.150.1", Port: 6443} controlPlaneInitMachine := newControlPlaneMachine(cluster, "control-plane-init-machine") @@ -887,7 +887,7 @@ func TestBootstrapTokenRotationMachinePool(t 
*testing.T) { cluster := newCluster("cluster") cluster.Status.InfrastructureReady = true - cluster.Status.ControlPlaneInitialized = true + conditions.MarkTrue(cluster, clusterv1.ControlPlaneInitializedCondition) cluster.Spec.ControlPlaneEndpoint = clusterv1.APIEndpoint{Host: "100.105.150.1", Port: 6443} controlPlaneInitMachine := newControlPlaneMachine(cluster, "control-plane-init-machine") @@ -1305,7 +1305,7 @@ func TestKubeadmConfigReconciler_Reconcile_AlwaysCheckCAVerificationUnlessReques // Setup work for an initialized cluster clusterName := "my-cluster" cluster := newCluster(clusterName) - cluster.Status.ControlPlaneInitialized = true + conditions.MarkTrue(cluster, clusterv1.ControlPlaneInitializedCondition) cluster.Status.InfrastructureReady = true cluster.Spec.ControlPlaneEndpoint = clusterv1.APIEndpoint{ Host: "example.com", @@ -1433,7 +1433,6 @@ func TestKubeadmConfigReconciler_Reconcile_DoesNotFailIfCASecretsAlreadyExist(t cluster := newCluster("my-cluster") cluster.Status.InfrastructureReady = true - cluster.Status.ControlPlaneInitialized = false m := newControlPlaneMachine(cluster, "control-plane-machine") configName := "my-config" c := newControlPlaneInitKubeadmConfig(m, configName) diff --git a/cmd/clusterctl/client/cluster/mover.go b/cmd/clusterctl/client/cluster/mover.go index a4d40be34dd3..a44f7b48cb2d 100644 --- a/cmd/clusterctl/client/cluster/mover.go +++ b/cmd/clusterctl/client/cluster/mover.go @@ -30,6 +30,7 @@ import ( "k8s.io/apimachinery/pkg/util/version" clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha4" logf "sigs.k8s.io/cluster-api/cmd/clusterctl/log" + "sigs.k8s.io/cluster-api/util/conditions" "sigs.k8s.io/controller-runtime/pkg/client" ) @@ -137,7 +138,8 @@ func (o *objectMover) checkProvisioningCompleted(graph *objectGraph) error { continue } - if !clusterObj.Status.ControlPlaneInitialized { + // Note: can't use IsFalse here because we need to handle the absence of the condition as well as false. 
+ if !conditions.IsTrue(clusterObj, clusterv1.ControlPlaneInitializedCondition) { errList = append(errList, errors.Errorf("cannot start the move operation while the control plane for %q %s/%s is not yet initialized", clusterObj.GroupVersionKind(), clusterObj.GetNamespace(), clusterObj.GetName())) continue } diff --git a/cmd/clusterctl/client/cluster/mover_test.go b/cmd/clusterctl/client/cluster/mover_test.go index 5718d4fa0700..07fc152cedbd 100644 --- a/cmd/clusterctl/client/cluster/mover_test.go +++ b/cmd/clusterctl/client/cluster/mover_test.go @@ -27,6 +27,7 @@ import ( clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha4" clusterctlv1 "sigs.k8s.io/cluster-api/cmd/clusterctl/api/v1alpha3" "sigs.k8s.io/cluster-api/cmd/clusterctl/internal/test" + "sigs.k8s.io/cluster-api/util/conditions" "sigs.k8s.io/controller-runtime/pkg/client" ) @@ -643,8 +644,10 @@ func Test_objectMover_checkProvisioningCompleted(t *testing.T) { Name: "cluster1", }, Status: clusterv1.ClusterStatus{ - InfrastructureReady: false, - ControlPlaneInitialized: true, + InfrastructureReady: false, + Conditions: clusterv1.Conditions{ + *conditions.TrueCondition(clusterv1.ControlPlaneInitializedCondition), + }, }, }, }, @@ -665,8 +668,31 @@ func Test_objectMover_checkProvisioningCompleted(t *testing.T) { Name: "cluster1", }, Status: clusterv1.ClusterStatus{ - InfrastructureReady: true, - ControlPlaneInitialized: false, + InfrastructureReady: true, + }, + }, + }, + }, + wantErr: true, + }, + { + name: "Blocks with a cluster with ControlPlaneInitialized=False", + fields: fields{ + objs: []client.Object{ + &clusterv1.Cluster{ + TypeMeta: metav1.TypeMeta{ + Kind: "Cluster", + APIVersion: clusterv1.GroupVersion.String(), + }, + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns1", + Name: "cluster1", + }, + Status: clusterv1.ClusterStatus{ + InfrastructureReady: true, + Conditions: clusterv1.Conditions{ + *conditions.FalseCondition(clusterv1.ControlPlaneInitializedCondition, "", clusterv1.ConditionSeverityInfo, ""), 
+ }, }, }, }, @@ -690,9 +716,11 @@ func Test_objectMover_checkProvisioningCompleted(t *testing.T) { ControlPlaneRef: &corev1.ObjectReference{}, }, Status: clusterv1.ClusterStatus{ - InfrastructureReady: true, - ControlPlaneInitialized: true, - ControlPlaneReady: false, + InfrastructureReady: true, + Conditions: clusterv1.Conditions{ + *conditions.TrueCondition(clusterv1.ControlPlaneInitializedCondition), + }, + ControlPlaneReady: false, }, }, }, @@ -714,8 +742,10 @@ func Test_objectMover_checkProvisioningCompleted(t *testing.T) { UID: "cluster1", }, Status: clusterv1.ClusterStatus{ - InfrastructureReady: true, - ControlPlaneInitialized: true, + InfrastructureReady: true, + Conditions: clusterv1.Conditions{ + *conditions.TrueCondition(clusterv1.ControlPlaneInitializedCondition), + }, }, }, &clusterv1.Machine{ @@ -758,8 +788,10 @@ func Test_objectMover_checkProvisioningCompleted(t *testing.T) { UID: "cluster1", }, Status: clusterv1.ClusterStatus{ - InfrastructureReady: true, - ControlPlaneInitialized: true, + InfrastructureReady: true, + Conditions: clusterv1.Conditions{ + *conditions.TrueCondition(clusterv1.ControlPlaneInitializedCondition), + }, }, }, &clusterv1.Machine{ diff --git a/config/crd/bases/cluster.x-k8s.io_clusters.yaml b/config/crd/bases/cluster.x-k8s.io_clusters.yaml index 8d4b3d5177c4..c800cdcb4ae4 100644 --- a/config/crd/bases/cluster.x-k8s.io_clusters.yaml +++ b/config/crd/bases/cluster.x-k8s.io_clusters.yaml @@ -367,9 +367,6 @@ spec: - type type: object type: array - controlPlaneInitialized: - description: ControlPlaneInitialized defines if the control plane has been initialized. - type: boolean controlPlaneReady: description: ControlPlaneReady defines if the control plane is ready. 
type: boolean diff --git a/controllers/cluster_controller.go b/controllers/cluster_controller.go index bac33948cadb..59d16d420b34 100644 --- a/controllers/cluster_controller.go +++ b/controllers/cluster_controller.go @@ -463,6 +463,12 @@ func splitMachineList(list *clusterv1.MachineList) (*clusterv1.MachineList, *clu } func (r *ClusterReconciler) reconcileControlPlaneInitialized(ctx context.Context, cluster *clusterv1.Cluster) (ctrl.Result, error) { + // Skip checking if the control plane is initialized when using a Control Plane Provider (this is reconciled in + // reconcileControlPlane instead). + if cluster.Spec.ControlPlaneRef != nil { + return ctrl.Result{}, nil + } + if conditions.IsTrue(cluster, clusterv1.ControlPlaneInitializedCondition) { return ctrl.Result{}, nil } @@ -505,7 +511,7 @@ func (r *ClusterReconciler) controlPlaneMachineToCluster(o client.Object) []ctrl return nil } - if cluster.Status.ControlPlaneInitialized { + if conditions.IsTrue(cluster, clusterv1.ControlPlaneInitializedCondition) { return nil } diff --git a/controllers/cluster_controller_phases.go b/controllers/cluster_controller_phases.go index 0167b5425909..234bc854fdaa 100644 --- a/controllers/cluster_controller_phases.go +++ b/controllers/cluster_controller_phases.go @@ -237,12 +237,16 @@ func (r *ClusterReconciler) reconcileControlPlane(ctx context.Context, cluster * // Update cluster.Status.ControlPlaneInitialized if it hasn't already been set // Determine if the control plane provider is initialized. 
- if !cluster.Status.ControlPlaneInitialized { + if !conditions.IsTrue(cluster, clusterv1.ControlPlaneInitializedCondition) { initialized, err := external.IsInitialized(controlPlaneConfig) if err != nil { return ctrl.Result{}, err } - cluster.Status.ControlPlaneInitialized = initialized + if initialized { + conditions.MarkTrue(cluster, clusterv1.ControlPlaneInitializedCondition) + } else { + conditions.MarkFalse(cluster, clusterv1.ControlPlaneInitializedCondition, clusterv1.WaitingForControlPlaneProviderInitializedReason, clusterv1.ConditionSeverityInfo, "Waiting for control plane provider to indicate the control plane has been initialized") + } } return ctrl.Result{}, nil diff --git a/controllers/cluster_controller_test.go b/controllers/cluster_controller_test.go index 2f909d3ab03c..cd292cf2ed61 100644 --- a/controllers/cluster_controller_test.go +++ b/controllers/cluster_controller_test.go @@ -21,6 +21,7 @@ import ( . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" + "sigs.k8s.io/cluster-api/util/conditions" corev1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1" @@ -238,7 +239,7 @@ var _ = Describe("Cluster Reconciler", func() { }, timeout).ShouldNot(BeEmpty()) }) - It("Should successfully set Status.ControlPlaneInitialized on the cluster object if controlplane is ready", func() { + It("Should successfully set ControlPlaneInitialized on the cluster object if controlplane is ready", func() { cluster := &clusterv1.Cluster{ ObjectMeta: metav1.ObjectMeta{ GenerateName: "test6-", @@ -319,7 +320,7 @@ var _ = Describe("Cluster Reconciler", func() { if err := testEnv.Get(ctx, key, cluster); err != nil { return false } - return cluster.Status.ControlPlaneInitialized + return conditions.IsTrue(cluster, clusterv1.ControlPlaneInitializedCondition) }, timeout).Should(BeTrue()) }) }) @@ -642,5 +643,5 @@ func TestReconcileControlPlaneInitializedControlPlaneRef(t *testing.T) { res, err := r.reconcileControlPlaneInitialized(ctx, c) g.Expect(res.IsZero()).To(BeTrue()) 
g.Expect(err).ToNot(HaveOccurred()) - g.Expect(c.Status.ControlPlaneInitialized).To(BeFalse()) + g.Expect(conditions.Has(c, clusterv1.ControlPlaneInitializedCondition)).To(BeFalse()) } diff --git a/controllers/machine_controller.go b/controllers/machine_controller.go index 86dabb571aa2..370c018b4f7a 100644 --- a/controllers/machine_controller.go +++ b/controllers/machine_controller.go @@ -251,7 +251,7 @@ func patchMachine(ctx context.Context, patchHelper *patch.Helper, machine *clust func (r *MachineReconciler) reconcile(ctx context.Context, cluster *clusterv1.Cluster, m *clusterv1.Machine) (ctrl.Result, error) { log := ctrl.LoggerFrom(ctx) - if cluster.Status.ControlPlaneInitialized { + if conditions.IsTrue(cluster, clusterv1.ControlPlaneInitializedCondition) { if err := r.watchClusterNodes(ctx, cluster); err != nil { log.Error(err, "error watching nodes on target cluster") return ctrl.Result{}, err diff --git a/controllers/machine_controller_test.go b/controllers/machine_controller_test.go index d0ca798f50d8..3bc8b4aca355 100644 --- a/controllers/machine_controller_test.go +++ b/controllers/machine_controller_test.go @@ -112,7 +112,7 @@ func TestWatches(t *testing.T) { // Patch cluster control plane initialized (this is required to start node watch) patchHelper, err := patch.NewHelper(testCluster, testEnv) g.Expect(err).ShouldNot(HaveOccurred()) - testCluster.Status.ControlPlaneInitialized = true + conditions.MarkTrue(testCluster, clusterv1.ControlPlaneInitializedCondition) g.Expect(patchHelper.Patch(ctx, testCluster, patch.WithStatusObservedGeneration{})).To(Succeed()) // Patch infra machine ready diff --git a/controllers/machinehealthcheck_controller_test.go b/controllers/machinehealthcheck_controller_test.go index e8f863217d16..ffa3c0b995e3 100644 --- a/controllers/machinehealthcheck_controller_test.go +++ b/controllers/machinehealthcheck_controller_test.go @@ -19,7 +19,7 @@ import ( "context" "errors" "fmt" - "sigs.k8s.io/controller-runtime/pkg/log" + "sort" 
"testing" "time" @@ -43,6 +43,7 @@ import ( "sigs.k8s.io/cluster-api/util/patch" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" + "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/reconcile" ) diff --git a/controllers/remote/cluster_cache.go b/controllers/remote/cluster_cache.go index b6c402a9a85e..9c45ae68069c 100644 --- a/controllers/remote/cluster_cache.go +++ b/controllers/remote/cluster_cache.go @@ -32,6 +32,7 @@ import ( "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/rest" clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha4" + "sigs.k8s.io/cluster-api/util/conditions" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/cache" "sigs.k8s.io/controller-runtime/pkg/client" @@ -313,7 +314,7 @@ func (t *ClusterCacheTracker) healthCheckCluster(ctx context.Context, in *health return false, nil } - if !cluster.Status.InfrastructureReady || !cluster.Status.ControlPlaneInitialized { + if !cluster.Status.InfrastructureReady || !conditions.IsTrue(cluster, clusterv1.ControlPlaneInitializedCondition) { // If the infrastructure or control plane aren't marked as ready, we should requeue and wait. 
return false, nil } diff --git a/controllers/remote/cluster_cache_healthcheck_test.go b/controllers/remote/cluster_cache_healthcheck_test.go index 77fb2a541693..37212e2b5b24 100644 --- a/controllers/remote/cluster_cache_healthcheck_test.go +++ b/controllers/remote/cluster_cache_healthcheck_test.go @@ -31,6 +31,7 @@ import ( "k8s.io/klog/klogr" clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha4" "sigs.k8s.io/cluster-api/util" + "sigs.k8s.io/cluster-api/util/conditions" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/manager" ) @@ -85,7 +86,7 @@ var _ = Describe("ClusterCache HealthCheck suite", func() { }, } Expect(k8sClient.Create(ctx, testCluster)).To(Succeed()) - testCluster.Status.ControlPlaneInitialized = true + conditions.MarkTrue(testCluster, clusterv1.ControlPlaneInitializedCondition) testCluster.Status.InfrastructureReady = true Expect(k8sClient.Status().Update(ctx, testCluster)).To(Succeed()) diff --git a/controllers/remote/cluster_cache_tracker_test.go b/controllers/remote/cluster_cache_tracker_test.go index 5b67cc09a995..3daf58205ca2 100644 --- a/controllers/remote/cluster_cache_tracker_test.go +++ b/controllers/remote/cluster_cache_tracker_test.go @@ -27,6 +27,7 @@ import ( "k8s.io/client-go/kubernetes/scheme" clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha4" "sigs.k8s.io/cluster-api/util" + "sigs.k8s.io/cluster-api/util/conditions" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/handler" @@ -101,7 +102,7 @@ var _ = Describe("ClusterCache Tracker suite", func() { }, } Expect(k8sClient.Create(ctx, clusterA)).To(Succeed()) - clusterA.Status.ControlPlaneInitialized = true + conditions.MarkTrue(clusterA, clusterv1.ControlPlaneInitializedCondition) clusterA.Status.InfrastructureReady = true Expect(k8sClient.Status().Update(ctx, clusterA)).To(Succeed()) diff --git a/test/infrastructure/docker/controllers/dockermachine_controller.go 
b/test/infrastructure/docker/controllers/dockermachine_controller.go index a96334e87eae..980007ed2dc8 100644 --- a/test/infrastructure/docker/controllers/dockermachine_controller.go +++ b/test/infrastructure/docker/controllers/dockermachine_controller.go @@ -191,7 +191,7 @@ func (r *DockerMachineReconciler) reconcileNormal(ctx context.Context, cluster * // Make sure bootstrap data is available and populated. if machine.Spec.Bootstrap.DataSecretName == nil { - if !util.IsControlPlaneMachine(machine) && !cluster.Status.ControlPlaneInitialized { + if !util.IsControlPlaneMachine(machine) && !conditions.IsTrue(cluster, clusterv1.ControlPlaneInitializedCondition) { log.Info("Waiting for the control plane to be initialized") conditions.MarkFalse(dockerMachine, infrav1.ContainerProvisionedCondition, clusterv1.WaitingForControlPlaneAvailableReason, clusterv1.ConditionSeverityInfo, "") return ctrl.Result{}, nil