From d737e273e731784dd672a9fffd5c14870ec57e93 Mon Sep 17 00:00:00 2001
From: Evgeny Shmarnev
Date: Mon, 26 Oct 2020 15:02:19 +0100
Subject: [PATCH] Forward-port: modify DockerMachine condition status to
 report waiting for the control plane to be ready

---
 api/v1alpha4/condition_consts.go                |  7 +++
 .../kubeadm/api/v1alpha4/condition_consts.go    |  5 +-
 .../controllers/kubeadmconfig_controller.go     |  2 +-
 .../kubeadmconfig_controller_test.go            |  2 +-
 .../controllers/dockermachine_controller.go     | 10 +++-
 .../dockermachine_controller_test.go            | 48 ++++++++++++++-----
 6 files changed, 58 insertions(+), 16 deletions(-)

diff --git a/api/v1alpha4/condition_consts.go b/api/v1alpha4/condition_consts.go
index 2e7d6420fc44..191460012150 100644
--- a/api/v1alpha4/condition_consts.go
+++ b/api/v1alpha4/condition_consts.go
@@ -65,6 +65,13 @@ const (
     // to be available.
     // NOTE: This reason is used only as a fallback when the control plane object is not reporting its own ready condition.
     WaitingForControlPlaneFallbackReason = "WaitingForControlPlane"
+
+    // WaitingForControlPlaneAvailableReason (Severity=Info) documents a Cluster API object
+    // waiting for the control plane machine to be available.
+    //
+    // NOTE: Having the control plane machine available is a pre-condition for joining additional control planes
+    // or workers nodes.
+    WaitingForControlPlaneAvailableReason = "WaitingForControlPlaneAvailable"
 )
 
 // Conditions and condition Reasons for the Machine object
diff --git a/bootstrap/kubeadm/api/v1alpha4/condition_consts.go b/bootstrap/kubeadm/api/v1alpha4/condition_consts.go
index 4743af96ee08..ff48dd6cdc23 100644
--- a/bootstrap/kubeadm/api/v1alpha4/condition_consts.go
+++ b/bootstrap/kubeadm/api/v1alpha4/condition_consts.go
@@ -40,7 +40,10 @@ const (
     //
     // NOTE: Having the control plane machine available is a pre-condition for joining additional control planes
     // or workers nodes.
-    WaitingForControlPlaneAvailableReason = "WaitingForControlPlaneAvailable"
+    // DEPRECATED: This has been deprecated in v1alpha3 and will be removed in a future version.
+    // Switch to WaitingForControlPlaneAvailableReason constant from the `sigs.k8s.io/cluster-api/api/v1alpha3`
+    // package.
+    WaitingForControlPlaneAvailableReason = clusterv1.WaitingForControlPlaneAvailableReason
 
     // DataSecretGenerationFailedReason (Severity=Warning) documents a KubeadmConfig controller detecting
     // an error while generating a data secret; those kind of errors are usually due to misconfigurations
diff --git a/bootstrap/kubeadm/controllers/kubeadmconfig_controller.go b/bootstrap/kubeadm/controllers/kubeadmconfig_controller.go
index 9bdbee1a9aeb..511320bb5b26 100644
--- a/bootstrap/kubeadm/controllers/kubeadmconfig_controller.go
+++ b/bootstrap/kubeadm/controllers/kubeadmconfig_controller.go
@@ -284,7 +284,7 @@ func (r *KubeadmConfigReconciler) handleClusterNotInitialized(ctx context.Contex
     // this is required in order to avoid the condition's LastTransitionTime to flicker in case of errors surfacing
     // using the DataSecretGeneratedFailedReason
     if conditions.GetReason(scope.Config, bootstrapv1.DataSecretAvailableCondition) != bootstrapv1.DataSecretGenerationFailedReason {
-        conditions.MarkFalse(scope.Config, bootstrapv1.DataSecretAvailableCondition, bootstrapv1.WaitingForControlPlaneAvailableReason, clusterv1.ConditionSeverityInfo, "")
+        conditions.MarkFalse(scope.Config, bootstrapv1.DataSecretAvailableCondition, clusterv1.WaitingForControlPlaneAvailableReason, clusterv1.ConditionSeverityInfo, "")
     }
 
     // if it's NOT a control plane machine, requeue
diff --git a/bootstrap/kubeadm/controllers/kubeadmconfig_controller_test.go b/bootstrap/kubeadm/controllers/kubeadmconfig_controller_test.go
index 793150f30a48..ff48f81208f6 100644
--- a/bootstrap/kubeadm/controllers/kubeadmconfig_controller_test.go
+++ b/bootstrap/kubeadm/controllers/kubeadmconfig_controller_test.go
@@ -407,7 +407,7 @@ func TestKubeadmConfigReconciler_Reconcile_RequeueJoiningNodesIfControlPlaneNotI
             g.Expect(err).NotTo(HaveOccurred())
             g.Expect(result.Requeue).To(BeFalse())
             g.Expect(result.RequeueAfter).To(Equal(30 * time.Second))
-            assertHasFalseCondition(g, myclient, tc.request, bootstrapv1.DataSecretAvailableCondition, clusterv1.ConditionSeverityInfo, bootstrapv1.WaitingForControlPlaneAvailableReason)
+            assertHasFalseCondition(g, myclient, tc.request, bootstrapv1.DataSecretAvailableCondition, clusterv1.ConditionSeverityInfo, clusterv1.WaitingForControlPlaneAvailableReason)
         })
     }
 }
diff --git a/test/infrastructure/docker/controllers/dockermachine_controller.go b/test/infrastructure/docker/controllers/dockermachine_controller.go
index 056e82420885..93ab4a9842e4 100644
--- a/test/infrastructure/docker/controllers/dockermachine_controller.go
+++ b/test/infrastructure/docker/controllers/dockermachine_controller.go
@@ -151,7 +151,7 @@ func (r *DockerMachineReconciler) Reconcile(ctx context.Context, req ctrl.Reques
     }
 
     // Handle non-deleted machines
-    return r.reconcileNormal(ctx, machine, dockerMachine, externalMachine, externalLoadBalancer)
+    return r.reconcileNormal(ctx, cluster, machine, dockerMachine, externalMachine, externalLoadBalancer)
 }
 
 func patchDockerMachine(ctx context.Context, patchHelper *patch.Helper, dockerMachine *infrav1.DockerMachine) error {
@@ -177,7 +177,7 @@ func patchDockerMachine(ctx context.Context, patchHelper *patch.Helper, dockerMa
     )
 }
 
-func (r *DockerMachineReconciler) reconcileNormal(ctx context.Context, machine *clusterv1.Machine, dockerMachine *infrav1.DockerMachine, externalMachine *docker.Machine, externalLoadBalancer *docker.LoadBalancer) (res ctrl.Result, retErr error) {
+func (r *DockerMachineReconciler) reconcileNormal(ctx context.Context, cluster *clusterv1.Cluster, machine *clusterv1.Machine, dockerMachine *infrav1.DockerMachine, externalMachine *docker.Machine, externalLoadBalancer *docker.LoadBalancer) (res ctrl.Result, retErr error) {
     log := ctrl.LoggerFrom(ctx)
 
     // if the machine is already provisioned, return
@@ -191,6 +191,12 @@ func (r *DockerMachineReconciler) reconcileNormal(ctx context.Context, machine *
 
     // Make sure bootstrap data is available and populated.
     if machine.Spec.Bootstrap.DataSecretName == nil {
+        if !util.IsControlPlaneMachine(machine) && !cluster.Status.ControlPlaneInitialized {
+            log.Info("Waiting for the control plane to be initialized")
+            conditions.MarkFalse(dockerMachine, infrav1.ContainerProvisionedCondition, clusterv1.WaitingForControlPlaneAvailableReason, clusterv1.ConditionSeverityInfo, "")
+            return ctrl.Result{}, nil
+        }
+
         log.Info("Waiting for the Bootstrap provider controller to set bootstrap data")
         conditions.MarkFalse(dockerMachine, infrav1.ContainerProvisionedCondition, infrav1.WaitingForBootstrapDataReason, clusterv1.ConditionSeverityInfo, "")
         return ctrl.Result{}, nil
diff --git a/test/infrastructure/docker/controllers/dockermachine_controller_test.go b/test/infrastructure/docker/controllers/dockermachine_controller_test.go
index 8ab890f99607..f5f476f1fe93 100644
--- a/test/infrastructure/docker/controllers/dockermachine_controller_test.go
+++ b/test/infrastructure/docker/controllers/dockermachine_controller_test.go
@@ -30,6 +30,18 @@ import (
     "sigs.k8s.io/controller-runtime/pkg/client/fake"
 )
 
+var (
+    clusterName   = "my-cluster"
+    dockerCluster = newDockerCluster(clusterName, "my-docker-cluster")
+    cluster       = newCluster(clusterName, dockerCluster)
+
+    dockerMachine = newDockerMachine("my-docker-machine-0", "my-machine-0")
+    machine       = newMachine(clusterName, "my-machine-0", dockerMachine)
+
+    anotherDockerMachine = newDockerMachine("my-docker-machine-1", "my-machine-1")
+    anotherMachine       = newMachine(clusterName, "my-machine-1", anotherDockerMachine)
+)
+
 func setupScheme() *runtime.Scheme {
     s := runtime.NewScheme()
     if err := clusterv1.AddToScheme(s); err != nil {
@@ -44,15 +56,11 @@ func TestDockerMachineReconciler_DockerClusterToDockerMachines(t *testing.T) {
     g := NewWithT(t)
-    clusterName := "my-cluster"
-    dockerCluster := newDockerCluster(clusterName, "my-docker-cluster")
-    dockerMachine1 := newDockerMachine("my-docker-machine-0")
-    dockerMachine2 := newDockerMachine("my-docker-machine-1")
     objects := []client.Object{
-        newCluster(clusterName),
+        cluster,
         dockerCluster,
-        newMachine(clusterName, "my-machine-0", dockerMachine1),
-        newMachine(clusterName, "my-machine-1", dockerMachine2),
+        machine,
+        anotherMachine,
         // Intentionally omitted newMachine(clusterName, "my-machine-2", nil),
     }
@@ -69,13 +77,22 @@ func TestDockerMachineReconciler_DockerClusterToDockerMachines(t *testing.T) {
     g.Expect(machineNames).To(ConsistOf("my-machine-0", "my-machine-1"))
 }
 
-func newCluster(clusterName string) *clusterv1.Cluster {
-    return &clusterv1.Cluster{
+func newCluster(clusterName string, dockerCluster *infrav1.DockerCluster) *clusterv1.Cluster {
+    cluster := &clusterv1.Cluster{
         TypeMeta: metav1.TypeMeta{},
         ObjectMeta: metav1.ObjectMeta{
             Name: clusterName,
         },
     }
+    if dockerCluster != nil {
+        cluster.Spec.InfrastructureRef = &v1.ObjectReference{
+            Name:       dockerCluster.Name,
+            Namespace:  dockerCluster.Namespace,
+            Kind:       dockerCluster.Kind,
+            APIVersion: dockerCluster.GroupVersionKind().GroupVersion().String(),
+        }
+    }
+    return cluster
 }
 
 func newDockerCluster(clusterName, dockerName string) *infrav1.DockerCluster {
@@ -114,11 +131,20 @@ func newMachine(clusterName, machineName string, dockerMachine *infrav1.DockerMa
     return machine
 }
 
-func newDockerMachine(name string) *infrav1.DockerMachine {
+func newDockerMachine(dockerMachineName, machineName string) *infrav1.DockerMachine {
     return &infrav1.DockerMachine{
         TypeMeta: metav1.TypeMeta{},
         ObjectMeta: metav1.ObjectMeta{
-            Name: name,
+            Name:            dockerMachineName,
+            ResourceVersion: "1",
+            Finalizers:      []string{infrav1.MachineFinalizer},
+            OwnerReferences: []metav1.OwnerReference{
+                {
+                    APIVersion: clusterv1.GroupVersion.String(),
+                    Kind:       "Machine",
+                    Name:       machineName,
+                },
+            },
         },
         Spec:   infrav1.DockerMachineSpec{},
         Status: infrav1.DockerMachineStatus{},