Commit 7ed9b74

Merge pull request #3726 from srm09/feature/report-docker-waiting-control-plane

✨ Modifies DockerMachine condition status to report for control plane to be ready
k8s-ci-robot authored Oct 23, 2020
2 parents 3886ffa + 1d65c3b commit 7ed9b74
Showing 6 changed files with 58 additions and 16 deletions.
7 changes: 7 additions & 0 deletions api/v1alpha3/condition_consts.go
@@ -65,6 +65,13 @@ const (
     // to be available.
     // NOTE: This reason is used only as a fallback when the control plane object is not reporting its own ready condition.
     WaitingForControlPlaneFallbackReason = "WaitingForControlPlane"
+
+    // WaitingForControlPlaneAvailableReason (Severity=Info) documents a Cluster API object
+    // waiting for the control plane machine to be available.
+    //
+    // NOTE: Having the control plane machine available is a pre-condition for joining additional control planes
+    // or worker nodes.
+    WaitingForControlPlaneAvailableReason = "WaitingForControlPlaneAvailable"
 )

 // Conditions and condition Reasons for the Machine object
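
For context, this is how an infrastructure provider surfaces the new reason on one of its conditions; the DockerMachine change later in this commit does exactly this. A minimal sketch, assuming CAPD's infrav1 types; the markWaitingForControlPlane helper name is ours, not part of the commit:

package example

import (
    clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3"
    infrav1 "sigs.k8s.io/cluster-api/test/infrastructure/docker/api/v1alpha3"
    "sigs.k8s.io/cluster-api/util/conditions"
)

// markWaitingForControlPlane flags a DockerMachine as not yet provisioned
// because the control plane is not available. Severity=Info signals a
// normal, transient state rather than an error.
// NOTE: illustrative helper, not code from this commit.
func markWaitingForControlPlane(dockerMachine *infrav1.DockerMachine) {
    conditions.MarkFalse(dockerMachine,
        infrav1.ContainerProvisionedCondition,
        clusterv1.WaitingForControlPlaneAvailableReason,
        clusterv1.ConditionSeverityInfo, "")
}
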
5 changes: 4 additions & 1 deletion bootstrap/kubeadm/api/v1alpha3/condition_consts.go
@@ -40,7 +40,10 @@ const (
     //
     // NOTE: Having the control plane machine available is a pre-condition for joining additional control planes
     // or worker nodes.
-    WaitingForControlPlaneAvailableReason = "WaitingForControlPlaneAvailable"
+    // DEPRECATED: This has been deprecated in v1alpha3 and will be removed in a future version.
+    // Switch to the WaitingForControlPlaneAvailableReason constant from the `sigs.k8s.io/cluster-api/api/v1alpha3`
+    // package.
+    WaitingForControlPlaneAvailableReason = clusterv1.WaitingForControlPlaneAvailableReason

     // DataSecretGenerationFailedReason (Severity=Warning) documents a KubeadmConfig controller detecting
     // an error while generating a data secret; those kinds of errors are usually due to misconfigurations
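
Because the bootstrap constant is now only an alias, existing callers keep compiling; migrating is a one-line change of import qualifier. A hedged before/after sketch (the reasonOld/reasonNew variables are illustrative):

package example

import (
    clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3"
    bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1alpha3"
)

// Before: the reason lived in the kubeadm bootstrap API group (now a deprecated alias).
var reasonOld = bootstrapv1.WaitingForControlPlaneAvailableReason

// After: reference the canonical constant from the core Cluster API group.
var reasonNew = clusterv1.WaitingForControlPlaneAvailableReason
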
2 changes: 1 addition & 1 deletion bootstrap/kubeadm/controllers/kubeadmconfig_controller.go
@@ -295,7 +295,7 @@ func (r *KubeadmConfigReconciler) handleClusterNotInitialized(ctx context.Contex
     // this is required in order to avoid the condition's LastTransitionTime flickering in case of errors surfacing
     // using the DataSecretGenerationFailedReason
     if conditions.GetReason(scope.Config, bootstrapv1.DataSecretAvailableCondition) != bootstrapv1.DataSecretGenerationFailedReason {
-        conditions.MarkFalse(scope.Config, bootstrapv1.DataSecretAvailableCondition, bootstrapv1.WaitingForControlPlaneAvailableReason, clusterv1.ConditionSeverityInfo, "")
+        conditions.MarkFalse(scope.Config, bootstrapv1.DataSecretAvailableCondition, clusterv1.WaitingForControlPlaneAvailableReason, clusterv1.ConditionSeverityInfo, "")
     }

     // if it's NOT a control plane machine, requeue
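
The guard above is worth calling out: cluster-api's conditions utility resets a condition's LastTransitionTime whenever the condition state (including its reason) changes, so unconditionally overwriting a failure reason with the waiting reason would make the timestamp flicker on every reconcile. A minimal sketch of the pattern, with a hypothetical helper name of our choosing:

package example

import (
    clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3"
    bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1alpha3"
    "sigs.k8s.io/cluster-api/util/conditions"
)

// markWaitingUnlessFailed downgrades DataSecretAvailable to the waiting
// reason only if the condition is not already reporting a generation
// failure, preserving the failure's LastTransitionTime.
// NOTE: illustrative helper, not code from this commit.
func markWaitingUnlessFailed(config *bootstrapv1.KubeadmConfig) {
    if conditions.GetReason(config, bootstrapv1.DataSecretAvailableCondition) != bootstrapv1.DataSecretGenerationFailedReason {
        conditions.MarkFalse(config, bootstrapv1.DataSecretAvailableCondition,
            clusterv1.WaitingForControlPlaneAvailableReason,
            clusterv1.ConditionSeverityInfo, "")
    }
}
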
bootstrap/kubeadm/controllers/kubeadmconfig_controller_test.go

@@ -422,7 +422,7 @@ func TestKubeadmConfigReconciler_Reconcile_RequeueJoiningNodesIfControlPlaneNotI
             g.Expect(err).NotTo(HaveOccurred())
             g.Expect(result.Requeue).To(BeFalse())
             g.Expect(result.RequeueAfter).To(Equal(30 * time.Second))
-            assertHasFalseCondition(g, myclient, tc.request, bootstrapv1.DataSecretAvailableCondition, clusterv1.ConditionSeverityInfo, bootstrapv1.WaitingForControlPlaneAvailableReason)
+            assertHasFalseCondition(g, myclient, tc.request, bootstrapv1.DataSecretAvailableCondition, clusterv1.ConditionSeverityInfo, clusterv1.WaitingForControlPlaneAvailableReason)
         })
     }
 }
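
The helper's shape can be inferred from the call site; a hedged sketch of what assertHasFalseCondition plausibly does (the body below is our reconstruction for illustration, not the repository's code):

package controllers

import (
    "context"

    . "github.com/onsi/gomega"
    corev1 "k8s.io/api/core/v1"
    ctrl "sigs.k8s.io/controller-runtime"
    "sigs.k8s.io/controller-runtime/pkg/client"

    clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3"
    bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1alpha3"
    "sigs.k8s.io/cluster-api/util/conditions"
)

// assertHasFalseCondition fetches the reconciled KubeadmConfig and checks
// that the given condition is False with the expected severity and reason.
// NOTE: reconstructed for illustration; the actual helper may differ.
func assertHasFalseCondition(g *WithT, c client.Client, req ctrl.Request, t clusterv1.ConditionType, severity clusterv1.ConditionSeverity, reason string) {
    config := &bootstrapv1.KubeadmConfig{}
    g.Expect(c.Get(context.TODO(), req.NamespacedName, config)).To(Succeed())
    cond := conditions.Get(config, t)
    g.Expect(cond).ToNot(BeNil())
    g.Expect(cond.Status).To(Equal(corev1.ConditionFalse))
    g.Expect(cond.Severity).To(Equal(severity))
    g.Expect(cond.Reason).To(Equal(reason))
}
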
test/infrastructure/docker/controllers/dockermachine_controller.go

@@ -158,7 +158,7 @@ func (r *DockerMachineReconciler) Reconcile(req ctrl.Request) (_ ctrl.Result, re
     }

     // Handle non-deleted machines
-    return r.reconcileNormal(ctx, machine, dockerMachine, externalMachine, externalLoadBalancer, log)
+    return r.reconcileNormal(ctx, cluster, machine, dockerMachine, externalMachine, externalLoadBalancer, log)
 }

 func patchDockerMachine(ctx context.Context, patchHelper *patch.Helper, dockerMachine *infrav1.DockerMachine) error {
@@ -184,7 +184,7 @@ func patchDockerMachine(ctx context.Context, patchHelper *patch.Helper, dockerMa
     )
 }

-func (r *DockerMachineReconciler) reconcileNormal(ctx context.Context, machine *clusterv1.Machine, dockerMachine *infrav1.DockerMachine, externalMachine *docker.Machine, externalLoadBalancer *docker.LoadBalancer, log logr.Logger) (res ctrl.Result, retErr error) {
+func (r *DockerMachineReconciler) reconcileNormal(ctx context.Context, cluster *clusterv1.Cluster, machine *clusterv1.Machine, dockerMachine *infrav1.DockerMachine, externalMachine *docker.Machine, externalLoadBalancer *docker.LoadBalancer, log logr.Logger) (res ctrl.Result, retErr error) {
     // if the machine is already provisioned, return
     if dockerMachine.Spec.ProviderID != nil {
         // ensure ready state is set.
@@ -196,6 +196,12 @@ func (r *DockerMachineReconciler) reconcileNormal(ctx context.Context, machine *

     // Make sure bootstrap data is available and populated.
     if machine.Spec.Bootstrap.DataSecretName == nil {
+        if !util.IsControlPlaneMachine(machine) && !cluster.Status.ControlPlaneInitialized {
+            log.Info("Waiting for the control plane to be initialized")
+            conditions.MarkFalse(dockerMachine, infrav1.ContainerProvisionedCondition, clusterv1.WaitingForControlPlaneAvailableReason, clusterv1.ConditionSeverityInfo, "")
+            return ctrl.Result{}, nil
+        }
+
         log.Info("Waiting for the Bootstrap provider controller to set bootstrap data")
         conditions.MarkFalse(dockerMachine, infrav1.ContainerProvisionedCondition, infrav1.WaitingForBootstrapDataReason, clusterv1.ConditionSeverityInfo, "")
         return ctrl.Result{}, nil
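
The net effect of this file: a worker DockerMachine now reports that it is waiting for the control plane instead of a generic "waiting for bootstrap data" message while the cluster is still initializing. The gate itself boils down to a predicate like the following sketch (the shouldWaitForControlPlane name is ours, not from the commit):

package example

import (
    clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3"
    "sigs.k8s.io/cluster-api/util"
)

// shouldWaitForControlPlane reports whether provisioning must hold off:
// control plane machines may proceed (one of them has to come up first),
// while worker machines wait until the cluster marks its control plane
// as initialized.
func shouldWaitForControlPlane(cluster *clusterv1.Cluster, machine *clusterv1.Machine) bool {
    return !util.IsControlPlaneMachine(machine) && !cluster.Status.ControlPlaneInitialized
}
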
test/infrastructure/docker/controllers/dockermachine_controller_test.go

@@ -31,6 +31,18 @@ import (
"sigs.k8s.io/controller-runtime/pkg/handler"
)

var (
clusterName = "my-cluster"
dockerCluster = newDockerCluster(clusterName, "my-docker-cluster")
cluster = newCluster(clusterName, dockerCluster)

dockerMachine = newDockerMachine("my-docker-machine-0", "my-machine-0")
machine = newMachine(clusterName, "my-machine-0", dockerMachine)

anotherDockerMachine = newDockerMachine("my-docker-machine-1", "my-machine-1")
anotherMachine = newMachine(clusterName, "my-machine-1", anotherDockerMachine)
)

func setupScheme() *runtime.Scheme {
s := runtime.NewScheme()
if err := clusterv1.AddToScheme(s); err != nil {
@@ -45,15 +57,11 @@ func setupScheme() *runtime.Scheme {
 func TestDockerMachineReconciler_DockerClusterToDockerMachines(t *testing.T) {
     g := NewWithT(t)

-    clusterName := "my-cluster"
-    dockerCluster := newDockerCluster(clusterName, "my-docker-cluster")
-    dockerMachine1 := newDockerMachine("my-docker-machine-0")
-    dockerMachine2 := newDockerMachine("my-docker-machine-1")
     objects := []runtime.Object{
-        newCluster(clusterName),
+        cluster,
         dockerCluster,
-        newMachine(clusterName, "my-machine-0", dockerMachine1),
-        newMachine(clusterName, "my-machine-1", dockerMachine2),
+        machine,
+        anotherMachine,
         // Intentionally omitted
         newMachine(clusterName, "my-machine-2", nil),
     }
@@ -74,13 +82,22 @@ func TestDockerMachineReconciler_DockerClusterToDockerMachines(t *testing.T) {
     g.Expect(machineNames).To(ConsistOf("my-machine-0", "my-machine-1"))
 }

-func newCluster(clusterName string) *clusterv1.Cluster {
-    return &clusterv1.Cluster{
+func newCluster(clusterName string, dockerCluster *infrav1.DockerCluster) *clusterv1.Cluster {
+    cluster := &clusterv1.Cluster{
         TypeMeta: metav1.TypeMeta{},
         ObjectMeta: metav1.ObjectMeta{
             Name: clusterName,
         },
     }
+    if dockerCluster != nil {
+        cluster.Spec.InfrastructureRef = &v1.ObjectReference{
+            Name:       dockerCluster.Name,
+            Namespace:  dockerCluster.Namespace,
+            Kind:       dockerCluster.Kind,
+            APIVersion: dockerCluster.GroupVersionKind().GroupVersion().String(),
+        }
+    }
+    return cluster
 }

 func newDockerCluster(clusterName, dockerName string) *infrav1.DockerCluster {
@@ -119,11 +136,20 @@ func newMachine(clusterName, machineName string, dockerMachine *infrav1.DockerMa
     return machine
 }

-func newDockerMachine(name string) *infrav1.DockerMachine {
+func newDockerMachine(dockerMachineName, machineName string) *infrav1.DockerMachine {
     return &infrav1.DockerMachine{
         TypeMeta: metav1.TypeMeta{},
         ObjectMeta: metav1.ObjectMeta{
-            Name: name,
+            Name:            dockerMachineName,
+            ResourceVersion: "1",
+            Finalizers:      []string{infrav1.MachineFinalizer},
+            OwnerReferences: []metav1.OwnerReference{
+                {
+                    APIVersion: clusterv1.GroupVersion.String(),
+                    Kind:       "Machine",
+                    Name:       machineName,
+                },
+            },
         },
         Spec:   infrav1.DockerMachineSpec{},
         Status: infrav1.DockerMachineStatus{},
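
With the updated helpers wiring objects together (Cluster to DockerCluster via InfrastructureRef, DockerMachine to Machine via an owner reference), a test can assemble a consistent object graph in a few lines. A hedged usage sketch, assuming this test file's existing imports; buildFixtures is our own illustrative wrapper, not part of the commit:

// buildFixtures assembles a linked Cluster/DockerCluster/Machine/DockerMachine
// graph for a fake client, using the helpers changed in this commit.
func buildFixtures() []runtime.Object {
    dockerCluster := newDockerCluster("my-cluster", "my-docker-cluster")
    cluster := newCluster("my-cluster", dockerCluster)
    dockerMachine := newDockerMachine("my-docker-machine-0", "my-machine-0")
    machine := newMachine("my-cluster", "my-machine-0", dockerMachine)
    return []runtime.Object{cluster, dockerCluster, machine, dockerMachine}
}
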
