
✨ Forward-port modifies DockerMachine condition status to report for control plane to be ready #3869

Merged · merged 1 commit on Oct 27, 2020
api/v1alpha4/condition_consts.go: 7 additions & 0 deletions
@@ -65,6 +65,13 @@ const (
// to be available.
// NOTE: This reason is used only as a fallback when the control plane object is not reporting its own ready condition.
WaitingForControlPlaneFallbackReason = "WaitingForControlPlane"

+// WaitingForControlPlaneAvailableReason (Severity=Info) documents a Cluster API object
+// waiting for the control plane machine to be available.
+//
+// NOTE: Having the control plane machine available is a pre-condition for joining additional control planes
+// or worker nodes.
+WaitingForControlPlaneAvailableReason = "WaitingForControlPlaneAvailable"
)

// Conditions and condition Reasons for the Machine object
bootstrap/kubeadm/api/v1alpha4/condition_consts.go: 4 additions & 1 deletion
@@ -40,7 +40,10 @@ const (
//
// NOTE: Having the control plane machine available is a pre-condition for joining additional control planes
// or worker nodes.
WaitingForControlPlaneAvailableReason = "WaitingForControlPlaneAvailable"
// DEPRECATED: This has been deprecated in v1alpha3 and will be removed in a future version.
// Switch to WaitingForControlPlaneAvailableReason constant from the `sigs.k8s.io/cluster-api/api/v1alpha3`
// package.
WaitingForControlPlaneAvailableReason = clusterv1.WaitingForControlPlaneAvailableReason

// DataSecretGenerationFailedReason (Severity=Warning) documents a KubeadmConfig controller detecting
// an error while generating a data secret; those kind of errors are usually due to misconfigurations
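For downstream code, the alias keeps existing references to the bootstrap constant compiling while they resolve to the core API constant, so recorded condition reasons do not change. A minimal migration sketch, assuming the v1alpha4 import paths used elsewhere in this PR:

package example

import (
	clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha4"
	bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1alpha4"
)

// Both identifiers resolve to the same string value, so switching between
// them is a source-level change only.
var (
	oldReason = bootstrapv1.WaitingForControlPlaneAvailableReason // deprecated alias
	newReason = clusterv1.WaitingForControlPlaneAvailableReason   // canonical constant
)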
bootstrap/kubeadm/controllers/kubeadmconfig_controller.go: 1 addition & 1 deletion
@@ -284,7 +284,7 @@ func (r *KubeadmConfigReconciler) handleClusterNotInitialized(ctx context.Contex
// this is required in order to avoid the condition's LastTransitionTime flickering in case of errors surfacing
// using the DataSecretGenerationFailedReason
if conditions.GetReason(scope.Config, bootstrapv1.DataSecretAvailableCondition) != bootstrapv1.DataSecretGenerationFailedReason {
-conditions.MarkFalse(scope.Config, bootstrapv1.DataSecretAvailableCondition, bootstrapv1.WaitingForControlPlaneAvailableReason, clusterv1.ConditionSeverityInfo, "")
+conditions.MarkFalse(scope.Config, bootstrapv1.DataSecretAvailableCondition, clusterv1.WaitingForControlPlaneAvailableReason, clusterv1.ConditionSeverityInfo, "")
}

// if it's NOT a control plane machine, requeue
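The check above follows a common pattern for keeping a condition's LastTransitionTime stable: an informational "waiting" reason must not overwrite an already-recorded failure reason on every reconcile. A minimal sketch of the pattern, assuming the same `util/conditions` helpers the controller imports:

package example

import (
	clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha4"
	bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1alpha4"
	"sigs.k8s.io/cluster-api/util/conditions"
)

// markWaitingForControlPlane reports that the data secret is pending on the
// control plane, but leaves a previously recorded generation failure in
// place so its LastTransitionTime does not flicker between reconciles.
func markWaitingForControlPlane(config *bootstrapv1.KubeadmConfig) {
	if conditions.GetReason(config, bootstrapv1.DataSecretAvailableCondition) == bootstrapv1.DataSecretGenerationFailedReason {
		return // keep the failure reason and its transition time intact
	}
	conditions.MarkFalse(config, bootstrapv1.DataSecretAvailableCondition,
		clusterv1.WaitingForControlPlaneAvailableReason, clusterv1.ConditionSeverityInfo, "")
}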
@@ -407,7 +407,7 @@ func TestKubeadmConfigReconciler_Reconcile_RequeueJoiningNodesIfControlPlaneNotI
g.Expect(err).NotTo(HaveOccurred())
g.Expect(result.Requeue).To(BeFalse())
g.Expect(result.RequeueAfter).To(Equal(30 * time.Second))
-assertHasFalseCondition(g, myclient, tc.request, bootstrapv1.DataSecretAvailableCondition, clusterv1.ConditionSeverityInfo, bootstrapv1.WaitingForControlPlaneAvailableReason)
+assertHasFalseCondition(g, myclient, tc.request, bootstrapv1.DataSecretAvailableCondition, clusterv1.ConditionSeverityInfo, clusterv1.WaitingForControlPlaneAvailableReason)
})
}
}
@@ -151,7 +151,7 @@ func (r *DockerMachineReconciler) Reconcile(ctx context.Context, req ctrl.Reques
}

// Handle non-deleted machines
-return r.reconcileNormal(ctx, machine, dockerMachine, externalMachine, externalLoadBalancer)
+return r.reconcileNormal(ctx, cluster, machine, dockerMachine, externalMachine, externalLoadBalancer)
}

func patchDockerMachine(ctx context.Context, patchHelper *patch.Helper, dockerMachine *infrav1.DockerMachine) error {
@@ -177,7 +177,7 @@ func patchDockerMachine(ctx context.Context, patchHelper *patch.Helper, dockerMa
)
}

-func (r *DockerMachineReconciler) reconcileNormal(ctx context.Context, machine *clusterv1.Machine, dockerMachine *infrav1.DockerMachine, externalMachine *docker.Machine, externalLoadBalancer *docker.LoadBalancer) (res ctrl.Result, retErr error) {
+func (r *DockerMachineReconciler) reconcileNormal(ctx context.Context, cluster *clusterv1.Cluster, machine *clusterv1.Machine, dockerMachine *infrav1.DockerMachine, externalMachine *docker.Machine, externalLoadBalancer *docker.LoadBalancer) (res ctrl.Result, retErr error) {
log := ctrl.LoggerFrom(ctx)

// if the machine is already provisioned, return
@@ -191,6 +191,12 @@ func (r *DockerMachineReconciler) reconcileNormal(ctx context.Context, machine *

// Make sure bootstrap data is available and populated.
if machine.Spec.Bootstrap.DataSecretName == nil {
+if !util.IsControlPlaneMachine(machine) && !cluster.Status.ControlPlaneInitialized {
+log.Info("Waiting for the control plane to be initialized")
+conditions.MarkFalse(dockerMachine, infrav1.ContainerProvisionedCondition, clusterv1.WaitingForControlPlaneAvailableReason, clusterv1.ConditionSeverityInfo, "")
+return ctrl.Result{}, nil
+}

log.Info("Waiting for the Bootstrap provider controller to set bootstrap data")
conditions.MarkFalse(dockerMachine, infrav1.ContainerProvisionedCondition, infrav1.WaitingForBootstrapDataReason, clusterv1.ConditionSeverityInfo, "")
return ctrl.Result{}, nil
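Taken together, the change makes the "why" visible when bootstrap data is missing: a worker machine on a cluster whose control plane has not initialized reports WaitingForControlPlaneAvailable, while control plane machines (and workers on an initialized cluster) keep reporting WaitingForBootstrapData. A condensed sketch of that decision order, assuming the CAPD v1alpha4 import path and the `util` helpers the controller already uses:

package example

import (
	ctrl "sigs.k8s.io/controller-runtime"

	clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha4"
	infrav1 "sigs.k8s.io/cluster-api/test/infrastructure/docker/api/v1alpha4"
	"sigs.k8s.io/cluster-api/util"
	"sigs.k8s.io/cluster-api/util/conditions"
)

// waitForBootstrapData mirrors the gate added above: workers cannot join
// until the control plane is initialized, so that case is reported first;
// otherwise the bootstrap provider simply has not produced the secret yet.
func waitForBootstrapData(cluster *clusterv1.Cluster, machine *clusterv1.Machine, dockerMachine *infrav1.DockerMachine) (ctrl.Result, error) {
	if !util.IsControlPlaneMachine(machine) && !cluster.Status.ControlPlaneInitialized {
		conditions.MarkFalse(dockerMachine, infrav1.ContainerProvisionedCondition,
			clusterv1.WaitingForControlPlaneAvailableReason, clusterv1.ConditionSeverityInfo, "")
		return ctrl.Result{}, nil
	}
	conditions.MarkFalse(dockerMachine, infrav1.ContainerProvisionedCondition,
		infrav1.WaitingForBootstrapDataReason, clusterv1.ConditionSeverityInfo, "")
	return ctrl.Result{}, nil
}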
@@ -30,6 +30,18 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client/fake"
)

+var (
+clusterName = "my-cluster"
+dockerCluster = newDockerCluster(clusterName, "my-docker-cluster")
+cluster = newCluster(clusterName, dockerCluster)
+
+dockerMachine = newDockerMachine("my-docker-machine-0", "my-machine-0")
+machine = newMachine(clusterName, "my-machine-0", dockerMachine)
+
+anotherDockerMachine = newDockerMachine("my-docker-machine-1", "my-machine-1")
+anotherMachine = newMachine(clusterName, "my-machine-1", anotherDockerMachine)
+)

func setupScheme() *runtime.Scheme {
s := runtime.NewScheme()
if err := clusterv1.AddToScheme(s); err != nil {
@@ -44,15 +56,11 @@ func setupScheme() *runtime.Scheme {
func TestDockerMachineReconciler_DockerClusterToDockerMachines(t *testing.T) {
g := NewWithT(t)

-clusterName := "my-cluster"
-dockerCluster := newDockerCluster(clusterName, "my-docker-cluster")
-dockerMachine1 := newDockerMachine("my-docker-machine-0")
-dockerMachine2 := newDockerMachine("my-docker-machine-1")
objects := []client.Object{
-newCluster(clusterName),
+cluster,
dockerCluster,
-newMachine(clusterName, "my-machine-0", dockerMachine1),
-newMachine(clusterName, "my-machine-1", dockerMachine2),
+machine,
+anotherMachine,
// Intentionally omitted
newMachine(clusterName, "my-machine-2", nil),
}
@@ -69,13 +77,22 @@ func TestDockerMachineReconciler_DockerClusterToDockerMachines(t *testing.T) {
g.Expect(machineNames).To(ConsistOf("my-machine-0", "my-machine-1"))
}

-func newCluster(clusterName string) *clusterv1.Cluster {
-return &clusterv1.Cluster{
+func newCluster(clusterName string, dockerCluster *infrav1.DockerCluster) *clusterv1.Cluster {
+cluster := &clusterv1.Cluster{
TypeMeta: metav1.TypeMeta{},
ObjectMeta: metav1.ObjectMeta{
Name: clusterName,
},
}
+if dockerCluster != nil {
+cluster.Spec.InfrastructureRef = &v1.ObjectReference{
+Name: dockerCluster.Name,
+Namespace: dockerCluster.Namespace,
+Kind: dockerCluster.Kind,
+APIVersion: dockerCluster.GroupVersionKind().GroupVersion().String(),
+}
+}
+return cluster
}

func newDockerCluster(clusterName, dockerName string) *infrav1.DockerCluster {
@@ -114,11 +131,20 @@ func newMachine(clusterName, machineName string, dockerMachine *infrav1.DockerMa
return machine
}

-func newDockerMachine(name string) *infrav1.DockerMachine {
+func newDockerMachine(dockerMachineName, machineName string) *infrav1.DockerMachine {
return &infrav1.DockerMachine{
TypeMeta: metav1.TypeMeta{},
ObjectMeta: metav1.ObjectMeta{
-Name: name,
+Name: dockerMachineName,
+ResourceVersion: "1",
+Finalizers: []string{infrav1.MachineFinalizer},
+OwnerReferences: []metav1.OwnerReference{
+{
+APIVersion: clusterv1.GroupVersion.String(),
+Kind: "Machine",
+Name: machineName,
+},
+},
},
Spec: infrav1.DockerMachineSpec{},
Status: infrav1.DockerMachineStatus{},