KCP should update conditions when reconcileHealth is failing
fabriziopandini committed Oct 28, 2020
1 parent db6cc2a commit b3fa76f
Showing 2 changed files with 13 additions and 7 deletions.
8 changes: 1 addition & 7 deletions controlplane/kubeadm/controllers/controller.go
@@ -304,20 +304,14 @@ func (r *KubeadmControlPlaneReconciler) reconcile(ctx context.Context, cluster *
 	// source ref (reason@machine/name) so the problem can be easily tracked down to its source machine.
 	conditions.SetAggregate(controlPlane.KCP, controlplanev1.MachinesReadyCondition, controlPlane.Machines.ConditionGetters(), conditions.AddSourceRef())
 
-	// reconcileControlPlaneHealth returns err if there is a machine being deleted or if the control plane is unhealthy.
-	// If the control plane is not yet initialized, this call shouldn't fail.
-	if result, err := r.reconcileControlPlaneHealth(ctx, cluster, kcp, controlPlane); err != nil || !result.IsZero() {
-		return result, err
-	}
-
 	// Control plane machines rollout due to configuration changes (e.g. upgrades) takes precedence over other operations.
 	needRollout := controlPlane.MachinesNeedingRollout()
 	switch {
 	case len(needRollout) > 0:
 		logger.Info("Rolling out Control Plane machines", "needRollout", needRollout.Names())
 		// NOTE: we are using Status.UpdatedReplicas from the previous reconciliation only to provide a meaningful message
 		// and this does not influence any reconciliation logic.
-		conditions.MarkFalse(controlPlane.KCP, controlplanev1.MachinesSpecUpToDateCondition, controlplanev1.RollingUpdateInProgressReason, clusterv1.ConditionSeverityWarning, "Rolling %d replicas with outdated spec (%d replicas up to date)", len(needRollout), kcp.Status.UpdatedReplicas)
+		conditions.MarkFalse(controlPlane.KCP, controlplanev1.MachinesSpecUpToDateCondition, controlplanev1.RollingUpdateInProgressReason, clusterv1.ConditionSeverityWarning, "Rolling %d replicas with outdated spec (%d replicas up to date)", len(needRollout), len(controlPlane.Machines)-len(needRollout))
 		return r.upgradeControlPlane(ctx, cluster, kcp, controlPlane, needRollout)
 	default:
 		// make sure last upgrade operation is marked as completed.
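With the health check removed from reconcile(), the conditions set just above it — the MachinesReadyCondition aggregation and, during a rollout, MachinesSpecUpToDateCondition — are refreshed even on reconciliations where the health check would have returned early; the check now guards the scale operations instead (see scale.go below). Purely as an illustration of the (result, err) guard used in that call, and not the actual KCP code, here is a minimal Go sketch: the result type, checkHealth, and scaleUp are stand-in names, and in the real code ctrl.Result from controller-runtime is what provides IsZero().

package main

import (
	"errors"
	"fmt"
	"time"
)

// result is a stand-in for controller-runtime's ctrl.Result; the only assumption
// is that it reports whether a requeue was requested via IsZero().
type result struct {
	Requeue      bool
	RequeueAfter time.Duration
}

func (r result) IsZero() bool { return r == result{} }

// checkHealth is a hypothetical stand-in for reconcileControlPlaneHealth: it
// fails with an error when the control plane is unhealthy, and asks for a
// requeue (non-zero result, nil error) when a machine is still being deleted.
func checkHealth(healthy, deleting bool) (result, error) {
	if !healthy {
		return result{}, errors.New("control plane is unhealthy")
	}
	if deleting {
		return result{RequeueAfter: 20 * time.Second}, nil
	}
	return result{}, nil
}

// scaleUp shows the guard from scaleUpControlPlane: bail out on error or when a
// requeue is requested, and only otherwise proceed with the scale operation.
func scaleUp(healthy, deleting bool) (result, error) {
	if res, err := checkHealth(healthy, deleting); err != nil || !res.IsZero() {
		return res, err
	}
	fmt.Println("health check passed, creating a new control plane machine")
	return result{}, nil
}

func main() {
	fmt.Println(scaleUp(false, false)) // error path: scale up is skipped
	fmt.Println(scaleUp(true, true))   // requeue path: scale up is deferred
	fmt.Println(scaleUp(true, false))  // healthy: scale up proceeds
}

The point of checking !res.IsZero() in addition to err is that a helper can request a requeue without reporting an error, and the caller must stop in either case.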
12 changes: 12 additions & 0 deletions controlplane/kubeadm/controllers/scale.go
@@ -62,6 +62,12 @@ func (r *KubeadmControlPlaneReconciler) initializeControlPlane(ctx context.Conte
 func (r *KubeadmControlPlaneReconciler) scaleUpControlPlane(ctx context.Context, cluster *clusterv1.Cluster, kcp *controlplanev1.KubeadmControlPlane, controlPlane *internal.ControlPlane) (ctrl.Result, error) {
 	logger := controlPlane.Logger()
 
+	// reconcileControlPlaneHealth returns err if there is a machine being deleted or if the control plane is unhealthy.
+	// If the control plane is not yet initialized, this call shouldn't fail.
+	if result, err := r.reconcileControlPlaneHealth(ctx, cluster, kcp, controlPlane); err != nil || !result.IsZero() {
+		return result, err
+	}
+
 	// Create the bootstrap configuration
 	bootstrapSpec := controlPlane.JoinControlPlaneConfig()
 	fd := controlPlane.NextFailureDomainForScaleUp()
@@ -84,6 +90,12 @@ func (r *KubeadmControlPlaneReconciler) scaleDownControlPlane(
 ) (ctrl.Result, error) {
 	logger := controlPlane.Logger()
 
+	// reconcileControlPlaneHealth returns err if there is a machine being deleted or if the control plane is unhealthy.
+	// If the control plane is not yet initialized, this call shouldn't fail.
+	if result, err := r.reconcileControlPlaneHealth(ctx, cluster, kcp, controlPlane); err != nil || !result.IsZero() {
+		return result, err
+	}
+
 	workloadCluster, err := r.managementCluster.GetWorkloadCluster(ctx, util.ObjectKey(cluster))
 	if err != nil {
 		logger.Error(err, "Failed to create client to workload cluster")
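The commit message says KCP should update conditions when the health reconciliation is failing. As a hedged sketch only — the condition type, reason string, and helper below are hypothetical stand-ins rather than identifiers from the real clusterv1/controlplanev1 APIs, and the v1alpha3 import paths are assumed from the era of this commit — the outcome of the health check could be recorded on the KubeadmControlPlane object with the same conditions helpers already used in the diff (conditions.MarkFalse and conditions.MarkTrue).

package controllers

import (
	clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3"
	controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1alpha3"
	"sigs.k8s.io/cluster-api/util/conditions"
)

// controlPlaneHealthyCondition is an illustrative condition type, not one
// defined by the real API.
const controlPlaneHealthyCondition clusterv1.ConditionType = "ControlPlaneHealthy"

// markHealthCheckResult is a hypothetical helper: it records the outcome of the
// health check on the KubeadmControlPlane object, so a failing check shows up in
// status conditions instead of only aborting the current reconcile.
func markHealthCheckResult(kcp *controlplanev1.KubeadmControlPlane, healthErr error) {
	if healthErr != nil {
		conditions.MarkFalse(kcp, controlPlaneHealthyCondition, "HealthCheckFailed",
			clusterv1.ConditionSeverityWarning, "control plane health check failed: %v", healthErr)
		return
	}
	conditions.MarkTrue(kcp, controlPlaneHealthyCondition)
}

Wherever such a helper were called from, the resulting condition would still need to be written back by the controller's existing status-patching logic; that wiring is not shown here.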
