diff --git a/controlplane/kubeadm/internal/workload_cluster_conditions.go b/controlplane/kubeadm/internal/workload_cluster_conditions.go
index 74fbca60e7d9..86cb2344747d 100644
--- a/controlplane/kubeadm/internal/workload_cluster_conditions.go
+++ b/controlplane/kubeadm/internal/workload_cluster_conditions.go
@@ -67,8 +67,11 @@ func (w *Workload) UpdateEtcdConditions(ctx context.Context, controlPlane *Contr
 	if controlPlane.IsEtcdManaged() {
 		// Update etcd conditions.
 		// In case of well known temporary errors + control plane scaling up/down or rolling out, retry a few times.
-		// Note: this is required because there isn't a watch mechanism on etcd.
-		maxRetry := 3
+		// Note: it seems that reducing the number of retries performed on every reconcile also improves stability,
+		// thus we are no longer retrying (we only try once).
+		// However, we keep the code implementing retry support so we can easily revert this decision in a patch
+		// release if we need to.
+		maxRetry := 1
 		for i := range maxRetry {
 			retryableError := w.updateManagedEtcdConditions(ctx, controlPlane)
 			// if we should retry and there is a retry left, wait a bit.
diff --git a/controlplane/kubeadm/internal/workload_cluster_conditions_test.go b/controlplane/kubeadm/internal/workload_cluster_conditions_test.go
index 987ecfa121e1..dc019452e8ac 100644
--- a/controlplane/kubeadm/internal/workload_cluster_conditions_test.go
+++ b/controlplane/kubeadm/internal/workload_cluster_conditions_test.go
@@ -154,8 +154,9 @@ func TestUpdateEtcdConditions(t *testing.T) {
 			callCount = 0
 			w.UpdateEtcdConditions(ctx, controlPane)
 			if tt.expectedRetry {
-				g.Expect(callCount).To(Equal(3))
-			} else {
+				// Note: we keep the code implementing retry support so we can easily re-activate it if we need to.
+				// g.Expect(callCount).To(Equal(3))
+				// } else {
 				g.Expect(callCount).To(Equal(1))
 			}
 		})