Merge pull request #11522 from k8s-infra-cherrypick-robot/cherry-pick-11515-to-release-1.9

[release-1.9] 🌱 Drop retry when computing KCP conditions
k8s-ci-robot authored Dec 2, 2024
2 parents 03c076d + 7af10a2 commit fea33f6
Showing 2 changed files with 8 additions and 4 deletions.
7 changes: 5 additions & 2 deletions controlplane/kubeadm/internal/workload_cluster_conditions.go
@@ -67,8 +67,11 @@ func (w *Workload) UpdateEtcdConditions(ctx context.Context, controlPlane *Contr
 	if controlPlane.IsEtcdManaged() {
 		// Update etcd conditions.
 		// In case of well known temporary errors + control plane scaling up/down or rolling out, retry a few times.
-		// Note: this is required because there isn't a watch mechanism on etcd.
-		maxRetry := 3
+		// Note: it seems that reducing the number of retries during every reconcile also improves stability,
+		// thus we are no longer retrying (we only try once).
+		// However, we keep the code implementing retry support so we can easily revert this decision in a patch
+		// release if we need to.
+		maxRetry := 1
 		for i := range maxRetry {
 			retryableError := w.updateManagedEtcdConditions(ctx, controlPlane)
 			// if we should retry and there is a retry left, wait a bit.
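
For context, here is a minimal standalone sketch of the retry scaffold the hunk above keeps in place: the loop survives even though maxRetry is now 1, so retries can be re-activated in a patch release. The updateConditions stub, the wait duration, and the retryable-error check are assumptions for illustration, not the actual cluster-api implementation.

package main

import (
	"fmt"
	"time"
)

// updateConditions stands in for w.updateManagedEtcdConditions: it reports whether the
// error it hit is a well known temporary one that would be worth retrying.
func updateConditions(attempt int) (retryableError bool) {
	// Pretend only the first attempt hits a temporary error.
	return attempt == 0
}

func main() {
	// With maxRetry := 1 the body runs exactly once, which is the behavior introduced by
	// this commit; restoring maxRetry := 3 would re-activate the retries.
	maxRetry := 1
	for i := range maxRetry {
		retryableError := updateConditions(i)
		// If the error is retryable and there is a retry left, wait a bit before trying again.
		if retryableError && i < maxRetry-1 {
			time.Sleep(100 * time.Millisecond)
			continue
		}
		break
	}
	fmt.Println("etcd conditions updated")
}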
@@ -154,8 +154,9 @@ func TestUpdateEtcdConditions(t *testing.T) {
 			callCount = 0
 			w.UpdateEtcdConditions(ctx, controlPane)
 			if tt.expectedRetry {
-				g.Expect(callCount).To(Equal(3))
-			} else {
+				// Note we keep the code implementing retry support so we can easily re-activate it if we need to.
+				// g.Expect(callCount).To(Equal(3))
+				// } else {
 				g.Expect(callCount).To(Equal(1))
 			}
 		})
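
Below is a minimal sketch, not the actual cluster-api test, of the callCount pattern the assertion above relies on: a stub counts how many times the condition-update logic runs, and with retries dropped the test expects exactly one call. The updateEtcdConditions helper and its update hook are hypothetical stand-ins.

package example

import (
	"testing"

	. "github.com/onsi/gomega"
)

// updateEtcdConditions is a hypothetical stand-in for Workload.UpdateEtcdConditions;
// the update hook lets the test count how many times the condition logic runs.
func updateEtcdConditions(update func()) {
	maxRetry := 1 // retries dropped, mirroring the change in this commit
	for range maxRetry {
		update()
	}
}

func TestUpdateEtcdConditionsCallCount(t *testing.T) {
	g := NewWithT(t)

	callCount := 0
	updateEtcdConditions(func() { callCount++ })

	// With retries dropped, the update logic is expected to run exactly once.
	g.Expect(callCount).To(Equal(1))
}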
