diff --git a/vertical-pod-autoscaler/pkg/recommender/input/cluster_feeder.go b/vertical-pod-autoscaler/pkg/recommender/input/cluster_feeder.go
index b32094057fa5..e891da80f0e4 100644
--- a/vertical-pod-autoscaler/pkg/recommender/input/cluster_feeder.go
+++ b/vertical-pod-autoscaler/pkg/recommender/input/cluster_feeder.go
@@ -25,6 +25,7 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/fields"
 	"k8s.io/apimachinery/pkg/labels"
+	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/apimachinery/pkg/watch"
 	vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1"
 	vpa_clientset "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/client/clientset/versioned"
@@ -50,6 +51,8 @@ import (
 )
 
 const (
+	evictionWatchRetryWait       = 10 * time.Second
+	evictionWatchJitterFactor    = 0.5
 	scaleCacheLoopPeriod         time.Duration = 7 * time.Second
 	scaleCacheEntryLifetime      time.Duration = time.Hour
 	scaleCacheEntryFreshnessTime time.Duration = 10 * time.Minute
@@ -142,14 +145,21 @@ func WatchEvictionEventsWithRetries(kubeClient kube_client.Interface, observer o
 			FieldSelector: "reason=Evicted",
 		}
 
-		for {
+		watchEvictionEventsOnce := func() {
 			watchInterface, err := kubeClient.CoreV1().Events(namespace).Watch(context.TODO(), options)
 			if err != nil {
 				klog.Errorf("Cannot initialize watching events. Reason %v", err)
-				continue
+				return
 			}
 			watchEvictionEvents(watchInterface.ResultChan(), observer)
 		}
+		for {
+			watchEvictionEventsOnce()
+			// Wait between attempts, retrying too often breaks API server.
+			waitTime := wait.Jitter(evictionWatchRetryWait, evictionWatchJitterFactor)
+			klog.V(1).Infof("An attempt to watch eviction events finished. Waiting %v before the next one.", waitTime)
+			time.Sleep(waitTime)
+		}
 	}()
 }
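
For context, here is a minimal, self-contained sketch of the retry pattern this diff introduces: run one watch attempt to completion, then sleep a jittered interval before the next attempt instead of retrying in a tight loop. `wait.Jitter(d, f)` from `k8s.io/apimachinery/pkg/util/wait` returns a duration in `[d, d + f*d)`, so with the constants above each retry waits 10-15s. The names `doWatch`, `retryWait`, and `jitterFactor` are illustrative stand-ins, not identifiers from the patched file.

```go
// A minimal sketch of the jittered retry loop, assuming only
// k8s.io/apimachinery is available. doWatch is a hypothetical stand-in
// for a single watch attempt (watchEvictionEventsOnce in the patch): it
// returns when the initial Watch call fails or the event stream closes.
package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

const (
	retryWait    = 10 * time.Second
	jitterFactor = 0.5
)

// doWatch performs one watch attempt and returns when it ends.
func doWatch() {
	fmt.Println("watching... (attempt ends when the stream closes)")
}

func main() {
	for {
		doWatch()
		// wait.Jitter picks a duration in [retryWait, retryWait*1.5),
		// i.e. 10-15s here. The randomness keeps many clients that lost
		// their watches at the same moment from re-connecting in
		// lockstep and stampeding the API server.
		waitTime := wait.Jitter(retryWait, jitterFactor)
		fmt.Printf("watch attempt finished; sleeping %v before retrying\n", waitTime)
		time.Sleep(waitTime)
	}
}
```

The design choice worth noting: the old code's `continue` on a failed `Watch` call retried immediately with no delay, hammering the API server whenever it was unavailable. Extracting the attempt into `watchEvictionEventsOnce` lets both failure paths (failed connect and closed stream) fall through to the same jittered sleep.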