From da222a326b2b39236564b3b03857036e954b072d Mon Sep 17 00:00:00 2001
From: Damika Gamlath
Date: Wed, 4 Sep 2024 07:10:52 +0000
Subject: [PATCH] Make DaemonSet pod eviction best effort when draining empty
 nodes (nodes without user pods). The autoscaler will no longer wait for
 DaemonSet pods to be fully evicted on empty nodes

---
 .../core/scaledown/actuation/drain.go      |  7 +-
 .../core/scaledown/actuation/drain_test.go | 72 +------------------
 2 files changed, 3 insertions(+), 76 deletions(-)

diff --git a/cluster-autoscaler/core/scaledown/actuation/drain.go b/cluster-autoscaler/core/scaledown/actuation/drain.go
index 2c84566a7e12..c89ad46017f2 100644
--- a/cluster-autoscaler/core/scaledown/actuation/drain.go
+++ b/cluster-autoscaler/core/scaledown/actuation/drain.go
@@ -85,14 +85,11 @@ func (e Evictor) DrainNode(ctx *acontext.AutoscalingContext, nodeInfo *framework
 	return e.drainNodeWithPodsBasedOnPodPriority(ctx, node, pods, dsPods)
 }
 
-// EvictDaemonSetPods groups daemonSet pods in the node in to priority groups and, evicts daemonSet pods in the ascending order of priorities.
-// If priority evictor is not enable, eviction of daemonSet pods is the best effort.
+// EvictDaemonSetPods creates eviction objects for all DaemonSet pods on the node.
+// Eviction of DaemonSet pods is best effort; it does not wait for evictions to finish.
 func (e Evictor) EvictDaemonSetPods(ctx *acontext.AutoscalingContext, nodeInfo *framework.NodeInfo) (map[string]status.PodEvictionResult, error) {
 	node := nodeInfo.Node()
 	dsPods, _ := podsToEvict(nodeInfo, ctx.DaemonSetEvictionForEmptyNodes)
-	if e.fullDsEviction {
-		return e.drainNodeWithPodsBasedOnPodPriority(ctx, node, dsPods, nil)
-	}
 	return e.drainNodeWithPodsBasedOnPodPriority(ctx, node, nil, dsPods)
 }
 
diff --git a/cluster-autoscaler/core/scaledown/actuation/drain_test.go b/cluster-autoscaler/core/scaledown/actuation/drain_test.go
index 3baf252d1dd3..82a4613866a7 100644
--- a/cluster-autoscaler/core/scaledown/actuation/drain_test.go
+++ b/cluster-autoscaler/core/scaledown/actuation/drain_test.go
@@ -42,7 +42,6 @@ import (
 	. "k8s.io/autoscaler/cluster-autoscaler/utils/test"
 	"k8s.io/client-go/kubernetes/fake"
 	core "k8s.io/client-go/testing"
-	kubelet_config "k8s.io/kubernetes/pkg/kubelet/apis/config"
 	"k8s.io/kubernetes/pkg/kubelet/types"
 )
 
@@ -57,7 +56,6 @@ func TestDaemonSetEvictionForEmptyNodes(t *testing.T) {
 		evictByDefault        bool
 		extraAnnotationValue  map[string]string
 		expectNotEvicted      map[string]struct{}
-		fullDsEviction        bool
 		podPriorities         []int32
 	}{
 		{
@@ -84,57 +82,6 @@ func TestDaemonSetEvictionForEmptyNodes(t *testing.T) {
 			extraAnnotationValue: map[string]string{"d1": "false"},
 			expectNotEvicted:     map[string]struct{}{"d1": {}},
 		},
-		{
-			name:              "Failed to create DaemonSet eviction",
-			dsPods:            []string{"d1", "d2"},
-			dsEvictionTimeout: 5000 * time.Millisecond,
-			evictionSuccess:   false,
-			err:               fmt.Errorf("Failed to drain node /n1, due to following errors"),
-			evictByDefault:    true,
-			fullDsEviction:    true,
-			podPriorities:     []int32{0, 1000},
-		},
-		{
-			name:                  "Eviction timeout exceed",
-			dsPods:                []string{"d1", "d2", "d3"},
-			evictionTimeoutExceed: true,
-			dsEvictionTimeout:     100 * time.Millisecond,
-			evictionSuccess:       false,
-			err:                   fmt.Errorf("Failed to drain node /n1, due to following errors"),
-			evictByDefault:        true,
-			fullDsEviction:        true,
-			podPriorities:         []int32{0, 1000, 2000},
-		},
-		{
-			name:              "Successful attempt to evict DaemonSet pods",
-			dsPods:            []string{"d1", "d2"},
-			dsEvictionTimeout: 5000 * time.Millisecond,
-			evictionSuccess:   true,
-			evictByDefault:    true,
-			fullDsEviction:    true,
-			podPriorities:     []int32{0, 1000},
-		},
-		{
-			name:                 "Evict single pod due to annotation",
-			dsPods:               []string{"d1", "d2"},
-			dsEvictionTimeout:    5000 * time.Millisecond,
-			evictionSuccess:      true,
-			extraAnnotationValue: map[string]string{"d1": "true"},
-			expectNotEvicted:     map[string]struct{}{"d2": {}},
-			fullDsEviction:       true,
-			podPriorities:        []int32{0, 1000},
-		},
-		{
-			name:                 "Don't evict single pod due to annotation",
-			dsPods:               []string{"d1", "d2"},
-			dsEvictionTimeout:    5000 * time.Millisecond,
-			evictionSuccess:      true,
-			evictByDefault:       true,
-			extraAnnotationValue: map[string]string{"d1": "false"},
-			expectNotEvicted:     map[string]struct{}{"d1": {}},
-			fullDsEviction:       true,
-			podPriorities:        []int32{0, 1000},
-		},
 	}
 
 	for _, scenario := range testScenarios {
@@ -160,9 +107,6 @@ func TestDaemonSetEvictionForEmptyNodes(t *testing.T) {
 			for i, dsName := range scenario.dsPods {
 				ds := BuildTestPod(dsName, 100, 0, WithDSController())
 				ds.Spec.NodeName = "n1"
-				if scenario.fullDsEviction {
-					ds.Spec.Priority = &scenario.podPriorities[i]
-				}
 				if v, ok := scenario.extraAnnotationValue[dsName]; ok {
 					ds.Annotations[daemonset.EnableDsEvictionKey] = v
 				}
@@ -198,19 +142,9 @@ func TestDaemonSetEvictionForEmptyNodes(t *testing.T) {
 			clustersnapshot.InitializeClusterSnapshotOrDie(t, context.ClusterSnapshot, []*apiv1.Node{n1}, dsPods)
 
 			drainConfig := SingleRuleDrainConfig(context.MaxGracefulTerminationSec)
-			if scenario.fullDsEviction {
-				drainConfig = []kubelet_config.ShutdownGracePeriodByPodPriority{}
-				for _, priority := range scenario.podPriorities {
-					drainConfig = append(drainConfig, kubelet_config.ShutdownGracePeriodByPodPriority{
-						Priority:                   priority,
-						ShutdownGracePeriodSeconds: int64(context.MaxGracefulTerminationSec),
-					})
-				}
-			}
 			evictor := Evictor{
 				EvictionRetryTime:                waitBetweenRetries,
 				shutdownGracePeriodByPodPriority: drainConfig,
-				fullDsEviction:                   scenario.fullDsEviction,
 			}
 			nodeInfo, err := context.ClusterSnapshot.NodeInfos().Get(n1.Name)
 			assert.NoError(t, err)
@@ -232,11 +166,7 @@ func TestDaemonSetEvictionForEmptyNodes(t *testing.T) {
 			for i := 0; i < len(expectEvicted); i++ {
 				deleted[i] = utils.GetStringFromChan(deletedPods)
 			}
-			if scenario.fullDsEviction {
-				assert.Equal(t, expectEvicted, deleted)
-			} else {
-				assert.ElementsMatch(t, deleted, expectEvicted)
-			}
+			assert.ElementsMatch(t, deleted, expectEvicted)
 		})
 	}
 }
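
A caller-side sketch of the new contract (not part of the patch): after this change, EvictDaemonSetPods only issues the eviction requests for the DaemonSet pods on an empty node and returns the per-pod results right away, instead of blocking until the pods are gone. The Go fragment below is a minimal illustration under assumptions: the evictor, autoscalingCtx, and nodeInfo variables, the klog calls, and the Err field on status.PodEvictionResult are taken from the surrounding autoscaler code and are not part of this diff.

	// Best-effort DaemonSet eviction: issue the evictions, inspect the
	// per-pod results, and do not wait for the pods to terminate.
	results, err := evictor.EvictDaemonSetPods(autoscalingCtx, nodeInfo)
	if err != nil {
		klog.Warningf("best-effort DaemonSet eviction on %s: %v", nodeInfo.Node().Name, err)
	}
	for podName, result := range results {
		if result.Err != nil { // Err is assumed from the scaledown status package
			klog.V(4).Infof("DaemonSet pod %s was not evicted: %v", podName, result.Err)
		}
	}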