WIP: Implement DRA support in Cluster Autoscaler #7350

Closed
wants to merge 21 commits
4992123
DRA: extract interacting with the scheduler framework out of Predicat…
towca Sep 26, 2024
2d55ff2
DRA: introduce internal NodeInfo/PodInfo with DRA objects attached
towca Sep 27, 2024
7c1f8d5
DRA: migrate all of CA to use the new internal NodeInfo/PodInfo
towca Sep 27, 2024
dfd0234
DRA: remove AddNodeWithPods from ClusterSnapshot, replace uses with A…
towca Sep 30, 2024
fafb78a
DRA: add Initialize to ClusterSnapshot, remove AddNodes
towca Sep 30, 2024
c249f46
DRA: remove redundant IsPVCUsedByPods from ClusterSnapshot
towca Sep 30, 2024
f876a51
DRA: remove AddNode from ClusterSnapshot
towca Sep 30, 2024
bb87555
DRA: refactor utils related to NodeInfos
towca Sep 30, 2024
fad6868
DRA: propagate schedulerframework handle and DRA feature flag to Clus…
towca Sep 30, 2024
26e4787
DRA: Implement a Snapshot of DRA objects, its Provider, and utils
towca Sep 26, 2024
9e32e07
DRA: grab a snapshot of DRA objects and plumb to ClusterSnapshot.Init…
towca Sep 30, 2024
006685c
DRA: propagate DRA objects through NodeInfos in node_info utils
towca Sep 30, 2024
c5edd3b
DRA: rename ClusterSnapshot methods to better reflect their purpose
towca Oct 1, 2024
bdef0a7
DRA: extend ClusterSnapshot.SchedulePod, propagate scheduling state f…
towca Oct 1, 2024
0e055c4
DRA: plumb the DRA snapshot into scheduler framework through ClusterS…
towca Oct 1, 2024
0a11e9c
DRA: implement calculating utilization for DRA resources
towca Oct 1, 2024
ef9d420
DRA: integrate BasicClusterSnapshot with the DRA snapshot
towca Oct 1, 2024
38fb034
DRA: add integration tests
towca Sep 26, 2024
7e70b41
DRA: handle expendable pods using DRA
towca Oct 3, 2024
3544bb4
DRA: handle duplicating unschedulable pods using DRA
towca Oct 4, 2024
2e7eeea
DRA TMP: vendor in the required scheduler framework changes
towca Oct 7, 2024
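
Taken together, the commits above converge on a ClusterSnapshot that is initialized with a snapshot of DRA objects alongside the nodes and scheduled pods. A rough sketch of the shape implied by the call sites in the diffs below; the real interface lives in the simulator/clustersnapshot package and has more methods than shown here:

package clustersnapshot // shape sketch only, not the actual interface definition

import (
	apiv1 "k8s.io/api/core/v1"

	"k8s.io/autoscaler/cluster-autoscaler/dynamicresources"
)

// draAwareSnapshot captures the one signature this PR threads through the
// codebase: Initialize resets the snapshot and populates it with the given
// nodes, the pods already scheduled on them, and a point-in-time snapshot of
// DRA objects.
type draAwareSnapshot interface {
	Initialize(nodes []*apiv1.Node, scheduledPods []*apiv1.Pod, draSnapshot dynamicresources.Snapshot) error
}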
6 changes: 6 additions & 0 deletions cluster-autoscaler/core/autoscaler.go
@@ -27,6 +27,7 @@ import (
"k8s.io/autoscaler/cluster-autoscaler/core/scaledown/pdb"
"k8s.io/autoscaler/cluster-autoscaler/core/scaleup"
"k8s.io/autoscaler/cluster-autoscaler/debuggingsnapshot"
"k8s.io/autoscaler/cluster-autoscaler/dynamicresources"
"k8s.io/autoscaler/cluster-autoscaler/estimator"
"k8s.io/autoscaler/cluster-autoscaler/expander"
"k8s.io/autoscaler/cluster-autoscaler/expander/factory"
@@ -63,6 +64,7 @@ type AutoscalerOptions struct {
ScaleUpOrchestrator scaleup.Orchestrator
DeleteOptions options.NodeDeleteOptions
DrainabilityRules rules.Rules
DraProvider *dynamicresources.Provider
}

// Autoscaler is the main component of CA which scales up/down node groups according to its configuration
@@ -103,6 +105,7 @@ func NewAutoscaler(opts AutoscalerOptions, informerFactory informers.SharedInfor
opts.ScaleUpOrchestrator,
opts.DeleteOptions,
opts.DrainabilityRules,
opts.DraProvider,
), nil
}

@@ -169,6 +172,9 @@ func initializeDefaultOptions(opts *AutoscalerOptions, informerFactory informers
if opts.DrainabilityRules == nil {
opts.DrainabilityRules = rules.Default(opts.DeleteOptions)
}
if opts.EnableDynamicResources && opts.DraProvider == nil {
opts.DraProvider = dynamicresources.NewProviderFromInformers(informerFactory)
}

return nil
}
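
For callers, the effect of the wiring above is that enabling DRA is a single option: when EnableDynamicResources is set and no DraProvider is supplied, initializeDefaultOptions builds one from the shared informer factory. A minimal usage sketch, assuming the remaining AutoscalerOptions fields are filled in as usual and that EnableDynamicResources is the flag added elsewhere in this PR:

package setup // usage sketch, hypothetical package

import (
	"k8s.io/autoscaler/cluster-autoscaler/core"
	"k8s.io/client-go/informers"
	"k8s.io/klog/v2"
)

// newAutoscalerWithDRA shows how DRA support would be switched on. DraProvider
// is intentionally left nil so that initializeDefaultOptions constructs it via
// dynamicresources.NewProviderFromInformers(informerFactory). Hypothetical
// helper, not part of this PR.
func newAutoscalerWithDRA(baseOpts core.AutoscalerOptions, informerFactory informers.SharedInformerFactory) core.Autoscaler {
	baseOpts.EnableDynamicResources = true // feature flag added elsewhere in this PR
	a, err := core.NewAutoscaler(baseOpts, informerFactory)
	if err != nil {
		klog.Fatalf("failed to build autoscaler: %v", err)
	}
	return a
}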
@@ -24,6 +24,7 @@ import (
apiv1 "k8s.io/api/core/v1"
"k8s.io/autoscaler/cluster-autoscaler/config"
"k8s.io/autoscaler/cluster-autoscaler/context"
"k8s.io/autoscaler/cluster-autoscaler/dynamicresources"
"k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot"
"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
kube_util "k8s.io/autoscaler/cluster-autoscaler/utils/kubernetes"
@@ -110,7 +111,7 @@ func TestFilterOutExpendable(t *testing.T) {
t.Run(tc.name, func(t *testing.T) {
processor := NewFilterOutExpendablePodListProcessor()
snapshot := clustersnapshot.NewBasicClusterSnapshot(framework.TestFrameworkHandleOrDie(t), true)
err := snapshot.Initialize(tc.nodes, nil)
err := snapshot.Initialize(tc.nodes, nil, dynamicresources.Snapshot{})
assert.NoError(t, err)

pods, err := processor.Process(&context.AutoscalingContext{
@@ -23,6 +23,7 @@ import (

"github.com/stretchr/testify/assert"
apiv1 "k8s.io/api/core/v1"
"k8s.io/autoscaler/cluster-autoscaler/dynamicresources"
"k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot"
"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
"k8s.io/autoscaler/cluster-autoscaler/simulator/predicatechecker"
@@ -284,7 +285,7 @@ func BenchmarkFilterOutSchedulable(b *testing.B) {
predicateChecker := predicatechecker.NewSchedulerBasedPredicateChecker(fwHandle)

clusterSnapshot := snapshotFactory(fwHandle)
if err := clusterSnapshot.Initialize(nodes, scheduledPods); err != nil {
if err := clusterSnapshot.Initialize(nodes, scheduledPods, dynamicresources.Snapshot{}); err != nil {
assert.NoError(b, err)
}

18 changes: 16 additions & 2 deletions cluster-autoscaler/core/scaledown/actuation/actuator.go
@@ -29,6 +29,7 @@ import (
"k8s.io/autoscaler/cluster-autoscaler/core/scaledown/pdb"
"k8s.io/autoscaler/cluster-autoscaler/core/scaledown/status"
"k8s.io/autoscaler/cluster-autoscaler/core/utils"
"k8s.io/autoscaler/cluster-autoscaler/dynamicresources"
"k8s.io/autoscaler/cluster-autoscaler/metrics"
"k8s.io/autoscaler/cluster-autoscaler/observers/nodegroupchange"
"k8s.io/autoscaler/cluster-autoscaler/simulator"
@@ -61,6 +62,7 @@ type Actuator struct {
configGetter actuatorNodeGroupConfigGetter
nodeDeleteDelayAfterTaint time.Duration
pastLatencies *expiring.List
draProvider *dynamicresources.Provider
}

// actuatorNodeGroupConfigGetter is an interface to limit the functions that can be used
@@ -71,7 +73,7 @@ type actuatorNodeGroupConfigGetter interface {
}

// NewActuator returns a new instance of Actuator.
func NewActuator(ctx *context.AutoscalingContext, scaleStateNotifier nodegroupchange.NodeGroupChangeObserver, ndt *deletiontracker.NodeDeletionTracker, deleteOptions options.NodeDeleteOptions, drainabilityRules rules.Rules, configGetter actuatorNodeGroupConfigGetter) *Actuator {
func NewActuator(ctx *context.AutoscalingContext, scaleStateNotifier nodegroupchange.NodeGroupChangeObserver, ndt *deletiontracker.NodeDeletionTracker, deleteOptions options.NodeDeleteOptions, drainabilityRules rules.Rules, configGetter actuatorNodeGroupConfigGetter, draProvider *dynamicresources.Provider) *Actuator {
ndb := NewNodeDeletionBatcher(ctx, scaleStateNotifier, ndt, ctx.NodeDeletionBatcherInterval)
legacyFlagDrainConfig := SingleRuleDrainConfig(ctx.MaxGracefulTerminationSec)
var evictor Evictor
@@ -90,6 +92,7 @@ func NewActuator(ctx *context.AutoscalingContext, scaleStateNotifier nodegroupch
configGetter: configGetter,
nodeDeleteDelayAfterTaint: ctx.NodeDeleteDelayAfterTaint,
pastLatencies: expiring.NewList(),
draProvider: draProvider,
}
}

@@ -365,7 +368,18 @@ func (a *Actuator) createSnapshot(nodes []*apiv1.Node) (clustersnapshot.ClusterS
scheduledPods := kube_util.ScheduledPods(pods)
nonExpendableScheduledPods := utils.FilterOutExpendablePods(scheduledPods, a.ctx.ExpendablePodsPriorityCutoff)

err = snapshot.Initialize(nodes, nonExpendableScheduledPods)
draSnapshot := dynamicresources.Snapshot{}
if a.ctx.EnableDynamicResources {
// Grab a live snapshot of DRA objects.
draSnap, err := a.draProvider.Snapshot()
if err != nil {
klog.Warningf("Couldn't retrieve DRA objects, this probably means that DRA is misconfigured in the cluster. Scaling involving DRA pods won't work, proceeding. Error: %v", err)
} else {
draSnapshot = draSnap
}
}

err = snapshot.Initialize(nodes, nonExpendableScheduledPods, draSnapshot)
if err != nil {
return nil, err
}
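
The actuator relies on only a narrow contract from the new dynamicresources package: a Provider that can produce a point-in-time Snapshot of DRA objects, and a Snapshot whose zero value means "no DRA state" (what gets passed when the feature is disabled or listing fails). Roughly the shape implied by the calls above; the actual types are defined in the dynamicresources package added by this PR and differ in detail:

package dynamicresources // shape sketch inferred from call sites, not the real implementation

// Snapshot is a point-in-time view of the cluster's DRA objects
// (ResourceClaims, ResourceSlices, DeviceClasses). The zero value is a valid
// "no DRA objects" snapshot.
type Snapshot struct {
	// fields omitted in this sketch
}

// Provider lists DRA objects (for example via informers) and packages them
// into a Snapshot. Callers treat an error from Snapshot as "proceed without
// DRA" rather than as fatal.
type Provider struct {
	// fields omitted in this sketch
}

// Snapshot returns a consistent copy of the current DRA objects.
func (p *Provider) Snapshot() (Snapshot, error) {
	// the real implementation is added in the "Implement a Snapshot of DRA
	// objects, its Provider, and utils" commit of this PR
	return Snapshot{}, nil
}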
@@ -26,6 +26,7 @@ import (
"testing"
"time"

"k8s.io/autoscaler/cluster-autoscaler/dynamicresources"
"k8s.io/autoscaler/cluster-autoscaler/processors/nodegroupconfig"
"k8s.io/autoscaler/cluster-autoscaler/processors/nodegroups/asyncnodegroups"
"k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
@@ -1045,7 +1046,7 @@ func runSimpleScaleUpTest(t *testing.T, config *ScaleUpTestConfig) *ScaleUpTestR
// build orchestrator
context, err := NewScaleTestAutoscalingContext(options, &fake.Clientset{}, listers, provider, nil, nil)
assert.NoError(t, err)
err = context.ClusterSnapshot.Initialize(nodes, kube_util.ScheduledPods(pods))
err = context.ClusterSnapshot.Initialize(nodes, kube_util.ScheduledPods(pods), dynamicresources.Snapshot{})
assert.NoError(t, err)
nodeInfos, err := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil, false).
Process(&context, nodes, []*appsv1.DaemonSet{}, taints.TaintConfig{}, now)
@@ -1155,7 +1156,7 @@ func TestScaleUpUnhealthy(t *testing.T) {
}
context, err := NewScaleTestAutoscalingContext(options, &fake.Clientset{}, listers, provider, nil, nil)
assert.NoError(t, err)
err = context.ClusterSnapshot.Initialize(nodes, pods)
err = context.ClusterSnapshot.Initialize(nodes, pods, dynamicresources.Snapshot{})
assert.NoError(t, err)
nodeInfos, _ := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil, false).Process(&context, nodes, []*appsv1.DaemonSet{}, taints.TaintConfig{}, now)
clusterState := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, context.LogRecorder, NewBackoff(), nodegroupconfig.NewDefaultNodeGroupConfigProcessor(config.NodeGroupAutoscalingOptions{MaxNodeProvisionTime: 15 * time.Minute}), asyncnodegroups.NewDefaultAsyncNodeGroupStateChecker())
@@ -1198,7 +1199,7 @@ func TestBinpackingLimiter(t *testing.T) {

context, err := NewScaleTestAutoscalingContext(options, &fake.Clientset{}, listers, provider, nil, nil)
assert.NoError(t, err)
err = context.ClusterSnapshot.Initialize(nodes, nil)
err = context.ClusterSnapshot.Initialize(nodes, nil, dynamicresources.Snapshot{})
assert.NoError(t, err)
nodeInfos, err := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil, false).
Process(&context, nodes, []*appsv1.DaemonSet{}, taints.TaintConfig{}, now)
@@ -1258,7 +1259,7 @@ func TestScaleUpNoHelp(t *testing.T) {
}
context, err := NewScaleTestAutoscalingContext(options, &fake.Clientset{}, listers, provider, nil, nil)
assert.NoError(t, err)
err = context.ClusterSnapshot.Initialize(nodes, pods)
err = context.ClusterSnapshot.Initialize(nodes, pods, dynamicresources.Snapshot{})
assert.NoError(t, err)
nodeInfos, _ := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil, false).Process(&context, nodes, []*appsv1.DaemonSet{}, taints.TaintConfig{}, now)
clusterState := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, context.LogRecorder, NewBackoff(), nodegroupconfig.NewDefaultNodeGroupConfigProcessor(config.NodeGroupAutoscalingOptions{MaxNodeProvisionTime: 15 * time.Minute}), asyncnodegroups.NewDefaultAsyncNodeGroupStateChecker())
@@ -1413,7 +1414,7 @@ func TestComputeSimilarNodeGroups(t *testing.T) {
listers := kube_util.NewListerRegistry(nil, nil, kube_util.NewTestPodLister(nil), nil, nil, nil, nil, nil, nil)
ctx, err := NewScaleTestAutoscalingContext(config.AutoscalingOptions{BalanceSimilarNodeGroups: tc.balancingEnabled}, &fake.Clientset{}, listers, provider, nil, nil)
assert.NoError(t, err)
err = ctx.ClusterSnapshot.Initialize(nodes, nil)
err = ctx.ClusterSnapshot.Initialize(nodes, nil, dynamicresources.Snapshot{})
assert.NoError(t, err)
nodeInfos, _ := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil, false).Process(&ctx, nodes, []*appsv1.DaemonSet{}, taints.TaintConfig{}, now)
clusterState := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, ctx.LogRecorder, NewBackoff(), nodegroupconfig.NewDefaultNodeGroupConfigProcessor(config.NodeGroupAutoscalingOptions{MaxNodeProvisionTime: 15 * time.Minute}), asyncnodegroups.NewDefaultAsyncNodeGroupStateChecker())
@@ -1478,7 +1479,7 @@ func TestScaleUpBalanceGroups(t *testing.T) {
}
context, err := NewScaleTestAutoscalingContext(options, &fake.Clientset{}, listers, provider, nil, nil)
assert.NoError(t, err)
err = context.ClusterSnapshot.Initialize(nodes, podList)
err = context.ClusterSnapshot.Initialize(nodes, podList, dynamicresources.Snapshot{})
assert.NoError(t, err)
nodeInfos, _ := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil, false).Process(&context, nodes, []*appsv1.DaemonSet{}, taints.TaintConfig{}, now)
clusterState := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, context.LogRecorder, NewBackoff(), nodegroupconfig.NewDefaultNodeGroupConfigProcessor(config.NodeGroupAutoscalingOptions{MaxNodeProvisionTime: 15 * time.Minute}), asyncnodegroups.NewDefaultAsyncNodeGroupStateChecker())
@@ -1655,7 +1656,7 @@ func TestScaleUpToMeetNodeGroupMinSize(t *testing.T) {
assert.NoError(t, err)

nodes := []*apiv1.Node{n1, n2}
err = context.ClusterSnapshot.Initialize(nodes, nil)
err = context.ClusterSnapshot.Initialize(nodes, nil, dynamicresources.Snapshot{})
assert.NoError(t, err)
nodeInfos, _ := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil, false).Process(&context, nodes, []*appsv1.DaemonSet{}, taints.TaintConfig{}, time.Now())
processors := NewTestProcessors(&context)
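
Every test call site in this file now spells out the empty dynamicresources.Snapshot{} explicitly. A small helper, hypothetical and not part of this PR, could absorb that argument for tests that don't exercise DRA:

package orchestrator // hypothetical test helper, sketch only

import (
	"testing"

	"github.com/stretchr/testify/assert"
	apiv1 "k8s.io/api/core/v1"

	"k8s.io/autoscaler/cluster-autoscaler/dynamicresources"
	"k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot"
)

// initializeSnapshotWithoutDRA initializes a cluster snapshot with an empty
// DRA snapshot and fails the test on error.
func initializeSnapshotWithoutDRA(t *testing.T, snapshot clustersnapshot.ClusterSnapshot, nodes []*apiv1.Node, pods []*apiv1.Pod) {
	t.Helper()
	err := snapshot.Initialize(nodes, pods, dynamicresources.Snapshot{})
	assert.NoError(t, err)
}

With such a helper, the runSimpleScaleUpTest call above would read initializeSnapshotWithoutDRA(t, context.ClusterSnapshot, nodes, kube_util.ScheduledPods(pods)).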
9 changes: 5 additions & 4 deletions cluster-autoscaler/core/scaleup/resource/manager_test.go
@@ -29,6 +29,7 @@ import (
"k8s.io/autoscaler/cluster-autoscaler/config"
"k8s.io/autoscaler/cluster-autoscaler/context"
"k8s.io/autoscaler/cluster-autoscaler/core/test"
"k8s.io/autoscaler/cluster-autoscaler/dynamicresources"
"k8s.io/autoscaler/cluster-autoscaler/processors/nodeinfosprovider"
kube_util "k8s.io/autoscaler/cluster-autoscaler/utils/kubernetes"
"k8s.io/autoscaler/cluster-autoscaler/utils/taints"
@@ -69,7 +70,7 @@ func TestDeltaForNode(t *testing.T) {

ng := testCase.nodeGroupConfig
group, nodes := newNodeGroup(t, cp, ng.Name, ng.Min, ng.Max, ng.Size, ng.CPU, ng.Mem)
err := ctx.ClusterSnapshot.Initialize(nodes, nil)
err := ctx.ClusterSnapshot.Initialize(nodes, nil, dynamicresources.Snapshot{})
assert.NoError(t, err)
nodeInfos, _ := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil, false).Process(&ctx, nodes, []*appsv1.DaemonSet{}, taints.TaintConfig{}, time.Now())

@@ -112,7 +113,7 @@ func TestResourcesLeft(t *testing.T) {

ng := testCase.nodeGroupConfig
_, nodes := newNodeGroup(t, cp, ng.Name, ng.Min, ng.Max, ng.Size, ng.CPU, ng.Mem)
err := ctx.ClusterSnapshot.Initialize(nodes, nil)
err := ctx.ClusterSnapshot.Initialize(nodes, nil, dynamicresources.Snapshot{})
assert.NoError(t, err)
nodeInfos, _ := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil, false).Process(&ctx, nodes, []*appsv1.DaemonSet{}, taints.TaintConfig{}, time.Now())

@@ -165,7 +166,7 @@ func TestApplyLimits(t *testing.T) {

ng := testCase.nodeGroupConfig
group, nodes := newNodeGroup(t, cp, ng.Name, ng.Min, ng.Max, ng.Size, ng.CPU, ng.Mem)
err := ctx.ClusterSnapshot.Initialize(nodes, nil)
err := ctx.ClusterSnapshot.Initialize(nodes, nil, dynamicresources.Snapshot{})
assert.NoError(t, err)
nodeInfos, _ := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil, false).Process(&ctx, nodes, []*appsv1.DaemonSet{}, taints.TaintConfig{}, time.Now())

@@ -232,7 +233,7 @@ func TestResourceManagerWithGpuResource(t *testing.T) {
assert.NoError(t, err)

nodes := []*corev1.Node{n1}
err = context.ClusterSnapshot.Initialize(nodes, nil)
err = context.ClusterSnapshot.Initialize(nodes, nil, dynamicresources.Snapshot{})
assert.NoError(t, err)
nodeInfos, _ := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil, false).Process(&context, nodes, []*appsv1.DaemonSet{}, taints.TaintConfig{}, time.Now())

21 changes: 17 additions & 4 deletions cluster-autoscaler/core/static_autoscaler.go
@@ -37,6 +37,7 @@ import (
orchestrator "k8s.io/autoscaler/cluster-autoscaler/core/scaleup/orchestrator"
core_utils "k8s.io/autoscaler/cluster-autoscaler/core/utils"
"k8s.io/autoscaler/cluster-autoscaler/debuggingsnapshot"
"k8s.io/autoscaler/cluster-autoscaler/dynamicresources"
"k8s.io/autoscaler/cluster-autoscaler/estimator"
"k8s.io/autoscaler/cluster-autoscaler/expander"
"k8s.io/autoscaler/cluster-autoscaler/metrics"
@@ -92,6 +93,7 @@ type StaticAutoscaler struct {
processorCallbacks *staticAutoscalerProcessorCallbacks
initialized bool
taintConfig taints.TaintConfig
draProvider *dynamicresources.Provider
}

type staticAutoscalerProcessorCallbacks struct {
@@ -145,7 +147,8 @@ func NewStaticAutoscaler(
remainingPdbTracker pdb.RemainingPdbTracker,
scaleUpOrchestrator scaleup.Orchestrator,
deleteOptions options.NodeDeleteOptions,
drainabilityRules rules.Rules) *StaticAutoscaler {
drainabilityRules rules.Rules,
draProvider *dynamicresources.Provider) *StaticAutoscaler {

clusterStateConfig := clusterstate.ClusterStateRegistryConfig{
MaxTotalUnreadyPercentage: opts.MaxTotalUnreadyPercentage,
@@ -176,7 +179,7 @@
processorCallbacks.scaleDownPlanner = scaleDownPlanner

ndt := deletiontracker.NewNodeDeletionTracker(0 * time.Second)
scaleDownActuator := actuation.NewActuator(autoscalingContext, processors.ScaleStateNotifier, ndt, deleteOptions, drainabilityRules, processors.NodeGroupConfigProcessor)
scaleDownActuator := actuation.NewActuator(autoscalingContext, processors.ScaleStateNotifier, ndt, deleteOptions, drainabilityRules, processors.NodeGroupConfigProcessor, draProvider)
autoscalingContext.ScaleDownActuator = scaleDownActuator

if scaleUpOrchestrator == nil {
@@ -200,6 +203,7 @@ func NewStaticAutoscaler(
processorCallbacks: processorCallbacks,
clusterStateRegistry: clusterStateRegistry,
taintConfig: taintConfig,
draProvider: draProvider,
}
}

@@ -244,8 +248,17 @@ func (a *StaticAutoscaler) cleanUpIfRequired() {
}

func (a *StaticAutoscaler) initializeClusterSnapshot(nodes []*apiv1.Node, scheduledPods []*apiv1.Pod) caerrors.AutoscalerError {
a.ClusterSnapshot.Clear()
if err := a.ClusterSnapshot.Initialize(nodes, scheduledPods); err != nil {
var draSnapshot dynamicresources.Snapshot
if a.AutoscalingContext.EnableDynamicResources {
snap, err := a.draProvider.Snapshot()
if err != nil {
klog.Warningf("Couldn't retrieve DRA objects, this probably means that DRA is misconfigured in the cluster. Scaling involving DRA pods won't work, proceeding. Error: %v", err)
} else {
draSnapshot = snap
}
}

if err := a.ClusterSnapshot.Initialize(nodes, scheduledPods, draSnapshot); err != nil {
return caerrors.ToAutoscalerError(caerrors.InternalError, err)
}
return nil
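
initializeClusterSnapshot repeats the same guard-and-fallback sequence as Actuator.createSnapshot above: consult the provider only when the flag is on, and degrade to an empty snapshot with a warning if the DRA objects can't be listed. A sketch of a shared helper that both call sites could use (a possible follow-up refactor, not part of this PR):

package core // sketch of a possible shared helper, not part of this PR

import (
	"k8s.io/autoscaler/cluster-autoscaler/dynamicresources"
	"k8s.io/klog/v2"
)

// draSnapshotOrEmpty returns a snapshot of the cluster's DRA objects, or an
// empty snapshot when DRA is disabled, no provider is wired in, or listing the
// objects fails. In the failure case scaling involving DRA pods won't work,
// but the autoscaling loop still proceeds.
func draSnapshotOrEmpty(enabled bool, provider *dynamicresources.Provider) dynamicresources.Snapshot {
	if !enabled || provider == nil {
		return dynamicresources.Snapshot{}
	}
	snap, err := provider.Snapshot()
	if err != nil {
		klog.Warningf("Couldn't retrieve DRA objects, this probably means that DRA is misconfigured in the cluster. Scaling involving DRA pods won't work, proceeding. Error: %v", err)
		return dynamicresources.Snapshot{}
	}
	return snap
}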
8 changes: 4 additions & 4 deletions cluster-autoscaler/core/static_autoscaler_test.go
@@ -163,7 +163,7 @@ func (m *onNodeGroupDeleteMock) Delete(id string) error {

func setUpScaleDownActuator(ctx *context.AutoscalingContext, autoscalingOptions config.AutoscalingOptions) {
deleteOptions := options.NewNodeDeleteOptions(autoscalingOptions)
ctx.ScaleDownActuator = actuation.NewActuator(ctx, nil, deletiontracker.NewNodeDeletionTracker(0*time.Second), deleteOptions, rules.Default(deleteOptions), NewTestProcessors(ctx).NodeGroupConfigProcessor)
ctx.ScaleDownActuator = actuation.NewActuator(ctx, nil, deletiontracker.NewNodeDeletionTracker(0*time.Second), deleteOptions, rules.Default(deleteOptions), NewTestProcessors(ctx).NodeGroupConfigProcessor, nil)
}

type nodeGroup struct {
@@ -1440,7 +1440,7 @@ func TestStaticAutoscalerRunOnceWithUnselectedNodeGroups(t *testing.T) {
clusterState := clusterstate.NewClusterStateRegistry(provider, clusterStateConfig, context.LogRecorder, NewBackoff(), nodegroupconfig.NewDefaultNodeGroupConfigProcessor(autoscalingOptions.NodeGroupDefaults), processors.AsyncNodeGroupStateChecker)

// Setting the Actuator is necessary for testing any scale-down logic, it shouldn't have anything to do in this test.
sdActuator := actuation.NewActuator(&context, clusterState, deletiontracker.NewNodeDeletionTracker(0*time.Second), options.NodeDeleteOptions{}, nil, processors.NodeGroupConfigProcessor)
sdActuator := actuation.NewActuator(&context, clusterState, deletiontracker.NewNodeDeletionTracker(0*time.Second), options.NodeDeleteOptions{}, nil, processors.NodeGroupConfigProcessor, nil)
context.ScaleDownActuator = sdActuator

// Fake planner that keeps track of the scale-down candidates passed to UpdateClusterState.
@@ -2087,7 +2087,7 @@ func TestStaticAutoscalerUpcomingScaleDownCandidates(t *testing.T) {
csr := clusterstate.NewClusterStateRegistry(provider, csrConfig, ctx.LogRecorder, NewBackoff(), nodegroupconfig.NewDefaultNodeGroupConfigProcessor(config.NodeGroupAutoscalingOptions{MaxNodeProvisionTime: 15 * time.Minute}), processors.AsyncNodeGroupStateChecker)

// Setting the Actuator is necessary for testing any scale-down logic, it shouldn't have anything to do in this test.
actuator := actuation.NewActuator(&ctx, csr, deletiontracker.NewNodeDeletionTracker(0*time.Second), options.NodeDeleteOptions{}, nil, NewTestProcessors(&ctx).NodeGroupConfigProcessor)
actuator := actuation.NewActuator(&ctx, csr, deletiontracker.NewNodeDeletionTracker(0*time.Second), options.NodeDeleteOptions{}, nil, NewTestProcessors(&ctx).NodeGroupConfigProcessor, nil)
ctx.ScaleDownActuator = actuator

// Fake planner that keeps track of the scale-down candidates passed to UpdateClusterState.
@@ -2659,7 +2659,7 @@ func newScaleDownPlannerAndActuator(ctx *context.AutoscalingContext, p *ca_proce
nodeDeletionTracker = deletiontracker.NewNodeDeletionTracker(0 * time.Second)
}
planner := planner.New(ctx, p, deleteOptions, nil)
actuator := actuation.NewActuator(ctx, cs, nodeDeletionTracker, deleteOptions, nil, p.NodeGroupConfigProcessor)
actuator := actuation.NewActuator(ctx, cs, nodeDeletionTracker, deleteOptions, nil, p.NodeGroupConfigProcessor, nil)
return planner, actuator
}

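
Note that the test call sites above pass nil for the new draProvider argument. Assuming these test configurations leave dynamic resources at the default (disabled), the provider is never dereferenced and the nil is harmless; the nil guard in the draSnapshotOrEmpty sketch above would make that assumption explicit.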