Add ClusterStateRegistry to the AutoscalingContext.
Because the MaxNodeProvisionTimeProvider depends on the context, the
provider was extracted into a dedicated package and is injected into the
ClusterStateRegistry after context creation.
kisieland committed Jun 29, 2023
1 parent 8f83f7e commit c29c920
Showing 11 changed files with 170 additions and 100 deletions.
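Taken together, the changes below invert the old construction order: the ClusterStateRegistry is now built without a MaxNodeProvisionTimeProvider, the AutoscalingContext is built with the registry, and the provider, which needs the context, is injected afterwards via RegisterProviders. A minimal Go sketch of that wiring, assuming the new providers package lives under cluster-autoscaler/clusterstate/providers; variable names and the elided setup are illustrative, not taken from the commit:

// Sketch only: the order of construction implied by this commit.
csr := clusterstate.NewClusterStateRegistry(cloudProvider, csrConfig, logRecorder, backoff)

// The context now carries the registry (see autoscaling_context.go below);
// other constructor arguments are elided here.
ctx := context.NewAutoscalingContext(options, /* ...other dependencies... */ remainingPdbTracker, csr)

// The default provider depends on the context, so it can only be built now.
csr.RegisterProviders(providers.NewDefaultMaxNodeProvisionTimeProvider(ctx, nodeGroupConfigProcessor))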
8 changes: 6 additions & 2 deletions cluster-autoscaler/clusterstate/clusterstate.go
@@ -143,7 +143,7 @@ type ClusterStateRegistry struct {
}

// NewClusterStateRegistry creates new ClusterStateRegistry.
-func NewClusterStateRegistry(cloudProvider cloudprovider.CloudProvider, config ClusterStateRegistryConfig, logRecorder *utils.LogEventRecorder, backoff backoff.Backoff, maxNodeProvisionTimeProvider maxNodeProvisionTimeProvider) *ClusterStateRegistry {
+func NewClusterStateRegistry(cloudProvider cloudprovider.CloudProvider, config ClusterStateRegistryConfig, logRecorder *utils.LogEventRecorder, backoff backoff.Backoff) *ClusterStateRegistry {
emptyStatus := &api.ClusterAutoscalerStatus{
ClusterwideConditions: make([]api.ClusterAutoscalerCondition, 0),
NodeGroupStatuses: make([]api.NodeGroupStatus, 0),
@@ -167,7 +167,6 @@ func NewClusterStateRegistry(cloudProvider cloudprovider.CloudProvider, config C
cloudProviderNodeInstancesCache: utils.NewCloudProviderNodeInstancesCache(cloudProvider),
interrupt: make(chan struct{}),
scaleUpFailures: make(map[string][]ScaleUpFailure),
-maxNodeProvisionTimeProvider: maxNodeProvisionTimeProvider,
}
}

@@ -193,6 +192,11 @@ func (csr *ClusterStateRegistry) RegisterOrUpdateScaleUp(nodeGroup cloudprovider
csr.registerOrUpdateScaleUpNoLock(nodeGroup, delta, currentTime)
}

+// RegisterProviders registers providers in the cluster state registry.
+func (csr *ClusterStateRegistry) RegisterProviders(maxNodeProvisionTimeProvider maxNodeProvisionTimeProvider) {
+csr.maxNodeProvisionTimeProvider = maxNodeProvisionTimeProvider
+}

// MaxNodeProvisionTime returns MaxNodeProvisionTime value that should be used for the given NodeGroup.
func (csr *ClusterStateRegistry) MaxNodeProvisionTime(nodeGroup cloudprovider.NodeGroup) (time.Duration, error) {
return csr.maxNodeProvisionTimeProvider.GetMaxNodeProvisionTime(nodeGroup)
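The maxNodeProvisionTimeProvider parameter above is an unexported interface of the clusterstate package and does not appear in this diff; reconstructed from the MaxNodeProvisionTime call above and the two implementations in this commit, it presumably looks like:

// Assumed shape of the unexported interface (not part of this diff).
type maxNodeProvisionTimeProvider interface {
	// GetMaxNodeProvisionTime returns the MaxNodeProvisionTime for the given NodeGroup.
	GetMaxNodeProvisionTime(nodeGroup cloudprovider.NodeGroup) (time.Duration, error)
}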
76 changes: 40 additions & 36 deletions cluster-autoscaler/clusterstate/clusterstate_test.go
@@ -72,8 +72,8 @@ func TestOKWithScaleUp(t *testing.T) {
clusterstate := NewClusterStateRegistry(provider, ClusterStateRegistryConfig{
MaxTotalUnreadyPercentage: 10,
OkTotalUnreadyCount: 1,
-}, fakeLogRecorder, newBackoff(),
-NewStaticMaxNodeProvisionTimeProvider(time.Minute))
+}, fakeLogRecorder, newBackoff())
+clusterstate.RegisterProviders(NewMockMaxNodeProvisionTimeProvider(time.Minute))
clusterstate.RegisterOrUpdateScaleUp(provider.GetNodeGroup("ng1"), 4, time.Now())
err := clusterstate.UpdateNodes([]*apiv1.Node{ng1_1, ng2_1}, nil, now)
assert.NoError(t, err)
@@ -114,8 +114,8 @@ func TestEmptyOK(t *testing.T) {
clusterstate := NewClusterStateRegistry(provider, ClusterStateRegistryConfig{
MaxTotalUnreadyPercentage: 10,
OkTotalUnreadyCount: 1,
-}, fakeLogRecorder, newBackoff(),
-NewStaticMaxNodeProvisionTimeProvider(time.Minute))
+}, fakeLogRecorder, newBackoff())
+clusterstate.RegisterProviders(NewMockMaxNodeProvisionTimeProvider(time.Minute))
err := clusterstate.UpdateNodes([]*apiv1.Node{}, nil, now.Add(-5*time.Second))
assert.NoError(t, err)
assert.True(t, clusterstate.IsClusterHealthy())
@@ -155,7 +155,8 @@ func TestOKOneUnreadyNode(t *testing.T) {
clusterstate := NewClusterStateRegistry(provider, ClusterStateRegistryConfig{
MaxTotalUnreadyPercentage: 10,
OkTotalUnreadyCount: 1,
-}, fakeLogRecorder, newBackoff(), NewStaticMaxNodeProvisionTimeProvider(15*time.Minute))
+}, fakeLogRecorder, newBackoff())
+clusterstate.RegisterProviders(NewMockMaxNodeProvisionTimeProvider(15 * time.Minute))
err := clusterstate.UpdateNodes([]*apiv1.Node{ng1_1, ng2_1}, nil, now)
assert.NoError(t, err)
assert.True(t, clusterstate.IsClusterHealthy())
@@ -193,8 +194,8 @@ func TestNodeWithoutNodeGroupDontCrash(t *testing.T) {
clusterstate := NewClusterStateRegistry(provider, ClusterStateRegistryConfig{
MaxTotalUnreadyPercentage: 10,
OkTotalUnreadyCount: 1,
-}, fakeLogRecorder, newBackoff(),
-NewStaticMaxNodeProvisionTimeProvider(15*time.Minute))
+}, fakeLogRecorder, newBackoff())
+clusterstate.RegisterProviders(NewMockMaxNodeProvisionTimeProvider(15 * time.Minute))
err := clusterstate.UpdateNodes([]*apiv1.Node{noNgNode}, nil, now)
assert.NoError(t, err)
assert.Empty(t, clusterstate.GetScaleUpFailures())
@@ -221,8 +222,8 @@ func TestOKOneUnreadyNodeWithScaleDownCandidate(t *testing.T) {
clusterstate := NewClusterStateRegistry(provider, ClusterStateRegistryConfig{
MaxTotalUnreadyPercentage: 10,
OkTotalUnreadyCount: 1,
-}, fakeLogRecorder, newBackoff(),
-NewStaticMaxNodeProvisionTimeProvider(15*time.Minute))
+}, fakeLogRecorder, newBackoff())
+clusterstate.RegisterProviders(NewMockMaxNodeProvisionTimeProvider(15 * time.Minute))
err := clusterstate.UpdateNodes([]*apiv1.Node{ng1_1, ng2_1}, nil, now)
clusterstate.UpdateScaleDownCandidates([]*apiv1.Node{ng1_1}, now)

@@ -287,8 +288,8 @@ func TestMissingNodes(t *testing.T) {
clusterstate := NewClusterStateRegistry(provider, ClusterStateRegistryConfig{
MaxTotalUnreadyPercentage: 10,
OkTotalUnreadyCount: 1,
-}, fakeLogRecorder, newBackoff(),
-NewStaticMaxNodeProvisionTimeProvider(15*time.Minute))
+}, fakeLogRecorder, newBackoff())
+clusterstate.RegisterProviders(NewMockMaxNodeProvisionTimeProvider(15 * time.Minute))
err := clusterstate.UpdateNodes([]*apiv1.Node{ng1_1, ng2_1}, nil, now)
assert.NoError(t, err)
assert.True(t, clusterstate.IsClusterHealthy())
@@ -330,8 +331,8 @@ func TestTooManyUnready(t *testing.T) {
clusterstate := NewClusterStateRegistry(provider, ClusterStateRegistryConfig{
MaxTotalUnreadyPercentage: 10,
OkTotalUnreadyCount: 1,
-}, fakeLogRecorder, newBackoff(),
-NewStaticMaxNodeProvisionTimeProvider(15*time.Minute))
+}, fakeLogRecorder, newBackoff())
+clusterstate.RegisterProviders(NewMockMaxNodeProvisionTimeProvider(15 * time.Minute))
err := clusterstate.UpdateNodes([]*apiv1.Node{ng1_1, ng2_1}, nil, now)
assert.NoError(t, err)
assert.False(t, clusterstate.IsClusterHealthy())
@@ -360,8 +361,8 @@ func TestUnreadyLongAfterCreation(t *testing.T) {
clusterstate := NewClusterStateRegistry(provider, ClusterStateRegistryConfig{
MaxTotalUnreadyPercentage: 10,
OkTotalUnreadyCount: 1,
-}, fakeLogRecorder, newBackoff(),
-NewStaticMaxNodeProvisionTimeProvider(15*time.Minute))
+}, fakeLogRecorder, newBackoff())
+clusterstate.RegisterProviders(NewMockMaxNodeProvisionTimeProvider(15 * time.Minute))
err := clusterstate.UpdateNodes([]*apiv1.Node{ng1_1, ng2_1}, nil, now)
assert.NoError(t, err)
assert.Equal(t, 1, len(clusterstate.GetClusterReadiness().Unready))
@@ -393,8 +394,8 @@ func TestNotStarted(t *testing.T) {
clusterstate := NewClusterStateRegistry(provider, ClusterStateRegistryConfig{
MaxTotalUnreadyPercentage: 10,
OkTotalUnreadyCount: 1,
-}, fakeLogRecorder, newBackoff(),
-NewStaticMaxNodeProvisionTimeProvider(15*time.Minute))
+}, fakeLogRecorder, newBackoff())
+clusterstate.RegisterProviders(NewMockMaxNodeProvisionTimeProvider(15 * time.Minute))
err := clusterstate.UpdateNodes([]*apiv1.Node{ng1_1, ng2_1}, nil, now)
assert.NoError(t, err)
assert.Equal(t, 1, len(clusterstate.GetClusterReadiness().NotStarted))
@@ -431,7 +432,8 @@ func TestExpiredScaleUp(t *testing.T) {
clusterstate := NewClusterStateRegistry(provider, ClusterStateRegistryConfig{
MaxTotalUnreadyPercentage: 10,
OkTotalUnreadyCount: 1,
-}, fakeLogRecorder, newBackoff(), NewStaticMaxNodeProvisionTimeProvider(2*time.Minute))
+}, fakeLogRecorder, newBackoff())
+clusterstate.RegisterProviders(NewMockMaxNodeProvisionTimeProvider(2 * time.Minute))
clusterstate.RegisterOrUpdateScaleUp(provider.GetNodeGroup("ng1"), 4, now.Add(-3*time.Minute))
err := clusterstate.UpdateNodes([]*apiv1.Node{ng1_1}, nil, now)
assert.NoError(t, err)
@@ -456,8 +458,8 @@ func TestRegisterScaleDown(t *testing.T) {
clusterstate := NewClusterStateRegistry(provider, ClusterStateRegistryConfig{
MaxTotalUnreadyPercentage: 10,
OkTotalUnreadyCount: 1,
-}, fakeLogRecorder, newBackoff(),
-NewStaticMaxNodeProvisionTimeProvider(15*time.Minute))
+}, fakeLogRecorder, newBackoff())
+clusterstate.RegisterProviders(NewMockMaxNodeProvisionTimeProvider(15 * time.Minute))

now := time.Now()

@@ -526,8 +528,8 @@ func TestUpcomingNodes(t *testing.T) {
clusterstate := NewClusterStateRegistry(provider, ClusterStateRegistryConfig{
MaxTotalUnreadyPercentage: 10,
OkTotalUnreadyCount: 1,
-}, fakeLogRecorder, newBackoff(),
-NewStaticMaxNodeProvisionTimeProvider(15*time.Minute))
+}, fakeLogRecorder, newBackoff())
+clusterstate.RegisterProviders(NewMockMaxNodeProvisionTimeProvider(15 * time.Minute))
err := clusterstate.UpdateNodes([]*apiv1.Node{ng1_1, ng2_1, ng3_1, ng4_1, ng5_1, ng5_2}, nil, now)
assert.NoError(t, err)
assert.Empty(t, clusterstate.GetScaleUpFailures())
@@ -574,8 +576,8 @@ func TestTaintBasedNodeDeletion(t *testing.T) {
clusterstate := NewClusterStateRegistry(provider, ClusterStateRegistryConfig{
MaxTotalUnreadyPercentage: 10,
OkTotalUnreadyCount: 1,
-}, fakeLogRecorder, newBackoff(),
-NewStaticMaxNodeProvisionTimeProvider(15*time.Minute))
+}, fakeLogRecorder, newBackoff())
+clusterstate.RegisterProviders(NewMockMaxNodeProvisionTimeProvider(15 * time.Minute))
err := clusterstate.UpdateNodes([]*apiv1.Node{ng1_1, ng1_2}, nil, now)
assert.NoError(t, err)
assert.Empty(t, clusterstate.GetScaleUpFailures())
@@ -596,8 +598,8 @@ func TestIncorrectSize(t *testing.T) {
clusterstate := NewClusterStateRegistry(provider, ClusterStateRegistryConfig{
MaxTotalUnreadyPercentage: 10,
OkTotalUnreadyCount: 1,
-}, fakeLogRecorder, newBackoff(),
-NewStaticMaxNodeProvisionTimeProvider(15*time.Minute))
+}, fakeLogRecorder, newBackoff())
+clusterstate.RegisterProviders(NewMockMaxNodeProvisionTimeProvider(15 * time.Minute))
now := time.Now()
clusterstate.UpdateNodes([]*apiv1.Node{ng1_1}, nil, now.Add(-5*time.Minute))
incorrect := clusterstate.incorrectNodeGroupSizes["ng1"]
@@ -633,8 +635,8 @@ func TestUnregisteredNodes(t *testing.T) {
clusterstate := NewClusterStateRegistry(provider, ClusterStateRegistryConfig{
MaxTotalUnreadyPercentage: 10,
OkTotalUnreadyCount: 1,
-}, fakeLogRecorder, newBackoff(),
-NewStaticMaxNodeProvisionTimeProvider(10*time.Second))
+}, fakeLogRecorder, newBackoff())
+clusterstate.RegisterProviders(NewMockMaxNodeProvisionTimeProvider(10 * time.Second))
err := clusterstate.UpdateNodes([]*apiv1.Node{ng1_1}, nil, time.Now().Add(-time.Minute))

assert.NoError(t, err)
@@ -683,8 +685,8 @@ func TestCloudProviderDeletedNodes(t *testing.T) {
clusterstate := NewClusterStateRegistry(provider, ClusterStateRegistryConfig{
MaxTotalUnreadyPercentage: 10,
OkTotalUnreadyCount: 1,
-}, fakeLogRecorder, newBackoff(),
-NewStaticMaxNodeProvisionTimeProvider(10*time.Second))
+}, fakeLogRecorder, newBackoff())
+clusterstate.RegisterProviders(NewMockMaxNodeProvisionTimeProvider(10 * time.Second))
now.Add(time.Minute)
err := clusterstate.UpdateNodes([]*apiv1.Node{ng1_1, ng1_2, noNgNode}, nil, now)

@@ -885,8 +887,8 @@ func TestScaleUpBackoff(t *testing.T) {
clusterstate := NewClusterStateRegistry(provider, ClusterStateRegistryConfig{
MaxTotalUnreadyPercentage: 10,
OkTotalUnreadyCount: 1,
-}, fakeLogRecorder, newBackoff(),
-NewStaticMaxNodeProvisionTimeProvider(120*time.Second))
+}, fakeLogRecorder, newBackoff())
+clusterstate.RegisterProviders(NewMockMaxNodeProvisionTimeProvider(120 * time.Second))

// After failed scale-up, node group should be still healthy, but should backoff from scale-ups
clusterstate.RegisterOrUpdateScaleUp(provider.GetNodeGroup("ng1"), 1, now.Add(-180*time.Second))
@@ -953,8 +955,8 @@ func TestGetClusterSize(t *testing.T) {
clusterstate := NewClusterStateRegistry(provider, ClusterStateRegistryConfig{
MaxTotalUnreadyPercentage: 10,
OkTotalUnreadyCount: 1,
-}, fakeLogRecorder, newBackoff(),
-NewStaticMaxNodeProvisionTimeProvider(15*time.Minute))
+}, fakeLogRecorder, newBackoff())
+clusterstate.RegisterProviders(NewMockMaxNodeProvisionTimeProvider(15 * time.Minute))

// There are 2 actual nodes in 2 node groups with target sizes of 5 and 1.
clusterstate.UpdateNodes([]*apiv1.Node{ng1_1, ng2_1, notAutoscaledNode}, nil, now)
@@ -1001,7 +1003,8 @@ func TestUpdateScaleUp(t *testing.T) {
},
fakeLogRecorder,
newBackoff(),
-NewStaticMaxNodeProvisionTimeProvider(10*time.Second))
+)
+clusterstate.RegisterProviders(NewMockMaxNodeProvisionTimeProvider(10 * time.Second))

clusterstate.RegisterOrUpdateScaleUp(provider.GetNodeGroup("ng1"), 100, now)
assert.Equal(t, clusterstate.scaleUpRequests["ng1"].Increase, 100)
@@ -1039,7 +1042,8 @@ func TestScaleUpFailures(t *testing.T) {

fakeClient := &fake.Clientset{}
fakeLogRecorder, _ := utils.NewStatusMapRecorder(fakeClient, "kube-system", kube_record.NewFakeRecorder(5), false, "my-cool-configmap")
-clusterstate := NewClusterStateRegistry(provider, ClusterStateRegistryConfig{}, fakeLogRecorder, newBackoff(), NewStaticMaxNodeProvisionTimeProvider(15*time.Minute))
+clusterstate := NewClusterStateRegistry(provider, ClusterStateRegistryConfig{}, fakeLogRecorder, newBackoff())
+clusterstate.RegisterProviders(NewMockMaxNodeProvisionTimeProvider(15 * time.Minute))

clusterstate.RegisterFailedScaleUp(provider.GetNodeGroup("ng1"), metrics.Timeout, now)
clusterstate.RegisterFailedScaleUp(provider.GetNodeGroup("ng2"), metrics.Timeout, now)
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/

-package clusterstate
+package providers

import (
"time"
@@ -25,7 +25,7 @@ import (
)

// NewDefaultMaxNodeProvisionTimeProvider returns the default maxNodeProvisionTimeProvider which uses the NodeGroupConfigProcessor.
-func NewDefaultMaxNodeProvisionTimeProvider(context *context.AutoscalingContext, nodeGroupConfigProcessor nodegroupconfig.NodeGroupConfigProcessor) maxNodeProvisionTimeProvider {
+func NewDefaultMaxNodeProvisionTimeProvider(context *context.AutoscalingContext, nodeGroupConfigProcessor nodegroupconfig.NodeGroupConfigProcessor) *defultMaxNodeProvisionTimeProvider {
return &defultMaxNodeProvisionTimeProvider{context: context, nodeGroupConfigProcessor: nodeGroupConfigProcessor}
}

@@ -38,17 +38,3 @@ type defultMaxNodeProvisionTimeProvider struct {
func (p *defultMaxNodeProvisionTimeProvider) GetMaxNodeProvisionTime(nodeGroup cloudprovider.NodeGroup) (time.Duration, error) {
return p.nodeGroupConfigProcessor.GetMaxNodeProvisionTime(p.context, nodeGroup)
}

-// NewStaticMaxNodeProvisionTimeProvider returns static maxNodeProvisionTimeProvider which returns constant MaxNodeProvisionTime for every NodeGroup. Can be used for convenient testing.
-func NewStaticMaxNodeProvisionTimeProvider(maxNodeProvisionTime time.Duration) maxNodeProvisionTimeProvider {
-return &staticMaxNodeProvisionTimeProvider{maxNodeProvisionTime}
-}
-
-type staticMaxNodeProvisionTimeProvider struct {
-staticMaxNodeProvisionTime time.Duration
-}
-
-// GetMaxNodeProvisionTime returns constant MaxNodeProvisionTime value that should be used for every NodeGroup.
-func (p *staticMaxNodeProvisionTimeProvider) GetMaxNodeProvisionTime(cloudprovider.NodeGroup) (time.Duration, error) {
-return p.staticMaxNodeProvisionTime, nil
-}
37 changes: 37 additions & 0 deletions cluster-autoscaler/clusterstate/testutils.go
@@ -0,0 +1,37 @@
/*
Copyright 2023 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package clusterstate

import (
"time"

"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
)

// NewMockMaxNodeProvisionTimeProvider returns static maxNodeProvisionTimeProvider which returns constant MaxNodeProvisionTime for every NodeGroup.
func NewMockMaxNodeProvisionTimeProvider(maxNodeProvisionTime time.Duration) *staticMockMaxNodeProvisionTimeProvider {
return &staticMockMaxNodeProvisionTimeProvider{maxNodeProvisionTime}
}

type staticMockMaxNodeProvisionTimeProvider struct {
staticMaxNodeProvisionTime time.Duration
}

// GetMaxNodeProvisionTime returns constant MaxNodeProvisionTime value that should be used for every NodeGroup.
func (p *staticMockMaxNodeProvisionTimeProvider) GetMaxNodeProvisionTime(cloudprovider.NodeGroup) (time.Duration, error) {
return p.staticMaxNodeProvisionTime, nil
}
8 changes: 7 additions & 1 deletion cluster-autoscaler/context/autoscaling_context.go
@@ -18,6 +18,7 @@ package context

import (
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
"k8s.io/autoscaler/cluster-autoscaler/clusterstate"
"k8s.io/autoscaler/cluster-autoscaler/clusterstate/utils"
"k8s.io/autoscaler/cluster-autoscaler/config"
"k8s.io/autoscaler/cluster-autoscaler/core/scaledown"
@@ -60,6 +61,8 @@ type AutoscalingContext struct {
ScaleDownActuator scaledown.Actuator
// RemainingPdbTracker tracks the remaining pod disruption budget
RemainingPdbTracker pdb.RemainingPdbTracker
+// ClusterStateRegistry tracks the health of the node groups and pending scale-ups and scale-downs
+ClusterStateRegistry *clusterstate.ClusterStateRegistry
}

// AutoscalingKubeClients contains all Kubernetes API clients,
@@ -105,7 +108,9 @@ func NewAutoscalingContext(
estimatorBuilder estimator.EstimatorBuilder,
processorCallbacks processor_callbacks.ProcessorCallbacks,
debuggingSnapshotter debuggingsnapshot.DebuggingSnapshotter,
-remainingPdbTracker pdb.RemainingPdbTracker) *AutoscalingContext {
+remainingPdbTracker pdb.RemainingPdbTracker,
+clusterStateRegistry *clusterstate.ClusterStateRegistry,
+) *AutoscalingContext {
return &AutoscalingContext{
AutoscalingOptions: options,
CloudProvider: cloudProvider,
@@ -117,6 +122,7 @@ func NewAutoscalingContext(
ProcessorCallbacks: processorCallbacks,
DebuggingSnapshotter: debuggingSnapshotter,
RemainingPdbTracker: remainingPdbTracker,
+ClusterStateRegistry: clusterStateRegistry,
}
}

6 changes: 4 additions & 2 deletions cluster-autoscaler/core/scaledown/actuation/actuator_test.go
@@ -829,7 +829,8 @@ func TestStartDeletion(t *testing.T) {
if err != nil {
t.Fatalf("Couldn't set up autoscaling context: %v", err)
}
-csr := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, ctx.LogRecorder, NewBackoff(), clusterstate.NewStaticMaxNodeProvisionTimeProvider(15*time.Minute))
+csr := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, ctx.LogRecorder, NewBackoff())
+csr.RegisterProviders(clusterstate.NewMockMaxNodeProvisionTimeProvider(15 * time.Minute))
for _, node := range tc.emptyNodes {
err := ctx.ClusterSnapshot.AddNodeWithPods(node, tc.pods[node.Name])
if err != nil {
@@ -1078,7 +1079,8 @@ func TestStartDeletionInBatchBasic(t *testing.T) {
if err != nil {
t.Fatalf("Couldn't set up autoscaling context: %v", err)
}
-csr := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, ctx.LogRecorder, NewBackoff(), clusterstate.NewStaticMaxNodeProvisionTimeProvider(15*time.Minute))
+csr := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, ctx.LogRecorder, NewBackoff())
+csr.RegisterProviders(clusterstate.NewMockMaxNodeProvisionTimeProvider(15 * time.Minute))
ndt := deletiontracker.NewNodeDeletionTracker(0)
actuator := Actuator{
ctx: &ctx, clusterState: csr, nodeDeletionTracker: ndt,
@@ -161,7 +161,8 @@ func TestRemove(t *testing.T) {
})

ctx, err := NewScaleTestAutoscalingContext(config.AutoscalingOptions{}, fakeClient, nil, provider, nil, nil)
-clusterStateRegistry := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, fakeLogRecorder, NewBackoff(), clusterstate.NewStaticMaxNodeProvisionTimeProvider(15*time.Minute))
+clusterStateRegistry := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, fakeLogRecorder, NewBackoff())
+clusterStateRegistry.RegisterProviders(clusterstate.NewMockMaxNodeProvisionTimeProvider(15 * time.Minute))
if err != nil {
t.Fatalf("Couldn't set up autoscaling context: %v", err)
}