Move to table-based tests.

fische committed Jan 22, 2024
1 parent a47ef89 commit e8e3ad0
Showing 1 changed file with 65 additions and 88 deletions.
cluster-autoscaler/core/static_autoscaler_test.go: 153 changes (65 additions & 88 deletions)

@@ -1123,15 +1123,6 @@ func TestStaticAutoscalerRunOnceWithFilteringOnUpcomingNodesEnabledNoScaleUp(t *
 
 // We should not touch taints from unselected node groups.
 func TestStaticAutoscalerRunOnceWithUnselectedNodeGroups(t *testing.T) {
-	readyNodeLister := kubernetes.NewTestNodeLister(nil)
-	allNodeLister := kubernetes.NewTestNodeLister(nil)
-	allPodListerMock := &podListerMock{}
-	podDisruptionBudgetListerMock := &podDisruptionBudgetListerMock{}
-	daemonSetListerMock := &daemonSetListerMock{}
-	onScaleUpMock := &onScaleUpMock{}
-	onScaleDownMock := &onScaleDownMock{}
-	deleteFinished := make(chan bool, 1)
-
 	n1 := BuildTestNode("n1", 1000, 1000)
 	n1.Spec.Taints = append(n1.Spec.Taints, apiv1.Taint{
 		Key:    taints.DeletionCandidateTaint,
@@ -1150,98 +1141,84 @@ func TestStaticAutoscalerRunOnceWithUnselectedNodeGroups(t *testing.T) {
 	p1 := BuildTestPod("p1", 600, 100)
 	p1.Spec.NodeName = n1.Name
 
-	provider := testprovider.NewTestAutoprovisioningCloudProvider(
-		func(id string, delta int) error {
-			return onScaleUpMock.ScaleUp(id, delta)
-		}, func(id string, name string) error {
-			ret := onScaleDownMock.ScaleDown(id, name)
-			deleteFinished <- true
-			return ret
-		},
-		nil, nil, nil, nil)
+	// set minimal cloud provider where only ng1 is defined as selected node group
+	provider := testprovider.NewTestCloudProvider(nil, nil)
 	provider.AddNodeGroup("ng1", 1, 10, 1)
 	provider.AddNode("ng1", n1)
-	ng1 := reflect.ValueOf(provider.GetNodeGroup("ng1")).Interface().(*testprovider.TestNodeGroup)
-	assert.NotNil(t, ng1)
 	assert.NotNil(t, provider)
 
-	// Create context with mocked lister registry.
-	options := config.AutoscalingOptions{
-		NodeGroupDefaults: config.NodeGroupAutoscalingOptions{
-			ScaleDownUnneededTime:         time.Minute,
-			ScaleDownUnreadyTime:          time.Minute,
-			ScaleDownUtilizationThreshold: 0.5,
-			MaxNodeProvisionTime:          10 * time.Second,
+	tests := map[string]struct {
+		node           *apiv1.Node
+		pods           []*apiv1.Pod
+		expectedTaints []apiv1.Taint
+	}{
+		"Node from selected node groups can get their deletion candidate taints removed": {
+			node:           n1,
+			pods:           []*apiv1.Pod{p1},
+			expectedTaints: []apiv1.Taint{},
+		},
+		"Node from non-selected node groups should keep their deletion candidate taints": {
+			node:           n2,
+			pods:           nil,
+			expectedTaints: n2.Spec.Taints,
 		},
-		EstimatorName:           estimator.BinpackingEstimatorName,
-		EnforceNodeGroupMinSize: true,
-		ScaleDownEnabled:        true,
-		MaxNodesTotal:           1,
-		MaxCoresTotal:           10,
-		MaxMemoryTotal:          100000,
 	}
-	processorCallbacks := newStaticAutoscalerProcessorCallbacks()
 
-	clientset := fake.NewSimpleClientset(n1, n2)
-	context, err := NewScaleTestAutoscalingContext(options, clientset, nil, provider, processorCallbacks, nil)
-	assert.NoError(t, err)
+	for name, test := range tests {
+		// prevent issues with scoping, we should be able to get rid of that with Go 1.22
+		test := test
+		t.Run(name, func(t *testing.T) {
+			t.Parallel()
+			// Create fake listers for the generated nodes, nothing returned by the rest (but the ones used in the tested path have to be defined).
+			readyNodeLister := kubernetes.NewTestNodeLister([]*apiv1.Node{test.node})
+			allNodeLister := kubernetes.NewTestNodeLister([]*apiv1.Node{test.node})
+			allPodListerMock := kubernetes.NewTestPodLister(test.pods)
+			daemonSetLister, err := kubernetes.NewTestDaemonSetLister(nil)
+			assert.NoError(t, err)
+			listerRegistry := kube_util.NewListerRegistry(allNodeLister, readyNodeLister, allPodListerMock,
+				kubernetes.NewTestPodDisruptionBudgetLister(nil), daemonSetLister,
+				nil, nil, nil, nil)
 
+			// Create context with minimal autoscalingOptions that guarantee we reach the tested logic.
+			autoscalingOptions := config.AutoscalingOptions{
+				ScaleDownEnabled:      true,
+				MaxBulkSoftTaintCount: 10,
+				MaxBulkSoftTaintTime:  3 * time.Second,
+			}
+			processorCallbacks := newStaticAutoscalerProcessorCallbacks()
+			clientset := fake.NewSimpleClientset(test.node)
+			context, err := NewScaleTestAutoscalingContext(autoscalingOptions, clientset, listerRegistry, provider, processorCallbacks, nil)
+			assert.NoError(t, err)
 
-	setUpScaleDownActuator(&context, options)
+			// Create CSR with unhealthy cluster protection effectively disabled, to guarantee we reach the tested logic.
+			clusterStateConfig := clusterstate.ClusterStateRegistryConfig{
+				OkTotalUnreadyCount: 1,
+			}
+			clusterState := clusterstate.NewClusterStateRegistry(provider, clusterStateConfig, context.LogRecorder, NewBackoff(), nodegroupconfig.NewDefaultNodeGroupConfigProcessor(autoscalingOptions.NodeGroupDefaults))
 
-	listerRegistry := kube_util.NewListerRegistry(allNodeLister, readyNodeLister, allPodListerMock, podDisruptionBudgetListerMock, daemonSetListerMock,
-		nil, nil, nil, nil)
-	context.ListerRegistry = listerRegistry
+			// Setting the Actuator is necessary for testing any scale-down logic, it shouldn't have anything to do in this test.
+			sdActuator := actuation.NewActuator(&context, clusterState, deletiontracker.NewNodeDeletionTracker(0*time.Second), options.NodeDeleteOptions{}, nil, NewTestProcessors(&context).NodeGroupConfigProcessor)
+			context.ScaleDownActuator = sdActuator
 
-	clusterStateConfig := clusterstate.ClusterStateRegistryConfig{
-		OkTotalUnreadyCount: 1,
-	}
-	processors := NewTestProcessors(&context)
-	clusterState := clusterstate.NewClusterStateRegistry(provider, clusterStateConfig, context.LogRecorder, NewBackoff(), nodegroupconfig.NewDefaultNodeGroupConfigProcessor(options.NodeGroupDefaults))
-	sdPlanner, sdActuator := newScaleDownPlannerAndActuator(&context, processors, clusterState)
-	suOrchestrator := orchestrator.New()
-	suOrchestrator.Initialize(&context, processors, clusterState, taints.TaintConfig{})
+			// Fake planner that keeps track of the scale-down candidates passed to UpdateClusterState.
+			sdPlanner := &candidateTrackingFakePlanner{}
 
-	context.AutoscalingOptions.MaxBulkSoftTaintCount = 10
-	context.AutoscalingOptions.MaxBulkSoftTaintTime = 3 * time.Second
+			autoscaler := &StaticAutoscaler{
+				AutoscalingContext:   &context,
+				clusterStateRegistry: clusterState,
+				scaleDownPlanner:     sdPlanner,
+				scaleDownActuator:    sdActuator,
+				processors:           NewTestProcessors(&context),
+				processorCallbacks:   processorCallbacks,
+			}
+
-	autoscaler := &StaticAutoscaler{
-		AutoscalingContext:    &context,
-		clusterStateRegistry:  clusterState,
-		lastScaleUpTime:       time.Now(),
-		lastScaleDownFailTime: time.Now(),
-		scaleDownPlanner:      sdPlanner,
-		scaleDownActuator:     sdActuator,
-		scaleUpOrchestrator:   suOrchestrator,
-		processors:            processors,
-		processorCallbacks:    processorCallbacks,
-		initialized:           true,
+			err = autoscaler.RunOnce(time.Now().Add(5 * time.Hour))
+			assert.NoError(t, err)
+			newNode, err := clientset.CoreV1().Nodes().Get(stdcontext.TODO(), test.node.Name, metav1.GetOptions{})
+			assert.NoError(t, err)
+			assert.Equal(t, test.expectedTaints, newNode.Spec.Taints)
+		})
 	}
-
-	// Node from selected node groups can get their deletion candidate taints removed.
-	readyNodeLister.SetNodes([]*apiv1.Node{n1})
-	allNodeLister.SetNodes([]*apiv1.Node{n1})
-	allPodListerMock.On("List").Return([]*apiv1.Pod{p1}, nil).Twice()
-	daemonSetListerMock.On("List", labels.Everything()).Return([]*appsv1.DaemonSet{}, nil)
-	podDisruptionBudgetListerMock.On("List").Return([]*policyv1.PodDisruptionBudget{}, nil)
-
-	err = autoscaler.RunOnce(time.Now().Add(5 * time.Hour))
-	assert.NoError(t, err)
-	newN1, err := clientset.CoreV1().Nodes().Get(stdcontext.TODO(), n1.Name, metav1.GetOptions{})
-	assert.NoError(t, err)
-	assert.Empty(t, newN1.Spec.Taints)
-
-	// Node from non-selected node groups should keep their deletion candidate taints.
-	readyNodeLister.SetNodes([]*apiv1.Node{n2})
-	allNodeLister.SetNodes([]*apiv1.Node{n2})
-	allPodListerMock.On("List").Return([]*apiv1.Pod{}, nil).Twice()
-	daemonSetListerMock.On("List", labels.Everything()).Return([]*appsv1.DaemonSet{}, nil)
-	podDisruptionBudgetListerMock.On("List").Return([]*policyv1.PodDisruptionBudget{}, nil)
-
-	err = autoscaler.RunOnce(time.Now().Add(5 * time.Hour))
-	assert.NoError(t, err)
-	newN2, err := clientset.CoreV1().Nodes().Get(stdcontext.TODO(), n2.Name, metav1.GetOptions{})
-	assert.NoError(t, err)
-	assert.NotEmpty(t, newN2.Spec.Taints)
 }
 
 func TestStaticAutoscalerRunOnceWithBypassedSchedulers(t *testing.T) {
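
Note on the pattern: the table-driven layout adopted above is idiomatic Go testing. Cases live in a map keyed by a descriptive name, and each case runs as an isolated, parallel subtest. Below is a minimal, self-contained sketch of the same structure; the double function and the case names are invented for illustration and are not autoscaler code. The sketch also shows the test := test rebinding that the in-diff comment mentions: before Go 1.22, the range variable was shared across iterations, so parallel subtests that captured it directly would all observe the final case.

package example

import "testing"

// double is a trivial function under test, invented for this sketch.
func double(n int) int { return 2 * n }

func TestDouble(t *testing.T) {
	// Cases are keyed by a descriptive name, mirroring the tests map above.
	tests := map[string]struct {
		input int
		want  int
	}{
		"zero stays zero":     {input: 0, want: 0},
		"two doubles to four": {input: 2, want: 4},
	}

	for name, test := range tests {
		// Rebind the range variable so each parallel subtest captures its own
		// copy; Go 1.22's per-iteration loop variables make this redundant.
		test := test
		t.Run(name, func(t *testing.T) {
			t.Parallel()
			if got := double(test.input); got != test.want {
				t.Errorf("double(%d) = %d, want %d", test.input, got, test.want)
			}
		})
	}
}

Because the cases live in a map, they run in randomized order, which helps surface accidental coupling between cases. A single case can still be selected with go test -run (for example, go test -run 'TestDouble/two_doubles_to_four'), since Go replaces spaces in subtest names with underscores.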

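Note on candidateTrackingFakePlanner: the diff constructs this fake, but its definition lives elsewhere in the test file; per the accompanying comment, it records the scale-down candidates passed to UpdateClusterState. The sketch below illustrates that idea in a self-contained form. The Planner interface, its method signature, and the use of plain node names are simplifications invented for this sketch; the real scale-down planner interface in cluster-autoscaler is wider and works with node objects.

package example

// Planner is a stand-in interface invented for this sketch.
type Planner interface {
	UpdateClusterState(scaleDownCandidates []string) error
}

// candidateTrackingFakePlanner records every candidate passed to
// UpdateClusterState so a test can later assert on which nodes were
// considered for scale-down.
type candidateTrackingFakePlanner struct {
	candidates map[string]bool
}

// Compile-time check that the fake satisfies the interface.
var _ Planner = &candidateTrackingFakePlanner{}

func (p *candidateTrackingFakePlanner) UpdateClusterState(scaleDownCandidates []string) error {
	if p.candidates == nil {
		p.candidates = map[string]bool{}
	}
	for _, name := range scaleDownCandidates {
		p.candidates[name] = true
	}
	return nil
}

Recording fakes like this are what let the commit drop the mock-expectation style deleted above (the .On("List") setups): each subtest hands in fakes preloaded with fixed data and asserts on observable output, so the cases no longer have to share and resequence a single set of mocks.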