Adding support for Debugging Snapshot
jayantjain93 committed Dec 29, 2021
1 parent 4b8c393 commit a8a6013
Showing 12 changed files with 595 additions and 50 deletions.
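The debuggingsnapshot package introduced by this commit is among the changed files but is not expanded on this page. Reconstructed purely from the call sites visible in the hunks below, the snapshotter exposes roughly the surface sketched here; the real interface may declare additional methods, and the parameter types are inferred from the callers rather than taken from the package itself.

```go
// Reconstructed sketch of the DebuggingSnapshotter surface, based only on the
// call sites in this diff; not the actual source of the debuggingsnapshot
// package, which may differ.
package debuggingsnapshot

import (
	apiv1 "k8s.io/api/core/v1"
	schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
)

// DebuggingSnapshotter is threaded through AutoscalerOptions and
// AutoscalingContext so that the core loop and processors can record state.
type DebuggingSnapshotter interface {
	// Called at the top of every RunOnce iteration to mark the start of a capture.
	StartDataCollection()
	// Lets processors skip the extra work when no snapshot has been requested.
	IsDataCollectionAllowed() bool
	// Recorded from the core loop once the cluster snapshot has been built.
	SetNodeGroupInfo(nodeInfos []*schedulerframework.NodeInfo)
	// Recorded by the pod list processor for pods that turned out to be schedulable.
	SetUnscheduledPodsCanBeScheduled(pods []*apiv1.Pod)
	// Flush is deferred in RunOnce; Cleanup runs when the autoscaler exits.
	Flush()
	Cleanup()
}

// NewDebuggingSnapshotter(enabled bool) is the constructor used by the test
// helper in this commit; its implementation is not shown in the expanded files.
```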
7 changes: 6 additions & 1 deletion cluster-autoscaler/context/autoscaling_context.go
@@ -20,6 +20,7 @@ import (
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
"k8s.io/autoscaler/cluster-autoscaler/clusterstate/utils"
"k8s.io/autoscaler/cluster-autoscaler/config"
"k8s.io/autoscaler/cluster-autoscaler/debuggingsnapshot"
"k8s.io/autoscaler/cluster-autoscaler/estimator"
"k8s.io/autoscaler/cluster-autoscaler/expander"
processor_callbacks "k8s.io/autoscaler/cluster-autoscaler/processors/callbacks"
@@ -50,6 +51,8 @@ type AutoscalingContext struct {
EstimatorBuilder estimator.EstimatorBuilder
// ProcessorCallbacks is interface defining extra callback methods which can be called by processors used in extension points.
ProcessorCallbacks processor_callbacks.ProcessorCallbacks
// DebuggingSnapshotter is the interface for capturing the debugging snapshot
DebuggingSnapshotter debuggingsnapshot.DebuggingSnapshotter
}

// AutoscalingKubeClients contains all Kubernetes API clients,
@@ -93,7 +96,8 @@ func NewAutoscalingContext(
cloudProvider cloudprovider.CloudProvider,
expanderStrategy expander.Strategy,
estimatorBuilder estimator.EstimatorBuilder,
processorCallbacks processor_callbacks.ProcessorCallbacks) *AutoscalingContext {
processorCallbacks processor_callbacks.ProcessorCallbacks,
debuggingSnapshotter debuggingsnapshot.DebuggingSnapshotter) *AutoscalingContext {
return &AutoscalingContext{
AutoscalingOptions: options,
CloudProvider: cloudProvider,
@@ -103,6 +107,7 @@ func NewAutoscalingContext(
ExpanderStrategy: expanderStrategy,
EstimatorBuilder: estimatorBuilder,
ProcessorCallbacks: processorCallbacks,
DebuggingSnapshotter: debuggingSnapshotter,
}
}

5 changes: 4 additions & 1 deletion cluster-autoscaler/core/autoscaler.go
@@ -25,6 +25,7 @@ import (
"k8s.io/autoscaler/cluster-autoscaler/clusterstate"
"k8s.io/autoscaler/cluster-autoscaler/config"
"k8s.io/autoscaler/cluster-autoscaler/context"
"k8s.io/autoscaler/cluster-autoscaler/debuggingsnapshot"
"k8s.io/autoscaler/cluster-autoscaler/estimator"
"k8s.io/autoscaler/cluster-autoscaler/expander"
"k8s.io/autoscaler/cluster-autoscaler/expander/factory"
@@ -48,6 +49,7 @@ type AutoscalerOptions struct {
EstimatorBuilder estimator.EstimatorBuilder
Processors *ca_processors.AutoscalingProcessors
Backoff backoff.Backoff
DebuggingSnapshotter debuggingsnapshot.DebuggingSnapshotter
}

// Autoscaler is the main component of CA which scales up/down node groups according to its configuration
@@ -76,7 +78,8 @@ func NewAutoscaler(opts AutoscalerOptions) (Autoscaler, errors.AutoscalerError)
opts.CloudProvider,
opts.ExpanderStrategy,
opts.EstimatorBuilder,
opts.Backoff), nil
opts.Backoff,
opts.DebuggingSnapshotter), nil
}

// Initialize default options if not provided.
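AutoscalerOptions gains a DebuggingSnapshotter field, and NewAutoscaler forwards it into NewStaticAutoscaler. The entrypoint that actually constructs the snapshotter (main.go) is among the files not expanded on this page, so the wiring below is only a plausible sketch; the flag variable and option values are illustrative, not taken from this commit.

```go
// Plausible caller-side wiring; only the DebuggingSnapshotter field and the
// core.NewAutoscaler call appear in this diff, the rest is illustrative.
snapshotter := debuggingsnapshot.NewDebuggingSnapshotter(*debuggingSnapshotEnabled)

opts := core.AutoscalerOptions{
	AutoscalingOptions:   autoscalingOptions,
	DebuggingSnapshotter: snapshotter,
	// ... CloudProvider, Processors, Backoff, etc. populated as before ...
}

autoscaler, err := core.NewAutoscaler(opts)
if err != nil {
	klog.Fatalf("Failed to create autoscaler: %v", err)
}
```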
20 changes: 20 additions & 0 deletions cluster-autoscaler/core/filter_out_schedulable.go
@@ -78,6 +78,12 @@ func (p *filterOutSchedulablePodListProcessor) Process(
if len(unschedulablePodsToHelp) != len(unschedulablePods) {
klog.V(2).Info("Schedulable pods present")
context.ProcessorCallbacks.DisableScaleDownForLoop()

if context.DebuggingSnapshotter.IsDataCollectionAllowed() {
schedulablePods := findSchedulablePods(unschedulablePods, unschedulablePodsToHelp)
context.DebuggingSnapshotter.SetUnscheduledPodsCanBeScheduled(schedulablePods)
}

} else {
klog.V(4).Info("No schedulable pods")
}
@@ -179,3 +185,17 @@ func moreImportantPod(pod1, pod2 *apiv1.Pod) bool {
p2 := corev1helpers.PodPriority(pod2)
return p1 > p2
}

func findSchedulablePods(allUnschedulablePods, podsStillUnschedulable []*apiv1.Pod) []*apiv1.Pod {
podsStillUnschedulableMap := make(map[*apiv1.Pod]struct{}, len(podsStillUnschedulable))
for _, x := range podsStillUnschedulable {
podsStillUnschedulableMap[x] = struct{}{}
}
var schedulablePods []*apiv1.Pod
for _, x := range allUnschedulablePods {
if _, found := podsStillUnschedulableMap[x]; !found {
schedulablePods = append(schedulablePods, x)
}
}
return schedulablePods
}
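The new findSchedulablePods helper is a set difference keyed on pod pointers: anything that was in the original unschedulable list but is absent from the still-unschedulable list is reported as now schedulable. A minimal illustration (the example function and fixtures are mine, assuming the BuildTestPod helper already used throughout these tests):

```go
package core

import (
	"fmt"

	apiv1 "k8s.io/api/core/v1"
	. "k8s.io/autoscaler/cluster-autoscaler/utils/test"
)

// Example_findSchedulablePods is an illustrative sketch, not part of the commit.
func Example_findSchedulablePods() {
	p1 := BuildTestPod("p1", 100, 100)
	p2 := BuildTestPod("p2", 100, 100)
	p3 := BuildTestPod("p3", 100, 100)

	allUnschedulable := []*apiv1.Pod{p1, p2, p3}
	stillUnschedulable := []*apiv1.Pod{p2}

	// p1 and p3 dropped out of the unschedulable set, so they are the pods
	// reported to the debugging snapshot as schedulable after all.
	for _, p := range findSchedulablePods(allUnschedulable, stillUnschedulable) {
		fmt.Println(p.Name)
	}
	// Output:
	// p1
	// p3
}
```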
30 changes: 15 additions & 15 deletions cluster-autoscaler/core/scale_down_test.go
@@ -136,7 +136,7 @@ func TestFindUnneededNodes(t *testing.T) {
},
UnremovableNodeRecheckTimeout: 5 * time.Minute,
}
context, err := NewScaleTestAutoscalingContext(options, &fake.Clientset{}, nil, provider, nil)
context, err := NewScaleTestAutoscalingContext(options, &fake.Clientset{}, nil, provider, nil, nil)
assert.NoError(t, err)

clusterStateRegistry := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, context.LogRecorder, newBackoff())
@@ -252,7 +252,7 @@ func TestFindUnneededGPUNodes(t *testing.T) {
},
UnremovableNodeRecheckTimeout: 5 * time.Minute,
}
context, err := NewScaleTestAutoscalingContext(options, &fake.Clientset{}, nil, provider, nil)
context, err := NewScaleTestAutoscalingContext(options, &fake.Clientset{}, nil, provider, nil, nil)
assert.NoError(t, err)

clusterStateRegistry := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, context.LogRecorder, newBackoff())
@@ -359,7 +359,7 @@ func TestFindUnneededWithPerNodeGroupThresholds(t *testing.T) {
}
for tn, tc := range cases {
t.Run(tn, func(t *testing.T) {
context, err := NewScaleTestAutoscalingContext(globalOptions, &fake.Clientset{}, nil, provider, nil)
context, err := NewScaleTestAutoscalingContext(globalOptions, &fake.Clientset{}, nil, provider, nil, nil)
assert.NoError(t, err)
clusterStateRegistry := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, context.LogRecorder, newBackoff())
sd := NewScaleDown(&context, NewTestProcessors(), clusterStateRegistry)
@@ -434,7 +434,7 @@ func TestPodsWithPreemptionsFindUnneededNodes(t *testing.T) {
ScaleDownUtilizationThreshold: 0.35,
},
}
context, err := NewScaleTestAutoscalingContext(options, &fake.Clientset{}, nil, provider, nil)
context, err := NewScaleTestAutoscalingContext(options, &fake.Clientset{}, nil, provider, nil, nil)
assert.NoError(t, err)

clusterStateRegistry := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, context.LogRecorder, newBackoff())
@@ -491,7 +491,7 @@ func TestFindUnneededMaxCandidates(t *testing.T) {
ScaleDownCandidatesPoolRatio: 1,
ScaleDownCandidatesPoolMinCount: 1000,
}
context, err := NewScaleTestAutoscalingContext(options, &fake.Clientset{}, nil, provider, nil)
context, err := NewScaleTestAutoscalingContext(options, &fake.Clientset{}, nil, provider, nil, nil)
assert.NoError(t, err)

clusterStateRegistry := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, context.LogRecorder, newBackoff())
@@ -566,7 +566,7 @@ func TestFindUnneededEmptyNodes(t *testing.T) {
ScaleDownCandidatesPoolRatio: 1.0,
ScaleDownCandidatesPoolMinCount: 1000,
}
context, err := NewScaleTestAutoscalingContext(options, &fake.Clientset{}, nil, provider, nil)
context, err := NewScaleTestAutoscalingContext(options, &fake.Clientset{}, nil, provider, nil, nil)
assert.NoError(t, err)

clusterStateRegistry := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, context.LogRecorder, newBackoff())
@@ -616,7 +616,7 @@ func TestFindUnneededNodePool(t *testing.T) {
ScaleDownCandidatesPoolRatio: 0.1,
ScaleDownCandidatesPoolMinCount: 10,
}
context, err := NewScaleTestAutoscalingContext(options, &fake.Clientset{}, nil, provider, nil)
context, err := NewScaleTestAutoscalingContext(options, &fake.Clientset{}, nil, provider, nil, nil)
assert.NoError(t, err)

clusterStateRegistry := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, context.LogRecorder, newBackoff())
@@ -760,7 +760,7 @@ func TestDeleteNode(t *testing.T) {

// build context
registry := kube_util.NewListerRegistry(nil, nil, nil, nil, nil, nil, nil, nil, nil, nil)
context, err := NewScaleTestAutoscalingContext(config.AutoscalingOptions{}, fakeClient, registry, provider, nil)
context, err := NewScaleTestAutoscalingContext(config.AutoscalingOptions{}, fakeClient, registry, provider, nil, nil)
assert.NoError(t, err)

clusterStateRegistry := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, context.LogRecorder, newBackoff())
@@ -1146,7 +1146,7 @@ func TestScaleDown(t *testing.T) {
assert.NoError(t, err)
registry := kube_util.NewListerRegistry(nil, nil, nil, nil, nil, nil, nil, jobLister, nil, nil)

context, err := NewScaleTestAutoscalingContext(options, fakeClient, registry, provider, nil)
context, err := NewScaleTestAutoscalingContext(options, fakeClient, registry, provider, nil, nil)
assert.NoError(t, err)
nodes := []*apiv1.Node{n1, n2}

@@ -1331,7 +1331,7 @@ func TestDaemonSetEvictionForEmptyNodes(t *testing.T) {
provider.AddNode("ng1", n1)
registry := kube_util.NewListerRegistry(nil, nil, nil, nil, nil, nil, nil, nil, nil, nil)

context, err := NewScaleTestAutoscalingContext(options, fakeClient, registry, provider, nil)
context, err := NewScaleTestAutoscalingContext(options, fakeClient, registry, provider, nil, nil)
assert.NoError(t, err)

if scenario.nodeInfoSuccess {
@@ -1554,7 +1554,7 @@ func simpleScaleDownEmpty(t *testing.T, config *scaleTestConfig) {
assert.NotNil(t, provider)

registry := kube_util.NewListerRegistry(nil, nil, nil, nil, nil, nil, nil, nil, nil, nil)
context, err := NewScaleTestAutoscalingContext(config.options, fakeClient, registry, provider, nil)
context, err := NewScaleTestAutoscalingContext(config.options, fakeClient, registry, provider, nil, nil)
assert.NoError(t, err)

clusterStateRegistry := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, context.LogRecorder, newBackoff())
@@ -1643,7 +1643,7 @@ func TestNoScaleDownUnready(t *testing.T) {
MaxGracefulTerminationSec: 60,
}
registry := kube_util.NewListerRegistry(nil, nil, nil, nil, nil, nil, nil, nil, nil, nil)
context, err := NewScaleTestAutoscalingContext(options, fakeClient, registry, provider, nil)
context, err := NewScaleTestAutoscalingContext(options, fakeClient, registry, provider, nil, nil)
assert.NoError(t, err)

nodes := []*apiv1.Node{n1, n2}
@@ -1756,7 +1756,7 @@ func TestScaleDownNoMove(t *testing.T) {
assert.NoError(t, err)
registry := kube_util.NewListerRegistry(nil, nil, nil, nil, nil, nil, nil, jobLister, nil, nil)

context, err := NewScaleTestAutoscalingContext(options, fakeClient, registry, provider, nil)
context, err := NewScaleTestAutoscalingContext(options, fakeClient, registry, provider, nil, nil)
assert.NoError(t, err)

nodes := []*apiv1.Node{n1, n2}
@@ -2006,7 +2006,7 @@ func TestSoftTaint(t *testing.T) {
assert.NoError(t, err)
registry := kube_util.NewListerRegistry(nil, nil, nil, nil, nil, nil, nil, jobLister, nil, nil)

context, err := NewScaleTestAutoscalingContext(options, fakeClient, registry, provider, nil)
context, err := NewScaleTestAutoscalingContext(options, fakeClient, registry, provider, nil, nil)
assert.NoError(t, err)

clusterStateRegistry := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, context.LogRecorder, newBackoff())
@@ -2127,7 +2127,7 @@ func TestSoftTaintTimeLimit(t *testing.T) {
assert.NoError(t, err)
registry := kube_util.NewListerRegistry(nil, nil, nil, nil, nil, nil, nil, jobLister, nil, nil)

context, err := NewScaleTestAutoscalingContext(options, fakeClient, registry, provider, nil)
context, err := NewScaleTestAutoscalingContext(options, fakeClient, registry, provider, nil, nil)
assert.NoError(t, err)

clusterStateRegistry := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, context.LogRecorder, newBackoff())
20 changes: 13 additions & 7 deletions cluster-autoscaler/core/scale_test_common.go
@@ -21,6 +21,8 @@ import (
"reflect"
"testing"

"k8s.io/autoscaler/cluster-autoscaler/debuggingsnapshot"

"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
testcloudprovider "k8s.io/autoscaler/cluster-autoscaler/cloudprovider/test"
@@ -155,7 +157,7 @@ func NewTestProcessors() *processors.AutoscalingProcessors {
func NewScaleTestAutoscalingContext(
options config.AutoscalingOptions, fakeClient kube_client.Interface,
listers kube_util.ListerRegistry, provider cloudprovider.CloudProvider,
processorCallbacks processor_callbacks.ProcessorCallbacks) (context.AutoscalingContext, error) {
processorCallbacks processor_callbacks.ProcessorCallbacks, debuggingSnapshotter debuggingsnapshot.DebuggingSnapshotter) (context.AutoscalingContext, error) {
// Not enough buffer space causes the test to hang without printing any logs.
// This is not useful.
fakeRecorder := kube_record.NewFakeRecorder(100)
@@ -170,6 +172,9 @@ func NewScaleTestAutoscalingContext(
if err != nil {
return context.AutoscalingContext{}, err
}
if debuggingSnapshotter == nil {
debuggingSnapshotter = debuggingsnapshot.NewDebuggingSnapshotter(false)
}
clusterSnapshot := simulator.NewBasicClusterSnapshot()
return context.AutoscalingContext{
AutoscalingOptions: options,
@@ -179,12 +184,13 @@ func NewScaleTestAutoscalingContext(
LogRecorder: fakeLogRecorder,
ListerRegistry: listers,
},
CloudProvider: provider,
PredicateChecker: predicateChecker,
ClusterSnapshot: clusterSnapshot,
ExpanderStrategy: random.NewStrategy(),
EstimatorBuilder: estimatorBuilder,
ProcessorCallbacks: processorCallbacks,
CloudProvider: provider,
PredicateChecker: predicateChecker,
ClusterSnapshot: clusterSnapshot,
ExpanderStrategy: random.NewStrategy(),
EstimatorBuilder: estimatorBuilder,
ProcessorCallbacks: processorCallbacks,
DebuggingSnapshotter: debuggingSnapshotter,
}, nil
}

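With the extra parameter, the many existing tests updated in this commit simply pass nil and fall back to what is presumably a disabled snapshotter (NewDebuggingSnapshotter(false)), while a test that wants to exercise snapshotting can inject its own. An illustrative sketch, not part of this commit, assuming the fixtures and imports already used by the surrounding test files:

```go
// Illustrative only: inject a snapshotter rather than relying on the nil
// fallback. testprovider, fake, config and assert are the helpers already
// imported by the core test files.
func TestContextUsesInjectedSnapshotter(t *testing.T) {
	provider := testprovider.NewTestCloudProvider(nil, nil)
	snapshotter := debuggingsnapshot.NewDebuggingSnapshotter(true)

	context, err := NewScaleTestAutoscalingContext(
		config.AutoscalingOptions{}, &fake.Clientset{}, nil, provider, nil, snapshotter)
	assert.NoError(t, err)

	// The injected instance is wired into the context; passing nil instead
	// would fall back to debuggingsnapshot.NewDebuggingSnapshotter(false).
	assert.Equal(t, snapshotter, context.DebuggingSnapshotter)
}
```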
14 changes: 7 additions & 7 deletions cluster-autoscaler/core/scale_up_test.go
@@ -518,7 +518,7 @@ func runSimpleScaleUpTest(t *testing.T, config *scaleTestConfig) *scaleTestResul
assert.NotNil(t, provider)

// Create context with non-random expander strategy.
context, err := NewScaleTestAutoscalingContext(config.options, &fake.Clientset{}, listers, provider, nil)
context, err := NewScaleTestAutoscalingContext(config.options, &fake.Clientset{}, listers, provider, nil, nil)
assert.NoError(t, err)

expander := reportingStrategy{
@@ -684,7 +684,7 @@ func TestScaleUpUnhealthy(t *testing.T) {
MaxCoresTotal: config.DefaultMaxClusterCores,
MaxMemoryTotal: config.DefaultMaxClusterMemory,
}
context, err := NewScaleTestAutoscalingContext(options, &fake.Clientset{}, listers, provider, nil)
context, err := NewScaleTestAutoscalingContext(options, &fake.Clientset{}, listers, provider, nil, nil)
assert.NoError(t, err)

nodes := []*apiv1.Node{n1, n2}
@@ -724,7 +724,7 @@ func TestScaleUpNoHelp(t *testing.T) {
MaxCoresTotal: config.DefaultMaxClusterCores,
MaxMemoryTotal: config.DefaultMaxClusterMemory,
}
context, err := NewScaleTestAutoscalingContext(options, &fake.Clientset{}, listers, provider, nil)
context, err := NewScaleTestAutoscalingContext(options, &fake.Clientset{}, listers, provider, nil, nil)
assert.NoError(t, err)

nodes := []*apiv1.Node{n1}
@@ -790,7 +790,7 @@ func TestScaleUpBalanceGroups(t *testing.T) {
MaxCoresTotal: config.DefaultMaxClusterCores,
MaxMemoryTotal: config.DefaultMaxClusterMemory,
}
context, err := NewScaleTestAutoscalingContext(options, &fake.Clientset{}, listers, provider, nil)
context, err := NewScaleTestAutoscalingContext(options, &fake.Clientset{}, listers, provider, nil, nil)
assert.NoError(t, err)

nodeInfos, _ := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider().Process(&context, nodes, []*appsv1.DaemonSet{}, nil)
@@ -851,7 +851,7 @@ func TestScaleUpAutoprovisionedNodeGroup(t *testing.T) {
}
podLister := kube_util.NewTestPodLister([]*apiv1.Pod{})
listers := kube_util.NewListerRegistry(nil, nil, podLister, nil, nil, nil, nil, nil, nil, nil)
context, err := NewScaleTestAutoscalingContext(options, fakeClient, listers, provider, nil)
context, err := NewScaleTestAutoscalingContext(options, fakeClient, listers, provider, nil, nil)
assert.NoError(t, err)

clusterState := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, context.LogRecorder, newBackoff())
@@ -904,7 +904,7 @@ func TestScaleUpBalanceAutoprovisionedNodeGroups(t *testing.T) {
}
podLister := kube_util.NewTestPodLister([]*apiv1.Pod{})
listers := kube_util.NewListerRegistry(nil, nil, podLister, nil, nil, nil, nil, nil, nil, nil)
context, err := NewScaleTestAutoscalingContext(options, fakeClient, listers, provider, nil)
context, err := NewScaleTestAutoscalingContext(options, fakeClient, listers, provider, nil, nil)
assert.NoError(t, err)

clusterState := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, context.LogRecorder, newBackoff())
@@ -980,7 +980,7 @@ func TestCheckScaleUpDeltaWithinLimits(t *testing.T) {

func TestAuthError(t *testing.T) {
metrics.RegisterAll(false)
context, err := NewScaleTestAutoscalingContext(config.AutoscalingOptions{}, &fake.Clientset{}, nil, nil, nil)
context, err := NewScaleTestAutoscalingContext(config.AutoscalingOptions{}, &fake.Clientset{}, nil, nil, nil, nil)
assert.NoError(t, err)

nodeGroup := &mockprovider.NodeGroup{}
17 changes: 15 additions & 2 deletions cluster-autoscaler/core/static_autoscaler.go
@@ -23,6 +23,7 @@ import (

apiv1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/autoscaler/cluster-autoscaler/debuggingsnapshot"
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"

"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
@@ -119,7 +120,8 @@ func NewStaticAutoscaler(
cloudProvider cloudprovider.CloudProvider,
expanderStrategy expander.Strategy,
estimatorBuilder estimator.EstimatorBuilder,
backoff backoff.Backoff) *StaticAutoscaler {
backoff backoff.Backoff,
debuggingSnapshotter debuggingsnapshot.DebuggingSnapshotter) *StaticAutoscaler {

processorCallbacks := newStaticAutoscalerProcessorCallbacks()
autoscalingContext := context.NewAutoscalingContext(
@@ -130,7 +132,8 @@
cloudProvider,
expanderStrategy,
estimatorBuilder,
processorCallbacks)
processorCallbacks,
debuggingSnapshotter)

clusterStateConfig := clusterstate.ClusterStateRegistryConfig{
MaxTotalUnreadyPercentage: opts.MaxTotalUnreadyPercentage,
@@ -220,6 +223,8 @@ func (a *StaticAutoscaler) RunOnce(currentTime time.Time) errors.AutoscalerError
a.cleanUpIfRequired()
a.processorCallbacks.reset()
a.clusterStateRegistry.PeriodicCleanup()
a.DebuggingSnapshotter.StartDataCollection()
defer a.DebuggingSnapshotter.Flush()

unschedulablePodLister := a.UnschedulablePodLister()
scheduledPodLister := a.ScheduledPodLister()
@@ -409,6 +414,13 @@ func (a *StaticAutoscaler) RunOnce(currentTime time.Time) errors.AutoscalerError
}
}

l, err := a.ClusterSnapshot.NodeInfos().List()
if err != nil {
klog.Errorf("Unable to fetch NodeInfo List for Debugging Snapshot, %v", err)
} else {
a.AutoscalingContext.DebuggingSnapshotter.SetNodeGroupInfo(l)
}

unschedulablePodsToHelp, _ := a.processors.PodListProcessor.Process(a.AutoscalingContext, unschedulablePods)

// finally, filter out pods that are too "young" to safely be considered for a scale-up (delay is configurable)
@@ -715,6 +727,7 @@ func (a *StaticAutoscaler) filterOutYoungPods(allUnschedulablePods []*apiv1.Pod,
// ExitCleanUp performs all necessary clean-ups when the autoscaler's exiting.
func (a *StaticAutoscaler) ExitCleanUp() {
a.processors.CleanUp()
a.DebuggingSnapshotter.Cleanup()

if !a.AutoscalingContext.WriteStatusConfigMap {
return
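Taken together, the static autoscaler drives the snapshotter through a fixed lifecycle on every iteration. The condensed sketch below simply collects the calls added in the hunks above into one place (error handling and all unrelated RunOnce logic elided); it is a reading aid, not code from the commit.

```go
// Condensed view of the snapshotter calls added to static_autoscaler.go;
// everything else that RunOnce does is elided.
func (a *StaticAutoscaler) snapshotLifecycleSketch() {
	a.DebuggingSnapshotter.StartDataCollection() // top of RunOnce
	defer a.DebuggingSnapshotter.Flush()         // always flush when the loop ends

	// After the cluster snapshot is built, record the node infos for the
	// debugging snapshot (errors are only logged in the real code).
	if nodeInfos, err := a.ClusterSnapshot.NodeInfos().List(); err == nil {
		a.AutoscalingContext.DebuggingSnapshotter.SetNodeGroupInfo(nodeInfos)
	}

	// The pod list processor (filter_out_schedulable.go) additionally calls
	// SetUnscheduledPodsCanBeScheduled when IsDataCollectionAllowed() is true,
	// and ExitCleanUp calls DebuggingSnapshotter.Cleanup() on shutdown.
}
```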
(The remaining 5 of the 12 changed files are not shown on this page.)
