Merge pull request kubernetes#5330 from olagacek/master
Use ScaleDownSetProcessor.GetNodesToRemove in scale down planner to filter NodesToDelete.
k8s-ci-robot authored Nov 29, 2022
2 parents 41e1ea6 + a20685b commit aa7733c
Showing 3 changed files with 45 additions and 30 deletions.
6 changes: 3 additions & 3 deletions cluster-autoscaler/core/scaledown/legacy/legacy.go
@@ -254,7 +254,7 @@ func (sd *ScaleDown) mapNodesToStatusScaleDownNodes(nodes []*apiv1.Node, nodeGro
}

// NodesToDelete selects the nodes to delete for scale down.
- func (sd *ScaleDown) NodesToDelete(currentTime time.Time, pdbs []*policyv1.PodDisruptionBudget) (empty, drain []*apiv1.Node, res status.ScaleDownResult, err errors.AutoscalerError) {
+ func (sd *ScaleDown) NodesToDelete(currentTime time.Time, pdbs []*policyv1.PodDisruptionBudget) (_, drain []*apiv1.Node, res status.ScaleDownResult, err errors.AutoscalerError) {
_, drained := sd.nodeDeletionTracker.DeletionsInProgress()
if len(drained) > 0 {
return nil, nil, status.ScaleDownInProgress, nil
@@ -288,10 +288,10 @@ func (sd *ScaleDown) NodesToDelete(currentTime time.Time, pdbs []*policyv1.PodDi
}
candidateNames := make([]string, 0, len(empty)+len(nonEmpty))
for _, n := range empty {
- candidateNames = append(candidateNames, n.Name)
+ candidateNames = append(candidateNames, n.Node.Name)
}
for _, n := range nonEmpty {
- candidateNames = append(candidateNames, n.Name)
+ candidateNames = append(candidateNames, n.Node.Name)
}

if len(candidateNames) == 0 {
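Why n.Name becomes n.Node.Name: with the RemovableAt change further down, the empty and nonEmpty candidate lists now hold simulator.NodeToBeRemoved values, which wrap the node together with the pods that would need rescheduling, rather than bare *apiv1.Node pointers. Below is a minimal, self-contained sketch of that access pattern; the stand-in types are reduced to the two fields this diff relies on and are not the full upstream definitions.

package main

import "fmt"

// Stand-ins for the upstream types, trimmed to the fields used in this diff
// (the real NodeToBeRemoved lives in the cluster-autoscaler simulator package).
type Node struct{ Name string }
type Pod struct{ Name string }

type NodeToBeRemoved struct {
	Node             *Node  // the node itself
	PodsToReschedule []*Pod // pods that must move before the node can be deleted
}

func main() {
	candidates := []NodeToBeRemoved{
		{Node: &Node{Name: "empty-node"}},
		{Node: &Node{Name: "drainable-node"}, PodsToReschedule: []*Pod{{Name: "web-1"}}},
	}
	candidateNames := make([]string, 0, len(candidates))
	for _, n := range candidates {
		candidateNames = append(candidateNames, n.Node.Name) // was n.Name when candidates were bare nodes
	}
	fmt.Println(candidateNames)
}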
60 changes: 38 additions & 22 deletions cluster-autoscaler/core/scaledown/planner/planner.go
@@ -28,6 +28,7 @@ import (
"k8s.io/autoscaler/cluster-autoscaler/core/scaledown/unneeded"
"k8s.io/autoscaler/cluster-autoscaler/core/scaledown/unremovable"
"k8s.io/autoscaler/cluster-autoscaler/processors"
"k8s.io/autoscaler/cluster-autoscaler/processors/nodes"
"k8s.io/autoscaler/cluster-autoscaler/simulator"
"k8s.io/autoscaler/cluster-autoscaler/simulator/clustersnapshot"
"k8s.io/autoscaler/cluster-autoscaler/simulator/scheduling"
@@ -61,32 +62,34 @@ type replicasInfo struct {

// Planner is responsible for deciding which nodes should be deleted during scale down.
type Planner struct {
- context *context.AutoscalingContext
- unremovableNodes *unremovable.Nodes
- unneededNodes *unneeded.Nodes
- rs removalSimulator
- actuationInjector *scheduling.HintingSimulator
- latestUpdate time.Time
- eligibilityChecker eligibilityChecker
- nodeUtilizationMap map[string]utilization.Info
- actuationStatus scaledown.ActuationStatus
- resourceLimitsFinder *resource.LimitsFinder
- cc controllerReplicasCalculator
+ context *context.AutoscalingContext
+ unremovableNodes *unremovable.Nodes
+ unneededNodes *unneeded.Nodes
+ rs removalSimulator
+ actuationInjector *scheduling.HintingSimulator
+ latestUpdate time.Time
+ eligibilityChecker eligibilityChecker
+ nodeUtilizationMap map[string]utilization.Info
+ actuationStatus scaledown.ActuationStatus
+ resourceLimitsFinder *resource.LimitsFinder
+ cc controllerReplicasCalculator
+ scaleDownSetProcessor nodes.ScaleDownSetProcessor
}

// New creates a new Planner object.
func New(context *context.AutoscalingContext, processors *processors.AutoscalingProcessors, deleteOptions simulator.NodeDeleteOptions) *Planner {
resourceLimitsFinder := resource.NewLimitsFinder(processors.CustomResourcesProcessor)
return &Planner{
- context: context,
- unremovableNodes: unremovable.NewNodes(),
- unneededNodes: unneeded.NewNodes(processors.NodeGroupConfigProcessor, resourceLimitsFinder),
- rs: simulator.NewRemovalSimulator(context.ListerRegistry, context.ClusterSnapshot, context.PredicateChecker, simulator.NewUsageTracker(), deleteOptions, true),
- actuationInjector: scheduling.NewHintingSimulator(context.PredicateChecker),
- eligibilityChecker: eligibility.NewChecker(processors.NodeGroupConfigProcessor),
- nodeUtilizationMap: make(map[string]utilization.Info),
- resourceLimitsFinder: resourceLimitsFinder,
- cc: newControllerReplicasCalculator(context.ListerRegistry),
+ context: context,
+ unremovableNodes: unremovable.NewNodes(),
+ unneededNodes: unneeded.NewNodes(processors.NodeGroupConfigProcessor, resourceLimitsFinder),
+ rs: simulator.NewRemovalSimulator(context.ListerRegistry, context.ClusterSnapshot, context.PredicateChecker, simulator.NewUsageTracker(), deleteOptions, true),
+ actuationInjector: scheduling.NewHintingSimulator(context.PredicateChecker),
+ eligibilityChecker: eligibility.NewChecker(processors.NodeGroupConfigProcessor),
+ nodeUtilizationMap: make(map[string]utilization.Info),
+ resourceLimitsFinder: resourceLimitsFinder,
+ cc: newControllerReplicasCalculator(context.ListerRegistry),
+ scaleDownSetProcessor: processors.ScaleDownSetProcessor,
}
}

@@ -133,11 +136,24 @@ func (p *Planner) NodesToDelete() (empty, needDrain []*apiv1.Node) {
return nil, nil
}
limitsLeft := p.resourceLimitsFinder.LimitsLeft(p.context, nodes, resourceLimiter, p.latestUpdate)
- empty, needDrain, unremovable := p.unneededNodes.RemovableAt(p.context, p.latestUpdate, limitsLeft, resourceLimiter.GetResources(), p.actuationStatus)
+ emptyRemovable, needDrainRemovable, unremovable := p.unneededNodes.RemovableAt(p.context, p.latestUpdate, limitsLeft, resourceLimiter.GetResources(), p.actuationStatus)
for _, u := range unremovable {
p.unremovableNodes.Add(u)
}
- // TODO: filter results with ScaleDownSetProcessor.GetNodesToRemove
+ nodesToRemove := p.scaleDownSetProcessor.GetNodesToRemove(
+ p.context,
+ // We need to pass empty nodes first, as there might be some non-empty scale
+ // downs already in progress. If we pass the empty nodes first, they will be first
+ // to get deleted, thus we decrease chances of hitting the limit on non-empty scale down.
+ append(emptyRemovable, needDrainRemovable...),
+ p.context.AutoscalingOptions.MaxScaleDownParallelism)
+ for _, nodeToRemove := range nodesToRemove {
+ if len(nodeToRemove.PodsToReschedule) > 0 {
+ needDrain = append(needDrain, nodeToRemove.Node)
+ } else {
+ empty = append(empty, nodeToRemove.Node)
+ }
+ }
return empty, needDrain
}

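The planner change above replaces the TODO with an actual call to ScaleDownSetProcessor.GetNodesToRemove: the removable candidates returned by RemovableAt are passed empty-first, capped at MaxScaleDownParallelism, and the selected NodeToBeRemoved entries are split back into empty and drain-needing node lists. Below is a runnable sketch of that selection flow using local stand-in types that mirror only the shape of the call in this diff; the capProcessor here is hypothetical and simply keeps the first maxCount candidates, whereas the real default processor may apply additional filtering.

package main

import "fmt"

// Local stand-ins mirroring the shape of the call in the diff above; the real
// types live in the cluster-autoscaler context, simulator and processors/nodes packages.
type AutoscalingContext struct{ MaxScaleDownParallelism int }
type Node struct{ Name string }
type Pod struct{ Name string }

type NodeToBeRemoved struct {
	Node             *Node
	PodsToReschedule []*Pod
}

// scaleDownSetProcessor mirrors the single method the planner uses here:
// choose at most maxCount nodes out of the ordered candidate list.
type scaleDownSetProcessor interface {
	GetNodesToRemove(ctx *AutoscalingContext, candidates []NodeToBeRemoved, maxCount int) []NodeToBeRemoved
}

// capProcessor is a hypothetical processor that keeps the first maxCount
// candidates, relying on the caller having put empty nodes first.
type capProcessor struct{}

func (capProcessor) GetNodesToRemove(_ *AutoscalingContext, candidates []NodeToBeRemoved, maxCount int) []NodeToBeRemoved {
	if len(candidates) <= maxCount {
		return candidates
	}
	return candidates[:maxCount]
}

func main() {
	ctx := &AutoscalingContext{MaxScaleDownParallelism: 2}
	emptyRemovable := []NodeToBeRemoved{
		{Node: &Node{Name: "empty-1"}},
		{Node: &Node{Name: "empty-2"}},
	}
	needDrainRemovable := []NodeToBeRemoved{
		{Node: &Node{Name: "drain-1"}, PodsToReschedule: []*Pod{{Name: "web-1"}}},
	}

	var p scaleDownSetProcessor = capProcessor{}
	// Empty nodes go first, as in the planner, so they are favored when the cap bites.
	nodesToRemove := p.GetNodesToRemove(ctx, append(emptyRemovable, needDrainRemovable...), ctx.MaxScaleDownParallelism)

	var empty, needDrain []*Node
	for _, n := range nodesToRemove {
		if len(n.PodsToReschedule) > 0 {
			needDrain = append(needDrain, n.Node)
		} else {
			empty = append(empty, n.Node)
		}
	}
	fmt.Printf("empty: %d, need drain: %d\n", len(empty), len(needDrain))
}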
9 changes: 4 additions & 5 deletions cluster-autoscaler/core/scaledown/unneeded/nodes.go
@@ -117,21 +117,20 @@ func (n *Nodes) Drop(node string) {
// RemovableAt returns all nodes that can be removed at a given time, divided
// into empty and non-empty node lists, as well as a list of nodes that were
// unneeded, but are not removable, annotated by reason.
- func (n *Nodes) RemovableAt(context *context.AutoscalingContext, ts time.Time, resourcesLeft resource.Limits, resourcesWithLimits []string, as scaledown.ActuationStatus) (empty, needDrain []*apiv1.Node, unremovable []*simulator.UnremovableNode) {
+ func (n *Nodes) RemovableAt(context *context.AutoscalingContext, ts time.Time, resourcesLeft resource.Limits, resourcesWithLimits []string, as scaledown.ActuationStatus) (empty, needDrain []simulator.NodeToBeRemoved, unremovable []*simulator.UnremovableNode) {
nodeGroupSize := utils.GetNodeGroupSizeMap(context.CloudProvider)
for nodeName, v := range n.byName {
klog.V(2).Infof("%s was unneeded for %s", nodeName, ts.Sub(v.since).String())
- node := v.ntbr.Node

if r := n.unremovableReason(context, v, ts, nodeGroupSize, resourcesLeft, resourcesWithLimits, as); r != simulator.NoReason {
- unremovable = append(unremovable, &simulator.UnremovableNode{Node: node, Reason: r})
+ unremovable = append(unremovable, &simulator.UnremovableNode{Node: v.ntbr.Node, Reason: r})
continue
}

if len(v.ntbr.PodsToReschedule) > 0 {
- needDrain = append(needDrain, node)
+ needDrain = append(needDrain, v.ntbr)
} else {
- empty = append(empty, node)
+ empty = append(empty, v.ntbr)
}
}
return