Commit b608278
Add force Daemon Sets option
BigDarkClown committed Jan 30, 2023
1 parent 4d94120 commit b608278
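
This commit adds a force Daemon Sets option: a new --force-ds flag (default false) is threaded through the TemplateNodeInfoProvider constructors into simulator.BuildNodeInfoForNode. When enabled, pods for all suitable DaemonSets that are not yet running on a node are placed onto each template NodeInfo, so scale-up is blocked for node groups too small to fit those DaemonSet pods.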
Showing 11 changed files with 247 additions and 190 deletions.
14 changes: 7 additions & 7 deletions cluster-autoscaler/core/scale_up_test.go
@@ -533,7 +533,7 @@ func runSimpleScaleUpTest(t *testing.T, config *ScaleTestConfig) *ScaleTestResul
}
context.ExpanderStrategy = expander

- nodeInfos, _ := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil).Process(&context, nodes, []*appsv1.DaemonSet{}, nil, now)
+ nodeInfos, _ := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil, false).Process(&context, nodes, []*appsv1.DaemonSet{}, nil, now)
clusterState := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, context.LogRecorder, NewBackoff())
clusterState.UpdateNodes(nodes, nodeInfos, time.Now())

@@ -694,7 +694,7 @@ func TestScaleUpUnhealthy(t *testing.T) {
assert.NoError(t, err)

nodes := []*apiv1.Node{n1, n2}
- nodeInfos, _ := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil).Process(&context, nodes, []*appsv1.DaemonSet{}, nil, now)
+ nodeInfos, _ := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil, false).Process(&context, nodes, []*appsv1.DaemonSet{}, nil, now)
clusterState := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, context.LogRecorder, NewBackoff())
clusterState.UpdateNodes(nodes, nodeInfos, time.Now())
p3 := BuildTestPod("p-new", 550, 0)
@@ -736,7 +736,7 @@ func TestScaleUpNoHelp(t *testing.T) {
assert.NoError(t, err)

nodes := []*apiv1.Node{n1}
- nodeInfos, _ := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil).Process(&context, nodes, []*appsv1.DaemonSet{}, nil, now)
+ nodeInfos, _ := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil, false).Process(&context, nodes, []*appsv1.DaemonSet{}, nil, now)
clusterState := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, context.LogRecorder, NewBackoff())
clusterState.UpdateNodes(nodes, nodeInfos, time.Now())
p3 := BuildTestPod("p-new", 500, 0)
@@ -804,7 +804,7 @@ func TestScaleUpBalanceGroups(t *testing.T) {
context, err := NewScaleTestAutoscalingContext(options, &fake.Clientset{}, listers, provider, nil, nil)
assert.NoError(t, err)

- nodeInfos, _ := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil).Process(&context, nodes, []*appsv1.DaemonSet{}, nil, now)
+ nodeInfos, _ := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil, false).Process(&context, nodes, []*appsv1.DaemonSet{}, nil, now)
clusterState := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, context.LogRecorder, NewBackoff())
clusterState.UpdateNodes(nodes, nodeInfos, time.Now())

@@ -873,7 +873,7 @@ func TestScaleUpAutoprovisionedNodeGroup(t *testing.T) {
processors.NodeGroupManager = &MockAutoprovisioningNodeGroupManager{t, 0}

nodes := []*apiv1.Node{}
- nodeInfos, _ := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil).Process(&context, nodes, []*appsv1.DaemonSet{}, nil, time.Now())
+ nodeInfos, _ := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil, false).Process(&context, nodes, []*appsv1.DaemonSet{}, nil, time.Now())

resourceManager := scaleup.NewResourceManager(processors.CustomResourcesProcessor)
scaleUpStatus, err := ScaleUp(&context, processors, clusterState, resourceManager, []*apiv1.Pod{p1}, nodes, []*appsv1.DaemonSet{}, nodeInfos, nil)
@@ -927,7 +927,7 @@ func TestScaleUpBalanceAutoprovisionedNodeGroups(t *testing.T) {
processors.NodeGroupManager = &MockAutoprovisioningNodeGroupManager{t, 2}

nodes := []*apiv1.Node{}
- nodeInfos, _ := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil).Process(&context, nodes, []*appsv1.DaemonSet{}, nil, time.Now())
+ nodeInfos, _ := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil, false).Process(&context, nodes, []*appsv1.DaemonSet{}, nil, time.Now())

resourceManager := scaleup.NewResourceManager(processors.CustomResourcesProcessor)
scaleUpStatus, err := ScaleUp(&context, processors, clusterState, resourceManager, []*apiv1.Pod{p1, p2, p3}, nodes, []*appsv1.DaemonSet{}, nodeInfos, nil)
@@ -978,7 +978,7 @@ func TestScaleUpToMeetNodeGroupMinSize(t *testing.T) {
assert.NoError(t, err)

nodes := []*apiv1.Node{n1, n2}
- nodeInfos, _ := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil).Process(&context, nodes, []*appsv1.DaemonSet{}, nil, time.Now())
+ nodeInfos, _ := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil, false).Process(&context, nodes, []*appsv1.DaemonSet{}, nil, time.Now())
processors := NewTestProcessors(&context)
clusterState := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{}, context.LogRecorder, NewBackoff())
clusterState.UpdateNodes(nodes, nodeInfos, time.Now())
8 changes: 4 additions & 4 deletions cluster-autoscaler/core/scaleup/resource_manager_test.go
@@ -68,7 +68,7 @@ func TestDeltaForNode(t *testing.T) {

ng := testCase.nodeGroupConfig
group, nodes := newNodeGroup(t, cp, ng.Name, ng.Min, ng.Max, ng.Size, ng.CPU, ng.Mem)
- nodeInfos, _ := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil).Process(&ctx, nodes, []*appsv1.DaemonSet{}, nil, time.Now())
+ nodeInfos, _ := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil, false).Process(&ctx, nodes, []*appsv1.DaemonSet{}, nil, time.Now())

rm := NewResourceManager(processors.CustomResourcesProcessor)
delta, err := rm.DeltaForNode(&ctx, nodeInfos[ng.Name], group)
@@ -109,7 +109,7 @@ func TestResourcesLeft(t *testing.T) {

ng := testCase.nodeGroupConfig
_, nodes := newNodeGroup(t, cp, ng.Name, ng.Min, ng.Max, ng.Size, ng.CPU, ng.Mem)
- nodeInfos, _ := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil).Process(&ctx, nodes, []*appsv1.DaemonSet{}, nil, time.Now())
+ nodeInfos, _ := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil, false).Process(&ctx, nodes, []*appsv1.DaemonSet{}, nil, time.Now())

rm := NewResourceManager(processors.CustomResourcesProcessor)
left, err := rm.ResourcesLeft(&ctx, nodeInfos, nodes)
@@ -160,7 +160,7 @@ func TestApplyResourcesLimits(t *testing.T) {

ng := testCase.nodeGroupConfig
group, nodes := newNodeGroup(t, cp, ng.Name, ng.Min, ng.Max, ng.Size, ng.CPU, ng.Mem)
- nodeInfos, _ := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil).Process(&ctx, nodes, []*appsv1.DaemonSet{}, nil, time.Now())
+ nodeInfos, _ := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil, false).Process(&ctx, nodes, []*appsv1.DaemonSet{}, nil, time.Now())

rm := NewResourceManager(processors.CustomResourcesProcessor)
newCount, err := rm.ApplyResourcesLimits(&ctx, testCase.newNodeCount, testCase.resourcesLeft, nodeInfos[testCase.nodeGroupConfig.Name], group)
@@ -225,7 +225,7 @@ func TestResourceManagerWithGpuResource(t *testing.T) {
assert.NoError(t, err)

nodes := []*corev1.Node{n1}
- nodeInfos, _ := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil).Process(&context, nodes, []*appsv1.DaemonSet{}, nil, time.Now())
+ nodeInfos, _ := nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil, false).Process(&context, nodes, []*appsv1.DaemonSet{}, nil, time.Now())

rm := NewResourceManager(processors.CustomResourcesProcessor)

2 changes: 1 addition & 1 deletion cluster-autoscaler/core/test/common.go
@@ -146,7 +146,7 @@ func NewTestProcessors(context *context.AutoscalingContext) *processors.Autoscal
AutoscalingStatusProcessor: &status.NoOpAutoscalingStatusProcessor{},
NodeGroupManager: nodegroups.NewDefaultNodeGroupManager(),
NodeInfoProcessor: nodeinfos.NewDefaultNodeInfoProcessor(),
- TemplateNodeInfoProvider: nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil),
+ TemplateNodeInfoProvider: nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil, false),
NodeGroupConfigProcessor: nodegroupconfig.NewDefaultNodeGroupConfigProcessor(),
CustomResourcesProcessor: customresources.NewDefaultCustomResourcesProcessor(),
ActionableClusterProcessor: actionablecluster.NewDefaultActionableClusterProcessor(),
5 changes: 3 additions & 2 deletions cluster-autoscaler/main.go
@@ -220,6 +220,7 @@ var (
maxCapacityMemoryDifferenceRatio = flag.Float64("memory-difference-ratio", config.DefaultMaxCapacityMemoryDifferenceRatio, "Maximum difference in memory capacity between two similar node groups to be considered for balancing. Value is a ratio of the smaller node group's memory capacity.")
maxFreeDifferenceRatio = flag.Float64("max-free-difference-ratio", config.DefaultMaxFreeDifferenceRatio, "Maximum difference in free resources between two similar node groups to be considered for balancing. Value is a ratio of the smaller node group's free resource.")
maxAllocatableDifferenceRatio = flag.Float64("max-allocatable-difference-ratio", config.DefaultMaxAllocatableDifferenceRatio, "Maximum difference in allocatable resources between two similar node groups to be considered for balancing. Value is a ratio of the smaller node group's allocatable resource.")
+ forceDaemonSets = flag.Bool("force-ds", false, "Blocks scale-up of node groups too small for all suitable Daemon Sets pods.")
)

func createAutoscalingOptions() config.AutoscalingOptions {
@@ -399,7 +400,7 @@ func buildAutoscaler(debuggingSnapshotter debuggingsnapshot.DebuggingSnapshotter
}

opts.Processors = ca_processors.DefaultProcessors()
- opts.Processors.TemplateNodeInfoProvider = nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nodeInfoCacheExpireTime)
+ opts.Processors.TemplateNodeInfoProvider = nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nodeInfoCacheExpireTime, *forceDaemonSets)
opts.Processors.PodListProcessor = podlistprocessor.NewDefaultPodListProcessor(opts.PredicateChecker)
if autoscalingOptions.ParallelDrain {
sdProcessor := nodes.NewScaleDownCandidatesSortingProcessor()
@@ -418,7 +419,7 @@ func buildAutoscaler(debuggingSnapshotter debuggingsnapshot.DebuggingSnapshotter
nodeInfoComparatorBuilder = nodegroupset.CreateAwsNodeInfoComparator
} else if autoscalingOptions.CloudProviderName == cloudprovider.GceProviderName {
nodeInfoComparatorBuilder = nodegroupset.CreateGceNodeInfoComparator
- opts.Processors.TemplateNodeInfoProvider = nodeinfosprovider.NewAnnotationNodeInfoProvider(nodeInfoCacheExpireTime)
+ opts.Processors.TemplateNodeInfoProvider = nodeinfosprovider.NewAnnotationNodeInfoProvider(nodeInfoCacheExpireTime, *forceDaemonSets)
}
nodeInfoComparator = nodeInfoComparatorBuilder(autoscalingOptions.BalancingExtraIgnoredLabels, autoscalingOptions.NodeGroupSetRatios)
}
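With the wiring above, opting in is a single flag on the binary, e.g. (other flags elided):

cluster-autoscaler --force-ds=true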
@@ -33,9 +33,9 @@ type AnnotationNodeInfoProvider struct {
}

// NewAnnotationNodeInfoProvider returns AnnotationNodeInfoProvider.
- func NewAnnotationNodeInfoProvider(t *time.Duration) *AnnotationNodeInfoProvider {
+ func NewAnnotationNodeInfoProvider(t *time.Duration, forceDaemonSets bool) *AnnotationNodeInfoProvider {
return &AnnotationNodeInfoProvider{
- mixedTemplateNodeInfoProvider: NewMixedTemplateNodeInfoProvider(t),
+ mixedTemplateNodeInfoProvider: NewMixedTemplateNodeInfoProvider(t, forceDaemonSets),
}
}

@@ -45,7 +45,7 @@ func (p *AnnotationNodeInfoProvider) Process(ctx *context.AutoscalingContext, no
if err != nil {
return nil, err
}
- // Add annotatios to the NodeInfo to use later in expander.
+ // Add annotations to the NodeInfo to use later in expander.
nodeGroups := ctx.CloudProvider.NodeGroups()
for _, ng := range nodeGroups {
if nodeInfo, ok := nodeInfos[ng.Id()]; ok {
@@ -44,20 +44,22 @@ type cacheItem struct {

// MixedTemplateNodeInfoProvider build nodeInfos from the cluster's nodes and node groups.
type MixedTemplateNodeInfoProvider struct {
- nodeInfoCache map[string]cacheItem
- ttl time.Duration
+ nodeInfoCache   map[string]cacheItem
+ ttl             time.Duration
+ forceDaemonSets bool
}

// NewMixedTemplateNodeInfoProvider returns a NodeInfoProvider processor building
// NodeInfos from real-world nodes when available, otherwise from node groups templates.
- func NewMixedTemplateNodeInfoProvider(t *time.Duration) *MixedTemplateNodeInfoProvider {
+ func NewMixedTemplateNodeInfoProvider(t *time.Duration, forceDaemonSets bool) *MixedTemplateNodeInfoProvider {
ttl := maxCacheExpireTime
if t != nil {
ttl = *t
}
return &MixedTemplateNodeInfoProvider{
- nodeInfoCache: make(map[string]cacheItem),
- ttl: ttl,
+ nodeInfoCache:   make(map[string]cacheItem),
+ ttl:             ttl,
+ forceDaemonSets: forceDaemonSets,
}
}

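A minimal sketch of calling the updated constructor from outside the package (the 10-second TTL is illustrative; as the constructor above shows, a nil TTL falls back to maxCacheExpireTime):

ttl := 10 * time.Second
// Build template NodeInfos from real nodes when available, and force pods
// for DaemonSets that are not yet running onto each template.
provider := nodeinfosprovider.NewMixedTemplateNodeInfoProvider(&ttl, true)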
@@ -93,10 +95,7 @@ func (p *MixedTemplateNodeInfoProvider) Process(ctx *context.AutoscalingContext,
id := nodeGroup.Id()
if _, found := result[id]; !found {
// Build nodeInfo.
- nodeInfo, err := simulator.BuildNodeInfoForNode(node, podsForNodes)
- if err != nil {
- return false, "", err
- }
+ nodeInfo, err := simulator.BuildNodeInfoForNode(node, podsForNodes[node.Name], daemonsets, p.forceDaemonSets)
sanitizedNodeInfo, err := utils.SanitizeNodeInfo(nodeInfo, id, ignoredTaints)
if err != nil {
return false, "", err
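In the hunk above, BuildNodeInfoForNode now receives only this node's scheduled pods (podsForNodes[node.Name]) together with the cluster's DaemonSets and the provider's forceDaemonSets setting, instead of the whole pods-by-node map.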
@@ -85,7 +85,7 @@ func TestGetNodeInfosForGroups(t *testing.T) {
ListerRegistry: registry,
},
}
- res, err := NewMixedTemplateNodeInfoProvider(&cacheTtl).Process(&ctx, []*apiv1.Node{justReady5, unready4, unready3, ready2, ready1}, []*appsv1.DaemonSet{}, nil, now)
+ res, err := NewMixedTemplateNodeInfoProvider(&cacheTtl, false).Process(&ctx, []*apiv1.Node{justReady5, unready4, unready3, ready2, ready1}, []*appsv1.DaemonSet{}, nil, now)
assert.NoError(t, err)
assert.Equal(t, 5, len(res))
info, found := res["ng1"]
@@ -112,7 +112,7 @@ func TestGetNodeInfosForGroups(t *testing.T) {
ListerRegistry: registry,
},
}
- res, err = NewMixedTemplateNodeInfoProvider(&cacheTtl).Process(&ctx, []*apiv1.Node{}, []*appsv1.DaemonSet{}, nil, now)
+ res, err = NewMixedTemplateNodeInfoProvider(&cacheTtl, false).Process(&ctx, []*apiv1.Node{}, []*appsv1.DaemonSet{}, nil, now)
assert.NoError(t, err)
assert.Equal(t, 0, len(res))
}
@@ -171,7 +171,7 @@ func TestGetNodeInfosForGroupsCache(t *testing.T) {
ListerRegistry: registry,
},
}
- niProcessor := NewMixedTemplateNodeInfoProvider(&cacheTtl)
+ niProcessor := NewMixedTemplateNodeInfoProvider(&cacheTtl, false)
res, err := niProcessor.Process(&ctx, []*apiv1.Node{unready4, unready3, ready2, ready1}, []*appsv1.DaemonSet{}, nil, now)
assert.NoError(t, err)
// Check results
@@ -263,7 +263,7 @@ func TestGetNodeInfosCacheExpired(t *testing.T) {
tni := schedulerframework.NewNodeInfo()
tni.SetNode(tn)
// Cache expire time is set.
- niProcessor1 := NewMixedTemplateNodeInfoProvider(&cacheTtl)
+ niProcessor1 := NewMixedTemplateNodeInfoProvider(&cacheTtl, false)
niProcessor1.nodeInfoCache = map[string]cacheItem{
"ng1": {NodeInfo: tni, added: now.Add(-2 * time.Second)},
"ng2": {NodeInfo: tni, added: now.Add(-2 * time.Second)},
@@ -277,7 +277,7 @@
assert.Equal(t, 1, len(niProcessor1.nodeInfoCache))

// Cache expire time isn't set.
- niProcessor2 := NewMixedTemplateNodeInfoProvider(nil)
+ niProcessor2 := NewMixedTemplateNodeInfoProvider(nil, false)
niProcessor2.nodeInfoCache = map[string]cacheItem{
"ng1": {NodeInfo: tni, added: now.Add(-2 * time.Second)},
"ng2": {NodeInfo: tni, added: now.Add(-2 * time.Second)},
@@ -37,6 +37,6 @@ type TemplateNodeInfoProvider interface {
}

// NewDefaultTemplateNodeInfoProvider returns a default TemplateNodeInfoProvider.
- func NewDefaultTemplateNodeInfoProvider(time *time.Duration) TemplateNodeInfoProvider {
- return NewMixedTemplateNodeInfoProvider(time)
+ func NewDefaultTemplateNodeInfoProvider(time *time.Duration, forceDaemonSets bool) TemplateNodeInfoProvider {
+ return NewMixedTemplateNodeInfoProvider(time, forceDaemonSets)
}
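The default wiring keeps the old behavior: DefaultProcessors below, like the test helpers above, passes false for forceDaemonSets, so DaemonSet forcing only happens when --force-ds is set.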
2 changes: 1 addition & 1 deletion cluster-autoscaler/processors/processors.go
@@ -86,7 +86,7 @@ func DefaultProcessors() *AutoscalingProcessors {
NodeGroupConfigProcessor: nodegroupconfig.NewDefaultNodeGroupConfigProcessor(),
CustomResourcesProcessor: customresources.NewDefaultCustomResourcesProcessor(),
ActionableClusterProcessor: actionablecluster.NewDefaultActionableClusterProcessor(),
- TemplateNodeInfoProvider: nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil),
+ TemplateNodeInfoProvider: nodeinfosprovider.NewDefaultTemplateNodeInfoProvider(nil, false),
ScaleDownCandidatesNotifier: scaledowncandidates.NewObserversList(),
}
}
64 changes: 37 additions & 27 deletions cluster-autoscaler/simulator/nodes.go
@@ -17,46 +17,56 @@ limitations under the License.
package simulator

import (
+ appsv1 "k8s.io/api/apps/v1"
apiv1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/types"
+ "k8s.io/autoscaler/cluster-autoscaler/utils/daemonset"
+ "k8s.io/autoscaler/cluster-autoscaler/utils/errors"
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"

- "k8s.io/autoscaler/cluster-autoscaler/utils/errors"
pod_util "k8s.io/autoscaler/cluster-autoscaler/utils/pod"
)

- // getRequiredPodsForNode returns a list of pods that would appear on the node if the
- // node was just created (like daemonset and manifest-run pods). It reuses kubectl
- // drain command to get the list.
- func getRequiredPodsForNode(nodename string, podsForNodes map[string][]*apiv1.Pod) ([]*apiv1.Pod, errors.AutoscalerError) {
- allPods := podsForNodes[nodename]
-
- return filterRequiredPodsForNode(allPods), nil
- }

// BuildNodeInfoForNode build a NodeInfo structure for the given node as if the node was just created.
- func BuildNodeInfoForNode(node *apiv1.Node, podsForNodes map[string][]*apiv1.Pod) (*schedulerframework.NodeInfo, errors.AutoscalerError) {
- requiredPods, err := getRequiredPodsForNode(node.Name, podsForNodes)
- if err != nil {
- return nil, err
- }
- result := schedulerframework.NewNodeInfo(requiredPods...)
- result.SetNode(node)
- return result, nil
+ func BuildNodeInfoForNode(node *apiv1.Node, scheduledPods []*apiv1.Pod, daemonsets []*appsv1.DaemonSet, forceDaemonSets bool) (*schedulerframework.NodeInfo, errors.AutoscalerError) {
+ nodeInfo := schedulerframework.NewNodeInfo()
+ nodeInfo.SetNode(node)
+ return addExpectedPods(nodeInfo, scheduledPods, daemonsets, forceDaemonSets)
}

- func filterRequiredPodsForNode(allPods []*apiv1.Pod) []*apiv1.Pod {
- var selectedPods []*apiv1.Pod
-
- for id, pod := range allPods {
- // Ignore pod in deletion phase
+ func addExpectedPods(nodeInfo *schedulerframework.NodeInfo, scheduledPods []*apiv1.Pod, daemonsets []*appsv1.DaemonSet, forceDaemonSets bool) (*schedulerframework.NodeInfo, errors.AutoscalerError) {
+ runningDS := make(map[types.UID]bool)
+ for _, pod := range scheduledPods {
+ // Ignore scheduled pods in deletion phase
if pod.DeletionTimestamp != nil {
continue
}

+ // Add scheduled mirror and DS pods
if pod_util.IsMirrorPod(pod) || pod_util.IsDaemonSetPod(pod) {
- selectedPods = append(selectedPods, allPods[id])
+ nodeInfo.AddPod(pod)
}
+ // Mark DS pods as running
+ controllerRef := metav1.GetControllerOf(pod)
+ if controllerRef != nil && controllerRef.Kind == "DaemonSet" {
+ runningDS[controllerRef.UID] = true
+ }
}

- return selectedPods
+ // Add all pending DS pods if force scheduling DS
+ if forceDaemonSets {
+ var pendingDS []*appsv1.DaemonSet
+ for _, ds := range daemonsets {
+ if !runningDS[ds.UID] {
+ pendingDS = append(pendingDS, ds)
+ }
+ }
+ daemonPods, err := daemonset.GetDaemonSetPodsForNode(nodeInfo, pendingDS)
+ if err != nil {
+ return nil, errors.ToAutoscalerError(errors.InternalError, err)
+ }
+ for _, pod := range daemonPods {
+ nodeInfo.AddPod(pod)
+ }
+ }
+ return nodeInfo, nil
}
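
To illustrate the new signature, a hedged usage sketch (n1, dsPod, ds1, and ds2 are hypothetical fixtures; dsPod is a running pod whose controller reference points at ds1):

// With forceDaemonSets=true, the returned NodeInfo carries dsPod plus a
// generated pod for ds2, the DaemonSet not yet running on the node.
nodeInfo, err := simulator.BuildNodeInfoForNode(n1, []*apiv1.Pod{dsPod}, []*appsv1.DaemonSet{ds1, ds2}, true)
if err != nil {
// err is an errors.AutoscalerError, e.g. when generating DaemonSet pods fails
}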
