Extract separate package for core/utils.go and split into multiple files. #2407

Merged · 2 commits · Oct 22, 2019
11 changes: 6 additions & 5 deletions cluster-autoscaler/core/filter_out_schedulable.go
@@ -21,6 +21,7 @@ import (
"time"

"k8s.io/autoscaler/cluster-autoscaler/context"
"k8s.io/autoscaler/cluster-autoscaler/core/utils"
"k8s.io/autoscaler/cluster-autoscaler/estimator"
"k8s.io/autoscaler/cluster-autoscaler/metrics"
"k8s.io/autoscaler/cluster-autoscaler/processors/pods"
@@ -103,7 +104,7 @@ func filterOutSchedulableByPacking(unschedulableCandidates []*apiv1.Pod, nodes [
allScheduled []*apiv1.Pod, predicateChecker *simulator.PredicateChecker,
expendablePodsPriorityCutoff int, nodesExist bool) []*apiv1.Pod {
var unschedulablePods []*apiv1.Pod
nonExpendableScheduled := filterOutExpendablePods(allScheduled, expendablePodsPriorityCutoff)
nonExpendableScheduled := utils.FilterOutExpendablePods(allScheduled, expendablePodsPriorityCutoff)
nodeNameToNodeInfo := schedulerutil.CreateNodeNameToInfoMap(nonExpendableScheduled, nodes)

sort.Slice(unschedulableCandidates, func(i, j int) bool {
@@ -138,13 +139,13 @@ func filterOutSchedulableSimple(unschedulableCandidates []*apiv1.Pod, nodes []*a
func filterOutSchedulableSimple(unschedulableCandidates []*apiv1.Pod, nodes []*apiv1.Node, allScheduled []*apiv1.Pod,
predicateChecker *simulator.PredicateChecker, expendablePodsPriorityCutoff int) []*apiv1.Pod {
var unschedulablePods []*apiv1.Pod
nonExpendableScheduled := filterOutExpendablePods(allScheduled, expendablePodsPriorityCutoff)
nonExpendableScheduled := utils.FilterOutExpendablePods(allScheduled, expendablePodsPriorityCutoff)
nodeNameToNodeInfo := schedulerutil.CreateNodeNameToInfoMap(nonExpendableScheduled, nodes)
podSchedulable := make(podSchedulableMap)
podSchedulable := make(utils.PodSchedulableMap)
loggingQuota := glogx.PodsLoggingQuota()

for _, pod := range unschedulableCandidates {
cachedError, found := podSchedulable.get(pod)
cachedError, found := podSchedulable.Get(pod)
// Try to get result from cache.
if found {
if cachedError != nil {
Expand All @@ -166,7 +167,7 @@ func filterOutSchedulableSimple(unschedulableCandidates []*apiv1.Pod, nodes []*a
} else {
glogx.V(4).UpTo(loggingQuota).Infof("Pod %s marked as unschedulable can be scheduled on %s. Ignoring in scale up.", pod.Name, nodeName)
}
podSchedulable.set(pod, predicateError)
podSchedulable.Set(pod, predicateError)
}

glogx.V(4).Over(loggingQuota).Infof("%v other pods marked as unschedulable can be scheduled.", -loggingQuota.Left())
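The helpers that used to be package-private in `core` are now consumed through the new `k8s.io/autoscaler/cluster-autoscaler/core/utils` package, so these call sites switch from `filterOutExpendablePods` and `podSchedulableMap.get`/`set` to the exported `utils.FilterOutExpendablePods` and `utils.PodSchedulableMap.Get`/`Set`. The extracted package itself is not shown in this excerpt; the sketch below is only an assumed shape of the exported cache API, inferred from the call sites above (the key derivation and the plain `error` result type are illustrative, not the package's actual implementation).

```go
package utils

import (
	apiv1 "k8s.io/api/core/v1"
)

// PodSchedulableMap caches the scheduling verdict for pods so that equivalent
// pods do not have to be re-checked against every node.
// NOTE: illustrative sketch; the real package may key entries on owner
// reference and pod-spec equivalence and store a richer predicate error type.
type PodSchedulableMap map[string]error

// key derives a cache key for a pod; simplified to namespace/name here.
func key(pod *apiv1.Pod) string {
	return pod.Namespace + "/" + pod.Name
}

// Get returns the cached scheduling error (nil means "schedulable") and
// whether an entry was found for the pod.
func (m PodSchedulableMap) Get(pod *apiv1.Pod) (error, bool) {
	err, found := m[key(pod)]
	return err, found
}

// Set records the scheduling outcome for the pod.
func (m PodSchedulableMap) Set(pod *apiv1.Pod, err error) {
	m[key(pod)] = err
}
```

With the exported form, filter_out_schedulable.go builds the cache with `make(utils.PodSchedulableMap)` and consults it via `Get`/`Set`, exactly as the hunks above show.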
14 changes: 5 additions & 9 deletions cluster-autoscaler/core/scale_down.go
@@ -27,6 +27,7 @@ import (
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
"k8s.io/autoscaler/cluster-autoscaler/clusterstate"
"k8s.io/autoscaler/cluster-autoscaler/context"
core_utils "k8s.io/autoscaler/cluster-autoscaler/core/utils"
"k8s.io/autoscaler/cluster-autoscaler/metrics"
"k8s.io/autoscaler/cluster-autoscaler/simulator"
"k8s.io/autoscaler/cluster-autoscaler/utils"
@@ -214,7 +215,7 @@ func calculateScaleDownCoresMemoryTotal(nodes []*apiv1.Node, timestamp time.Time
// Nodes being deleted do not count towards total cluster resources
continue
}
cores, memory := getNodeCoresAndMemory(node)
cores, memory := core_utils.GetNodeCoresAndMemory(node)

coresTotal += cores
memoryTotal += memory
@@ -297,7 +298,7 @@ func copyScaleDownResourcesLimits(source scaleDownResourcesLimits) scaleDownReso
func computeScaleDownResourcesDelta(cp cloudprovider.CloudProvider, node *apiv1.Node, nodeGroup cloudprovider.NodeGroup, resourcesWithLimits []string) (scaleDownResourcesDelta, errors.AutoscalerError) {
resultScaleDownDelta := make(scaleDownResourcesDelta)

nodeCPU, nodeMemory := getNodeCoresAndMemory(node)
nodeCPU, nodeMemory := core_utils.GetNodeCoresAndMemory(node)
resultScaleDownDelta[cloudprovider.ResourceNameCores] = nodeCPU
resultScaleDownDelta[cloudprovider.ResourceNameMemory] = nodeMemory

@@ -384,11 +385,6 @@ func (sd *ScaleDown) CleanUp(timestamp time.Time) {
sd.usageTracker.CleanUp(timestamp.Add(-sd.context.ScaleDownUnneededTime))
}

// GetCandidatesForScaleDown gets candidates for scale down.
func (sd *ScaleDown) GetCandidatesForScaleDown() []*apiv1.Node {
return sd.unneededNodesList
}

// CleanUpUnneededNodes clears the list of unneeded nodes.
func (sd *ScaleDown) CleanUpUnneededNodes() {
sd.unneededNodesList = make([]*apiv1.Node, 0)
@@ -416,7 +412,7 @@ func (sd *ScaleDown) UpdateUnneededNodes(

currentlyUnneededNodes := make([]*apiv1.Node, 0)
// Only scheduled non expendable pods and pods waiting for lower priority pods preemption can prevent node delete.
nonExpendablePods := filterOutExpendablePods(pods, sd.context.ExpendablePodsPriorityCutoff)
nonExpendablePods := core_utils.FilterOutExpendablePods(pods, sd.context.ExpendablePodsPriorityCutoff)
nodeNameToNodeInfo := scheduler_util.CreateNodeNameToInfoMap(nonExpendablePods, destinationNodes)
utilizationMap := make(map[string]simulator.UtilizationInfo)

@@ -811,7 +807,7 @@ func (sd *ScaleDown) TryToScaleDown(allNodes []*apiv1.Node, pods []*apiv1.Pod, p

findNodesToRemoveStart := time.Now()
// Only scheduled non expendable pods are taken into account and have to be moved.
nonExpendablePods := filterOutExpendablePods(pods, sd.context.ExpendablePodsPriorityCutoff)
nonExpendablePods := core_utils.FilterOutExpendablePods(pods, sd.context.ExpendablePodsPriorityCutoff)
// We look for only 1 node so new hints may be incomplete.
nodesToRemove, _, _, err := simulator.FindNodesToRemove(candidates, nodesWithoutMaster, nonExpendablePods, sd.context.ListerRegistry,
sd.context.PredicateChecker, 1, false,
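Because scale_down.go already imports `k8s.io/autoscaler/cluster-autoscaler/utils`, the new package comes in under the `core_utils` alias, and the resource and priority helpers it uses are now exported. Their approximate shapes, inferred only from the call sites in this diff, might look like the sketch below; the resource-extraction details and the priority check are assumptions for illustration, not the package's actual code.

```go
package utils

import (
	apiv1 "k8s.io/api/core/v1"
)

// GetNodeCoresAndMemory returns the node's CPU (in cores) and memory (in
// bytes) as int64 values; callers add them into cluster-wide totals.
// NOTE: reading Allocatable here is an assumption made for this sketch.
func GetNodeCoresAndMemory(node *apiv1.Node) (int64, int64) {
	cpu := node.Status.Allocatable[apiv1.ResourceCPU]
	mem := node.Status.Allocatable[apiv1.ResourceMemory]
	return cpu.Value(), mem.Value()
}

// FilterOutExpendablePods drops pods whose priority falls below the configured
// cutoff, so only non-expendable pods count as scheduled or block a scale-down.
func FilterOutExpendablePods(pods []*apiv1.Pod, expendablePodsPriorityCutoff int) []*apiv1.Pod {
	var result []*apiv1.Pod
	for _, pod := range pods {
		if pod.Spec.Priority == nil || int(*pod.Spec.Priority) >= expendablePodsPriorityCutoff {
			result = append(result, pod)
		}
	}
	return result
}
```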
145 changes: 63 additions & 82 deletions cluster-autoscaler/core/scale_down_test.go
@@ -32,6 +32,7 @@ import (
"k8s.io/autoscaler/cluster-autoscaler/clusterstate"
"k8s.io/autoscaler/cluster-autoscaler/config"
"k8s.io/autoscaler/cluster-autoscaler/context"
"k8s.io/autoscaler/cluster-autoscaler/core/utils"
"k8s.io/autoscaler/cluster-autoscaler/utils/kubernetes"
kube_util "k8s.io/autoscaler/cluster-autoscaler/utils/kubernetes"
. "k8s.io/autoscaler/cluster-autoscaler/utils/test"
@@ -49,8 +50,6 @@ import (
"k8s.io/klog"
)

const nothingReturned = "Nothing returned"

func TestFindUnneededNodes(t *testing.T) {
p1 := BuildTestPod("p1", 100, 0)
p1.Spec.NodeName = "n1"
@@ -618,20 +617,20 @@ func TestDeleteNode(t *testing.T) {
// verify
if scenario.expectedDeletion {
assert.NoError(t, result.Err)
assert.Equal(t, n1.Name, getStringFromChanImmediately(deletedNodes))
assert.Equal(t, n1.Name, utils.GetStringFromChanImmediately(deletedNodes))
} else {
assert.NotNil(t, result.Err)
}
assert.Equal(t, nothingReturned, getStringFromChanImmediately(deletedNodes))
assert.Equal(t, utils.NothingReturned, utils.GetStringFromChanImmediately(deletedNodes))
assert.Equal(t, scenario.expectedResultType, result.ResultType)

taintedUpdate := fmt.Sprintf("%s-%s", n1.Name, []string{deletetaint.ToBeDeletedTaint})
assert.Equal(t, taintedUpdate, getStringFromChan(updatedNodes))
assert.Equal(t, taintedUpdate, utils.GetStringFromChan(updatedNodes))
if !scenario.expectedDeletion {
untaintedUpdate := fmt.Sprintf("%s-%s", n1.Name, []string{})
assert.Equal(t, untaintedUpdate, getStringFromChanImmediately(updatedNodes))
assert.Equal(t, untaintedUpdate, utils.GetStringFromChanImmediately(updatedNodes))
}
assert.Equal(t, nothingReturned, getStringFromChanImmediately(updatedNodes))
assert.Equal(t, utils.NothingReturned, utils.GetStringFromChanImmediately(updatedNodes))
})
}
}
@@ -663,8 +662,8 @@ func TestDrainNode(t *testing.T) {
_, err := drainNode(n1, []*apiv1.Pod{p1, p2}, fakeClient, kube_util.CreateEventRecorder(fakeClient), 20, 5*time.Second, 0*time.Second, PodEvictionHeadroom)
assert.NoError(t, err)
deleted := make([]string, 0)
deleted = append(deleted, getStringFromChan(deletedPods))
deleted = append(deleted, getStringFromChan(deletedPods))
deleted = append(deleted, utils.GetStringFromChan(deletedPods))
deleted = append(deleted, utils.GetStringFromChan(deletedPods))
sort.Strings(deleted)
assert.Equal(t, p1.Name, deleted[0])
assert.Equal(t, p2.Name, deleted[1])
@@ -706,8 +705,8 @@ func TestDrainNodeWithRescheduled(t *testing.T) {
_, err := drainNode(n1, []*apiv1.Pod{p1, p2}, fakeClient, kube_util.CreateEventRecorder(fakeClient), 20, 5*time.Second, 0*time.Second, PodEvictionHeadroom)
assert.NoError(t, err)
deleted := make([]string, 0)
deleted = append(deleted, getStringFromChan(deletedPods))
deleted = append(deleted, getStringFromChan(deletedPods))
deleted = append(deleted, utils.GetStringFromChan(deletedPods))
deleted = append(deleted, utils.GetStringFromChan(deletedPods))
sort.Strings(deleted)
assert.Equal(t, p1.Name, deleted[0])
assert.Equal(t, p2.Name, deleted[1])
@@ -755,9 +754,9 @@ func TestDrainNodeWithRetries(t *testing.T) {
_, err := drainNode(n1, []*apiv1.Pod{p1, p2, p3}, fakeClient, kube_util.CreateEventRecorder(fakeClient), 20, 5*time.Second, 0*time.Second, PodEvictionHeadroom)
assert.NoError(t, err)
deleted := make([]string, 0)
deleted = append(deleted, getStringFromChan(deletedPods))
deleted = append(deleted, getStringFromChan(deletedPods))
deleted = append(deleted, getStringFromChan(deletedPods))
deleted = append(deleted, utils.GetStringFromChan(deletedPods))
deleted = append(deleted, utils.GetStringFromChan(deletedPods))
deleted = append(deleted, utils.GetStringFromChan(deletedPods))
sort.Strings(deleted)
assert.Equal(t, p1.Name, deleted[0])
assert.Equal(t, p2.Name, deleted[1])
@@ -952,8 +951,8 @@ func TestScaleDown(t *testing.T) {
waitForDeleteToFinish(t, scaleDown)
assert.NoError(t, err)
assert.Equal(t, status.ScaleDownNodeDeleteStarted, scaleDownStatus.Result)
assert.Equal(t, n1.Name, getStringFromChan(deletedNodes))
assert.Equal(t, n1.Name, getStringFromChan(updatedNodes))
assert.Equal(t, n1.Name, utils.GetStringFromChan(deletedNodes))
assert.Equal(t, n1.Name, utils.GetStringFromChan(updatedNodes))
}

func waitForDeleteToFinish(t *testing.T, sd *ScaleDown) {
@@ -1051,13 +1050,13 @@ func TestScaleDownEmptyMinCoresLimitHit(t *testing.T) {

func TestScaleDownEmptyMinMemoryLimitHit(t *testing.T) {
options := defaultScaleDownOptions
options.MinMemoryTotal = 4000 * MiB
options.MinMemoryTotal = 4000 * utils.MiB
config := &scaleTestConfig{
nodes: []nodeConfig{
{"n1", 2000, 1000 * MiB, 0, true, "ng1"},
{"n2", 1000, 1000 * MiB, 0, true, "ng1"},
{"n3", 1000, 1000 * MiB, 0, true, "ng1"},
{"n4", 1000, 3000 * MiB, 0, true, "ng1"},
{"n1", 2000, 1000 * utils.MiB, 0, true, "ng1"},
{"n2", 1000, 1000 * utils.MiB, 0, true, "ng1"},
{"n3", 1000, 1000 * utils.MiB, 0, true, "ng1"},
{"n4", 1000, 3000 * utils.MiB, 0, true, "ng1"},
},
options: options,
expectedScaleDowns: []string{"n1", "n2"},
@@ -1067,20 +1066,20 @@

func TestScaleDownEmptyTempNodesLimits(t *testing.T) {
options := defaultScaleDownOptions
options.MinMemoryTotal = 4000 * MiB
options.MinMemoryTotal = 4000 * utils.MiB
config := &scaleTestConfig{
nodes: []nodeConfig{
{"n1", 1000, 1000 * MiB, 0, true, "ng1"},
{"n2", 1000, 1000 * MiB, 0, true, "ng1"},
{"n3", 1000, 1000 * MiB, 0, true, "ng1"},
{"n4", 1000, 1000 * MiB, 0, true, "ng1"},
{"n5", 1000, 1000 * MiB, 0, true, "ng1"},
{"n6", 1000, 1000 * MiB, 0, true, "ng1"},

{"n7", 1000, 1000 * MiB, 0, true, "ng2"},
{"n8", 1000, 1000 * MiB, 0, true, "ng2"},
{"n9", 1000, 1000 * MiB, 0, true, "ng2"},
{"n10", 1000, 1000 * MiB, 0, true, "ng2"},
{"n1", 1000, 1000 * utils.MiB, 0, true, "ng1"},
{"n2", 1000, 1000 * utils.MiB, 0, true, "ng1"},
{"n3", 1000, 1000 * utils.MiB, 0, true, "ng1"},
{"n4", 1000, 1000 * utils.MiB, 0, true, "ng1"},
{"n5", 1000, 1000 * utils.MiB, 0, true, "ng1"},
{"n6", 1000, 1000 * utils.MiB, 0, true, "ng1"},

{"n7", 1000, 1000 * utils.MiB, 0, true, "ng2"},
{"n8", 1000, 1000 * utils.MiB, 0, true, "ng2"},
{"n9", 1000, 1000 * utils.MiB, 0, true, "ng2"},
{"n10", 1000, 1000 * utils.MiB, 0, true, "ng2"},
},
options: options,
expectedScaleDowns: []string{"n1", "n2", "n3", "n7"},
@@ -1091,22 +1090,22 @@

func TestScaleDownEmptyTempNodesMinSize(t *testing.T) {
options := defaultScaleDownOptions
options.MinMemoryTotal = 1000 * MiB
options.MinMemoryTotal = 1000 * utils.MiB
config := &scaleTestConfig{
nodes: []nodeConfig{
{"n1", 1000, 1000 * MiB, 0, true, "ng1"},
{"n2", 1000, 1000 * MiB, 0, true, "ng1"},
{"n3", 1000, 1000 * MiB, 0, true, "ng1"},
{"n4", 1000, 1000 * MiB, 0, true, "ng1"},

{"n6", 1000, 1000 * MiB, 0, true, "ng2"},
{"n7", 1000, 1000 * MiB, 0, true, "ng2"},
{"n8", 1000, 1000 * MiB, 0, true, "ng2"},
{"n9", 1000, 1000 * MiB, 0, true, "ng2"},

{"n10", 1000, 1000 * MiB, 0, true, "ng3"},
{"n11", 1000, 1000 * MiB, 0, true, "ng3"},
{"n12", 1000, 1000 * MiB, 0, true, "ng3"},
{"n1", 1000, 1000 * utils.MiB, 0, true, "ng1"},
{"n2", 1000, 1000 * utils.MiB, 0, true, "ng1"},
{"n3", 1000, 1000 * utils.MiB, 0, true, "ng1"},
{"n4", 1000, 1000 * utils.MiB, 0, true, "ng1"},

{"n6", 1000, 1000 * utils.MiB, 0, true, "ng2"},
{"n7", 1000, 1000 * utils.MiB, 0, true, "ng2"},
{"n8", 1000, 1000 * utils.MiB, 0, true, "ng2"},
{"n9", 1000, 1000 * utils.MiB, 0, true, "ng2"},

{"n10", 1000, 1000 * utils.MiB, 0, true, "ng3"},
{"n11", 1000, 1000 * utils.MiB, 0, true, "ng3"},
{"n12", 1000, 1000 * utils.MiB, 0, true, "ng3"},
},
options: options,
expectedScaleDowns: []string{"n7", "n8", "n10", "n11"},
@@ -1131,12 +1130,12 @@
}
config := &scaleTestConfig{
nodes: []nodeConfig{
{"n1", 1000, 1000 * MiB, 1, true, "ng1"},
{"n2", 1000, 1000 * MiB, 1, true, "ng1"},
{"n3", 1000, 1000 * MiB, 1, true, "ng1"},
{"n4", 1000, 1000 * MiB, 1, true, "ng1"},
{"n5", 1000, 1000 * MiB, 1, true, "ng1"},
{"n6", 1000, 1000 * MiB, 1, true, "ng1"},
{"n1", 1000, 1000 * utils.MiB, 1, true, "ng1"},
{"n2", 1000, 1000 * utils.MiB, 1, true, "ng1"},
{"n3", 1000, 1000 * utils.MiB, 1, true, "ng1"},
{"n4", 1000, 1000 * utils.MiB, 1, true, "ng1"},
{"n5", 1000, 1000 * utils.MiB, 1, true, "ng1"},
{"n6", 1000, 1000 * utils.MiB, 1, true, "ng1"},
},
options: options,
expectedScaleDowns: []string{"n1", "n2"},
@@ -1267,8 +1266,8 @@ func simpleScaleDownEmpty(t *testing.T, config *scaleTestConfig) {
// Report only up to 10 extra nodes found.
deleted := make([]string, 0, len(config.expectedScaleDowns)+10)
for i := 0; i < len(config.expectedScaleDowns)+10; i++ {
d := getStringFromChan(deletedNodes)
if d == nothingReturned { // a closed channel yields empty value
d := utils.GetStringFromChan(deletedNodes)
if d == utils.NothingReturned { // a closed channel yields empty value
break
}
deleted = append(deleted, d)
@@ -1355,7 +1354,7 @@ func TestNoScaleDownUnready(t *testing.T) {

assert.NoError(t, err)
assert.Equal(t, status.ScaleDownNodeDeleteStarted, scaleDownStatus.Result)
assert.Equal(t, n1.Name, getStringFromChan(deletedNodes))
assert.Equal(t, n1.Name, utils.GetStringFromChan(deletedNodes))
}

func TestScaleDownNoMove(t *testing.T) {
@@ -1440,24 +1439,6 @@ func TestScaleDownNoMove(t *testing.T) {
assert.Equal(t, status.ScaleDownNoUnneeded, scaleDownStatus.Result)
}

func getStringFromChan(c chan string) string {
select {
case val := <-c:
return val
case <-time.After(100 * time.Millisecond):
return nothingReturned
}
}

func getStringFromChanImmediately(c chan string) string {
select {
case val := <-c:
return val
default:
return nothingReturned
}
}

func getCountOfChan(c chan string) int {
count := 0
for {
@@ -1472,13 +1453,13 @@

func TestCalculateCoresAndMemoryTotal(t *testing.T) {
nodeConfigs := []nodeConfig{
{"n1", 2000, 7500 * MiB, 0, true, "ng1"},
{"n2", 2000, 7500 * MiB, 0, true, "ng1"},
{"n3", 2000, 7500 * MiB, 0, true, "ng1"},
{"n4", 12000, 8000 * MiB, 0, true, "ng1"},
{"n5", 16000, 7500 * MiB, 0, true, "ng1"},
{"n6", 8000, 6000 * MiB, 0, true, "ng1"},
{"n7", 6000, 16000 * MiB, 0, true, "ng1"},
{"n1", 2000, 7500 * utils.MiB, 0, true, "ng1"},
{"n2", 2000, 7500 * utils.MiB, 0, true, "ng1"},
{"n3", 2000, 7500 * utils.MiB, 0, true, "ng1"},
{"n4", 12000, 8000 * utils.MiB, 0, true, "ng1"},
{"n5", 16000, 7500 * utils.MiB, 0, true, "ng1"},
{"n6", 8000, 6000 * utils.MiB, 0, true, "ng1"},
{"n7", 6000, 16000 * utils.MiB, 0, true, "ng1"},
}
nodes := make([]*apiv1.Node, len(nodeConfigs))
for i, n := range nodeConfigs {
@@ -1498,7 +1479,7 @@
coresTotal, memoryTotal := calculateScaleDownCoresMemoryTotal(nodes, time.Now())

assert.Equal(t, int64(42), coresTotal)
assert.Equal(t, int64(44000*MiB), memoryTotal)
assert.Equal(t, int64(44000*utils.MiB), memoryTotal)
}

func TestFilterOutMasters(t *testing.T) {
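The test-side channel helpers deleted above (`getStringFromChan`, `getStringFromChanImmediately`, and the `nothingReturned` sentinel) reappear as exported utilities, which is why the assertions now reference `utils.GetStringFromChan`, `utils.GetStringFromChanImmediately`, and `utils.NothingReturned`; the tests also take `MiB` from the same package. The bodies below simply mirror the deleted test code in exported form; the exact file they landed in, and the value assumed for `MiB`, are not shown in this excerpt.

```go
package utils

import "time"

// MiB is assumed here to be 2^20 bytes.
const MiB = 1024 * 1024

// NothingReturned is the sentinel value returned when no string is available.
const NothingReturned = "Nothing returned"

// GetStringFromChan waits briefly for a value and falls back to the sentinel.
func GetStringFromChan(c chan string) string {
	select {
	case val := <-c:
		return val
	case <-time.After(100 * time.Millisecond):
		return NothingReturned
	}
}

// GetStringFromChanImmediately returns a value only if one is already buffered,
// otherwise it returns the sentinel without blocking.
func GetStringFromChanImmediately(c chan string) string {
	select {
	case val := <-c:
		return val
	default:
		return NothingReturned
	}
}
```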