From 7095cc48cf2f410badebca826acc473d47d36259 Mon Sep 17 00:00:00 2001 From: Joachim Bartosik Date: Thu, 30 May 2019 16:04:18 +0200 Subject: [PATCH] Capping to pod limit range --- .../pkg/utils/vpa/capping.go | 77 +++++- .../pkg/utils/vpa/capping_test.go | 217 ++++++++++++++++++ 2 files changed, 293 insertions(+), 1 deletion(-) diff --git a/vertical-pod-autoscaler/pkg/utils/vpa/capping.go b/vertical-pod-autoscaler/pkg/utils/vpa/capping.go index b10fbacc19a4..7dab851a9969 100644 --- a/vertical-pod-autoscaler/pkg/utils/vpa/capping.go +++ b/vertical-pod-autoscaler/pkg/utils/vpa/capping.go @@ -67,7 +67,11 @@ func (c *cappingRecommendationProcessor) Apply( } updatedRecommendations := []vpa_types.RecommendedContainerResources{} containerToAnnotationsMap := ContainerToAnnotationsMap{} - for _, containerRecommendation := range podRecommendation.ContainerRecommendations { + limitAdjustedRecommendation, err := c.capProportionallyToPodLimitRange(podRecommendation.ContainerRecommendations, pod) + if err != nil { + return nil, nil, err + } + for _, containerRecommendation := range limitAdjustedRecommendation { container := getContainer(containerRecommendation.ContainerName, pod) if container == nil { @@ -300,3 +304,74 @@ func getBoundaryRecommendation(recommendation apiv1.ResourceList, container apiv apiv1.ResourceMemory: *memMaxRequest, } } + +func applyPodLimitRange(resources []vpa_types.RecommendedContainerResources, + pod *apiv1.Pod, limitRange apiv1.LimitRangeItem, resourceName apiv1.ResourceName, + get func(vpa_types.RecommendedContainerResources) *apiv1.ResourceList) []vpa_types.RecommendedContainerResources { + minLimit := limitRange.Min[resourceName] + maxLimit := limitRange.Max[resourceName] + defaultLimit := limitRange.Default[resourceName] + + var sumLimit resource.Quantity + for i, item := range pod.Spec.Containers { + if i >= len(resources) { + continue + } + limit := item.Resources.Limits[resourceName] + request := item.Resources.Requests[resourceName] + 
recommendation := (*get(resources[i]))[resourceName] + containerLimit, _ := getProportionalResourceLimit(resourceName, &limit, &request, &recommendation, &defaultLimit) + if containerLimit != nil { + sumLimit.Add(*containerLimit) + } + } + if minLimit.Cmp(sumLimit) <= 0 && (maxLimit.IsZero() || maxLimit.Cmp(sumLimit) >= 0) { + return resources + } + var targetTotalLimit resource.Quantity + if minLimit.Cmp(sumLimit) > 0 { + targetTotalLimit = minLimit + } + if !maxLimit.IsZero() && maxLimit.Cmp(sumLimit) < 0 { + targetTotalLimit = maxLimit + } + for i := range pod.Spec.Containers { + if i >= len(resources) { + continue + } + limit := (*get(resources[i]))[resourceName] + cappedContainerRequest, _ := scaleQuantityProportionally(&limit, &sumLimit, &targetTotalLimit) + (*get(resources[i]))[resourceName] = *cappedContainerRequest + } + return resources +} + +func (c *cappingRecommendationProcessor) capProportionallyToPodLimitRange( + containerRecommendations []vpa_types.RecommendedContainerResources, pod *apiv1.Pod) ([]vpa_types.RecommendedContainerResources, error) { + podLimitRange, err := c.limitsRangeCalculator.GetPodLimitRangeItem(pod.Namespace) + if err != nil { + return nil, fmt.Errorf("error obtaining limit range: %s", err) + } + if podLimitRange == nil { + return containerRecommendations, nil + } + getLower := func(r vpa_types.RecommendedContainerResources) *apiv1.ResourceList { + return &r.LowerBound + } + getTarget := func(r vpa_types.RecommendedContainerResources) *apiv1.ResourceList { + return &r.Target + } + getUpper := func(r vpa_types.RecommendedContainerResources) *apiv1.ResourceList { + return &r.UpperBound + } + containerRecommendations = applyPodLimitRange(containerRecommendations, pod, *podLimitRange, apiv1.ResourceCPU, getLower) + containerRecommendations = applyPodLimitRange(containerRecommendations, pod, *podLimitRange, apiv1.ResourceMemory, getLower) + + containerRecommendations = applyPodLimitRange(containerRecommendations, pod, *podLimitRange, apiv1.ResourceCPU, getTarget) + 
containerRecommendations = applyPodLimitRange(containerRecommendations, pod, *podLimitRange, apiv1.ResourceMemory, getTarget) + + containerRecommendations = applyPodLimitRange(containerRecommendations, pod, *podLimitRange, apiv1.ResourceCPU, getUpper) + containerRecommendations = applyPodLimitRange(containerRecommendations, pod, *podLimitRange, apiv1.ResourceMemory, getUpper) + + return containerRecommendations, nil +} diff --git a/vertical-pod-autoscaler/pkg/utils/vpa/capping_test.go b/vertical-pod-autoscaler/pkg/utils/vpa/capping_test.go index decb40e47ed8..5068376ba36f 100644 --- a/vertical-pod-autoscaler/pkg/utils/vpa/capping_test.go +++ b/vertical-pod-autoscaler/pkg/utils/vpa/capping_test.go @@ -289,3 +289,220 @@ func TestApplyCapsToLimitRange(t *testing.T) { assert.ElementsMatch(t, []string{"cpu capped to fit Max in container LimitRange", "memory capped to fit Min in container LimitRange"}, annotations["container"]) assert.Equal(t, expectedRecommendation, *processedRecommendation) } + +func TestApplyPodLimitRange(t *testing.T) { + tests := []struct { + name string + resources []vpa_types.RecommendedContainerResources + pod apiv1.Pod + limitRange apiv1.LimitRangeItem + resourceName apiv1.ResourceName + get func(vpa_types.RecommendedContainerResources) *apiv1.ResourceList + expect []vpa_types.RecommendedContainerResources + }{ + { + name: "cap target cpu to max", + resources: []vpa_types.RecommendedContainerResources{ + { + ContainerName: "container1", + Target: apiv1.ResourceList{ + apiv1.ResourceCPU: resource.MustParse("1"), + }, + }, + { + ContainerName: "container2", + Target: apiv1.ResourceList{ + apiv1.ResourceCPU: resource.MustParse("1"), + }, + }, + }, + pod: apiv1.Pod{ + Spec: apiv1.PodSpec{ + Containers: []apiv1.Container{ + { + Resources: apiv1.ResourceRequirements{ + Requests: apiv1.ResourceList{ + apiv1.ResourceCPU: resource.MustParse("1"), + }, + Limits: apiv1.ResourceList{ + apiv1.ResourceCPU: resource.MustParse("1"), + }, + }, + }, + { + 
Resources: apiv1.ResourceRequirements{ + Requests: apiv1.ResourceList{ + apiv1.ResourceCPU: resource.MustParse("1"), + }, + Limits: apiv1.ResourceList{ + apiv1.ResourceCPU: resource.MustParse("1"), + }, + }, + }, + }, + }, + }, + limitRange: apiv1.LimitRangeItem{ + Max: apiv1.ResourceList{ + apiv1.ResourceCPU: resource.MustParse("1"), + }, + }, + resourceName: apiv1.ResourceCPU, + get: func(rcr vpa_types.RecommendedContainerResources) *apiv1.ResourceList { + return &rcr.Target + }, + expect: []vpa_types.RecommendedContainerResources{ + { + ContainerName: "container1", + Target: apiv1.ResourceList{ + apiv1.ResourceCPU: *resource.NewMilliQuantity(500, resource.DecimalSI), + }, + }, + { + ContainerName: "container2", + Target: apiv1.ResourceList{ + apiv1.ResourceCPU: *resource.NewMilliQuantity(500, resource.DecimalSI), + }, + }, + }, + }, + { + name: "cap lower bound cpu to max", + resources: []vpa_types.RecommendedContainerResources{ + { + ContainerName: "container1", + LowerBound: apiv1.ResourceList{ + apiv1.ResourceCPU: resource.MustParse("1"), + }, + }, + { + ContainerName: "container2", + LowerBound: apiv1.ResourceList{ + apiv1.ResourceCPU: resource.MustParse("1"), + }, + }, + }, + pod: apiv1.Pod{ + Spec: apiv1.PodSpec{ + Containers: []apiv1.Container{ + { + Resources: apiv1.ResourceRequirements{ + Requests: apiv1.ResourceList{ + apiv1.ResourceCPU: resource.MustParse("1"), + }, + Limits: apiv1.ResourceList{ + apiv1.ResourceCPU: resource.MustParse("1"), + }, + }, + }, + { + Resources: apiv1.ResourceRequirements{ + Requests: apiv1.ResourceList{ + apiv1.ResourceCPU: resource.MustParse("1"), + }, + Limits: apiv1.ResourceList{ + apiv1.ResourceCPU: resource.MustParse("1"), + }, + }, + }, + }, + }, + }, + limitRange: apiv1.LimitRangeItem{ + Max: apiv1.ResourceList{ + apiv1.ResourceCPU: resource.MustParse("1"), + }, + }, + resourceName: apiv1.ResourceCPU, + get: func(rcr vpa_types.RecommendedContainerResources) *apiv1.ResourceList { + return &rcr.LowerBound + }, + 
expect: []vpa_types.RecommendedContainerResources{ + { + ContainerName: "container1", + LowerBound: apiv1.ResourceList{ + apiv1.ResourceCPU: *resource.NewMilliQuantity(500, resource.DecimalSI), + }, + }, + { + ContainerName: "container2", + LowerBound: apiv1.ResourceList{ + apiv1.ResourceCPU: *resource.NewMilliQuantity(500, resource.DecimalSI), + }, + }, + }, + }, + { + name: "cap upper bound mem to min", + resources: []vpa_types.RecommendedContainerResources{ + { + ContainerName: "container1", + UpperBound: apiv1.ResourceList{ + apiv1.ResourceMemory: resource.MustParse("1G"), + }, + }, + { + ContainerName: "container2", + UpperBound: apiv1.ResourceList{ + apiv1.ResourceMemory: resource.MustParse("1G"), + }, + }, + }, + pod: apiv1.Pod{ + Spec: apiv1.PodSpec{ + Containers: []apiv1.Container{ + { + Resources: apiv1.ResourceRequirements{ + Requests: apiv1.ResourceList{ + apiv1.ResourceMemory: resource.MustParse("1"), + }, + Limits: apiv1.ResourceList{ + apiv1.ResourceMemory: resource.MustParse("1"), + }, + }, + }, + { + Resources: apiv1.ResourceRequirements{ + Requests: apiv1.ResourceList{ + apiv1.ResourceMemory: resource.MustParse("1"), + }, + Limits: apiv1.ResourceList{ + apiv1.ResourceMemory: resource.MustParse("1"), + }, + }, + }, + }, + }, + }, + limitRange: apiv1.LimitRangeItem{ + Min: apiv1.ResourceList{ + apiv1.ResourceMemory: resource.MustParse("4G"), + }, + }, + resourceName: apiv1.ResourceMemory, + get: func(rcr vpa_types.RecommendedContainerResources) *apiv1.ResourceList { + return &rcr.UpperBound + }, + expect: []vpa_types.RecommendedContainerResources{ + { + ContainerName: "container1", + UpperBound: apiv1.ResourceList{ + apiv1.ResourceMemory: resource.MustParse("2000000000000m"), + }, + }, + { + ContainerName: "container2", + UpperBound: apiv1.ResourceList{ + apiv1.ResourceMemory: resource.MustParse("2000000000000m"), + }, + }, + }, + }, + } + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + got := applyPodLimitRange(tc.resources, 
&tc.pod, tc.limitRange, tc.resourceName, tc.get) + assert.Equal(t, tc.expect, got) + }) + } +}