From 9cb45438c190b723068b5acfc019ec6c46e11a01 Mon Sep 17 00:00:00 2001
From: Joachim Bartosik
Date: Thu, 6 Jun 2019 16:51:12 +0200
Subject: [PATCH] Fix "raises request according to pod min limit set in LimitRange" test

The admission controller was crashing because it was trying to divide by 0,
so it didn't make any changes to the pod. After I fixed that, only 1 pod
would fit in the cluster, so I lowered the recommendation.
---
 vertical-pod-autoscaler/e2e/v1beta2/admission_controller.go | 6 +++---
 vertical-pod-autoscaler/pkg/utils/vpa/capping.go            | 6 +++++-
 2 files changed, 8 insertions(+), 4 deletions(-)

diff --git a/vertical-pod-autoscaler/e2e/v1beta2/admission_controller.go b/vertical-pod-autoscaler/e2e/v1beta2/admission_controller.go
index 49a5d12fbc58..dd9711a863c8 100644
--- a/vertical-pod-autoscaler/e2e/v1beta2/admission_controller.go
+++ b/vertical-pod-autoscaler/e2e/v1beta2/admission_controller.go
@@ -260,14 +260,14 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", func() {
 			{
 				ContainerName: "hamster",
 				Target: apiv1.ResourceList{
-					apiv1.ResourceCPU:    ParseQuantityOrDie("250m"),
+					apiv1.ResourceCPU:    ParseQuantityOrDie("120m"),
 					apiv1.ResourceMemory: ParseQuantityOrDie("100Mi"), // memory is downscaled
 				},
 			},
 			{
 				ContainerName: "hamster2",
 				Target: apiv1.ResourceList{
-					apiv1.ResourceCPU:    ParseQuantityOrDie("250m"),
+					apiv1.ResourceCPU:    ParseQuantityOrDie("120m"),
 					apiv1.ResourceMemory: ParseQuantityOrDie("100Mi"), // memory is downscaled
 				},
 			},
@@ -289,7 +289,7 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", func() {
 		// request that limitrange allows.
 		// Limit to request ratio should stay unchanged.
 		for _, pod := range podList.Items {
-			gomega.Expect(*pod.Spec.Containers[0].Resources.Requests.Cpu()).To(gomega.Equal(ParseQuantityOrDie("250m")))
+			gomega.Expect(*pod.Spec.Containers[0].Resources.Requests.Cpu()).To(gomega.Equal(ParseQuantityOrDie("120m")))
 			gomega.Expect(*pod.Spec.Containers[0].Resources.Requests.Memory()).To(gomega.Equal(ParseQuantityOrDie("250Mi")))
 			gomega.Expect(pod.Spec.Containers[0].Resources.Limits.Cpu().MilliValue()).To(gomega.BeNumerically(">=", 75))
 			gomega.Expect(pod.Spec.Containers[0].Resources.Limits.Memory().Value()).To(gomega.BeNumerically(">=", 250*1024*1024))
diff --git a/vertical-pod-autoscaler/pkg/utils/vpa/capping.go b/vertical-pod-autoscaler/pkg/utils/vpa/capping.go
index 15d5c06d49e7..2744156a2edd 100644
--- a/vertical-pod-autoscaler/pkg/utils/vpa/capping.go
+++ b/vertical-pod-autoscaler/pkg/utils/vpa/capping.go
@@ -344,7 +344,7 @@ func applyPodLimitRange(resources []vpa_types.RecommendedContainerResources,
 		return resources
 	}
 
-	if minLimit.Cmp(sumRecommendation) > 0 {
+	if minLimit.Cmp(sumRecommendation) > 0 && !sumLimit.IsZero() {
 		for i := range pod.Spec.Containers {
 			request := (*fieldGetter(resources[i]))[resourceName]
 			cappedContainerRequest, _ := scaleQuantityProportionally(&request, &sumRecommendation, &minLimit)
@@ -353,6 +353,10 @@ func applyPodLimitRange(resources []vpa_types.RecommendedContainerResources,
 		return resources
 	}
 
+	if sumLimit.IsZero() {
+		return resources
+	}
+
 	var targetTotalLimit resource.Quantity
 	if minLimit.Cmp(sumLimit) > 0 {
 		targetTotalLimit = minLimit
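
Reviewer note: below is a minimal, standalone sketch (not the VPA code itself)
of the failure mode the capping.go guards prevent. scaleProportionally is a
hypothetical stand-in for scaleQuantityProportionally; only the
k8s.io/apimachinery resource.Quantity API is assumed. When no container in the
pod declares a limit, sumLimit is zero, and rescaling by a ratio whose
denominator is that sum would divide by zero.

package main

import (
	"fmt"
	"math/big"

	"k8s.io/apimachinery/pkg/api/resource"
)

// scaleProportionally rescales value by the ratio target/base using integer
// milli-unit math. With a zero base it returns the value unchanged instead
// of dividing by zero -- the same early-out idea the patch adds.
func scaleProportionally(value, base, target *resource.Quantity) *resource.Quantity {
	if base.IsZero() {
		return value // nothing to scale against; leave the request as-is
	}
	scaled := big.NewInt(value.MilliValue())
	scaled.Mul(scaled, big.NewInt(target.MilliValue()))
	scaled.Div(scaled, big.NewInt(base.MilliValue()))
	return resource.NewMilliQuantity(scaled.Int64(), value.Format)
}

func main() {
	request := resource.MustParse("120m")
	sumLimit := resource.MustParse("0")    // pod containers set no limits
	minLimit := resource.MustParse("300m") // LimitRange pod-level minimum
	// Without the IsZero guard this call would panic on division by zero.
	fmt.Println(scaleProportionally(&request, &sumLimit, &minLimit)) // prints 120m
}

Returning the recommendation unchanged (rather than erroring) matches the
patch's behavior: a pod with no limits has no limit-to-request ratio to
preserve, so the admission controller simply leaves the resources as they are.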