diff --git a/vertical-pod-autoscaler/deploy/vpa-rbac.yaml b/vertical-pod-autoscaler/deploy/vpa-rbac.yaml index ff1fe8690a83..6aa52d167b2d 100644 --- a/vertical-pod-autoscaler/deploy/vpa-rbac.yaml +++ b/vertical-pod-autoscaler/deploy/vpa-rbac.yaml @@ -22,6 +22,7 @@ rules: resources: - pods - nodes + - limitranges verbs: - get - list @@ -231,6 +232,7 @@ rules: - pods - configmaps - nodes + - limitranges verbs: - get - list diff --git a/vertical-pod-autoscaler/e2e/v1beta1/admission_controller.go b/vertical-pod-autoscaler/e2e/v1beta1/admission_controller.go index a250f3fdd5bb..cef71e89445e 100644 --- a/vertical-pod-autoscaler/e2e/v1beta1/admission_controller.go +++ b/vertical-pod-autoscaler/e2e/v1beta1/admission_controller.go @@ -63,7 +63,9 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", func() { d := NewHamsterDeploymentWithGuaranteedResources(f, ParseQuantityOrDie("100m") /*cpu*/, ParseQuantityOrDie("100Mi") /*memory*/) ginkgo.By("Setting up a VPA CRD") - vpaCRD := NewVPA(f, "hamster-vpa", hamsterTargetRef) + vpaCRD := NewVPA(f, "hamster-vpa", &metav1.LabelSelector{ + MatchLabels: d.Spec.Template.Labels, + }) vpaCRD.Status.Recommendation = &vpa_types.RecommendedPodResources{ ContainerRecommendations: []vpa_types.RecommendedContainerResources{{ ContainerName: "hamster", @@ -88,6 +90,123 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", func() { } }) + ginkgo.It("keeps limits to request ratio constant", func() { + d := NewHamsterDeploymentWithResourcesAndLimits(f, + ParseQuantityOrDie("100m") /*cpu request*/, ParseQuantityOrDie("100Mi"), /*memory request*/ + ParseQuantityOrDie("150m") /*cpu limit*/, ParseQuantityOrDie("200Mi") /*memory limit*/) + + ginkgo.By("Setting up a VPA CRD") + vpaCRD := NewVPA(f, "hamster-vpa", &metav1.LabelSelector{ + MatchLabels: d.Spec.Template.Labels, + }) + vpaCRD.Status.Recommendation = &vpa_types.RecommendedPodResources{ + ContainerRecommendations: []vpa_types.RecommendedContainerResources{{ + ContainerName: "hamster", + Target: apiv1.ResourceList{ + apiv1.ResourceCPU: ParseQuantityOrDie("250m"), + apiv1.ResourceMemory: ParseQuantityOrDie("200Mi"), + }, + }}, + } + InstallVPA(f, vpaCRD) + + ginkgo.By("Setting up a hamster deployment") + podList := startDeploymentPods(f, d) + + // Originally Pods had 100m CPU, 100Mi of memory, but admission controller + // should change it to 250m CPU and 200Mi of memory. Limits to request ratio should stay unchanged. 
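+		// With those ratios preserved, the expected limits are 375m CPU (250m * 1.5) and 400Mi of memory (200Mi * 2).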
+ for _, pod := range podList.Items { + gomega.Expect(*pod.Spec.Containers[0].Resources.Requests.Cpu()).To(gomega.Equal(ParseQuantityOrDie("250m"))) + gomega.Expect(*pod.Spec.Containers[0].Resources.Requests.Memory()).To(gomega.Equal(ParseQuantityOrDie("200Mi"))) + gomega.Expect(float64(pod.Spec.Containers[0].Resources.Limits.Cpu().MilliValue()) / float64(pod.Spec.Containers[0].Resources.Requests.Cpu().MilliValue())).To(gomega.BeNumerically("~", 1.5)) + gomega.Expect(float64(pod.Spec.Containers[0].Resources.Limits.Memory().Value()) / float64(pod.Spec.Containers[0].Resources.Requests.Memory().Value())).To(gomega.BeNumerically("~", 2.)) + } + }) + + ginkgo.It("caps request according to max limit set in LimitRange", func() { + d := NewHamsterDeploymentWithResourcesAndLimits(f, + ParseQuantityOrDie("100m") /*cpu request*/, ParseQuantityOrDie("100Mi"), /*memory request*/ + ParseQuantityOrDie("150m") /*cpu limit*/, ParseQuantityOrDie("200Mi") /*memory limit*/) + + ginkgo.By("Setting up a VPA CRD") + vpaCRD := NewVPA(f, "hamster-vpa", &metav1.LabelSelector{ + MatchLabels: d.Spec.Template.Labels, + }) + vpaCRD.Status.Recommendation = &vpa_types.RecommendedPodResources{ + ContainerRecommendations: []vpa_types.RecommendedContainerResources{{ + ContainerName: "hamster", + Target: apiv1.ResourceList{ + apiv1.ResourceCPU: ParseQuantityOrDie("250m"), + apiv1.ResourceMemory: ParseQuantityOrDie("200Mi"), + }, + }}, + } + InstallVPA(f, vpaCRD) + + // Max CPU limit is 300m and ratio is 1.5, so max request is 200m, while + // recommendation is 250m + // Max memory limit is 1Gi and ratio is 2., so max request is 0.5Gi + InstallLimitRangeWithMax(f, "300m", "1Gi") + + ginkgo.By("Setting up a hamster deployment") + podList := startDeploymentPods(f, d) + + // Originally Pods had 100m CPU, 100Mi of memory, but admission controller + // should change it to 200m CPU (as this is the recommendation + // capped according to max limit in LimitRange) and 200Mi of memory, + // which is uncapped. Limit to request ratio should stay unchanged. 
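+		// CPU: 200m request * 1.5 gives a 300m limit, exactly the LimitRange max; memory: 200Mi request * 2 gives a 400Mi limit, below the 1Gi max.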
+ for _, pod := range podList.Items { + gomega.Expect(*pod.Spec.Containers[0].Resources.Requests.Cpu()).To(gomega.Equal(ParseQuantityOrDie("200m"))) + gomega.Expect(*pod.Spec.Containers[0].Resources.Requests.Memory()).To(gomega.Equal(ParseQuantityOrDie("200Mi"))) + gomega.Expect(pod.Spec.Containers[0].Resources.Limits.Cpu().MilliValue()).To(gomega.BeNumerically("<=", 300)) + gomega.Expect(pod.Spec.Containers[0].Resources.Limits.Memory().Value()).To(gomega.BeNumerically("<=", 1024*1024*1024)) + gomega.Expect(float64(pod.Spec.Containers[0].Resources.Limits.Cpu().MilliValue()) / float64(pod.Spec.Containers[0].Resources.Requests.Cpu().MilliValue())).To(gomega.BeNumerically("~", 1.5)) + gomega.Expect(float64(pod.Spec.Containers[0].Resources.Limits.Memory().Value()) / float64(pod.Spec.Containers[0].Resources.Requests.Memory().Value())).To(gomega.BeNumerically("~", 2.)) + } + }) + + ginkgo.It("raises request according to min limit set in LimitRange", func() { + d := NewHamsterDeploymentWithResourcesAndLimits(f, + ParseQuantityOrDie("100m") /*cpu request*/, ParseQuantityOrDie("200Mi"), /*memory request*/ + ParseQuantityOrDie("150m") /*cpu limit*/, ParseQuantityOrDie("400Mi") /*memory limit*/) + + ginkgo.By("Setting up a VPA CRD") + vpaCRD := NewVPA(f, "hamster-vpa", &metav1.LabelSelector{ + MatchLabels: d.Spec.Template.Labels, + }) + vpaCRD.Status.Recommendation = &vpa_types.RecommendedPodResources{ + ContainerRecommendations: []vpa_types.RecommendedContainerResources{{ + ContainerName: "hamster", + Target: apiv1.ResourceList{ + apiv1.ResourceCPU: ParseQuantityOrDie("250m"), + apiv1.ResourceMemory: ParseQuantityOrDie("100Mi"), // memory is downscaled + }, + }}, + } + InstallVPA(f, vpaCRD) + + // Min CPU limit is 75m and ratio is 1.5, so min request is 50m + // Min memory limit is 250Mi and ratio is 2., so min request is 125Mi, while + // recommendation is 100Mi. + InstallLimitRangeWithMin(f, "75m", "250Mi") + + ginkgo.By("Setting up a hamster deployment") + podList := startDeploymentPods(f, d) + + // Originally Pods had 100m CPU, 200Mi of memory, but admission controller + // should change it to 250m CPU and 125Mi of memory, since this is the lowest + // request that limitrange allows. + // Limit to request ratio should stay unchanged. 
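+		// Memory: 125Mi request * 2 gives a 250Mi limit, exactly the LimitRange min; CPU: 250m request * 1.5 gives a 375m limit, above the 75m min.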
+ for _, pod := range podList.Items { + gomega.Expect(*pod.Spec.Containers[0].Resources.Requests.Cpu()).To(gomega.Equal(ParseQuantityOrDie("250m"))) + gomega.Expect(*pod.Spec.Containers[0].Resources.Requests.Memory()).To(gomega.Equal(ParseQuantityOrDie("125Mi"))) + gomega.Expect(pod.Spec.Containers[0].Resources.Limits.Cpu().MilliValue()).To(gomega.BeNumerically(">=", 75)) + gomega.Expect(pod.Spec.Containers[0].Resources.Limits.Memory().Value()).To(gomega.BeNumerically(">=", 250*1024*1024)) + gomega.Expect(float64(pod.Spec.Containers[0].Resources.Limits.Cpu().MilliValue()) / float64(pod.Spec.Containers[0].Resources.Requests.Cpu().MilliValue())).To(gomega.BeNumerically("~", 1.5)) + gomega.Expect(float64(pod.Spec.Containers[0].Resources.Limits.Memory().Value()) / float64(pod.Spec.Containers[0].Resources.Requests.Memory().Value())).To(gomega.BeNumerically("~", 2.)) + } + }) + ginkgo.It("caps request to max set in VPA", func() { d := NewHamsterDeploymentWithResources(f, ParseQuantityOrDie("100m") /*cpu*/, ParseQuantityOrDie("100Mi") /*memory*/) diff --git a/vertical-pod-autoscaler/e2e/v1beta1/common.go b/vertical-pod-autoscaler/e2e/v1beta1/common.go index 98db1b4d0e08..fe516f4a77bf 100644 --- a/vertical-pod-autoscaler/e2e/v1beta1/common.go +++ b/vertical-pod-autoscaler/e2e/v1beta1/common.go @@ -136,6 +136,17 @@ func NewHamsterDeploymentWithGuaranteedResources(f *framework.Framework, cpuQuan return d } +// NewHamsterDeploymentWithResourcesAndLimits creates a simple hamster deployment with specific +// resource requests and limits for e2e test purposes. +func NewHamsterDeploymentWithResourcesAndLimits(f *framework.Framework, cpuQuantityRequest, memoryQuantityRequest, cpuQuantityLimit, memoryQuantityLimit resource.Quantity) *appsv1.Deployment { + d := NewHamsterDeploymentWithResources(f, cpuQuantityRequest, memoryQuantityRequest) + d.Spec.Template.Spec.Containers[0].Resources.Limits = apiv1.ResourceList{ + apiv1.ResourceCPU: cpuQuantityLimit, + apiv1.ResourceMemory: memoryQuantityLimit, + } + return d +} + // GetHamsterPods returns running hamster pods (matched by hamsterLabels) func GetHamsterPods(f *framework.Framework) (*apiv1.PodList, error) { label := labels.SelectorFromSet(labels.Set(hamsterLabels)) @@ -342,3 +353,61 @@ func WaitForConditionPresent(c *vpa_clientset.Clientset, vpa *vpa_types.Vertical return false }) } + +func installLimitRange(f *framework.Framework, minCpuLimit, minMemoryLimit, maxCpuLimit, maxMemoryLimit *resource.Quantity) { + lr := &apiv1.LimitRange{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: f.Namespace.Name, + Name: "hamster-lr", + }, + Spec: apiv1.LimitRangeSpec{ + Limits: []apiv1.LimitRangeItem{}, + }, + } + + if maxMemoryLimit != nil || maxCpuLimit != nil { + lrItem := apiv1.LimitRangeItem{ + Type: apiv1.LimitTypeContainer, + Max: apiv1.ResourceList{}, + } + if maxCpuLimit != nil { + lrItem.Max[apiv1.ResourceCPU] = *maxCpuLimit + } + if maxMemoryLimit != nil { + lrItem.Max[apiv1.ResourceMemory] = *maxMemoryLimit + } + lr.Spec.Limits = append(lr.Spec.Limits, lrItem) + } + + if minMemoryLimit != nil || minCpuLimit != nil { + lrItem := apiv1.LimitRangeItem{ + Type: apiv1.LimitTypeContainer, + Min: apiv1.ResourceList{}, + } + if minCpuLimit != nil { + lrItem.Min[apiv1.ResourceCPU] = *minCpuLimit + } + if minMemoryLimit != nil { + lrItem.Min[apiv1.ResourceMemory] = *minMemoryLimit + } + lr.Spec.Limits = append(lr.Spec.Limits, lrItem) + } + _, err := f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).Create(lr) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) +} + +// 
InstallLimitRangeWithMax installs a LimitRange with a maximum limit for CPU and memory.
+func InstallLimitRangeWithMax(f *framework.Framework, maxCpuLimit, maxMemoryLimit string) {
+	ginkgo.By(fmt.Sprintf("Setting up LimitRange with max limits - CPU: %v, memory: %v", maxCpuLimit, maxMemoryLimit))
+	maxCpuLimitQuantity := ParseQuantityOrDie(maxCpuLimit)
+	maxMemoryLimitQuantity := ParseQuantityOrDie(maxMemoryLimit)
+	installLimitRange(f, nil, nil, &maxCpuLimitQuantity, &maxMemoryLimitQuantity)
+}
+
+// InstallLimitRangeWithMin installs a LimitRange with a minimum limit for CPU and memory.
+func InstallLimitRangeWithMin(f *framework.Framework, minCpuLimit, minMemoryLimit string) {
+	ginkgo.By(fmt.Sprintf("Setting up LimitRange with min limits - CPU: %v, memory: %v", minCpuLimit, minMemoryLimit))
+	minCpuLimitQuantity := ParseQuantityOrDie(minCpuLimit)
+	minMemoryLimitQuantity := ParseQuantityOrDie(minMemoryLimit)
+	installLimitRange(f, &minCpuLimitQuantity, &minMemoryLimitQuantity, nil, nil)
+}
diff --git a/vertical-pod-autoscaler/e2e/v1beta1/updater.go b/vertical-pod-autoscaler/e2e/v1beta1/updater.go
index 33b66e1d49d2..d61949fd5ee3 100644
--- a/vertical-pod-autoscaler/e2e/v1beta1/updater.go
+++ b/vertical-pod-autoscaler/e2e/v1beta1/updater.go
@@ -98,6 +98,44 @@ var _ = UpdaterE2eDescribe("Updater", func() {
 		gomega.Expect(err).NotTo(gomega.HaveOccurred())
 		gomega.Expect(evictedCount >= permissiveMaxUnavailable).To(gomega.BeTrue())
 	})
+
+	ginkgo.It("observes max in LimitRange", func() {
+		ginkgo.By("Setting up a hamster deployment")
+		d := NewHamsterDeploymentWithResourcesAndLimits(f,
+			ParseQuantityOrDie("100m") /*cpu request*/, ParseQuantityOrDie("200Mi"), /*memory request*/
+			ParseQuantityOrDie("300m") /*cpu limit*/, ParseQuantityOrDie("400Mi") /*memory limit*/)
+		podList := startDeploymentPods(f, d)
+
+		ginkgo.By("Setting up a VPA CRD")
+		SetupVPA(f, "200m", vpa_types.UpdateModeAuto)
+
+		// Max CPU limit is 300m and the ratio is 3., so the max request is 100m, while the
+		// recommendation is 200m.
+		// Max memory limit is 1T and the ratio is 2., so the max request is 0.5T.
+		InstallLimitRangeWithMax(f, "300m", "1T")
+
+		ginkgo.By(fmt.Sprintf("Waiting for pods to be evicted, hoping it won't happen, sleep for %s", VpaEvictionTimeout.String()))
+		CheckNoPodsEvicted(f, MakePodSet(podList))
+	})
+
+	ginkgo.It("observes min in LimitRange", func() {
+		ginkgo.By("Setting up a hamster deployment")
+		d := NewHamsterDeploymentWithResourcesAndLimits(f,
+			ParseQuantityOrDie("100m") /*cpu request*/, ParseQuantityOrDie("200Mi"), /*memory request*/
+			ParseQuantityOrDie("300m") /*cpu limit*/, ParseQuantityOrDie("400Mi") /*memory limit*/)
+		podList := startDeploymentPods(f, d)
+
+		ginkgo.By("Setting up a VPA CRD")
+		SetupVPA(f, "50m", vpa_types.UpdateModeAuto)
+
+		// Min CPU limit is 300m and the ratio is 3., so the min request is 100m, while the
+		// recommendation is 50m.
+		// Min memory limit is 0 and the ratio is 2., so the min request is 0.
+		InstallLimitRangeWithMin(f, "300m", "0")
+
+		ginkgo.By(fmt.Sprintf("Waiting for pods to be evicted, hoping it won't happen, sleep for %s", VpaEvictionTimeout.String()))
+		CheckNoPodsEvicted(f, MakePodSet(podList))
+	})
 })
 
 func testEvictsPods(f *framework.Framework, controllerKind string) {
diff --git a/vertical-pod-autoscaler/e2e/v1beta2/admission_controller.go b/vertical-pod-autoscaler/e2e/v1beta2/admission_controller.go
index cb7f4d92074f..dd9711a863c8 100644
--- a/vertical-pod-autoscaler/e2e/v1beta2/admission_controller.go
+++ b/vertical-pod-autoscaler/e2e/v1beta2/admission_controller.go
@@ -85,6
+85,219 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", func() { } }) + ginkgo.It("keeps limits to request ratio constant", func() { + d := NewHamsterDeploymentWithResourcesAndLimits(f, + ParseQuantityOrDie("100m") /*cpu request*/, ParseQuantityOrDie("100Mi"), /*memory request*/ + ParseQuantityOrDie("150m") /*cpu limit*/, ParseQuantityOrDie("200Mi") /*memory limit*/) + + ginkgo.By("Setting up a VPA CRD") + vpaCRD := NewVPA(f, "hamster-vpa", hamsterTargetRef) + vpaCRD.Status.Recommendation = &vpa_types.RecommendedPodResources{ + ContainerRecommendations: []vpa_types.RecommendedContainerResources{{ + ContainerName: "hamster", + Target: apiv1.ResourceList{ + apiv1.ResourceCPU: ParseQuantityOrDie("250m"), + apiv1.ResourceMemory: ParseQuantityOrDie("200Mi"), + }, + }}, + } + InstallVPA(f, vpaCRD) + + ginkgo.By("Setting up a hamster deployment") + podList := startDeploymentPods(f, d) + + // Originally Pods had 100m CPU, 100Mi of memory, but admission controller + // should change it to 250m CPU and 200Mi of memory. Limits to request ratio should stay unchanged. + for _, pod := range podList.Items { + gomega.Expect(*pod.Spec.Containers[0].Resources.Requests.Cpu()).To(gomega.Equal(ParseQuantityOrDie("250m"))) + gomega.Expect(*pod.Spec.Containers[0].Resources.Requests.Memory()).To(gomega.Equal(ParseQuantityOrDie("200Mi"))) + gomega.Expect(float64(pod.Spec.Containers[0].Resources.Limits.Cpu().MilliValue()) / float64(pod.Spec.Containers[0].Resources.Requests.Cpu().MilliValue())).To(gomega.BeNumerically("~", 1.5)) + gomega.Expect(float64(pod.Spec.Containers[0].Resources.Limits.Memory().Value()) / float64(pod.Spec.Containers[0].Resources.Requests.Memory().Value())).To(gomega.BeNumerically("~", 2.)) + } + }) + + ginkgo.It("caps request according to container max limit set in LimitRange", func() { + d := NewHamsterDeploymentWithResourcesAndLimits(f, + ParseQuantityOrDie("100m") /*cpu request*/, ParseQuantityOrDie("100Mi"), /*memory request*/ + ParseQuantityOrDie("150m") /*cpu limit*/, ParseQuantityOrDie("200Mi") /*memory limit*/) + + ginkgo.By("Setting up a VPA CRD") + vpaCRD := NewVPA(f, "hamster-vpa", hamsterTargetRef) + vpaCRD.Status.Recommendation = &vpa_types.RecommendedPodResources{ + ContainerRecommendations: []vpa_types.RecommendedContainerResources{{ + ContainerName: "hamster", + Target: apiv1.ResourceList{ + apiv1.ResourceCPU: ParseQuantityOrDie("250m"), + apiv1.ResourceMemory: ParseQuantityOrDie("200Mi"), + }, + }}, + } + InstallVPA(f, vpaCRD) + + // Max CPU limit is 300m and ratio is 1.5, so max request is 200m, while + // recommendation is 250m + // Max memory limit is 1Gi and ratio is 2., so max request is 0.5Gi + InstallLimitRangeWithMax(f, "300m", "1Gi", apiv1.LimitTypeContainer) + + ginkgo.By("Setting up a hamster deployment") + podList := startDeploymentPods(f, d) + + // Originally Pods had 100m CPU, 100Mi of memory, but admission controller + // should change it to 200m CPU (as this is the recommendation + // capped according to max limit in LimitRange) and 200Mi of memory, + // which is uncapped. Limit to request ratio should stay unchanged. 
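+		// CPU: 200m request * 1.5 gives a 300m limit, exactly the LimitRange max; memory: 200Mi request * 2 gives a 400Mi limit, below the 1Gi max.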
+			for _, pod := range podList.Items {
+				gomega.Expect(*pod.Spec.Containers[0].Resources.Requests.Cpu()).To(gomega.Equal(ParseQuantityOrDie("200m")))
+				gomega.Expect(*pod.Spec.Containers[0].Resources.Requests.Memory()).To(gomega.Equal(ParseQuantityOrDie("200Mi")))
+				gomega.Expect(pod.Spec.Containers[0].Resources.Limits.Cpu().MilliValue()).To(gomega.BeNumerically("<=", 300))
+				gomega.Expect(pod.Spec.Containers[0].Resources.Limits.Memory().Value()).To(gomega.BeNumerically("<=", 1024*1024*1024))
+				gomega.Expect(float64(pod.Spec.Containers[0].Resources.Limits.Cpu().MilliValue()) / float64(pod.Spec.Containers[0].Resources.Requests.Cpu().MilliValue())).To(gomega.BeNumerically("~", 1.5))
+				gomega.Expect(float64(pod.Spec.Containers[0].Resources.Limits.Memory().Value()) / float64(pod.Spec.Containers[0].Resources.Requests.Memory().Value())).To(gomega.BeNumerically("~", 2.))
+			}
+		})
+
+		ginkgo.It("raises request according to container min limit set in LimitRange", func() {
+			d := NewHamsterDeploymentWithResourcesAndLimits(f,
+				ParseQuantityOrDie("100m") /*cpu request*/, ParseQuantityOrDie("200Mi"), /*memory request*/
+				ParseQuantityOrDie("150m") /*cpu limit*/, ParseQuantityOrDie("400Mi") /*memory limit*/)
+
+			ginkgo.By("Setting up a VPA CRD")
+			vpaCRD := NewVPA(f, "hamster-vpa", hamsterTargetRef)
+			vpaCRD.Status.Recommendation = &vpa_types.RecommendedPodResources{
+				ContainerRecommendations: []vpa_types.RecommendedContainerResources{{
+					ContainerName: "hamster",
+					Target: apiv1.ResourceList{
+						apiv1.ResourceCPU:    ParseQuantityOrDie("250m"),
+						apiv1.ResourceMemory: ParseQuantityOrDie("100Mi"), // memory is downscaled
+					},
+				}},
+			}
+			InstallVPA(f, vpaCRD)
+
+			// Min CPU from the LimitRange is 50m and the ratio is 1.5. Min applies to both limit and request,
+			// so the min request is 50m and the min limit is 75m.
+			// Min memory limit is 250Mi and it applies to both limit and request. The recommendation is 100Mi,
+			// so the request should be scaled up to 250Mi.
+			InstallLimitRangeWithMin(f, "50m", "250Mi", apiv1.LimitTypeContainer)
+
+			ginkgo.By("Setting up a hamster deployment")
+			podList := startDeploymentPods(f, d)
+
+			// Originally Pods had 100m CPU and 200Mi of memory, but the admission controller
+			// should change that to 250m CPU and 250Mi of memory, since 250Mi is the lowest
+			// request that the LimitRange allows.
+			// Limit to request ratio should stay unchanged.
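+			// Memory is raised to the 250Mi container minimum (500Mi limit at ratio 2); CPU stays at the recommended 250m (375m limit).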
+ for _, pod := range podList.Items { + gomega.Expect(*pod.Spec.Containers[0].Resources.Requests.Cpu()).To(gomega.Equal(ParseQuantityOrDie("250m"))) + gomega.Expect(*pod.Spec.Containers[0].Resources.Requests.Memory()).To(gomega.Equal(ParseQuantityOrDie("250Mi"))) + gomega.Expect(pod.Spec.Containers[0].Resources.Limits.Cpu().MilliValue()).To(gomega.BeNumerically(">=", 75)) + gomega.Expect(pod.Spec.Containers[0].Resources.Limits.Memory().Value()).To(gomega.BeNumerically(">=", 250*1024*1024)) + gomega.Expect(float64(pod.Spec.Containers[0].Resources.Limits.Cpu().MilliValue()) / float64(pod.Spec.Containers[0].Resources.Requests.Cpu().MilliValue())).To(gomega.BeNumerically("~", 1.5)) + gomega.Expect(float64(pod.Spec.Containers[0].Resources.Limits.Memory().Value()) / float64(pod.Spec.Containers[0].Resources.Requests.Memory().Value())).To(gomega.BeNumerically("~", 2.)) + } + }) + + ginkgo.It("caps request according to pod max limit set in LimitRange", func() { + d := NewHamsterDeploymentWithResourcesAndLimits(f, + ParseQuantityOrDie("100m") /*cpu request*/, ParseQuantityOrDie("100Mi"), /*memory request*/ + ParseQuantityOrDie("150m") /*cpu limit*/, ParseQuantityOrDie("200Mi") /*memory limit*/) + d.Spec.Template.Spec.Containers = append(d.Spec.Template.Spec.Containers, d.Spec.Template.Spec.Containers[0]) + d.Spec.Template.Spec.Containers[1].Name = "hamster2" + ginkgo.By("Setting up a VPA CRD") + vpaCRD := NewVPA(f, "hamster-vpa", hamsterTargetRef) + vpaCRD.Status.Recommendation = &vpa_types.RecommendedPodResources{ + ContainerRecommendations: []vpa_types.RecommendedContainerResources{ + { + ContainerName: "hamster", + Target: apiv1.ResourceList{ + apiv1.ResourceCPU: ParseQuantityOrDie("250m"), + apiv1.ResourceMemory: ParseQuantityOrDie("200Mi"), + }, + }, + { + ContainerName: "hamster2", + Target: apiv1.ResourceList{ + apiv1.ResourceCPU: ParseQuantityOrDie("250m"), + apiv1.ResourceMemory: ParseQuantityOrDie("200Mi"), + }, + }, + }, + } + InstallVPA(f, vpaCRD) + + // Max CPU limit is 600m for pod, 300 per container and ratio is 1.5, so max request is 200m, + // while recommendation is 250m + // Max memory limit is 1Gi and ratio is 2., so max request is 0.5Gi + InstallLimitRangeWithMax(f, "600m", "1Gi", apiv1.LimitTypePod) + + ginkgo.By("Setting up a hamster deployment") + podList := startDeploymentPods(f, d) + + // Originally Pods had 100m CPU, 100Mi of memory, but admission controller + // should change it to 200m CPU (as this is the recommendation + // capped according to max limit in LimitRange) and 200Mi of memory, + // which is uncapped. Limit to request ratio should stay unchanged. 
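+			// Per container this means a 200m request / 300m limit for CPU (two containers exactly reach the 600m pod max)
+			// and a 200Mi request / 400Mi limit for memory (800Mi per pod, under the 1Gi pod max).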
+			for _, pod := range podList.Items {
+				gomega.Expect(*pod.Spec.Containers[0].Resources.Requests.Cpu()).To(gomega.Equal(ParseQuantityOrDie("200m")))
+				gomega.Expect(*pod.Spec.Containers[0].Resources.Requests.Memory()).To(gomega.Equal(ParseQuantityOrDie("200Mi")))
+				gomega.Expect(pod.Spec.Containers[0].Resources.Limits.Cpu().MilliValue()).To(gomega.BeNumerically("<=", 300))
+				gomega.Expect(pod.Spec.Containers[0].Resources.Limits.Memory().Value()).To(gomega.BeNumerically("<=", 1024*1024*1024))
+				gomega.Expect(float64(pod.Spec.Containers[0].Resources.Limits.Cpu().MilliValue()) / float64(pod.Spec.Containers[0].Resources.Requests.Cpu().MilliValue())).To(gomega.BeNumerically("~", 1.5))
+				gomega.Expect(float64(pod.Spec.Containers[0].Resources.Limits.Memory().Value()) / float64(pod.Spec.Containers[0].Resources.Requests.Memory().Value())).To(gomega.BeNumerically("~", 2.))
+			}
+		})
+
+		ginkgo.It("raises request according to pod min limit set in LimitRange", func() {
+			d := NewHamsterDeploymentWithResourcesAndLimits(f,
+				ParseQuantityOrDie("100m") /*cpu request*/, ParseQuantityOrDie("200Mi"), /*memory request*/
+				ParseQuantityOrDie("150m") /*cpu limit*/, ParseQuantityOrDie("400Mi") /*memory limit*/)
+			d.Spec.Template.Spec.Containers = append(d.Spec.Template.Spec.Containers, d.Spec.Template.Spec.Containers[0])
+			d.Spec.Template.Spec.Containers[1].Name = "hamster2"
+			ginkgo.By("Setting up a VPA CRD")
+			vpaCRD := NewVPA(f, "hamster-vpa", hamsterTargetRef)
+			vpaCRD.Status.Recommendation = &vpa_types.RecommendedPodResources{
+				ContainerRecommendations: []vpa_types.RecommendedContainerResources{
+					{
+						ContainerName: "hamster",
+						Target: apiv1.ResourceList{
+							apiv1.ResourceCPU:    ParseQuantityOrDie("120m"),
+							apiv1.ResourceMemory: ParseQuantityOrDie("100Mi"), // memory is downscaled
+						},
+					},
+					{
+						ContainerName: "hamster2",
+						Target: apiv1.ResourceList{
+							apiv1.ResourceCPU:    ParseQuantityOrDie("120m"),
+							apiv1.ResourceMemory: ParseQuantityOrDie("100Mi"), // memory is downscaled
+						},
+					},
+				},
+			}
+			InstallVPA(f, vpaCRD)
+
+			// Min CPU from the LimitRange is 100m per pod, i.e. 50m per container, and the ratio is 1.5.
+			// Min applies to both limit and request, so the min request is 50m and the min limit is 75m.
+			// Min memory limit is 500Mi per pod, i.e. 250Mi per container, and it applies to both limit and
+			// request. The recommendation is 100Mi, so the request should be scaled up to 250Mi.
+			InstallLimitRangeWithMin(f, "100m", "500Mi", apiv1.LimitTypePod)
+
+			ginkgo.By("Setting up a hamster deployment")
+			podList := startDeploymentPods(f, d)
+
+			// Originally Pods had 100m CPU and 200Mi of memory, but the admission controller
+			// should change that to 120m CPU (the recommendation) and 250Mi of memory, since 250Mi
+			// is the lowest request that the LimitRange allows.
+			// Limit to request ratio should stay unchanged.
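+			// Per container this means CPU stays at the recommended 120m (180m limit), above the 50m share of the
+			// pod minimum, while memory is raised to 250Mi (500Mi limit), its share of the 500Mi pod minimum.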
+ for _, pod := range podList.Items { + gomega.Expect(*pod.Spec.Containers[0].Resources.Requests.Cpu()).To(gomega.Equal(ParseQuantityOrDie("120m"))) + gomega.Expect(*pod.Spec.Containers[0].Resources.Requests.Memory()).To(gomega.Equal(ParseQuantityOrDie("250Mi"))) + gomega.Expect(pod.Spec.Containers[0].Resources.Limits.Cpu().MilliValue()).To(gomega.BeNumerically(">=", 75)) + gomega.Expect(pod.Spec.Containers[0].Resources.Limits.Memory().Value()).To(gomega.BeNumerically(">=", 250*1024*1024)) + gomega.Expect(float64(pod.Spec.Containers[0].Resources.Limits.Cpu().MilliValue()) / float64(pod.Spec.Containers[0].Resources.Requests.Cpu().MilliValue())).To(gomega.BeNumerically("~", 1.5)) + gomega.Expect(float64(pod.Spec.Containers[0].Resources.Limits.Memory().Value()) / float64(pod.Spec.Containers[0].Resources.Requests.Memory().Value())).To(gomega.BeNumerically("~", 2.)) + } + }) + ginkgo.It("caps request to max set in VPA", func() { d := NewHamsterDeploymentWithResources(f, ParseQuantityOrDie("100m") /*cpu*/, ParseQuantityOrDie("100Mi") /*memory*/) diff --git a/vertical-pod-autoscaler/e2e/v1beta2/common.go b/vertical-pod-autoscaler/e2e/v1beta2/common.go index 93d33906e311..ddbdf9ec46cc 100644 --- a/vertical-pod-autoscaler/e2e/v1beta2/common.go +++ b/vertical-pod-autoscaler/e2e/v1beta2/common.go @@ -103,9 +103,9 @@ func SetupHamsterDeployment(f *framework.Framework, cpu, memory string, replicas d := NewHamsterDeploymentWithResources(f, cpuQuantity, memoryQuantity) d.Spec.Replicas = &replicas d, err := f.ClientSet.AppsV1().Deployments(f.Namespace.Name).Create(d) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "unexpected error when starting deployment creation") err = framework.WaitForDeploymentComplete(f.ClientSet, d) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "unexpected error waiting for deployment creation to finish") return d } @@ -143,6 +143,17 @@ func NewHamsterDeploymentWithGuaranteedResources(f *framework.Framework, cpuQuan return d } +// NewHamsterDeploymentWithResourcesAndLimits creates a simple hamster deployment with specific +// resource requests and limits for e2e test purposes. +func NewHamsterDeploymentWithResourcesAndLimits(f *framework.Framework, cpuQuantityRequest, memoryQuantityRequest, cpuQuantityLimit, memoryQuantityLimit resource.Quantity) *appsv1.Deployment { + d := NewHamsterDeploymentWithResources(f, cpuQuantityRequest, memoryQuantityRequest) + d.Spec.Template.Spec.Containers[0].Resources.Limits = apiv1.ResourceList{ + apiv1.ResourceCPU: cpuQuantityLimit, + apiv1.ResourceMemory: memoryQuantityLimit, + } + return d +} + // GetHamsterPods returns running hamster pods (matched by hamsterLabels) func GetHamsterPods(f *framework.Framework) (*apiv1.PodList, error) { label := labels.SelectorFromSet(labels.Set(hamsterLabels)) @@ -171,6 +182,33 @@ func SetupVPA(f *framework.Framework, cpu string, mode vpa_types.UpdateMode, tar InstallVPA(f, vpaCRD) } +// SetupVPAForTwoHamsters creates and installs a simple pod with two hamster containers for e2e test purposes. 
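+// The installed VPA carries identical recommendations for the "hamster" and "hamster2" containers.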
+func SetupVPAForTwoHamsters(f *framework.Framework, cpu string, mode vpa_types.UpdateMode, targetRef *autoscaling.CrossVersionObjectReference) { + vpaCRD := NewVPA(f, "hamster-vpa", targetRef) + vpaCRD.Spec.UpdatePolicy.UpdateMode = &mode + + cpuQuantity := ParseQuantityOrDie(cpu) + resourceList := apiv1.ResourceList{apiv1.ResourceCPU: cpuQuantity} + + vpaCRD.Status.Recommendation = &vpa_types.RecommendedPodResources{ + ContainerRecommendations: []vpa_types.RecommendedContainerResources{ + { + ContainerName: "hamster", + Target: resourceList, + LowerBound: resourceList, + UpperBound: resourceList, + }, + { + ContainerName: "hamster2", + Target: resourceList, + LowerBound: resourceList, + UpperBound: resourceList, + }, + }, + } + InstallVPA(f, vpaCRD) +} + // NewVPA creates a VPA object for e2e test purposes. func NewVPA(f *framework.Framework, name string, targetRef *autoscaling.CrossVersionObjectReference) *vpa_types.VerticalPodAutoscaler { updateMode := vpa_types.UpdateModeAuto @@ -196,18 +234,18 @@ func NewVPA(f *framework.Framework, name string, targetRef *autoscaling.CrossVer func InstallVPA(f *framework.Framework, vpa *vpa_types.VerticalPodAutoscaler) { ns := f.Namespace.Name config, err := framework.LoadConfig() - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "unexpected error loading framework") vpaClientSet := vpa_clientset.NewForConfigOrDie(config) vpaClient := vpaClientSet.AutoscalingV1beta2() _, err = vpaClient.VerticalPodAutoscalers(ns).Create(vpa) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "unexpected error creating VPA") } // ParseQuantityOrDie parses quantity from string and dies with an error if // unparsable. func ParseQuantityOrDie(text string) resource.Quantity { quantity, err := resource.ParseQuantity(text) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "unexpected error parsing quantity: %s", text) return quantity } @@ -299,9 +337,9 @@ func GetEvictedPodsCount(currentPodSet PodSet, initialPodSet PodSet) int { func CheckNoPodsEvicted(f *framework.Framework, initialPodSet PodSet) { time.Sleep(VpaEvictionTimeout) currentPodList, err := GetHamsterPods(f) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "unexpected error when listing hamster pods to check number of pod evictions") restarted := GetEvictedPodsCount(MakePodSet(currentPodList), initialPodSet) - gomega.Expect(restarted).To(gomega.Equal(0)) + gomega.Expect(restarted).To(gomega.Equal(0), "there should be no pod evictions") } // WaitForVPAMatch pools VPA object until match function returns true. 
Returns @@ -335,3 +373,61 @@ func WaitForRecommendationPresent(c *vpa_clientset.Clientset, vpa *vpa_types.Ver return vpa.Status.Recommendation != nil && len(vpa.Status.Recommendation.ContainerRecommendations) != 0 }) } + +func installLimitRange(f *framework.Framework, minCpuLimit, minMemoryLimit, maxCpuLimit, maxMemoryLimit *resource.Quantity, lrType apiv1.LimitType) { + lr := &apiv1.LimitRange{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: f.Namespace.Name, + Name: "hamster-lr", + }, + Spec: apiv1.LimitRangeSpec{ + Limits: []apiv1.LimitRangeItem{}, + }, + } + + if maxMemoryLimit != nil || maxCpuLimit != nil { + lrItem := apiv1.LimitRangeItem{ + Type: lrType, + Max: apiv1.ResourceList{}, + } + if maxCpuLimit != nil { + lrItem.Max[apiv1.ResourceCPU] = *maxCpuLimit + } + if maxMemoryLimit != nil { + lrItem.Max[apiv1.ResourceMemory] = *maxMemoryLimit + } + lr.Spec.Limits = append(lr.Spec.Limits, lrItem) + } + + if minMemoryLimit != nil || minCpuLimit != nil { + lrItem := apiv1.LimitRangeItem{ + Type: lrType, + Min: apiv1.ResourceList{}, + } + if minCpuLimit != nil { + lrItem.Min[apiv1.ResourceCPU] = *minCpuLimit + } + if minMemoryLimit != nil { + lrItem.Min[apiv1.ResourceMemory] = *minMemoryLimit + } + lr.Spec.Limits = append(lr.Spec.Limits, lrItem) + } + _, err := f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).Create(lr) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "unexpected error when creating limit range") +} + +// InstallLimitRangeWithMax installs a LimitRange with a maximum limit for CPU and memory. +func InstallLimitRangeWithMax(f *framework.Framework, maxCpuLimit, maxMemoryLimit string, lrType apiv1.LimitType) { + ginkgo.By(fmt.Sprintf("Setting up LimitRange with max limits - CPU: %v, memory: %v", maxCpuLimit, maxMemoryLimit)) + maxCpuLimitQuantity := ParseQuantityOrDie(maxCpuLimit) + maxMemoryLimitQuantity := ParseQuantityOrDie(maxMemoryLimit) + installLimitRange(f, nil, nil, &maxCpuLimitQuantity, &maxMemoryLimitQuantity, lrType) +} + +// InstallLimitRangeWithMin installs a LimitRange with a minimum limit for CPU and memory. 
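+// lrType selects whether the minimum applies per container (LimitTypeContainer) or per pod (LimitTypePod).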
+func InstallLimitRangeWithMin(f *framework.Framework, minCpuLimit, minMemoryLimit string, lrType apiv1.LimitType) { + ginkgo.By(fmt.Sprintf("Setting up LimitRange with min limits - CPU: %v, memory: %v", minCpuLimit, minMemoryLimit)) + minCpuLimitQuantity := ParseQuantityOrDie(minCpuLimit) + minMemoryLimitQuantity := ParseQuantityOrDie(minMemoryLimit) + installLimitRange(f, &minCpuLimitQuantity, &minMemoryLimitQuantity, nil, nil, lrType) +} diff --git a/vertical-pod-autoscaler/e2e/v1beta2/updater.go b/vertical-pod-autoscaler/e2e/v1beta2/updater.go index 67f9edb202d6..012adee959bf 100644 --- a/vertical-pod-autoscaler/e2e/v1beta2/updater.go +++ b/vertical-pod-autoscaler/e2e/v1beta2/updater.go @@ -119,6 +119,86 @@ var _ = UpdaterE2eDescribe("Updater", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Expect(evictedCount >= permissiveMaxUnavailable).To(gomega.BeTrue()) }) + + ginkgo.It("observes container max in LimitRange", func() { + ginkgo.By("Setting up a hamster deployment") + d := NewHamsterDeploymentWithResourcesAndLimits(f, + ParseQuantityOrDie("100m") /*cpu request*/, ParseQuantityOrDie("200Mi"), /*memory request*/ + ParseQuantityOrDie("300m") /*cpu limit*/, ParseQuantityOrDie("400Mi") /*memory limit*/) + podList := startDeploymentPods(f, d) + + ginkgo.By("Setting up a VPA CRD") + SetupVPA(f, "200m", vpa_types.UpdateModeAuto, hamsterTargetRef) + + // Max CPU limit is 300m and ratio is 3., so max request is 100m, while + // recommendation is 200m + // Max memory limit is 1T and ratio is 2., so max request is 0.5T + InstallLimitRangeWithMax(f, "300m", "1T", apiv1.LimitTypeContainer) + + ginkgo.By(fmt.Sprintf("Waiting for pods to be evicted, hoping it won't happen, sleep for %s", VpaEvictionTimeout.String())) + CheckNoPodsEvicted(f, MakePodSet(podList)) + }) + + ginkgo.It("observes container min in LimitRange", func() { + ginkgo.By("Setting up a hamster deployment") + d := NewHamsterDeploymentWithResourcesAndLimits(f, + ParseQuantityOrDie("100m") /*cpu request*/, ParseQuantityOrDie("200Mi"), /*memory request*/ + ParseQuantityOrDie("300m") /*cpu limit*/, ParseQuantityOrDie("400Mi") /*memory limit*/) + podList := startDeploymentPods(f, d) + + ginkgo.By("Setting up a VPA CRD") + SetupVPA(f, "50m", vpa_types.UpdateModeAuto, hamsterTargetRef) + + // Min CPU from limit range is 100m and ratio is 3. 
Min applies both to limit and request so min + // request is 100m request and 300m limit + // Min memory limit is 0 and ratio is 2., so min request is 0 + InstallLimitRangeWithMin(f, "100m", "0", apiv1.LimitTypeContainer) + + ginkgo.By(fmt.Sprintf("Waiting for pods to be evicted, hoping it won't happen, sleep for %s", VpaEvictionTimeout.String())) + CheckNoPodsEvicted(f, MakePodSet(podList)) + }) + + ginkgo.It("observes pod max in LimitRange", func() { + ginkgo.By("Setting up a hamster deployment") + d := NewHamsterDeploymentWithResourcesAndLimits(f, + ParseQuantityOrDie("100m") /*cpu request*/, ParseQuantityOrDie("200Mi"), /*memory request*/ + ParseQuantityOrDie("300m") /*cpu limit*/, ParseQuantityOrDie("400Mi") /*memory limit*/) + d.Spec.Template.Spec.Containers = append(d.Spec.Template.Spec.Containers, d.Spec.Template.Spec.Containers[0]) + d.Spec.Template.Spec.Containers[1].Name = "hamster2" + podList := startDeploymentPods(f, d) + + ginkgo.By("Setting up a VPA CRD") + SetupVPAForTwoHamsters(f, "200m", vpa_types.UpdateModeAuto, hamsterTargetRef) + + // Max CPU limit is 600m per pod, 300m per container and ratio is 3., so max request is 100m, + // while recommendation is 200m + // Max memory limit is 2T per pod, 1T per container and ratio is 2., so max request is 0.5T + InstallLimitRangeWithMax(f, "600m", "2T", apiv1.LimitTypePod) + + ginkgo.By(fmt.Sprintf("Waiting for pods to be evicted, hoping it won't happen, sleep for %s", VpaEvictionTimeout.String())) + CheckNoPodsEvicted(f, MakePodSet(podList)) + }) + + ginkgo.It("observes pod min in LimitRange", func() { + ginkgo.By("Setting up a hamster deployment") + d := NewHamsterDeploymentWithResourcesAndLimits(f, + ParseQuantityOrDie("100m") /*cpu request*/, ParseQuantityOrDie("200Mi"), /*memory request*/ + ParseQuantityOrDie("300m") /*cpu limit*/, ParseQuantityOrDie("400Mi") /*memory limit*/) + d.Spec.Template.Spec.Containers = append(d.Spec.Template.Spec.Containers, d.Spec.Template.Spec.Containers[0]) + d.Spec.Template.Spec.Containers[1].Name = "hamster2" + podList := startDeploymentPods(f, d) + + ginkgo.By("Setting up a VPA CRD") + SetupVPAForTwoHamsters(f, "50m", vpa_types.UpdateModeAuto, hamsterTargetRef) + + // Min CPU from limit range is 200m per pod, 100m per container and ratio is 3. 
Min applies both + // to limit and request so min request is 100m request and 300m limit + // Min memory limit is 0 and ratio is 2., so min request is 0 + InstallLimitRangeWithMin(f, "200m", "0", apiv1.LimitTypePod) + + ginkgo.By(fmt.Sprintf("Waiting for pods to be evicted, hoping it won't happen, sleep for %s", VpaEvictionTimeout.String())) + CheckNoPodsEvicted(f, MakePodSet(podList)) + }) }) func testEvictsPods(f *framework.Framework, controller *autoscaling.CrossVersionObjectReference) { diff --git a/vertical-pod-autoscaler/pkg/admission-controller/logic/recommendation_provider.go b/vertical-pod-autoscaler/pkg/admission-controller/logic/recommendation_provider.go index ec1d490a96d8..52487dd6c6c4 100644 --- a/vertical-pod-autoscaler/pkg/admission-controller/logic/recommendation_provider.go +++ b/vertical-pod-autoscaler/pkg/admission-controller/logic/recommendation_provider.go @@ -18,122 +18,67 @@ package logic import ( "fmt" - "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/resource" - "k8s.io/apimachinery/pkg/labels" - "math" - "math/big" + core "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/labels" vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta2" vpa_lister "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/client/listers/autoscaling.k8s.io/v1beta2" "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/target" + "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/limitrange" vpa_api_util "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/vpa" "k8s.io/klog" ) -// ContainerResources holds resources request for container -type ContainerResources struct { - Limits v1.ResourceList - Requests v1.ResourceList -} - -func newContainerResources() ContainerResources { - return ContainerResources{ - Requests: v1.ResourceList{}, - Limits: v1.ResourceList{}, - } -} - // RecommendationProvider gets current recommendation, annotations and vpaName for the given pod. type RecommendationProvider interface { - GetContainersResourcesForPod(pod *v1.Pod) ([]ContainerResources, vpa_api_util.ContainerToAnnotationsMap, string, error) + GetContainersResourcesForPod(pod *core.Pod) ([]vpa_api_util.ContainerResources, vpa_api_util.ContainerToAnnotationsMap, string, error) } type recommendationProvider struct { - vpaLister vpa_lister.VerticalPodAutoscalerLister + limitsRangeCalculator limitrange.LimitRangeCalculator recommendationProcessor vpa_api_util.RecommendationProcessor selectorFetcher target.VpaTargetSelectorFetcher + vpaLister vpa_lister.VerticalPodAutoscalerLister } // NewRecommendationProvider constructs the recommendation provider that list VPAs and can be used to determine recommendations for pods. -func NewRecommendationProvider(vpaLister vpa_lister.VerticalPodAutoscalerLister, recommendationProcessor vpa_api_util.RecommendationProcessor, selectorFetcher target.VpaTargetSelectorFetcher) *recommendationProvider { +func NewRecommendationProvider(calculator limitrange.LimitRangeCalculator, recommendationProcessor vpa_api_util.RecommendationProcessor, + selectorFetcher target.VpaTargetSelectorFetcher, vpaLister vpa_lister.VerticalPodAutoscalerLister) *recommendationProvider { return &recommendationProvider{ - vpaLister: vpaLister, + limitsRangeCalculator: calculator, recommendationProcessor: recommendationProcessor, selectorFetcher: selectorFetcher, + vpaLister: vpaLister, } } -func getProportionalLimit(originalLimit, originalRequest, recommendedRequest *resource.Quantity) (limit *resource.Quantity, capped bool) { - // originalLimit not set, don't set limit. 
- if originalLimit == nil || originalLimit.Value() == 0 { - return nil, false - } - // originalLimit set but originalRequest not set - K8s will treat the pod as if they were equal, - // recommend limit equal to request - if originalRequest == nil || originalRequest.Value() == 0 { - result := *recommendedRequest - return &result, false - } - // originalLimit and originalRequest are set. If they are equal recommend limit equal to request. - if originalRequest.MilliValue() == originalLimit.MilliValue() { - result := *recommendedRequest - return &result, false - } - - // Input and output milli values should fit in int64 but intermediate values might be bigger. - originalMilliRequest := big.NewInt(originalRequest.MilliValue()) - originalMilliLimit := big.NewInt(originalLimit.MilliValue()) - recommendedMilliRequest := big.NewInt(recommendedRequest.MilliValue()) - var recommendedMilliLimit big.Int - recommendedMilliLimit.Mul(recommendedMilliRequest, originalMilliLimit) - recommendedMilliLimit.Div(&recommendedMilliLimit, originalMilliRequest) - if recommendedMilliLimit.IsInt64() { - return resource.NewMilliQuantity(recommendedMilliLimit.Int64(), recommendedRequest.Format), false - } - return resource.NewMilliQuantity(math.MaxInt64, recommendedRequest.Format), true -} - // GetContainersResources returns the recommended resources for each container in the given pod in the same order they are specified in the pod.Spec. -func GetContainersResources(pod *v1.Pod, podRecommendation vpa_types.RecommendedPodResources, annotations vpa_api_util.ContainerToAnnotationsMap) []ContainerResources { - resources := make([]ContainerResources, len(pod.Spec.Containers)) +func GetContainersResources(pod *core.Pod, podRecommendation vpa_types.RecommendedPodResources, limitRange *core.LimitRangeItem, + annotations vpa_api_util.ContainerToAnnotationsMap) []vpa_api_util.ContainerResources { + resources := make([]vpa_api_util.ContainerResources, len(pod.Spec.Containers)) for i, container := range pod.Spec.Containers { - resources[i] = newContainerResources() - recommendation := vpa_api_util.GetRecommendationForContainer(container.Name, &podRecommendation) if recommendation == nil { klog.V(2).Infof("no matching recommendation found for container %s", container.Name) continue } resources[i].Requests = recommendation.Target - - cpuLimit, capped := getProportionalLimit(container.Resources.Limits.Cpu(), container.Resources.Requests.Cpu(), resources[i].Requests.Cpu()) - if cpuLimit != nil { - resources[i].Limits[v1.ResourceCPU] = *cpuLimit - } - if capped { - annotations[container.Name] = append( - annotations[container.Name], - fmt.Sprintf( - "Failed to keep CPU limit to request proportion of %d to %d with recommended request of %d milliCPU; doesn't fit in int64. Capping limit to MaxInt64", - container.Resources.Limits.Cpu().MilliValue(), container.Resources.Requests.Cpu().MilliValue(), resources[i].Requests.Cpu().MilliValue())) + defaultLimit := core.ResourceList{} + if limitRange != nil { + defaultLimit = limitRange.Default } - memLimit, capped := getProportionalLimit(container.Resources.Limits.Memory(), container.Resources.Requests.Memory(), resources[i].Requests.Memory()) - if memLimit != nil { - resources[i].Limits[v1.ResourceMemory] = *memLimit - } - if capped { - annotations[container.Name] = append( - annotations[container.Name], - fmt.Sprintf( - "Failed to keep memory limit to request proportion of %d to %d with recommended request of %d milliBytes; doesn't fit in int64. 
Capping limit to MaxInt64", - container.Resources.Limits.Memory().MilliValue(), container.Resources.Requests.Memory().MilliValue(), resources[i].Requests.Memory().MilliValue())) + proportionalLimits, limitAnnotations := vpa_api_util.GetProportionalLimit(container.Resources.Limits, container.Resources.Requests, recommendation.Target, defaultLimit) + if proportionalLimits != nil { + resources[i].Limits = proportionalLimits + if len(limitAnnotations) > 0 { + annotations[container.Name] = append(annotations[container.Name], limitAnnotations...) + } } } return resources } -func (p *recommendationProvider) getMatchingVPA(pod *v1.Pod) *vpa_types.VerticalPodAutoscaler { +func (p *recommendationProvider) getMatchingVPA(pod *core.Pod) *vpa_types.VerticalPodAutoscaler { configs, err := p.vpaLister.VerticalPodAutoscalers(pod.Namespace).List(labels.Everything()) if err != nil { klog.Errorf("failed to get vpa configs: %v", err) @@ -164,7 +109,7 @@ func (p *recommendationProvider) getMatchingVPA(pod *v1.Pod) *vpa_types.Vertical // GetContainersResourcesForPod returns recommended request for a given pod, annotations and name of controlling VPA. // The returned slice corresponds 1-1 to containers in the Pod. -func (p *recommendationProvider) GetContainersResourcesForPod(pod *v1.Pod) ([]ContainerResources, vpa_api_util.ContainerToAnnotationsMap, string, error) { +func (p *recommendationProvider) GetContainersResourcesForPod(pod *core.Pod) ([]vpa_api_util.ContainerResources, vpa_api_util.ContainerToAnnotationsMap, string, error) { klog.V(2).Infof("updating requirements for pod %s.", pod.Name) vpaConfig := p.getMatchingVPA(pod) if vpaConfig == nil { @@ -183,6 +128,11 @@ func (p *recommendationProvider) GetContainersResourcesForPod(pod *v1.Pod) ([]Co return nil, annotations, vpaConfig.Name, err } } - containerResources := GetContainersResources(pod, *recommendedPodResources, annotations) + podLimitRange, err := p.limitsRangeCalculator.GetContainerLimitRangeItem(pod.Namespace) + // TODO: Support limit range on pod level. 
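+	// A nil limit range item (no LimitRange in the namespace) is valid; only a lookup error is treated as a failure here.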
+ if err != nil { + return nil, nil, "", fmt.Errorf("error getting podLimitRange: %s", err) + } + containerResources := GetContainersResources(pod, *recommendedPodResources, podLimitRange, annotations) return containerResources, annotations, vpaConfig.Name, nil } diff --git a/vertical-pod-autoscaler/pkg/admission-controller/logic/recommendation_provider_test.go b/vertical-pod-autoscaler/pkg/admission-controller/logic/recommendation_provider_test.go index fa0555d867a8..f422fe9fe598 100644 --- a/vertical-pod-autoscaler/pkg/admission-controller/logic/recommendation_provider_test.go +++ b/vertical-pod-autoscaler/pkg/admission-controller/logic/recommendation_provider_test.go @@ -21,17 +21,18 @@ import ( "math" "testing" - "github.com/golang/mock/gomock" - "github.com/stretchr/testify/assert" apiv1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta2" target_mock "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/target/mock" + "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/limitrange" "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/test" - api "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/vpa" vpa_api_util "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/vpa" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" ) func parseLabelSelector(selector string) labels.Selector { @@ -45,6 +46,21 @@ func mustParseResourcePointer(val string) *resource.Quantity { return &q } +type fakeLimitRangeCalculator struct { + containerLimitRange *apiv1.LimitRangeItem + containerErr error + podLimitRange *apiv1.LimitRangeItem + podErr error +} + +func (nlrc *fakeLimitRangeCalculator) GetContainerLimitRangeItem(namespace string) (*apiv1.LimitRangeItem, error) { + return nlrc.containerLimitRange, nlrc.containerErr +} + +func (nlrc *fakeLimitRangeCalculator) GetPodLimitRangeItem(namespace string) (*apiv1.LimitRangeItem, error) { + return nlrc.podLimitRange, nlrc.podErr +} + func TestUpdateResourceRequests(t *testing.T) { containerName := "container1" vpaName := "vpa1" @@ -62,7 +78,7 @@ func TestUpdateResourceRequests(t *testing.T) { WithLabels(labels).Get() initializedContainer := test.Container().WithName(containerName). - WithCPURequest(resource.MustParse("1")).WithMemRequest(resource.MustParse("100Mi")).Get() + WithCPURequest(resource.MustParse("1")).WithCPURequest(resource.MustParse("2")).WithMemRequest(resource.MustParse("100Mi")).Get() initialized := test.Pod().WithName("test_initialized"). 
AddContainer(initializedContainer).WithLabels(labels).Get() @@ -102,16 +118,19 @@ func TestUpdateResourceRequests(t *testing.T) { vpaWithNilRecommendation.Status.Recommendation = nil testCases := []struct { - name string - pod *apiv1.Pod - vpas []*vpa_types.VerticalPodAutoscaler - expectedAction bool - expectedMem resource.Quantity - expectedCPU resource.Quantity - expectedCPULimit *resource.Quantity - expectedMemLimit *resource.Quantity - annotations vpa_api_util.ContainerToAnnotationsMap - labelSelector string + name string + pod *apiv1.Pod + vpas []*vpa_types.VerticalPodAutoscaler + expectedAction bool + expectedError error + expectedMem resource.Quantity + expectedCPU resource.Quantity + expectedCPULimit *resource.Quantity + expectedMemLimit *resource.Quantity + limitRange *apiv1.LimitRangeItem + limitRangeCalcErr error + annotations vpa_api_util.ContainerToAnnotationsMap + labelSelector string }{ { name: "uninitialized pod", @@ -249,12 +268,39 @@ func TestUpdateResourceRequests(t *testing.T) { labelSelector: "app = testingApp", annotations: vpa_api_util.ContainerToAnnotationsMap{ containerName: []string{ - "Failed to keep CPU limit to request proportion of 10000 to 1000 with recommended request of -9223372036854775808 milliCPU; doesn't fit in int64. Capping limit to MaxInt64", - "Failed to keep memory limit to request proportion of 1048576000000 to 104857600000 with recommended request of -9223372036854775808 milliBytes; doesn't fit in int64. Capping limit to MaxInt64", + "cpu: failed to keep limit to request ratio; capping limit to int64", + "memory: failed to keep limit to request ratio; capping limit to int64", + }, + }, + }, + { + name: "limit range calculation error", + pod: initialized, + vpas: []*vpa_types.VerticalPodAutoscaler{vpa}, + limitRangeCalcErr: fmt.Errorf("oh no"), + expectedAction: false, + expectedError: fmt.Errorf("error getting podLimitRange: oh no"), + }, + { + name: "proportional limit from default", + pod: initialized, + vpas: []*vpa_types.VerticalPodAutoscaler{vpa}, + expectedAction: true, + expectedCPU: resource.MustParse("2"), + expectedMem: resource.MustParse("200Mi"), + expectedCPULimit: mustParseResourcePointer("2"), + expectedMemLimit: mustParseResourcePointer("200Mi"), + labelSelector: "app = testingApp", + limitRange: &apiv1.LimitRangeItem{ + Type: apiv1.LimitTypeContainer, + Default: apiv1.ResourceList{ + apiv1.ResourceCPU: resource.MustParse("2"), + apiv1.ResourceMemory: resource.MustParse("100Mi"), }, }, }, } + for _, tc := range testCases { t.Run(fmt.Sprintf(tc.name), func(t *testing.T) { ctrl := gomock.NewController(t) @@ -272,8 +318,12 @@ func TestUpdateResourceRequests(t *testing.T) { recommendationProvider := &recommendationProvider{ vpaLister: vpaLister, - recommendationProcessor: api.NewCappingRecommendationProcessor(), + recommendationProcessor: vpa_api_util.NewCappingRecommendationProcessor(limitrange.NewNoopLimitsCalculator()), selectorFetcher: mockSelectorFetcher, + limitsRangeCalculator: &fakeLimitRangeCalculator{ + containerLimitRange: tc.limitRange, + containerErr: tc.limitRangeCalcErr, + }, } resources, annotations, name, err := recommendationProvider.GetContainersResourcesForPod(tc.pod) @@ -304,7 +354,7 @@ func TestUpdateResourceRequests(t *testing.T) { if tc.expectedMemLimit == nil { assert.False(t, memLimitPresent, "expected no memory limit, got %s", memLimit.String()) } else { - if assert.True(t, memLimitPresent, "expected cpu limit, but it's missing") { + if assert.True(t, memLimitPresent, "expected memory limit, but it's 
missing") { assert.Equal(t, tc.expectedMemLimit.MilliValue(), memLimit.MilliValue(), "memory limit doesn't match") } } @@ -320,6 +370,12 @@ func TestUpdateResourceRequests(t *testing.T) { } } else { assert.Empty(t, resources) + if tc.expectedError != nil { + assert.Error(t, err) + assert.Equal(t, tc.expectedError.Error(), err.Error()) + } else { + assert.NoError(t, err) + } } }) diff --git a/vertical-pod-autoscaler/pkg/admission-controller/logic/server.go b/vertical-pod-autoscaler/pkg/admission-controller/logic/server.go index 885dae569147..57c9542d068c 100644 --- a/vertical-pod-autoscaler/pkg/admission-controller/logic/server.go +++ b/vertical-pod-autoscaler/pkg/admission-controller/logic/server.go @@ -20,6 +20,7 @@ import ( "encoding/json" "fmt" "io/ioutil" + "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/limitrange" "net/http" "strings" @@ -38,11 +39,12 @@ import ( type AdmissionServer struct { recommendationProvider RecommendationProvider podPreProcessor PodPreProcessor + limitsChecker limitrange.LimitRangeCalculator } // NewAdmissionServer constructs new AdmissionServer -func NewAdmissionServer(recommendationProvider RecommendationProvider, podPreProcessor PodPreProcessor) *AdmissionServer { - return &AdmissionServer{recommendationProvider, podPreProcessor} +func NewAdmissionServer(recommendationProvider RecommendationProvider, podPreProcessor PodPreProcessor, limitsChecker limitrange.LimitRangeCalculator) *AdmissionServer { + return &AdmissionServer{recommendationProvider, podPreProcessor, limitsChecker} } type patchRecord struct { @@ -72,10 +74,11 @@ func (s *AdmissionServer) getPatchesForPodResourceRequest(raw []byte, namespace if annotationsPerContainer == nil { annotationsPerContainer = vpa_api_util.ContainerToAnnotationsMap{} } + patches := []patchRecord{} updatesAnnotation := []string{} for i, containerResources := range containersResources { - newPatches, newUpdatesAnnotation := s.getContainerPatch(pod, i, "requests", annotationsPerContainer, containerResources) + newPatches, newUpdatesAnnotation := s.getContainerPatch(pod, i, annotationsPerContainer, containerResources) patches = append(patches, newPatches...) 
updatesAnnotation = append(updatesAnnotation, newUpdatesAnnotation) } @@ -119,7 +122,7 @@ func getAddResourceRequirementValuePatch(i int, kind string, resource v1.Resourc Value: quantity.String()} } -func (s *AdmissionServer) getContainerPatch(pod v1.Pod, i int, patchKind string, annotationsPerContainer vpa_api_util.ContainerToAnnotationsMap, containerResources ContainerResources) ([]patchRecord, string) { +func (s *AdmissionServer) getContainerPatch(pod v1.Pod, i int, annotationsPerContainer vpa_api_util.ContainerToAnnotationsMap, containerResources vpa_api_util.ContainerResources) ([]patchRecord, string) { var patches []patchRecord // Add empty resources object if missing if pod.Spec.Containers[i].Resources.Limits == nil && diff --git a/vertical-pod-autoscaler/pkg/admission-controller/logic/server_test.go b/vertical-pod-autoscaler/pkg/admission-controller/logic/server_test.go index 35ed1e7ff9ce..dc994bf8bf2f 100644 --- a/vertical-pod-autoscaler/pkg/admission-controller/logic/server_test.go +++ b/vertical-pod-autoscaler/pkg/admission-controller/logic/server_test.go @@ -20,6 +20,7 @@ import ( "encoding/json" "fmt" "github.com/stretchr/testify/assert" + "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/limitrange" "strings" "testing" @@ -45,13 +46,13 @@ func (fpp *fakePodPreProcessor) Process(pod apiv1.Pod) (apiv1.Pod, error) { } type fakeRecommendationProvider struct { - resources []ContainerResources + resources []vpa_api_util.ContainerResources containerToAnnotations vpa_api_util.ContainerToAnnotationsMap name string e error } -func (frp *fakeRecommendationProvider) GetContainersResourcesForPod(pod *apiv1.Pod) ([]ContainerResources, vpa_api_util.ContainerToAnnotationsMap, string, error) { +func (frp *fakeRecommendationProvider) GetContainersResourcesForPod(pod *apiv1.Pod) ([]vpa_api_util.ContainerResources, vpa_api_util.ContainerToAnnotationsMap, string, error) { return frp.resources, frp.containerToAnnotations, frp.name, frp.e } @@ -127,7 +128,7 @@ func TestGetPatchesForResourceRequest(t *testing.T) { podJson []byte namespace string preProcessorError error - recommendResources []ContainerResources + recommendResources []vpa_api_util.ContainerResources recommendAnnotations vpa_api_util.ContainerToAnnotationsMap recommendName string recommendError error @@ -139,7 +140,7 @@ func TestGetPatchesForResourceRequest(t *testing.T) { podJson: []byte("{"), namespace: "default", preProcessorError: nil, - recommendResources: []ContainerResources{}, + recommendResources: []vpa_api_util.ContainerResources{}, recommendAnnotations: vpa_api_util.ContainerToAnnotationsMap{}, recommendName: "name", expectError: fmt.Errorf("unexpected end of JSON input"), @@ -149,7 +150,7 @@ func TestGetPatchesForResourceRequest(t *testing.T) { podJson: []byte("{}"), namespace: "default", preProcessorError: fmt.Errorf("bad pod"), - recommendResources: []ContainerResources{}, + recommendResources: []vpa_api_util.ContainerResources{}, recommendAnnotations: vpa_api_util.ContainerToAnnotationsMap{}, recommendName: "name", expectError: fmt.Errorf("bad pod"), @@ -163,7 +164,7 @@ func TestGetPatchesForResourceRequest(t *testing.T) { } }`), namespace: "default", - recommendResources: []ContainerResources{ + recommendResources: []vpa_api_util.ContainerResources{ { Requests: apiv1.ResourceList{ cpu: resource.MustParse("1"), @@ -196,7 +197,7 @@ func TestGetPatchesForResourceRequest(t *testing.T) { } }`), namespace: "default", - recommendResources: []ContainerResources{ + recommendResources: []vpa_api_util.ContainerResources{ { 
Requests: apiv1.ResourceList{ cpu: resource.MustParse("1"), @@ -228,7 +229,7 @@ func TestGetPatchesForResourceRequest(t *testing.T) { } }`), namespace: "default", - recommendResources: []ContainerResources{ + recommendResources: []vpa_api_util.ContainerResources{ { Requests: apiv1.ResourceList{ cpu: resource.MustParse("1"), @@ -259,7 +260,7 @@ func TestGetPatchesForResourceRequest(t *testing.T) { } }`), namespace: "default", - recommendResources: []ContainerResources{ + recommendResources: []vpa_api_util.ContainerResources{ { Limits: apiv1.ResourceList{ cpu: resource.MustParse("1"), @@ -292,7 +293,7 @@ func TestGetPatchesForResourceRequest(t *testing.T) { } }`), namespace: "default", - recommendResources: []ContainerResources{ + recommendResources: []vpa_api_util.ContainerResources{ { Limits: apiv1.ResourceList{ cpu: resource.MustParse("1"), @@ -311,7 +312,8 @@ func TestGetPatchesForResourceRequest(t *testing.T) { t.Run(fmt.Sprintf("test case: %s", tc.name), func(t *testing.T) { fppp := fakePodPreProcessor{e: tc.preProcessorError} frp := fakeRecommendationProvider{tc.recommendResources, tc.recommendAnnotations, tc.recommendName, tc.recommendError} - s := NewAdmissionServer(&frp, &fppp) + lc := limitrange.NewNoopLimitsCalculator() + s := NewAdmissionServer(&frp, &fppp, lc) patches, err := s.getPatchesForPodResourceRequest(tc.podJson, tc.namespace) if tc.expectError == nil { assert.NoError(t, err) @@ -334,7 +336,7 @@ func TestGetPatchesForResourceRequest(t *testing.T) { func TestGetPatchesForResourceRequest_TwoReplacementResources(t *testing.T) { fppp := fakePodPreProcessor{} - recommendResources := []ContainerResources{ + recommendResources := []vpa_api_util.ContainerResources{ { Requests: apiv1.ResourceList{ cpu: resource.MustParse("1"), @@ -358,7 +360,8 @@ func TestGetPatchesForResourceRequest_TwoReplacementResources(t *testing.T) { }`) recommendAnnotations := vpa_api_util.ContainerToAnnotationsMap{} frp := fakeRecommendationProvider{recommendResources, recommendAnnotations, "name", nil} - s := NewAdmissionServer(&frp, &fppp) + lc := limitrange.NewNoopLimitsCalculator() + s := NewAdmissionServer(&frp, &fppp, lc) patches, err := s.getPatchesForPodResourceRequest(podJson, "default") assert.NoError(t, err) // Order of updates for cpu and unobtanium depends on order of iterating a map, both possible results are valid. 
diff --git a/vertical-pod-autoscaler/pkg/admission-controller/main.go b/vertical-pod-autoscaler/pkg/admission-controller/main.go index 61553925f7c5..901ccdbaae8b 100644 --- a/vertical-pod-autoscaler/pkg/admission-controller/main.go +++ b/vertical-pod-autoscaler/pkg/admission-controller/main.go @@ -19,6 +19,7 @@ package main import ( "flag" "fmt" + "net/http" "os" "time" @@ -28,6 +29,7 @@ import ( "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/admission-controller/logic" vpa_clientset "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/client/clientset/versioned" "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/target" + "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/limitrange" "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/metrics" metrics_admission "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/metrics/admission" vpa_api_util "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/vpa" @@ -79,7 +81,16 @@ func main() { target.NewVpaTargetSelectorFetcher(config, kubeClient, factory), target.NewBeta1TargetSelectorFetcher(config), ) - as := logic.NewAdmissionServer(logic.NewRecommendationProvider(vpaLister, vpa_api_util.NewCappingRecommendationProcessor(), targetSelectorFetcher), logic.NewDefaultPodPreProcessor()) + podPreprocessor := logic.NewDefaultPodPreProcessor() + var limitRangeCalculator limitrange.LimitRangeCalculator + limitRangeCalculator, err = limitrange.NewLimitsRangeCalculator(factory) + if err != nil { + klog.Errorf("Failed to create limitRangeCalculator, falling back to not checking limits. Error message: %s", err) + limitRangeCalculator = limitrange.NewNoopLimitsCalculator() + } + recommendationProvider := logic.NewRecommendationProvider(limitRangeCalculator, vpa_api_util.NewCappingRecommendationProcessor(limitRangeCalculator), targetSelectorFetcher, vpaLister) + + as := logic.NewAdmissionServer(recommendationProvider, podPreprocessor, limitRangeCalculator) http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { as.Serve(w, r) healthCheck.UpdateLastActivity() diff --git a/vertical-pod-autoscaler/pkg/updater/main.go b/vertical-pod-autoscaler/pkg/updater/main.go index f82eb96c294b..5826e0d1b832 100644 --- a/vertical-pod-autoscaler/pkg/updater/main.go +++ b/vertical-pod-autoscaler/pkg/updater/main.go @@ -18,6 +18,7 @@ package main import ( "flag" + "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/limitrange" "time" "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/target" @@ -71,8 +72,14 @@ func main() { target.NewVpaTargetSelectorFetcher(config, kubeClient, factory), target.NewBeta1TargetSelectorFetcher(config), ) + var limitRangeCalculator limitrange.LimitRangeCalculator + limitRangeCalculator, err = limitrange.NewLimitsRangeCalculator(factory) + if err != nil { + klog.Errorf("Failed to create limitRangeCalculator, falling back to not checking limits. 
Error message: %s", err) + limitRangeCalculator = limitrange.NewNoopLimitsCalculator() + } // TODO: use SharedInformerFactory in updater - updater, err := updater.NewUpdater(kubeClient, vpaClient, *minReplicas, *evictionToleranceFraction, vpa_api_util.NewCappingRecommendationProcessor(), nil, targetSelectorFetcher) + updater, err := updater.NewUpdater(kubeClient, vpaClient, *minReplicas, *evictionToleranceFraction, vpa_api_util.NewCappingRecommendationProcessor(limitRangeCalculator), nil, targetSelectorFetcher) if err != nil { klog.Fatalf("Failed to create updater: %v", err) } diff --git a/vertical-pod-autoscaler/pkg/utils/limitrange/limit_range_calculator.go b/vertical-pod-autoscaler/pkg/utils/limitrange/limit_range_calculator.go new file mode 100644 index 000000000000..77dc394979cc --- /dev/null +++ b/vertical-pod-autoscaler/pkg/utils/limitrange/limit_range_calculator.go @@ -0,0 +1,137 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package limitrange + +import ( + "fmt" + + core "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/informers" + listers "k8s.io/client-go/listers/core/v1" +) + +// LimitRangeCalculator calculates limit range items that has the same effect as all limit range items present in the cluster. +type LimitRangeCalculator interface { + // GetContainerLimitRangeItem returns LimitRangeItem that describes limitation on container limits in the given namespace. + GetContainerLimitRangeItem(namespace string) (*core.LimitRangeItem, error) + // GetPodLimitRangeItem returns LimitRangeItem that describes limitation on pod limits in the given namespace. + GetPodLimitRangeItem(namespace string) (*core.LimitRangeItem, error) +} + +type noopLimitsRangeCalculator struct{} + +func (lc *noopLimitsRangeCalculator) GetContainerLimitRangeItem(namespace string) (*core.LimitRangeItem, error) { + return nil, nil +} + +func (lc *noopLimitsRangeCalculator) GetPodLimitRangeItem(namespace string) (*core.LimitRangeItem, error) { + return nil, nil +} + +type limitsChecker struct { + limitRangeLister listers.LimitRangeLister +} + +// NewLimitsRangeCalculator returns a limitsChecker or an error it encountered when attempting to create it. +func NewLimitsRangeCalculator(f informers.SharedInformerFactory) (*limitsChecker, error) { + if f == nil { + return nil, fmt.Errorf("NewLimitsRangeCalculator requires a SharedInformerFactory but got nil") + } + limitRangeLister := f.Core().V1().LimitRanges().Lister() + stopCh := make(chan struct{}) + f.Start(stopCh) + for _, ok := range f.WaitForCacheSync(stopCh) { + if !ok { + if !f.Core().V1().LimitRanges().Informer().HasSynced() { + return nil, fmt.Errorf("informer did not sync") + } + } + } + return &limitsChecker{limitRangeLister}, nil +} + +// NewNoopLimitsCalculator returns a limit calculator that instantly returns no limits. 
+func NewNoopLimitsCalculator() *noopLimitsRangeCalculator { + return &noopLimitsRangeCalculator{} +} + +func (lc *limitsChecker) GetContainerLimitRangeItem(namespace string) (*core.LimitRangeItem, error) { + return lc.getLimitRangeItem(namespace, core.LimitTypeContainer) +} + +func (lc *limitsChecker) GetPodLimitRangeItem(namespace string) (*core.LimitRangeItem, error) { + return lc.getLimitRangeItem(namespace, core.LimitTypePod) +} + +func (lc *limitsChecker) getLimitRangeItem(namespace string, limitType core.LimitType) (*core.LimitRangeItem, error) { + limitRanges, err := lc.limitRangeLister.LimitRanges(namespace).List(labels.Everything()) + if err != nil { + return nil, fmt.Errorf("error loading limit ranges: %s", err) + } + + updatedResult := func(result core.ResourceList, lrItem core.ResourceList, + resourceName core.ResourceName, picker func(q1, q2 resource.Quantity) resource.Quantity) core.ResourceList { + if lrItem == nil { + return result + } + if result == nil { + return lrItem.DeepCopy() + } + if lrResource, lrHas := lrItem[resourceName]; lrHas { + resultResource, resultHas := result[resourceName] + if !resultHas { + result[resourceName] = lrResource.DeepCopy() + } else { + result[resourceName] = picker(resultResource, lrResource) + } + } + return result + } + pickLowerMax := func(q1, q2 resource.Quantity) resource.Quantity { + if q1.Cmp(q2) < 0 { + return q1 + } + return q2 + } + chooseHigherMin := func(q1, q2 resource.Quantity) resource.Quantity { + if q1.Cmp(q2) > 0 { + return q1 + } + return q2 + } + + result := &core.LimitRangeItem{Type: limitType} + for _, lr := range limitRanges { + for _, lri := range lr.Spec.Limits { + if lri.Type == limitType && (lri.Max != nil || lri.Default != nil || lri.Min != nil) { + if lri.Default != nil { + result.Default = lri.Default + } + result.Max = updatedResult(result.Max, lri.Max, core.ResourceCPU, pickLowerMax) + result.Max = updatedResult(result.Max, lri.Max, core.ResourceMemory, pickLowerMax) + result.Min = updatedResult(result.Min, lri.Min, core.ResourceCPU, chooseHigherMin) + result.Min = updatedResult(result.Min, lri.Min, core.ResourceMemory, chooseHigherMin) + } + } + } + if result.Min != nil || result.Max != nil || result.Default != nil { + return result, nil + } + return nil, nil +} diff --git a/vertical-pod-autoscaler/pkg/utils/limitrange/limit_range_calculator_test.go b/vertical-pod-autoscaler/pkg/utils/limitrange/limit_range_calculator_test.go new file mode 100644 index 000000000000..9e06f5c99938 --- /dev/null +++ b/vertical-pod-autoscaler/pkg/utils/limitrange/limit_range_calculator_test.go @@ -0,0 +1,152 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package limitrange + +import ( + "testing" + + apiv1 "k8s.io/api/core/v1" + core "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/test" + "k8s.io/client-go/informers" + "k8s.io/client-go/kubernetes/fake" + + "github.com/stretchr/testify/assert" +) + +const testNamespace = "test-namespace" + +func TestNewNoopLimitsChecker(t *testing.T) { + nlc := NewNoopLimitsCalculator() + limitRange, err := nlc.GetContainerLimitRangeItem(testNamespace) + assert.NoError(t, err) + assert.Nil(t, limitRange) +} + +func TestNoLimitRange(t *testing.T) { + cs := fake.NewSimpleClientset() + factory := informers.NewSharedInformerFactory(cs, 0) + lc, err := NewLimitsRangeCalculator(factory) + + if assert.NoError(t, err) { + limitRange, err := lc.GetContainerLimitRangeItem(testNamespace) + assert.NoError(t, err) + assert.Nil(t, limitRange) + } +} + +func TestGetContainerLimitRangeItem(t *testing.T) { + baseContainerLimitRange := test.LimitRange().WithName("test-lr").WithNamespace(testNamespace).WithType(apiv1.LimitTypeContainer) + containerLimitRangeWithMax := baseContainerLimitRange.WithMax(test.Resources("2", "2")).Get() + containerLimitRangeWithDefault := baseContainerLimitRange.WithDefault(test.Resources("2", "2")).Get() + containerLimitRangeWithMin := baseContainerLimitRange.WithMin(test.Resources("2", "2")).Get() + testCases := []struct { + name string + limitRanges []runtime.Object + expectedErr error + expectedLimits *apiv1.LimitRangeItem + }{ + { + name: "no matching limit ranges", + limitRanges: []runtime.Object{ + test.LimitRange().WithName("different-namespace").WithNamespace("different").WithType(apiv1.LimitTypeContainer).WithMax(test.Resources("2", "2")).Get(), + test.LimitRange().WithName("different-type").WithNamespace(testNamespace).WithType(apiv1.LimitTypePersistentVolumeClaim).WithMax(test.Resources("2", "2")).Get(), + }, + expectedErr: nil, + expectedLimits: nil, + }, + { + name: "matching container limit range", + limitRanges: []runtime.Object{ + containerLimitRangeWithMax, + }, + expectedErr: nil, + expectedLimits: &containerLimitRangeWithMax.Spec.Limits[0], + }, + { + name: "with default value", + limitRanges: []runtime.Object{ + containerLimitRangeWithDefault, + }, + expectedErr: nil, + expectedLimits: &containerLimitRangeWithDefault.Spec.Limits[0], + }, + { + name: "respects min", + limitRanges: []runtime.Object{ + containerLimitRangeWithMin, + }, + expectedErr: nil, + expectedLimits: &containerLimitRangeWithMin.Spec.Limits[0], + }, + { + name: "multiple items", + limitRanges: []runtime.Object{ + baseContainerLimitRange.WithMax(test.Resources("2", "2")).WithDefault(test.Resources("1.5", "1.5")). 
+ WithMin(test.Resources("1", "1")).Get(), + }, + expectedErr: nil, + expectedLimits: &core.LimitRangeItem{ + Type: core.LimitTypeContainer, + Min: test.Resources("1", "1"), + Max: test.Resources("2", "2"), + Default: test.Resources("1.5", "1.5"), + }, + }, + { + name: "takes lowest max", + limitRanges: []runtime.Object{ + baseContainerLimitRange.WithMax(test.Resources("1.5", "1.5")).WithMax(test.Resources("2.", "2.")).Get(), + }, + expectedErr: nil, + expectedLimits: &core.LimitRangeItem{ + Type: core.LimitTypeContainer, + Max: test.Resources("1.5", "1.5"), + }, + }, + { + name: "takes highest min", + limitRanges: []runtime.Object{ + baseContainerLimitRange.WithMin(test.Resources("1.5", "1.5")).WithMin(test.Resources("1.", "1.")).Get(), + }, + expectedErr: nil, + expectedLimits: &core.LimitRangeItem{ + Type: core.LimitTypeContainer, + Min: test.Resources("1.5", "1.5"), + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + cs := fake.NewSimpleClientset(tc.limitRanges...) + factory := informers.NewSharedInformerFactory(cs, 0) + lc, err := NewLimitsRangeCalculator(factory) + if assert.NoError(t, err) { + limitRange, err := lc.GetContainerLimitRangeItem(testNamespace) + if tc.expectedErr == nil { + assert.NoError(t, err) + } else { + assert.Error(t, err) + } + assert.Equal(t, tc.expectedLimits, limitRange) + } + }) + + } +} diff --git a/vertical-pod-autoscaler/pkg/utils/test/test_limit_range.go b/vertical-pod-autoscaler/pkg/utils/test/test_limit_range.go new file mode 100644 index 000000000000..52cbc0e271dc --- /dev/null +++ b/vertical-pod-autoscaler/pkg/utils/test/test_limit_range.go @@ -0,0 +1,105 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package test + +import ( + core "k8s.io/api/core/v1" + meta "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// LimitRange returns an object that helps build a LimitRangeItem object for tests. 
+func LimitRange() *limitRangeBuilder { + return &limitRangeBuilder{} +} + +type limitRangeBuilder struct { + namespace string + name string + rangeType core.LimitType + defaultValues []*core.ResourceList + maxValues []*core.ResourceList + minValues []*core.ResourceList +} + +func (lrb *limitRangeBuilder) WithName(name string) *limitRangeBuilder { + result := *lrb + result.name = name + return &result +} + +func (lrb *limitRangeBuilder) WithNamespace(namespace string) *limitRangeBuilder { + result := *lrb + result.namespace = namespace + return &result +} + +func (lrb *limitRangeBuilder) WithType(rangeType core.LimitType) *limitRangeBuilder { + result := *lrb + result.rangeType = rangeType + return &result +} + +func (lrb *limitRangeBuilder) WithDefault(defaultValues core.ResourceList) *limitRangeBuilder { + result := *lrb + result.defaultValues = append(result.defaultValues, &defaultValues) + return &result +} + +func (lrb *limitRangeBuilder) WithMax(max core.ResourceList) *limitRangeBuilder { + result := *lrb + result.maxValues = append(result.maxValues, &max) + return &result +} + +func (lrb *limitRangeBuilder) WithMin(min core.ResourceList) *limitRangeBuilder { + result := *lrb + result.minValues = append(result.minValues, &min) + return &result +} + +func (lrb *limitRangeBuilder) Get() *core.LimitRange { + result := core.LimitRange{ + ObjectMeta: meta.ObjectMeta{ + Namespace: lrb.namespace, + Name: lrb.name, + }, + } + if len(lrb.defaultValues) > 0 || len(lrb.maxValues) > 0 || len(lrb.minValues) > 0 { + result.Spec = core.LimitRangeSpec{ + Limits: []core.LimitRangeItem{}, + } + } + for _, v := range lrb.defaultValues { + result.Spec.Limits = append(result.Spec.Limits, core.LimitRangeItem{ + Type: lrb.rangeType, + Default: *v, + }) + } + for _, v := range lrb.maxValues { + result.Spec.Limits = append(result.Spec.Limits, core.LimitRangeItem{ + Type: lrb.rangeType, + Max: *v, + }) + } + for _, v := range lrb.minValues { + result.Spec.Limits = append(result.Spec.Limits, core.LimitRangeItem{ + Type: lrb.rangeType, + Min: *v, + }) + } + return &result +} diff --git a/vertical-pod-autoscaler/pkg/utils/vpa/capping.go b/vertical-pod-autoscaler/pkg/utils/vpa/capping.go index 0703e90123d3..2744156a2edd 100644 --- a/vertical-pod-autoscaler/pkg/utils/vpa/capping.go +++ b/vertical-pod-autoscaler/pkg/utils/vpa/capping.go @@ -22,28 +22,32 @@ import ( apiv1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta2" - + "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/limitrange" "k8s.io/klog" ) // NewCappingRecommendationProcessor constructs new RecommendationsProcessor that adjusts recommendation // for given pod to obey VPA resources policy and container limits -func NewCappingRecommendationProcessor() RecommendationProcessor { - return &cappingRecommendationProcessor{} +func NewCappingRecommendationProcessor(limitsRangeCalculator limitrange.LimitRangeCalculator) RecommendationProcessor { + return &cappingRecommendationProcessor{limitsRangeCalculator: limitsRangeCalculator} } type cappingAction string var ( - cappedToMinAllowed cappingAction = "capped to minAllowed" - cappedToMaxAllowed cappingAction = "capped to maxAllowed" + cappedToMinAllowed cappingAction = "capped to minAllowed" + cappedToMaxAllowed cappingAction = "capped to maxAllowed" + cappedProportionallyToMaxLimit cappingAction = "capped to fit Max in container LimitRange" + cappedProportionallyToMinLimit cappingAction = "capped to fit Min in 
container LimitRange" ) func toCappingAnnotation(resourceName apiv1.ResourceName, action cappingAction) string { return fmt.Sprintf("%s %s", resourceName, action) } -type cappingRecommendationProcessor struct{} +type cappingRecommendationProcessor struct { + limitsRangeCalculator limitrange.LimitRangeCalculator +} // Apply returns a recommendation for the given pod, adjusted to obey policy and limits. func (c *cappingRecommendationProcessor) Apply( @@ -51,6 +55,7 @@ func (c *cappingRecommendationProcessor) Apply( policy *vpa_types.PodResourcePolicy, conditions []vpa_types.VerticalPodAutoscalerCondition, pod *apiv1.Pod) (*vpa_types.RecommendedPodResources, ContainerToAnnotationsMap, error) { + // TODO: Annotate if request enforced by maintaining proportion with limit and allowed limit range is in conflict with policy. if podRecommendation == nil && policy == nil { // If there is no recommendation and no policies have been defined then no recommendation can be computed. @@ -62,15 +67,24 @@ func (c *cappingRecommendationProcessor) Apply( } updatedRecommendations := []vpa_types.RecommendedContainerResources{} containerToAnnotationsMap := ContainerToAnnotationsMap{} - for _, containerRecommendation := range podRecommendation.ContainerRecommendations { + limitAdjustedRecommendation, err := c.capProportionallyToPodLimitRange(podRecommendation.ContainerRecommendations, pod) + if err != nil { + return nil, nil, err + } + for _, containerRecommendation := range limitAdjustedRecommendation { container := getContainer(containerRecommendation.ContainerName, pod) if container == nil { klog.V(2).Infof("no matching Container found for recommendation %s", containerRecommendation.ContainerName) continue } + + containerLimitRange, err := c.limitsRangeCalculator.GetContainerLimitRangeItem(pod.Namespace) + if err != nil { + klog.Warningf("failed to fetch LimitRange for %v namespace", pod.Namespace) + } updatedContainerResources, containerAnnotations, err := getCappedRecommendationForContainer( - *container, &containerRecommendation, policy) + *container, &containerRecommendation, policy, containerLimitRange) if len(containerAnnotations) != 0 { containerToAnnotationsMap[containerRecommendation.ContainerName] = containerAnnotations @@ -88,7 +102,7 @@ func (c *cappingRecommendationProcessor) Apply( func getCappedRecommendationForContainer( container apiv1.Container, containerRecommendation *vpa_types.RecommendedContainerResources, - policy *vpa_types.PodResourcePolicy) (*vpa_types.RecommendedContainerResources, []string, error) { + policy *vpa_types.PodResourcePolicy, limitRange *apiv1.LimitRangeItem) (*vpa_types.RecommendedContainerResources, []string, error) { if containerRecommendation == nil { return nil, nil, fmt.Errorf("no recommendation available for container name %v", container.Name) } @@ -100,8 +114,11 @@ func getCappedRecommendationForContainer( cappingAnnotations := make([]string, 0) process := func(recommendation apiv1.ResourceList, genAnnotations bool) { + // TODO: Add anotation if limitRange is conflicting with VPA policy. + limitAnnotations := applyContainerLimitRange(recommendation, container, limitRange) annotations := applyVPAPolicy(recommendation, containerPolicy) if genAnnotations { + cappingAnnotations = append(cappingAnnotations, limitAnnotations...) cappingAnnotations = append(cappingAnnotations, annotations...) 
} } @@ -120,12 +137,12 @@ func applyVPAPolicy(recommendation apiv1.ResourceList, policy *vpa_types.Contain } annotations := make([]string, 0) for resourceName, recommended := range recommendation { - cappedToMin, isCapped := maybeCapToMin(recommended, resourceName, policy) + cappedToMin, isCapped := maybeCapToPolicyMin(recommended, resourceName, policy) recommendation[resourceName] = cappedToMin if isCapped { annotations = append(annotations, toCappingAnnotation(resourceName, cappedToMinAllowed)) } - cappedToMax, isCapped := maybeCapToMax(cappedToMin, resourceName, policy) + cappedToMax, isCapped := maybeCapToPolicyMax(cappedToMin, resourceName, policy) recommendation[resourceName] = cappedToMax if isCapped { annotations = append(annotations, toCappingAnnotation(resourceName, cappedToMaxAllowed)) @@ -149,9 +166,9 @@ func applyVPAPolicyForContainer(containerName string, process := func(recommendation apiv1.ResourceList) { for resourceName, recommended := range recommendation { - cappedToMin, _ := maybeCapToMin(recommended, resourceName, containerPolicy) + cappedToMin, _ := maybeCapToPolicyMin(recommended, resourceName, containerPolicy) recommendation[resourceName] = cappedToMin - cappedToMax, _ := maybeCapToMax(cappedToMin, resourceName, containerPolicy) + cappedToMax, _ := maybeCapToPolicyMax(cappedToMin, resourceName, containerPolicy) recommendation[resourceName] = cappedToMax } } @@ -163,20 +180,30 @@ func applyVPAPolicyForContainer(containerName string, return cappedRecommendations, nil } -func maybeCapToMin(recommended resource.Quantity, resourceName apiv1.ResourceName, +func maybeCapToPolicyMin(recommended resource.Quantity, resourceName apiv1.ResourceName, containerPolicy *vpa_types.ContainerResourcePolicy) (resource.Quantity, bool) { - min, found := containerPolicy.MinAllowed[resourceName] - if found && !min.IsZero() && recommended.Cmp(min) < 0 { - return min, true + return maybeCapToMin(recommended, resourceName, containerPolicy.MinAllowed) +} + +func maybeCapToPolicyMax(recommended resource.Quantity, resourceName apiv1.ResourceName, + containerPolicy *vpa_types.ContainerResourcePolicy) (resource.Quantity, bool) { + return maybeCapToMax(recommended, resourceName, containerPolicy.MaxAllowed) +} + +func maybeCapToMax(recommended resource.Quantity, resourceName apiv1.ResourceName, + max apiv1.ResourceList) (resource.Quantity, bool) { + maxResource, found := max[resourceName] + if found && !maxResource.IsZero() && recommended.Cmp(maxResource) > 0 { + return maxResource, true } return recommended, false } -func maybeCapToMax(recommended resource.Quantity, resourceName apiv1.ResourceName, - containerPolicy *vpa_types.ContainerResourcePolicy) (resource.Quantity, bool) { - max, found := containerPolicy.MaxAllowed[resourceName] - if found && !max.IsZero() && recommended.Cmp(max) > 0 { - return max, true +func maybeCapToMin(recommended resource.Quantity, resourceName apiv1.ResourceName, + min apiv1.ResourceList) (resource.Quantity, bool) { + minResource, found := min[resourceName] + if found && !minResource.IsZero() && recommended.Cmp(minResource) < 0 { + return minResource, true } return recommended, false } @@ -225,3 +252,146 @@ func getContainer(containerName string, pod *apiv1.Pod) *apiv1.Container { } return nil } + +// applyContainerLimitRange updates recommendation if recommended resources are outside of limits defined in VPA resources policy +func applyContainerLimitRange(recommendation apiv1.ResourceList, container apiv1.Container, limitRange *apiv1.LimitRangeItem) []string { + 
annotations := make([]string, 0) + if limitRange == nil { + return annotations + } + maxAllowedRecommendation := getMaxAllowedRecommendation(recommendation, container, limitRange) + minAllowedRecommendation := getMinAllowedRecommendation(recommendation, container, limitRange) + for resourceName, recommended := range recommendation { + cappedToMin, isCapped := maybeCapToMin(recommended, resourceName, minAllowedRecommendation) + recommendation[resourceName] = cappedToMin + if isCapped { + annotations = append(annotations, toCappingAnnotation(resourceName, cappedProportionallyToMinLimit)) + } + cappedToMax, isCapped := maybeCapToMax(cappedToMin, resourceName, maxAllowedRecommendation) + recommendation[resourceName] = cappedToMax + if isCapped { + annotations = append(annotations, toCappingAnnotation(resourceName, cappedProportionallyToMaxLimit)) + } + } + return annotations +} + +func getMaxAllowedRecommendation(recommendation apiv1.ResourceList, container apiv1.Container, + podLimitRange *apiv1.LimitRangeItem) apiv1.ResourceList { + if podLimitRange == nil { + return apiv1.ResourceList{} + } + return getBoundaryRecommendation(recommendation, container, podLimitRange.Max, podLimitRange.Default) +} + +func getMinAllowedRecommendation(recommendation apiv1.ResourceList, container apiv1.Container, + podLimitRange *apiv1.LimitRangeItem) apiv1.ResourceList { + // Both limit and request must be higher than min set in the limit range: + // https://github.com/kubernetes/kubernetes/blob/016e9d5c06089774c6286fd825302cbae661a446/plugin/pkg/admission/limitranger/admission.go#L303 + if podLimitRange == nil { + return apiv1.ResourceList{} + } + minForLimit := getBoundaryRecommendation(recommendation, container, podLimitRange.Min, podLimitRange.Default) + minForRequest := podLimitRange.Min + if minForRequest == nil { + return minForLimit + } + result := minForLimit + if minForRequest.Cpu() != nil && minForRequest.Cpu().Cmp(*minForLimit.Cpu()) > 0 { + result[apiv1.ResourceCPU] = *minForRequest.Cpu() + } + if minForRequest.Memory() != nil && minForRequest.Memory().Cmp(*minForLimit.Memory()) > 0 { + result[apiv1.ResourceMemory] = *minForRequest.Memory() + } + return result +} + +func getBoundaryRecommendation(recommendation apiv1.ResourceList, container apiv1.Container, + boundaryLimit, defaultLimit apiv1.ResourceList) apiv1.ResourceList { + if boundaryLimit == nil { + return apiv1.ResourceList{} + } + cpuMaxRequest := GetBoundaryRequest(container.Resources.Requests.Cpu(), container.Resources.Limits.Cpu(), boundaryLimit.Cpu(), defaultLimit.Cpu()) + memMaxRequest := GetBoundaryRequest(container.Resources.Requests.Memory(), container.Resources.Limits.Memory(), boundaryLimit.Memory(), defaultLimit.Memory()) + return apiv1.ResourceList{ + apiv1.ResourceCPU: *cpuMaxRequest, + apiv1.ResourceMemory: *memMaxRequest, + } +} + +func applyPodLimitRange(resources []vpa_types.RecommendedContainerResources, + pod *apiv1.Pod, limitRange apiv1.LimitRangeItem, resourceName apiv1.ResourceName, + fieldGetter func(vpa_types.RecommendedContainerResources) *apiv1.ResourceList) []vpa_types.RecommendedContainerResources { + minLimit := limitRange.Min[resourceName] + maxLimit := limitRange.Max[resourceName] + defaultLimit := limitRange.Default[resourceName] + + var sumLimit, sumRecommendation resource.Quantity + for i, container := range pod.Spec.Containers { + if i >= len(resources) { + continue + } + limit := container.Resources.Limits[resourceName] + request := container.Resources.Requests[resourceName] + recommendation := 
(*fieldGetter(resources[i]))[resourceName] + containerLimit, _ := getProportionalResourceLimit(resourceName, &limit, &request, &recommendation, &defaultLimit) + if containerLimit != nil { + sumLimit.Add(*containerLimit) + } + sumRecommendation.Add(recommendation) + } + if minLimit.Cmp(sumLimit) <= 0 && minLimit.Cmp(sumRecommendation) <= 0 && (maxLimit.IsZero() || maxLimit.Cmp(sumLimit) >= 0) { + return resources + } + + if minLimit.Cmp(sumRecommendation) > 0 && !sumLimit.IsZero() { + for i := range pod.Spec.Containers { + request := (*fieldGetter(resources[i]))[resourceName] + cappedContainerRequest, _ := scaleQuantityProportionally(&request, &sumRecommendation, &minLimit) + (*fieldGetter(resources[i]))[resourceName] = *cappedContainerRequest + } + return resources + } + + if sumLimit.IsZero() { + return resources + } + + var targetTotalLimit resource.Quantity + if minLimit.Cmp(sumLimit) > 0 { + targetTotalLimit = minLimit + } + if !maxLimit.IsZero() && maxLimit.Cmp(sumLimit) < 0 { + targetTotalLimit = maxLimit + } + for i := range pod.Spec.Containers { + limit := (*fieldGetter(resources[i]))[resourceName] + cappedContainerRequest, _ := scaleQuantityProportionally(&limit, &sumLimit, &targetTotalLimit) + (*fieldGetter(resources[i]))[resourceName] = *cappedContainerRequest + } + return resources +} + +func (c *cappingRecommendationProcessor) capProportionallyToPodLimitRange( + containerRecommendations []vpa_types.RecommendedContainerResources, pod *apiv1.Pod) ([]vpa_types.RecommendedContainerResources, error) { + podLimitRange, err := c.limitsRangeCalculator.GetPodLimitRangeItem(pod.Namespace) + if err != nil { + return nil, fmt.Errorf("error obtaining limit range: %s", err) + } + if podLimitRange == nil { + return containerRecommendations, nil + } + getTarget := func(rl vpa_types.RecommendedContainerResources) *apiv1.ResourceList { return &rl.Target } + getUpper := func(rl vpa_types.RecommendedContainerResources) *apiv1.ResourceList { return &rl.UpperBound } + getLower := func(rl vpa_types.RecommendedContainerResources) *apiv1.ResourceList { return &rl.LowerBound } + + containerRecommendations = applyPodLimitRange(containerRecommendations, pod, *podLimitRange, apiv1.ResourceCPU, getUpper) + containerRecommendations = applyPodLimitRange(containerRecommendations, pod, *podLimitRange, apiv1.ResourceMemory, getUpper) + + containerRecommendations = applyPodLimitRange(containerRecommendations, pod, *podLimitRange, apiv1.ResourceCPU, getTarget) + containerRecommendations = applyPodLimitRange(containerRecommendations, pod, *podLimitRange, apiv1.ResourceMemory, getTarget) + + containerRecommendations = applyPodLimitRange(containerRecommendations, pod, *podLimitRange, apiv1.ResourceCPU, getLower) + containerRecommendations = applyPodLimitRange(containerRecommendations, pod, *podLimitRange, apiv1.ResourceMemory, getLower) + return containerRecommendations, nil +} diff --git a/vertical-pod-autoscaler/pkg/utils/vpa/capping_test.go b/vertical-pod-autoscaler/pkg/utils/vpa/capping_test.go index ad80dff1f5dc..3ae3258f876a 100644 --- a/vertical-pod-autoscaler/pkg/utils/vpa/capping_test.go +++ b/vertical-pod-autoscaler/pkg/utils/vpa/capping_test.go @@ -41,7 +41,7 @@ func TestRecommendationNotAvailable(t *testing.T) { } policy := vpa_types.PodResourcePolicy{} - res, annotations, err := NewCappingRecommendationProcessor().Apply(&podRecommendation, &policy, nil, pod) + res, annotations, err := NewCappingRecommendationProcessor(&fakeLimitRangeCalculator{}).Apply(&podRecommendation, &policy, nil, pod) 
assert.Nil(t, err) assert.Empty(t, annotations) assert.Empty(t, res.ContainerRecommendations) @@ -84,7 +84,7 @@ func TestRecommendationCappedToMinMaxPolicy(t *testing.T) { }, } - res, annotations, err := NewCappingRecommendationProcessor().Apply(&podRecommendation, &policy, nil, pod) + res, annotations, err := NewCappingRecommendationProcessor(&fakeLimitRangeCalculator{}).Apply(&podRecommendation, &policy, nil, pod) assert.Nil(t, err) assert.Equal(t, apiv1.ResourceList{ apiv1.ResourceCPU: *resource.NewScaledQuantity(40, 1), @@ -146,7 +146,7 @@ func TestApply(t *testing.T) { pod := test.Pod().WithName("pod1").AddContainer(test.BuildTestContainer("ctr-name", "", "")).Get() for _, testCase := range applyTestCases { - res, _, err := NewCappingRecommendationProcessor().Apply( + res, _, err := NewCappingRecommendationProcessor(&fakeLimitRangeCalculator{}).Apply( testCase.PodRecommendation, testCase.Policy, nil, pod) assert.Equal(t, testCase.ExpectedPodRecommendation, res) assert.Equal(t, testCase.ExpectedError, err) @@ -215,3 +215,410 @@ func TestApplyVpa(t *testing.T) { apiv1.ResourceMemory: *resource.NewScaledQuantity(4500, 1), }, res.ContainerRecommendations[0].UpperBound) } + +type fakeLimitRangeCalculator struct { + containerLimitRange apiv1.LimitRangeItem + podLimitRange apiv1.LimitRangeItem +} + +func (nlrc *fakeLimitRangeCalculator) GetContainerLimitRangeItem(namespace string) (*apiv1.LimitRangeItem, error) { + return &nlrc.containerLimitRange, nil +} + +func (nlrc *fakeLimitRangeCalculator) GetPodLimitRangeItem(namespace string) (*apiv1.LimitRangeItem, error) { + return &nlrc.podLimitRange, nil +} + +func TestApplyCapsToLimitRange(t *testing.T) { + limitRange := apiv1.LimitRangeItem{ + Type: apiv1.LimitTypeContainer, + Max: apiv1.ResourceList{ + apiv1.ResourceCPU: resource.MustParse("1"), + }, + Min: apiv1.ResourceList{ + apiv1.ResourceMemory: resource.MustParse("500M"), + }, + } + recommendation := vpa_types.RecommendedPodResources{ + ContainerRecommendations: []vpa_types.RecommendedContainerResources{ + { + ContainerName: "container", + Target: apiv1.ResourceList{ + apiv1.ResourceCPU: resource.MustParse("2"), + apiv1.ResourceMemory: resource.MustParse("200M"), + }, + }, + }, + } + pod := apiv1.Pod{ + Spec: apiv1.PodSpec{ + Containers: []apiv1.Container{ + { + Name: "container", + Resources: apiv1.ResourceRequirements{ + Requests: apiv1.ResourceList{ + apiv1.ResourceCPU: resource.MustParse("1"), + apiv1.ResourceMemory: resource.MustParse("1G"), + }, + Limits: apiv1.ResourceList{ + apiv1.ResourceCPU: resource.MustParse("1"), + apiv1.ResourceMemory: resource.MustParse("1G"), + }, + }, + }, + }, + }, + } + expectedRecommendation := vpa_types.RecommendedPodResources{ + ContainerRecommendations: []vpa_types.RecommendedContainerResources{ + { + ContainerName: "container", + Target: apiv1.ResourceList{ + apiv1.ResourceCPU: resource.MustParse("1000m"), + apiv1.ResourceMemory: resource.MustParse("500000000000m"), + }, + }, + }, + } + + calculator := fakeLimitRangeCalculator{containerLimitRange: limitRange} + processor := NewCappingRecommendationProcessor(&calculator) + processedRecommendation, annotations, err := processor.Apply(&recommendation, nil, nil, &pod) + assert.NoError(t, err) + assert.Contains(t, annotations, "container") + assert.ElementsMatch(t, []string{"cpu capped to fit Max in container LimitRange", "memory capped to fit Min in container LimitRange"}, annotations["container"]) + assert.Equal(t, expectedRecommendation, *processedRecommendation) +} + +func TestApplyPodLimitRange(t 
*testing.T) { + tests := []struct { + name string + resources []vpa_types.RecommendedContainerResources + pod apiv1.Pod + limitRange apiv1.LimitRangeItem + resourceName apiv1.ResourceName + expect []vpa_types.RecommendedContainerResources + }{ + { + name: "cap target cpu to max", + resources: []vpa_types.RecommendedContainerResources{ + { + ContainerName: "container1", + Target: apiv1.ResourceList{ + apiv1.ResourceCPU: resource.MustParse("1"), + }, + }, + { + ContainerName: "container2", + Target: apiv1.ResourceList{ + apiv1.ResourceCPU: resource.MustParse("1"), + }, + }, + }, + pod: apiv1.Pod{ + Spec: apiv1.PodSpec{ + Containers: []apiv1.Container{ + { + Resources: apiv1.ResourceRequirements{ + Requests: apiv1.ResourceList{ + apiv1.ResourceCPU: resource.MustParse("1"), + }, + Limits: apiv1.ResourceList{ + apiv1.ResourceCPU: resource.MustParse("1"), + }, + }, + }, + { + Resources: apiv1.ResourceRequirements{ + Requests: apiv1.ResourceList{ + apiv1.ResourceCPU: resource.MustParse("1"), + }, + Limits: apiv1.ResourceList{ + apiv1.ResourceCPU: resource.MustParse("1"), + }, + }, + }, + }, + }, + }, + limitRange: apiv1.LimitRangeItem{ + Max: apiv1.ResourceList{ + apiv1.ResourceCPU: resource.MustParse("1"), + }, + }, + resourceName: apiv1.ResourceCPU, + expect: []vpa_types.RecommendedContainerResources{ + { + ContainerName: "container1", + Target: apiv1.ResourceList{ + apiv1.ResourceCPU: *resource.NewMilliQuantity(500, resource.DecimalSI), + }, + }, + { + ContainerName: "container2", + Target: apiv1.ResourceList{ + apiv1.ResourceCPU: *resource.NewMilliQuantity(500, resource.DecimalSI), + }, + }, + }, + }, + { + name: "cap cpu to max", + resources: []vpa_types.RecommendedContainerResources{ + { + ContainerName: "container1", + Target: apiv1.ResourceList{ + apiv1.ResourceCPU: resource.MustParse("1"), + }, + }, + { + ContainerName: "container2", + Target: apiv1.ResourceList{ + apiv1.ResourceCPU: resource.MustParse("1"), + }, + }, + }, + pod: apiv1.Pod{ + Spec: apiv1.PodSpec{ + Containers: []apiv1.Container{ + { + Resources: apiv1.ResourceRequirements{ + Requests: apiv1.ResourceList{ + apiv1.ResourceCPU: resource.MustParse("1"), + }, + Limits: apiv1.ResourceList{ + apiv1.ResourceCPU: resource.MustParse("1"), + }, + }, + }, + { + Resources: apiv1.ResourceRequirements{ + Requests: apiv1.ResourceList{ + apiv1.ResourceCPU: resource.MustParse("1"), + }, + Limits: apiv1.ResourceList{ + apiv1.ResourceCPU: resource.MustParse("1"), + }, + }, + }, + }, + }, + }, + limitRange: apiv1.LimitRangeItem{ + Max: apiv1.ResourceList{ + apiv1.ResourceCPU: resource.MustParse("1"), + }, + }, + resourceName: apiv1.ResourceCPU, + expect: []vpa_types.RecommendedContainerResources{ + { + ContainerName: "container1", + Target: apiv1.ResourceList{ + apiv1.ResourceCPU: *resource.NewMilliQuantity(500, resource.DecimalSI), + }, + }, + { + ContainerName: "container2", + Target: apiv1.ResourceList{ + apiv1.ResourceCPU: *resource.NewMilliQuantity(500, resource.DecimalSI), + }, + }, + }, + }, + { + name: "cap mem to min", + resources: []vpa_types.RecommendedContainerResources{ + { + ContainerName: "container1", + Target: apiv1.ResourceList{ + apiv1.ResourceMemory: resource.MustParse("1G"), + }, + }, + { + ContainerName: "container2", + Target: apiv1.ResourceList{ + apiv1.ResourceMemory: resource.MustParse("1G"), + }, + }, + }, + pod: apiv1.Pod{ + Spec: apiv1.PodSpec{ + Containers: []apiv1.Container{ + { + Resources: apiv1.ResourceRequirements{ + Requests: apiv1.ResourceList{ + apiv1.ResourceMemory: resource.MustParse("1"), + }, + 
Limits: apiv1.ResourceList{ + apiv1.ResourceMemory: resource.MustParse("1"), + }, + }, + }, + { + Resources: apiv1.ResourceRequirements{ + Requests: apiv1.ResourceList{ + apiv1.ResourceMemory: resource.MustParse("1"), + }, + Limits: apiv1.ResourceList{ + apiv1.ResourceMemory: resource.MustParse("1"), + }, + }, + }, + }, + }, + }, + limitRange: apiv1.LimitRangeItem{ + Min: apiv1.ResourceList{ + apiv1.ResourceMemory: resource.MustParse("4G"), + }, + }, + resourceName: apiv1.ResourceMemory, + expect: []vpa_types.RecommendedContainerResources{ + { + ContainerName: "container1", + Target: apiv1.ResourceList{ + apiv1.ResourceMemory: resource.MustParse("2000000000000m"), + }, + }, + { + ContainerName: "container2", + Target: apiv1.ResourceList{ + apiv1.ResourceMemory: resource.MustParse("2000000000000m"), + }, + }, + }, + }, + { + name: "cap mem request to pod min", + resources: []vpa_types.RecommendedContainerResources{ + { + ContainerName: "container1", + Target: apiv1.ResourceList{ + apiv1.ResourceMemory: resource.MustParse("1G"), + }, + }, + { + ContainerName: "container2", + Target: apiv1.ResourceList{ + apiv1.ResourceMemory: resource.MustParse("1G"), + }, + }, + }, + pod: apiv1.Pod{ + Spec: apiv1.PodSpec{ + Containers: []apiv1.Container{ + { + Resources: apiv1.ResourceRequirements{ + Requests: apiv1.ResourceList{ + apiv1.ResourceMemory: resource.MustParse("1"), + }, + Limits: apiv1.ResourceList{ + apiv1.ResourceMemory: resource.MustParse("2"), + }, + }, + }, + { + Resources: apiv1.ResourceRequirements{ + Requests: apiv1.ResourceList{ + apiv1.ResourceMemory: resource.MustParse("1"), + }, + Limits: apiv1.ResourceList{ + apiv1.ResourceMemory: resource.MustParse("2"), + }, + }, + }, + }, + }, + }, + limitRange: apiv1.LimitRangeItem{ + Type: apiv1.LimitTypePod, + Max: apiv1.ResourceList{ + apiv1.ResourceMemory: resource.MustParse("10G"), + }, + Min: apiv1.ResourceList{ + apiv1.ResourceMemory: resource.MustParse("4G"), + }, + }, + resourceName: apiv1.ResourceMemory, + expect: []vpa_types.RecommendedContainerResources{ + { + ContainerName: "container1", + Target: apiv1.ResourceList{ + apiv1.ResourceMemory: resource.MustParse("2000000000000m"), + }, + }, + { + ContainerName: "container2", + Target: apiv1.ResourceList{ + apiv1.ResourceMemory: resource.MustParse("2000000000000m"), + }, + }, + }, + }, + } + getTarget := func(rl vpa_types.RecommendedContainerResources) *apiv1.ResourceList { return &rl.Target } + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + got := applyPodLimitRange(tc.resources, &tc.pod, tc.limitRange, tc.resourceName, getTarget) + assert.Equal(t, tc.expect, got) + }) + } +} + +func TestApplyLimitRangeMinToRequest(t *testing.T) { + limitRange := apiv1.LimitRangeItem{ + Type: apiv1.LimitTypeContainer, + Min: apiv1.ResourceList{ + apiv1.ResourceMemory: resource.MustParse("500M"), + }, + } + recommendation := vpa_types.RecommendedPodResources{ + ContainerRecommendations: []vpa_types.RecommendedContainerResources{ + { + ContainerName: "container", + Target: apiv1.ResourceList{ + apiv1.ResourceCPU: resource.MustParse("1"), + apiv1.ResourceMemory: resource.MustParse("200M"), + }, + }, + }, + } + pod := apiv1.Pod{ + Spec: apiv1.PodSpec{ + Containers: []apiv1.Container{ + { + Name: "container", + Resources: apiv1.ResourceRequirements{ + Requests: apiv1.ResourceList{ + apiv1.ResourceCPU: resource.MustParse("1"), + apiv1.ResourceMemory: resource.MustParse("50M"), + }, + Limits: apiv1.ResourceList{ + apiv1.ResourceCPU: resource.MustParse("1"), + apiv1.ResourceMemory: 
resource.MustParse("100M"), + }, + }, + }, + }, + }, + } + expectedRecommendation := vpa_types.RecommendedPodResources{ + ContainerRecommendations: []vpa_types.RecommendedContainerResources{ + { + ContainerName: "container", + Target: apiv1.ResourceList{ + apiv1.ResourceCPU: resource.MustParse("1"), + apiv1.ResourceMemory: resource.MustParse("500M"), + }, + }, + }, + } + + calculator := fakeLimitRangeCalculator{containerLimitRange: limitRange} + processor := NewCappingRecommendationProcessor(&calculator) + processedRecommendation, annotations, err := processor.Apply(&recommendation, nil, nil, &pod) + assert.NoError(t, err) + assert.Contains(t, annotations, "container") + assert.ElementsMatch(t, []string{"memory capped to fit Min in container LimitRange"}, annotations["container"]) + assert.Equal(t, expectedRecommendation, *processedRecommendation) +} diff --git a/vertical-pod-autoscaler/pkg/utils/vpa/limit_and_request_scaling.go b/vertical-pod-autoscaler/pkg/utils/vpa/limit_and_request_scaling.go new file mode 100644 index 000000000000..bbd9cf2c9851 --- /dev/null +++ b/vertical-pod-autoscaler/pkg/utils/vpa/limit_and_request_scaling.go @@ -0,0 +1,123 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package api + +import ( + "fmt" + "math" + "math/big" + + core "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" +) + +// ContainerResources holds resources request for container +type ContainerResources struct { + Limits core.ResourceList + Requests core.ResourceList +} + +func newContainerResources() ContainerResources { + return ContainerResources{ + Requests: core.ResourceList{}, + Limits: core.ResourceList{}, + } +} + +// GetProportionalLimit returns limit that will be in the same proportion to recommended request as original limit had to original request. 
+func GetProportionalLimit(originalLimit, originalRequest, recommendation, defaultLimit core.ResourceList) (core.ResourceList, []string) { + annotations := []string{} + cpuLimit, annotation := getProportionalResourceLimit(core.ResourceCPU, originalLimit.Cpu(), originalRequest.Cpu(), recommendation.Cpu(), defaultLimit.Cpu()) + if annotation != "" { + annotations = append(annotations, annotation) + } + memLimit, annotation := getProportionalResourceLimit(core.ResourceMemory, originalLimit.Memory(), originalRequest.Memory(), recommendation.Memory(), defaultLimit.Memory()) + if annotation != "" { + annotations = append(annotations, annotation) + } + if memLimit == nil && cpuLimit == nil { + return nil, []string{} + } + result := core.ResourceList{} + if cpuLimit != nil { + result[core.ResourceCPU] = *cpuLimit + } + if memLimit != nil { + result[core.ResourceMemory] = *memLimit + } + return result, annotations +} + +func getProportionalResourceLimit(resourceName core.ResourceName, originalLimit, originalRequest, recommendedRequest, defaultLimit *resource.Quantity) (*resource.Quantity, string) { + if originalLimit == nil || originalLimit.Value() == 0 && defaultLimit != nil { + originalLimit = defaultLimit + } + // originalLimit not set, don't set limit. + if originalLimit == nil || originalLimit.Value() == 0 { + return nil, "" + } + // originalLimit set but originalRequest not set - K8s will treat the pod as if they were equal, + // recommend limit equal to request + if originalRequest == nil || originalRequest.Value() == 0 { + result := *recommendedRequest + return &result, "" + } + // originalLimit and originalRequest are set. If they are equal recommend limit equal to request. + if originalRequest.MilliValue() == originalLimit.MilliValue() { + result := *recommendedRequest + return &result, "" + } + result, capped := scaleQuantityProportionally( /*scaledQuantity=*/ originalLimit /*scaleBase=*/, originalRequest /*scaleResult=*/, recommendedRequest) + if !capped { + return result, "" + } + return result, fmt.Sprintf( + "%v: failed to keep limit to request ratio; capping limit to int64", resourceName) +} + +// GetBoundaryRequest returns the boundary (min/max) request that can be specified with +// preserving the original limit to request ratio. Returns nil if no boundary exists +func GetBoundaryRequest(originalRequest, originalLimit, boundaryLimit, defaultLimit *resource.Quantity) *resource.Quantity { + if originalLimit == nil || originalLimit.Value() == 0 && defaultLimit != nil { + originalLimit = defaultLimit + } + // originalLimit not set, no boundary + if originalLimit == nil || originalLimit.Value() == 0 { + return nil + } + // originalLimit set but originalRequest not set - K8s will treat the pod as if they were equal + if originalRequest == nil || originalRequest.Value() == 0 { + return boundaryLimit + } + result, _ := scaleQuantityProportionally(originalRequest /* scaledQuantity */, originalLimit /*scaleBase*/, boundaryLimit /*scaleResult*/) + return result +} + +// scaleQuantityProportionally returns value which has the same proportion to scaledQuantity as scaleResult has to scaleBase +// It also returns a bool indicating if it had to cap result to MaxInt64 milliunits. 
+func scaleQuantityProportionally(scaledQuantity, scaleBase, scaleResult *resource.Quantity) (*resource.Quantity, bool) { + originalMilli := big.NewInt(scaledQuantity.MilliValue()) + scaleBaseMilli := big.NewInt(scaleBase.MilliValue()) + scaleResultMilli := big.NewInt(scaleResult.MilliValue()) + var scaledOriginal big.Int + scaledOriginal.Mul(originalMilli, scaleResultMilli) + scaledOriginal.Div(&scaledOriginal, scaleBaseMilli) + if scaledOriginal.IsInt64() { + return resource.NewMilliQuantity(scaledOriginal.Int64(), scaledQuantity.Format), false + } + return resource.NewMilliQuantity(math.MaxInt64, scaledQuantity.Format), true +} diff --git a/vertical-pod-autoscaler/pkg/utils/vpa/limit_and_request_scaling_test.go b/vertical-pod-autoscaler/pkg/utils/vpa/limit_and_request_scaling_test.go new file mode 100644 index 000000000000..c334279c6a4a --- /dev/null +++ b/vertical-pod-autoscaler/pkg/utils/vpa/limit_and_request_scaling_test.go @@ -0,0 +1,99 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package api + +import ( + "math" + "testing" + + core "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + + "github.com/stretchr/testify/assert" +) + +func mustParseToPointer(str string) *resource.Quantity { + val := resource.MustParse(str) + return &val +} + +func TestGetProportionalResourceLimit(t *testing.T) { + tests := []struct { + name string + originalLimit *resource.Quantity + originalRequest *resource.Quantity + recommendedRequest *resource.Quantity + defaultLimit *resource.Quantity + expectLimit *resource.Quantity + expectAnnotation bool + }{ + { + name: "scale proportionally", + originalLimit: mustParseToPointer("2"), + originalRequest: mustParseToPointer("1"), + recommendedRequest: mustParseToPointer("10"), + expectLimit: mustParseToPointer("20"), + }, + { + name: "scale proportionally with default", + originalRequest: mustParseToPointer("1"), + recommendedRequest: mustParseToPointer("10"), + defaultLimit: mustParseToPointer("2"), + expectLimit: mustParseToPointer("20"), + }, + { + name: "no original limit", + originalRequest: mustParseToPointer("1"), + recommendedRequest: mustParseToPointer("10"), + expectLimit: nil, + }, + { + name: "no original request", + originalLimit: mustParseToPointer("2"), + recommendedRequest: mustParseToPointer("10"), + expectLimit: mustParseToPointer("10"), + }, + { + name: "limit equal to request", + originalLimit: mustParseToPointer("1"), + originalRequest: mustParseToPointer("1"), + recommendedRequest: mustParseToPointer("10"), + expectLimit: mustParseToPointer("10"), + }, + { + name: "go over milli cap", + originalLimit: mustParseToPointer("10G"), + originalRequest: mustParseToPointer("1m"), + recommendedRequest: mustParseToPointer("10G"), + expectLimit: resource.NewMilliQuantity(math.MaxInt64, resource.DecimalExponent), + expectAnnotation: true, + }, + } + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + gotLimit, gotAnnotation := getProportionalResourceLimit(core.ResourceCPU, tc.originalLimit, 
tc.originalRequest, tc.recommendedRequest, tc.defaultLimit) + if tc.expectLimit == nil { + assert.Nil(t, gotLimit) + } else { + if assert.NotNil(t, gotLimit) { + assert.Equal(t, gotLimit.MilliValue(), tc.expectLimit.MilliValue()) + } + } + assert.Equal(t, gotAnnotation != "", tc.expectAnnotation) + }) + } +}
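
As a worked example of the proportional-scaling arithmetic used by getProportionalResourceLimit, the following standalone sketch (not part of the patch; names are illustrative and the MaxInt64 capping branch is omitted for brevity) shows a container with a 100m CPU request and a 150m limit keeping its 1.5 limit-to-request ratio when the request is raised to a 250m recommendation:

    package main

    import (
    	"fmt"
    	"math/big"

    	"k8s.io/apimachinery/pkg/api/resource"
    )

    // scaleProportionally mirrors the idea behind scaleQuantityProportionally above:
    // the returned quantity has the same proportion to recommendation as limit has
    // to request. The MaxInt64 capping branch is left out for brevity.
    func scaleProportionally(limit, request, recommendation resource.Quantity) resource.Quantity {
    	scaled := new(big.Int).Mul(big.NewInt(limit.MilliValue()), big.NewInt(recommendation.MilliValue()))
    	scaled.Div(scaled, big.NewInt(request.MilliValue()))
    	return *resource.NewMilliQuantity(scaled.Int64(), limit.Format)
    }

    func main() {
    	// Original container: 100m CPU request with a 150m limit, i.e. a 1.5 ratio.
    	// With a 250m recommendation the rescaled limit comes out at 375m.
    	newLimit := scaleProportionally(
    		resource.MustParse("150m"),
    		resource.MustParse("100m"),
    		resource.MustParse("250m"),
    	)
    	fmt.Println(newLimit.String()) // prints "375m"
    }

The same milli-unit big.Int arithmetic is what keeps the admission controller's patched requests and limits in their original proportion.
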