From a39b0df33ba717ec4cc9a35caf9ec4c48f33af3b Mon Sep 17 00:00:00 2001
From: Beata Skiba
Date: Wed, 29 May 2019 10:49:31 +0200
Subject: [PATCH 1/3] e2e for VPA keeping limit to request ratio constant

---
 .../e2e/v1beta1/admission_controller.go       | 37 ++++++++++++++++++-
 vertical-pod-autoscaler/e2e/v1beta1/common.go | 11 ++++++
 .../e2e/v1beta2/admission_controller.go       | 31 ++++++++++++++++
 vertical-pod-autoscaler/e2e/v1beta2/common.go | 11 ++++++
 4 files changed, 89 insertions(+), 1 deletion(-)

diff --git a/vertical-pod-autoscaler/e2e/v1beta1/admission_controller.go b/vertical-pod-autoscaler/e2e/v1beta1/admission_controller.go
index a250f3fdd5bb..28df6187af9a 100644
--- a/vertical-pod-autoscaler/e2e/v1beta1/admission_controller.go
+++ b/vertical-pod-autoscaler/e2e/v1beta1/admission_controller.go
@@ -63,7 +63,9 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", func() {
 		d := NewHamsterDeploymentWithGuaranteedResources(f, ParseQuantityOrDie("100m") /*cpu*/, ParseQuantityOrDie("100Mi") /*memory*/)
 
 		ginkgo.By("Setting up a VPA CRD")
-		vpaCRD := NewVPA(f, "hamster-vpa", hamsterTargetRef)
+		vpaCRD := NewVPA(f, "hamster-vpa", &metav1.LabelSelector{
+			MatchLabels: d.Spec.Template.Labels,
+		})
 		vpaCRD.Status.Recommendation = &vpa_types.RecommendedPodResources{
 			ContainerRecommendations: []vpa_types.RecommendedContainerResources{{
 				ContainerName: "hamster",
@@ -88,6 +90,39 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", func() {
 		}
 	})
 
+	ginkgo.It("keeps limits to request ratio constant", func() {
+		d := NewHamsterDeploymentWithResourcesAndLimits(f,
+			ParseQuantityOrDie("100m") /*cpu request*/, ParseQuantityOrDie("100Mi"), /*memory request*/
+			ParseQuantityOrDie("150m") /*cpu limit*/, ParseQuantityOrDie("200Mi") /*memory limit*/)
+
+		ginkgo.By("Setting up a VPA CRD")
+		vpaCRD := NewVPA(f, "hamster-vpa", &metav1.LabelSelector{
+			MatchLabels: d.Spec.Template.Labels,
+		})
+		vpaCRD.Status.Recommendation = &vpa_types.RecommendedPodResources{
+			ContainerRecommendations: []vpa_types.RecommendedContainerResources{{
+				ContainerName: "hamster",
+				Target: apiv1.ResourceList{
+					apiv1.ResourceCPU:    ParseQuantityOrDie("250m"),
+					apiv1.ResourceMemory: ParseQuantityOrDie("200Mi"),
+				},
+			}},
+		}
+		InstallVPA(f, vpaCRD)
+
+		ginkgo.By("Setting up a hamster deployment")
+		podList := startDeploymentPods(f, d)
+
+		// Originally Pods had 100m CPU, 100Mi of memory, but admission controller
+		// should change it to 250m CPU and 200Mi of memory. Limits to request ratio should stay unchanged.
+		for _, pod := range podList.Items {
+			gomega.Expect(*pod.Spec.Containers[0].Resources.Requests.Cpu()).To(gomega.Equal(ParseQuantityOrDie("250m")))
+			gomega.Expect(*pod.Spec.Containers[0].Resources.Requests.Memory()).To(gomega.Equal(ParseQuantityOrDie("200Mi")))
+			gomega.Expect(float64(pod.Spec.Containers[0].Resources.Limits.Cpu().MilliValue()) / float64(pod.Spec.Containers[0].Resources.Requests.Cpu().MilliValue())).To(gomega.BeNumerically("~", 1.5))
+			gomega.Expect(float64(pod.Spec.Containers[0].Resources.Limits.Memory().Value()) / float64(pod.Spec.Containers[0].Resources.Requests.Memory().Value())).To(gomega.BeNumerically("~", 2.))
+		}
+	})
+
 	ginkgo.It("caps request to max set in VPA", func() {
 		d := NewHamsterDeploymentWithResources(f, ParseQuantityOrDie("100m") /*cpu*/, ParseQuantityOrDie("100Mi") /*memory*/)
 
diff --git a/vertical-pod-autoscaler/e2e/v1beta1/common.go b/vertical-pod-autoscaler/e2e/v1beta1/common.go
index 98db1b4d0e08..40cbdafd41de 100644
--- a/vertical-pod-autoscaler/e2e/v1beta1/common.go
+++ b/vertical-pod-autoscaler/e2e/v1beta1/common.go
@@ -136,6 +136,17 @@ func NewHamsterDeploymentWithGuaranteedResources(f *framework.Framework, cpuQuan
 	return d
 }
 
+// NewHamsterDeploymentWithResourcesAndLimits creates a simple hamster deployment with specific
+// resource requests and limits for e2e test purposes.
+func NewHamsterDeploymentWithResourcesAndLimits(f *framework.Framework, cpuQuantityRequest, memoryQuantityRequest, cpuQuantityLimit, memoryQuantityLimit resource.Quantity) *appsv1.Deployment {
+	d := NewHamsterDeploymentWithResources(f, cpuQuantityRequest, memoryQuantityRequest)
+	d.Spec.Template.Spec.Containers[0].Resources.Limits = apiv1.ResourceList{
+		apiv1.ResourceCPU:    cpuQuantityLimit,
+		apiv1.ResourceMemory: memoryQuantityLimit,
+	}
+	return d
+}
+
 // GetHamsterPods returns running hamster pods (matched by hamsterLabels)
 func GetHamsterPods(f *framework.Framework) (*apiv1.PodList, error) {
 	label := labels.SelectorFromSet(labels.Set(hamsterLabels))
diff --git a/vertical-pod-autoscaler/e2e/v1beta2/admission_controller.go b/vertical-pod-autoscaler/e2e/v1beta2/admission_controller.go
index cb7f4d92074f..a51562add069 100644
--- a/vertical-pod-autoscaler/e2e/v1beta2/admission_controller.go
+++ b/vertical-pod-autoscaler/e2e/v1beta2/admission_controller.go
@@ -85,6 +85,37 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", func() {
 		}
 	})
 
+	ginkgo.It("keeps limits to request ratio constant", func() {
+		d := NewHamsterDeploymentWithResourcesAndLimits(f,
+			ParseQuantityOrDie("100m") /*cpu request*/, ParseQuantityOrDie("100Mi"), /*memory request*/
+			ParseQuantityOrDie("150m") /*cpu limit*/, ParseQuantityOrDie("200Mi") /*memory limit*/)
+
+		ginkgo.By("Setting up a VPA CRD")
+		vpaCRD := NewVPA(f, "hamster-vpa", hamsterTargetRef)
+		vpaCRD.Status.Recommendation = &vpa_types.RecommendedPodResources{
+			ContainerRecommendations: []vpa_types.RecommendedContainerResources{{
+				ContainerName: "hamster",
+				Target: apiv1.ResourceList{
+					apiv1.ResourceCPU:    ParseQuantityOrDie("250m"),
+					apiv1.ResourceMemory: ParseQuantityOrDie("200Mi"),
+				},
+			}},
+		}
+		InstallVPA(f, vpaCRD)
+
+		ginkgo.By("Setting up a hamster deployment")
+		podList := startDeploymentPods(f, d)
+
+		// Originally Pods had 100m CPU, 100Mi of memory, but admission controller
+		// should change it to 250m CPU and 200Mi of memory. Limits to request ratio should stay unchanged.
+		for _, pod := range podList.Items {
+			gomega.Expect(*pod.Spec.Containers[0].Resources.Requests.Cpu()).To(gomega.Equal(ParseQuantityOrDie("250m")))
+			gomega.Expect(*pod.Spec.Containers[0].Resources.Requests.Memory()).To(gomega.Equal(ParseQuantityOrDie("200Mi")))
+			gomega.Expect(float64(pod.Spec.Containers[0].Resources.Limits.Cpu().MilliValue()) / float64(pod.Spec.Containers[0].Resources.Requests.Cpu().MilliValue())).To(gomega.BeNumerically("~", 1.5))
+			gomega.Expect(float64(pod.Spec.Containers[0].Resources.Limits.Memory().Value()) / float64(pod.Spec.Containers[0].Resources.Requests.Memory().Value())).To(gomega.BeNumerically("~", 2.))
+		}
+	})
+
 	ginkgo.It("caps request to max set in VPA", func() {
 		d := NewHamsterDeploymentWithResources(f, ParseQuantityOrDie("100m") /*cpu*/, ParseQuantityOrDie("100Mi") /*memory*/)
 
diff --git a/vertical-pod-autoscaler/e2e/v1beta2/common.go b/vertical-pod-autoscaler/e2e/v1beta2/common.go
index 93d33906e311..0c58690b4835 100644
--- a/vertical-pod-autoscaler/e2e/v1beta2/common.go
+++ b/vertical-pod-autoscaler/e2e/v1beta2/common.go
@@ -143,6 +143,17 @@ func NewHamsterDeploymentWithGuaranteedResources(f *framework.Framework, cpuQuan
 	return d
 }
 
+// NewHamsterDeploymentWithResourcesAndLimits creates a simple hamster deployment with specific
+// resource requests and limits for e2e test purposes.
+func NewHamsterDeploymentWithResourcesAndLimits(f *framework.Framework, cpuQuantityRequest, memoryQuantityRequest, cpuQuantityLimit, memoryQuantityLimit resource.Quantity) *appsv1.Deployment {
+	d := NewHamsterDeploymentWithResources(f, cpuQuantityRequest, memoryQuantityRequest)
+	d.Spec.Template.Spec.Containers[0].Resources.Limits = apiv1.ResourceList{
+		apiv1.ResourceCPU:    cpuQuantityLimit,
+		apiv1.ResourceMemory: memoryQuantityLimit,
+	}
+	return d
+}
+
 // GetHamsterPods returns running hamster pods (matched by hamsterLabels)
 func GetHamsterPods(f *framework.Framework) (*apiv1.PodList, error) {
 	label := labels.SelectorFromSet(labels.Set(hamsterLabels))

From da76008702cfd1ae3aeebae41b6308a73fc7590b Mon Sep 17 00:00:00 2001
From: Beata Skiba
Date: Wed, 29 May 2019 12:06:35 +0200
Subject: [PATCH 2/3] e2e test for VPA respecting LimitRange min and max

---
 .../e2e/v1beta1/admission_controller.go       | 88 +++++++++++++++++++
 vertical-pod-autoscaler/e2e/v1beta1/common.go | 54 ++++++++++++
 .../e2e/v1beta2/admission_controller.go       | 84 ++++++++++++++++++
 vertical-pod-autoscaler/e2e/v1beta2/common.go | 54 ++++++++++++
 4 files changed, 280 insertions(+)

diff --git a/vertical-pod-autoscaler/e2e/v1beta1/admission_controller.go b/vertical-pod-autoscaler/e2e/v1beta1/admission_controller.go
index 28df6187af9a..31a736168c90 100644
--- a/vertical-pod-autoscaler/e2e/v1beta1/admission_controller.go
+++ b/vertical-pod-autoscaler/e2e/v1beta1/admission_controller.go
@@ -17,6 +17,8 @@ limitations under the License.
 package autoscaling
 
 import (
+	"fmt"
+
 	appsv1 "k8s.io/api/apps/v1"
 	apiv1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -123,6 +125,92 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", func() {
 		}
 	})
 
+	ginkgo.It("caps request according to max limit set in LimitRange", func() {
+		d := NewHamsterDeploymentWithResourcesAndLimits(f,
+			ParseQuantityOrDie("100m") /*cpu request*/, ParseQuantityOrDie("100Mi"), /*memory request*/
+			ParseQuantityOrDie("150m") /*cpu limit*/, ParseQuantityOrDie("200Mi") /*memory limit*/)
+
+		ginkgo.By("Setting up a VPA CRD")
+		vpaCRD := NewVPA(f, "hamster-vpa", &metav1.LabelSelector{
+			MatchLabels: d.Spec.Template.Labels,
+		})
+		vpaCRD.Status.Recommendation = &vpa_types.RecommendedPodResources{
+			ContainerRecommendations: []vpa_types.RecommendedContainerResources{{
+				ContainerName: "hamster",
+				Target: apiv1.ResourceList{
+					apiv1.ResourceCPU:    ParseQuantityOrDie("250m"),
+					apiv1.ResourceMemory: ParseQuantityOrDie("200Mi"),
+				},
+			}},
+		}
+		InstallVPA(f, vpaCRD)
+
+		cpuLimit := "300m"
+		memLimit := "1T"
+		ginkgo.By(fmt.Sprintf("Setting up LimitRange with max limits - CPU: %v, memory: %v", cpuLimit, memLimit))
+		// Max CPU limit is 300m and ratio is 1.5, so max request is 200m, while
+		// recommendation is 250m
+		// Max memory limit is 1T and ratio is 2., so max request is 0.5T
+		InstallLimitRangeWithMax(f, cpuLimit, memLimit)
+
+		ginkgo.By("Setting up a hamster deployment")
+		podList := startDeploymentPods(f, d)
+
+		// Originally Pods had 100m CPU, 100Mi of memory, but admission controller
+		// should change it to 200m CPU (as this is the recommendation
+		// capped according to max limit in LimitRange) and 200Mi of memory,
+		// which is uncapped. Limit to request ratio should stay unchanged.
+		for _, pod := range podList.Items {
+			gomega.Expect(*pod.Spec.Containers[0].Resources.Requests.Cpu()).To(gomega.Equal(ParseQuantityOrDie("200m")))
+			gomega.Expect(*pod.Spec.Containers[0].Resources.Requests.Memory()).To(gomega.Equal(ParseQuantityOrDie("200Mi")))
+			gomega.Expect(float64(pod.Spec.Containers[0].Resources.Limits.Cpu().MilliValue()) / float64(pod.Spec.Containers[0].Resources.Requests.Cpu().MilliValue())).To(gomega.BeNumerically("~", 1.5))
+			gomega.Expect(float64(pod.Spec.Containers[0].Resources.Limits.Memory().Value()) / float64(pod.Spec.Containers[0].Resources.Requests.Memory().Value())).To(gomega.BeNumerically("~", 2.))
+		}
+	})
+
+	ginkgo.It("raises request according to min limit set in LimitRange", func() {
+		d := NewHamsterDeploymentWithResourcesAndLimits(f,
+			ParseQuantityOrDie("100m") /*cpu request*/, ParseQuantityOrDie("200Mi"), /*memory request*/
+			ParseQuantityOrDie("150m") /*cpu limit*/, ParseQuantityOrDie("400Mi") /*memory limit*/)
+
+		ginkgo.By("Setting up a VPA CRD")
+		vpaCRD := NewVPA(f, "hamster-vpa", &metav1.LabelSelector{
+			MatchLabels: d.Spec.Template.Labels,
+		})
+		vpaCRD.Status.Recommendation = &vpa_types.RecommendedPodResources{
+			ContainerRecommendations: []vpa_types.RecommendedContainerResources{{
+				ContainerName: "hamster",
+				Target: apiv1.ResourceList{
+					apiv1.ResourceCPU:    ParseQuantityOrDie("250m"),
+					apiv1.ResourceMemory: ParseQuantityOrDie("100Mi"), // memory is downscaled
+				},
+			}},
+		}
+		InstallVPA(f, vpaCRD)
+
+		cpuLimit := "75m"
+		memLimit := "250Mi"
+		ginkgo.By(fmt.Sprintf("Setting up LimitRange with min limits - CPU: %v, memory: %v", cpuLimit, memLimit))
+		// Min CPU limit is 75m and ratio is 1.5, so min request is 50m
+		// Min memory limit is 250Mi and ratio is 2., so min request is 125Mi, while
+		// recommendation is 100Mi.
+		InstallLimitRangeWithMin(f, cpuLimit, memLimit)
+
+		ginkgo.By("Setting up a hamster deployment")
+		podList := startDeploymentPods(f, d)
+
+		// Originally Pods had 100m CPU, 200Mi of memory, but admission controller
+		// should change it to 250m CPU and 125Mi of memory, since this is the lowest
+		// request that limitrange allows.
+		// Limit to request ratio should stay unchanged.
+		for _, pod := range podList.Items {
+			gomega.Expect(*pod.Spec.Containers[0].Resources.Requests.Cpu()).To(gomega.Equal(ParseQuantityOrDie("250m")))
+			gomega.Expect(*pod.Spec.Containers[0].Resources.Requests.Memory()).To(gomega.Equal(ParseQuantityOrDie("125Mi")))
+			gomega.Expect(float64(pod.Spec.Containers[0].Resources.Limits.Cpu().MilliValue()) / float64(pod.Spec.Containers[0].Resources.Requests.Cpu().MilliValue())).To(gomega.BeNumerically("~", 1.5))
+			gomega.Expect(float64(pod.Spec.Containers[0].Resources.Limits.Memory().Value()) / float64(pod.Spec.Containers[0].Resources.Requests.Memory().Value())).To(gomega.BeNumerically("~", 2.))
+		}
+	})
+
 	ginkgo.It("caps request to max set in VPA", func() {
 		d := NewHamsterDeploymentWithResources(f, ParseQuantityOrDie("100m") /*cpu*/, ParseQuantityOrDie("100Mi") /*memory*/)
 
diff --git a/vertical-pod-autoscaler/e2e/v1beta1/common.go b/vertical-pod-autoscaler/e2e/v1beta1/common.go
index 40cbdafd41de..a01b0d69e992 100644
--- a/vertical-pod-autoscaler/e2e/v1beta1/common.go
+++ b/vertical-pod-autoscaler/e2e/v1beta1/common.go
@@ -353,3 +353,57 @@ func WaitForConditionPresent(c *vpa_clientset.Clientset, vpa *vpa_types.Vertical
 		return false
 	})
 }
+
+func installLimitRange(f *framework.Framework, minCpuLimit, minMemoryLimit, maxCpuLimit, maxMemoryLimit *resource.Quantity) {
+	lr := &apiv1.LimitRange{
+		ObjectMeta: metav1.ObjectMeta{
+			Namespace: f.Namespace.Name,
+			Name:      "hamster-lr",
+		},
+		Spec: apiv1.LimitRangeSpec{
+			Limits: []apiv1.LimitRangeItem{},
+		},
+	}
+
+	if maxMemoryLimit != nil || maxCpuLimit != nil {
+		lrItem := apiv1.LimitRangeItem{
+			Type: apiv1.LimitTypeContainer,
+			Max:  apiv1.ResourceList{},
+		}
+		if maxCpuLimit != nil {
+			lrItem.Max[apiv1.ResourceCPU] = *maxCpuLimit
+		}
+		if maxMemoryLimit != nil {
+			lrItem.Max[apiv1.ResourceMemory] = *maxMemoryLimit
+		}
+		lr.Spec.Limits = append(lr.Spec.Limits, lrItem)
+	}
+
+	if minMemoryLimit != nil || minCpuLimit != nil {
+		lrItem := apiv1.LimitRangeItem{
+			Type: apiv1.LimitTypeContainer,
+			Min:  apiv1.ResourceList{},
+		}
+		if minCpuLimit != nil {
+			lrItem.Min[apiv1.ResourceCPU] = *minCpuLimit
+		}
+		if minMemoryLimit != nil {
+			lrItem.Min[apiv1.ResourceMemory] = *minMemoryLimit
+		}
+		lr.Spec.Limits = append(lr.Spec.Limits, lrItem)
+	}
+	_, err := f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).Create(lr)
+	gomega.Expect(err).NotTo(gomega.HaveOccurred())
+}
+
+func InstallLimitRangeWithMax(f *framework.Framework, maxCpuLimit, maxMemoryLimit string) {
+	maxCpuLimitQuantity := ParseQuantityOrDie(maxCpuLimit)
+	maxMemoryLimitQuantity := ParseQuantityOrDie(maxMemoryLimit)
+	installLimitRange(f, nil, nil, &maxCpuLimitQuantity, &maxMemoryLimitQuantity)
+}
+
+func InstallLimitRangeWithMin(f *framework.Framework, minCpuLimit, minMemoryLimit string) {
+	minCpuLimitQuantity := ParseQuantityOrDie(minCpuLimit)
+	minMemoryLimitQuantity := ParseQuantityOrDie(minMemoryLimit)
+	installLimitRange(f, &minCpuLimitQuantity, &minMemoryLimitQuantity, nil, nil)
+}
diff --git a/vertical-pod-autoscaler/e2e/v1beta2/admission_controller.go b/vertical-pod-autoscaler/e2e/v1beta2/admission_controller.go
index a51562add069..e218869e02e1 100644
--- a/vertical-pod-autoscaler/e2e/v1beta2/admission_controller.go
+++ b/vertical-pod-autoscaler/e2e/v1beta2/admission_controller.go
@@ -17,6 +17,8 @@ limitations under the License.
 package autoscaling
 
 import (
+	"fmt"
+
 	appsv1 "k8s.io/api/apps/v1"
 	apiv1 "k8s.io/api/core/v1"
 	vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta2"
@@ -116,6 +118,88 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", func() {
 		}
 	})
 
+	ginkgo.It("caps request according to max limit set in LimitRange", func() {
+		d := NewHamsterDeploymentWithResourcesAndLimits(f,
+			ParseQuantityOrDie("100m") /*cpu request*/, ParseQuantityOrDie("100Mi"), /*memory request*/
+			ParseQuantityOrDie("150m") /*cpu limit*/, ParseQuantityOrDie("200Mi") /*memory limit*/)
+
+		ginkgo.By("Setting up a VPA CRD")
+		vpaCRD := NewVPA(f, "hamster-vpa", hamsterTargetRef)
+		vpaCRD.Status.Recommendation = &vpa_types.RecommendedPodResources{
+			ContainerRecommendations: []vpa_types.RecommendedContainerResources{{
+				ContainerName: "hamster",
+				Target: apiv1.ResourceList{
+					apiv1.ResourceCPU:    ParseQuantityOrDie("250m"),
+					apiv1.ResourceMemory: ParseQuantityOrDie("200Mi"),
+				},
+			}},
+		}
+		InstallVPA(f, vpaCRD)
+
+		cpuLimit := "300m"
+		memLimit := "1T"
+		ginkgo.By(fmt.Sprintf("Setting up LimitRange with max limits - CPU: %v, memory: %v", cpuLimit, memLimit))
+		// Max CPU limit is 300m and ratio is 1.5, so max request is 200m, while
+		// recommendation is 250m
+		// Max memory limit is 1T and ratio is 2., so max request is 0.5T
+		InstallLimitRangeWithMax(f, cpuLimit, memLimit)
+
+		ginkgo.By("Setting up a hamster deployment")
+		podList := startDeploymentPods(f, d)
+
+		// Originally Pods had 100m CPU, 100Mi of memory, but admission controller
+		// should change it to 200m CPU (as this is the recommendation
+		// capped according to max limit in LimitRange) and 200Mi of memory,
+		// which is uncapped. Limit to request ratio should stay unchanged.
+		for _, pod := range podList.Items {
+			gomega.Expect(*pod.Spec.Containers[0].Resources.Requests.Cpu()).To(gomega.Equal(ParseQuantityOrDie("200m")))
+			gomega.Expect(*pod.Spec.Containers[0].Resources.Requests.Memory()).To(gomega.Equal(ParseQuantityOrDie("200Mi")))
+			gomega.Expect(float64(pod.Spec.Containers[0].Resources.Limits.Cpu().MilliValue()) / float64(pod.Spec.Containers[0].Resources.Requests.Cpu().MilliValue())).To(gomega.BeNumerically("~", 1.5))
+			gomega.Expect(float64(pod.Spec.Containers[0].Resources.Limits.Memory().Value()) / float64(pod.Spec.Containers[0].Resources.Requests.Memory().Value())).To(gomega.BeNumerically("~", 2.))
+		}
+	})
+
+	ginkgo.It("raises request according to min limit set in LimitRange", func() {
+		d := NewHamsterDeploymentWithResourcesAndLimits(f,
+			ParseQuantityOrDie("100m") /*cpu request*/, ParseQuantityOrDie("200Mi"), /*memory request*/
+			ParseQuantityOrDie("150m") /*cpu limit*/, ParseQuantityOrDie("400Mi") /*memory limit*/)
+
+		ginkgo.By("Setting up a VPA CRD")
+		vpaCRD := NewVPA(f, "hamster-vpa", hamsterTargetRef)
+		vpaCRD.Status.Recommendation = &vpa_types.RecommendedPodResources{
+			ContainerRecommendations: []vpa_types.RecommendedContainerResources{{
+				ContainerName: "hamster",
+				Target: apiv1.ResourceList{
+					apiv1.ResourceCPU:    ParseQuantityOrDie("250m"),
+					apiv1.ResourceMemory: ParseQuantityOrDie("100Mi"), // memory is downscaled
+				},
+			}},
+		}
+		InstallVPA(f, vpaCRD)
+
+		cpuLimit := "75m"
+		memLimit := "250Mi"
+		ginkgo.By(fmt.Sprintf("Setting up LimitRange with min limits - CPU: %v, memory: %v", cpuLimit, memLimit))
+		// Min CPU limit is 75m and ratio is 1.5, so min request is 50m
+		// Min memory limit is 250Mi and ratio is 2., so min request is 125Mi, while
+		// recommendation is 100Mi.
+		InstallLimitRangeWithMin(f, cpuLimit, memLimit)
+
+		ginkgo.By("Setting up a hamster deployment")
+		podList := startDeploymentPods(f, d)
+
+		// Originally Pods had 100m CPU, 200Mi of memory, but admission controller
+		// should change it to 250m CPU and 125Mi of memory, since this is the lowest
+		// request that limitrange allows.
+		// Limit to request ratio should stay unchanged.
+		for _, pod := range podList.Items {
+			gomega.Expect(*pod.Spec.Containers[0].Resources.Requests.Cpu()).To(gomega.Equal(ParseQuantityOrDie("250m")))
+			gomega.Expect(*pod.Spec.Containers[0].Resources.Requests.Memory()).To(gomega.Equal(ParseQuantityOrDie("125Mi")))
+			gomega.Expect(float64(pod.Spec.Containers[0].Resources.Limits.Cpu().MilliValue()) / float64(pod.Spec.Containers[0].Resources.Requests.Cpu().MilliValue())).To(gomega.BeNumerically("~", 1.5))
+			gomega.Expect(float64(pod.Spec.Containers[0].Resources.Limits.Memory().Value()) / float64(pod.Spec.Containers[0].Resources.Requests.Memory().Value())).To(gomega.BeNumerically("~", 2.))
+		}
+	})
+
 	ginkgo.It("caps request to max set in VPA", func() {
 		d := NewHamsterDeploymentWithResources(f, ParseQuantityOrDie("100m") /*cpu*/, ParseQuantityOrDie("100Mi") /*memory*/)
 
diff --git a/vertical-pod-autoscaler/e2e/v1beta2/common.go b/vertical-pod-autoscaler/e2e/v1beta2/common.go
index 0c58690b4835..bafc2618cd9f 100644
--- a/vertical-pod-autoscaler/e2e/v1beta2/common.go
+++ b/vertical-pod-autoscaler/e2e/v1beta2/common.go
@@ -346,3 +346,57 @@ func WaitForRecommendationPresent(c *vpa_clientset.Clientset, vpa *vpa_types.Ver
 		return vpa.Status.Recommendation != nil && len(vpa.Status.Recommendation.ContainerRecommendations) != 0
 	})
 }
+
+func installLimitRange(f *framework.Framework, minCpuLimit, minMemoryLimit, maxCpuLimit, maxMemoryLimit *resource.Quantity) {
+	lr := &apiv1.LimitRange{
+		ObjectMeta: metav1.ObjectMeta{
+			Namespace: f.Namespace.Name,
+			Name:      "hamster-lr",
+		},
+		Spec: apiv1.LimitRangeSpec{
+			Limits: []apiv1.LimitRangeItem{},
+		},
+	}
+
+	if maxMemoryLimit != nil || maxCpuLimit != nil {
+		lrItem := apiv1.LimitRangeItem{
+			Type: apiv1.LimitTypeContainer,
+			Max:  apiv1.ResourceList{},
+		}
+		if maxCpuLimit != nil {
+			lrItem.Max[apiv1.ResourceCPU] = *maxCpuLimit
+		}
+		if maxMemoryLimit != nil {
+			lrItem.Max[apiv1.ResourceMemory] = *maxMemoryLimit
+		}
+		lr.Spec.Limits = append(lr.Spec.Limits, lrItem)
+	}
+
+	if minMemoryLimit != nil || minCpuLimit != nil {
+		lrItem := apiv1.LimitRangeItem{
+			Type: apiv1.LimitTypeContainer,
+			Min:  apiv1.ResourceList{},
+		}
+		if minCpuLimit != nil {
+			lrItem.Min[apiv1.ResourceCPU] = *minCpuLimit
+		}
+		if minMemoryLimit != nil {
+			lrItem.Min[apiv1.ResourceMemory] = *minMemoryLimit
+		}
+		lr.Spec.Limits = append(lr.Spec.Limits, lrItem)
+	}
+	_, err := f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).Create(lr)
+	gomega.Expect(err).NotTo(gomega.HaveOccurred())
+}
+
+func InstallLimitRangeWithMax(f *framework.Framework, maxCpuLimit, maxMemoryLimit string) {
+	maxCpuLimitQuantity := ParseQuantityOrDie(maxCpuLimit)
+	maxMemoryLimitQuantity := ParseQuantityOrDie(maxMemoryLimit)
+	installLimitRange(f, nil, nil, &maxCpuLimitQuantity, &maxMemoryLimitQuantity)
+}
+
+func InstallLimitRangeWithMin(f *framework.Framework, minCpuLimit, minMemoryLimit string) {
+	minCpuLimitQuantity := ParseQuantityOrDie(minCpuLimit)
+	minMemoryLimitQuantity := ParseQuantityOrDie(minMemoryLimit)
+	installLimitRange(f, &minCpuLimitQuantity, &minMemoryLimitQuantity, nil, nil)
+}

From c1ceb0fe433ca76aae6bc23f44f6dedc41501248 Mon Sep 17 00:00:00 2001
From: Beata Skiba
Date: Wed, 29 May 2019 13:29:38 +0200
Subject: [PATCH 3/3] e2e test for VPA updater observing limit range

---
 .../e2e/v1beta1/admission_controller.go       | 16 +++-----
 vertical-pod-autoscaler/e2e/v1beta1/common.go |  4 ++
 .../e2e/v1beta1/updater.go                    | 38 +++++++++++++++++++
 .../e2e/v1beta2/admission_controller.go       | 16 +++-----
 vertical-pod-autoscaler/e2e/v1beta2/common.go |  4 ++
 .../e2e/v1beta2/updater.go                    | 38 +++++++++++++++++++
 6 files changed, 96 insertions(+), 20 deletions(-)

diff --git a/vertical-pod-autoscaler/e2e/v1beta1/admission_controller.go b/vertical-pod-autoscaler/e2e/v1beta1/admission_controller.go
index 31a736168c90..d40687bb7fca 100644
--- a/vertical-pod-autoscaler/e2e/v1beta1/admission_controller.go
+++ b/vertical-pod-autoscaler/e2e/v1beta1/admission_controller.go
@@ -17,8 +17,6 @@ limitations under the License.
 package autoscaling
 
 import (
-	"fmt"
-
 	appsv1 "k8s.io/api/apps/v1"
 	apiv1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -145,13 +143,10 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", func() {
 		}
 		InstallVPA(f, vpaCRD)
 
-		cpuLimit := "300m"
-		memLimit := "1T"
-		ginkgo.By(fmt.Sprintf("Setting up LimitRange with max limits - CPU: %v, memory: %v", cpuLimit, memLimit))
 		// Max CPU limit is 300m and ratio is 1.5, so max request is 200m, while
 		// recommendation is 250m
 		// Max memory limit is 1T and ratio is 2., so max request is 0.5T
-		InstallLimitRangeWithMax(f, cpuLimit, memLimit)
+		InstallLimitRangeWithMax(f, "300m", "1T")
 
 		ginkgo.By("Setting up a hamster deployment")
 		podList := startDeploymentPods(f, d)
@@ -163,6 +158,8 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", func() {
 		for _, pod := range podList.Items {
 			gomega.Expect(*pod.Spec.Containers[0].Resources.Requests.Cpu()).To(gomega.Equal(ParseQuantityOrDie("200m")))
 			gomega.Expect(*pod.Spec.Containers[0].Resources.Requests.Memory()).To(gomega.Equal(ParseQuantityOrDie("200Mi")))
+			gomega.Expect(pod.Spec.Containers[0].Resources.Limits.Cpu().MilliValue()).To(gomega.BeNumerically("<=", ParseQuantityOrDie("300m").MilliValue()))
+			gomega.Expect(pod.Spec.Containers[0].Resources.Limits.Memory().Value()).To(gomega.BeNumerically("<=", ParseQuantityOrDie("1T").Value()))
 			gomega.Expect(float64(pod.Spec.Containers[0].Resources.Limits.Cpu().MilliValue()) / float64(pod.Spec.Containers[0].Resources.Requests.Cpu().MilliValue())).To(gomega.BeNumerically("~", 1.5))
 			gomega.Expect(float64(pod.Spec.Containers[0].Resources.Limits.Memory().Value()) / float64(pod.Spec.Containers[0].Resources.Requests.Memory().Value())).To(gomega.BeNumerically("~", 2.))
 		}
@@ -188,13 +185,10 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", func() {
 		}
 		InstallVPA(f, vpaCRD)
 
-		cpuLimit := "75m"
-		memLimit := "250Mi"
-		ginkgo.By(fmt.Sprintf("Setting up LimitRange with min limits - CPU: %v, memory: %v", cpuLimit, memLimit))
 		// Min CPU limit is 75m and ratio is 1.5, so min request is 50m
 		// Min memory limit is 250Mi and ratio is 2., so min request is 125Mi, while
 		// recommendation is 100Mi.
-		InstallLimitRangeWithMin(f, cpuLimit, memLimit)
+		InstallLimitRangeWithMin(f, "75m", "250Mi")
 
 		ginkgo.By("Setting up a hamster deployment")
 		podList := startDeploymentPods(f, d)
@@ -206,6 +200,8 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", func() {
 		for _, pod := range podList.Items {
 			gomega.Expect(*pod.Spec.Containers[0].Resources.Requests.Cpu()).To(gomega.Equal(ParseQuantityOrDie("250m")))
 			gomega.Expect(*pod.Spec.Containers[0].Resources.Requests.Memory()).To(gomega.Equal(ParseQuantityOrDie("125Mi")))
+			gomega.Expect(pod.Spec.Containers[0].Resources.Limits.Cpu().MilliValue()).To(gomega.BeNumerically(">=", ParseQuantityOrDie("75m").MilliValue()))
+			gomega.Expect(pod.Spec.Containers[0].Resources.Limits.Memory().Value()).To(gomega.BeNumerically(">=", ParseQuantityOrDie("250Mi").Value()))
 			gomega.Expect(float64(pod.Spec.Containers[0].Resources.Limits.Cpu().MilliValue()) / float64(pod.Spec.Containers[0].Resources.Requests.Cpu().MilliValue())).To(gomega.BeNumerically("~", 1.5))
 			gomega.Expect(float64(pod.Spec.Containers[0].Resources.Limits.Memory().Value()) / float64(pod.Spec.Containers[0].Resources.Requests.Memory().Value())).To(gomega.BeNumerically("~", 2.))
 		}
diff --git a/vertical-pod-autoscaler/e2e/v1beta1/common.go b/vertical-pod-autoscaler/e2e/v1beta1/common.go
index a01b0d69e992..4780be450db4 100644
--- a/vertical-pod-autoscaler/e2e/v1beta1/common.go
+++ b/vertical-pod-autoscaler/e2e/v1beta1/common.go
@@ -396,13 +396,17 @@ func installLimitRange(f *framework.Framework, minCpuLimit, minMemoryLimit, maxC
 	gomega.Expect(err).NotTo(gomega.HaveOccurred())
 }
 
+// InstallLimitRangeWithMax installs a LimitRange with a maximum limit for CPU and memory.
 func InstallLimitRangeWithMax(f *framework.Framework, maxCpuLimit, maxMemoryLimit string) {
+	ginkgo.By(fmt.Sprintf("Setting up LimitRange with max limits - CPU: %v, memory: %v", maxCpuLimit, maxMemoryLimit))
 	maxCpuLimitQuantity := ParseQuantityOrDie(maxCpuLimit)
 	maxMemoryLimitQuantity := ParseQuantityOrDie(maxMemoryLimit)
 	installLimitRange(f, nil, nil, &maxCpuLimitQuantity, &maxMemoryLimitQuantity)
 }
 
+// InstallLimitRangeWithMin installs a LimitRange with a minimum limit for CPU and memory.
 func InstallLimitRangeWithMin(f *framework.Framework, minCpuLimit, minMemoryLimit string) {
+	ginkgo.By(fmt.Sprintf("Setting up LimitRange with min limits - CPU: %v, memory: %v", minCpuLimit, minMemoryLimit))
 	minCpuLimitQuantity := ParseQuantityOrDie(minCpuLimit)
 	minMemoryLimitQuantity := ParseQuantityOrDie(minMemoryLimit)
 	installLimitRange(f, &minCpuLimitQuantity, &minMemoryLimitQuantity, nil, nil)
diff --git a/vertical-pod-autoscaler/e2e/v1beta1/updater.go b/vertical-pod-autoscaler/e2e/v1beta1/updater.go
index 33b66e1d49d2..d61949fd5ee3 100644
--- a/vertical-pod-autoscaler/e2e/v1beta1/updater.go
+++ b/vertical-pod-autoscaler/e2e/v1beta1/updater.go
@@ -98,6 +98,44 @@ var _ = UpdaterE2eDescribe("Updater", func() {
 		gomega.Expect(err).NotTo(gomega.HaveOccurred())
 		gomega.Expect(evictedCount >= permissiveMaxUnavailable).To(gomega.BeTrue())
 	})
+
+	ginkgo.It("observes max in LimitRange", func() {
+		ginkgo.By("Setting up a hamster deployment")
+		d := NewHamsterDeploymentWithResourcesAndLimits(f,
+			ParseQuantityOrDie("100m") /*cpu request*/, ParseQuantityOrDie("200Mi"), /*memory request*/
+			ParseQuantityOrDie("300m") /*cpu limit*/, ParseQuantityOrDie("400Mi") /*memory limit*/)
+		podList := startDeploymentPods(f, d)
+
+		ginkgo.By("Setting up a VPA CRD")
+		SetupVPA(f, "200m", vpa_types.UpdateModeAuto)
+
+		// Max CPU limit is 300m and ratio is 3., so max request is 100m, while
+		// recommendation is 200m
+		// Max memory limit is 1T and ratio is 2., so max request is 0.5T
+		InstallLimitRangeWithMax(f, "300m", "1T")
+
+		ginkgo.By(fmt.Sprintf("Waiting for pods to be evicted, hoping it won't happen, sleep for %s", VpaEvictionTimeout.String()))
+		CheckNoPodsEvicted(f, MakePodSet(podList))
+	})
+
+	ginkgo.It("observes min in LimitRange", func() {
+		ginkgo.By("Setting up a hamster deployment")
+		d := NewHamsterDeploymentWithResourcesAndLimits(f,
+			ParseQuantityOrDie("100m") /*cpu request*/, ParseQuantityOrDie("200Mi"), /*memory request*/
+			ParseQuantityOrDie("300m") /*cpu limit*/, ParseQuantityOrDie("400Mi") /*memory limit*/)
+		podList := startDeploymentPods(f, d)
+
+		ginkgo.By("Setting up a VPA CRD")
+		SetupVPA(f, "50m", vpa_types.UpdateModeAuto)
+
+		// Min CPU limit is 300m and ratio is 3., so min request is 100m, while
+		// recommendation is 50m
+		// Min memory limit is 0 and ratio is 2., so min request is 0
+		InstallLimitRangeWithMin(f, "300m", "0")
+
+		ginkgo.By(fmt.Sprintf("Waiting for pods to be evicted, hoping it won't happen, sleep for %s", VpaEvictionTimeout.String()))
+		CheckNoPodsEvicted(f, MakePodSet(podList))
+	})
 })
 
 func testEvictsPods(f *framework.Framework, controllerKind string) {
diff --git a/vertical-pod-autoscaler/e2e/v1beta2/admission_controller.go b/vertical-pod-autoscaler/e2e/v1beta2/admission_controller.go
index e218869e02e1..98d2306c4a78 100644
--- a/vertical-pod-autoscaler/e2e/v1beta2/admission_controller.go
+++ b/vertical-pod-autoscaler/e2e/v1beta2/admission_controller.go
@@ -17,8 +17,6 @@ limitations under the License.
 package autoscaling
 
 import (
-	"fmt"
-
 	appsv1 "k8s.io/api/apps/v1"
 	apiv1 "k8s.io/api/core/v1"
 	vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta2"
@@ -136,13 +134,10 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", func() {
 		}
 		InstallVPA(f, vpaCRD)
 
-		cpuLimit := "300m"
-		memLimit := "1T"
-		ginkgo.By(fmt.Sprintf("Setting up LimitRange with max limits - CPU: %v, memory: %v", cpuLimit, memLimit))
 		// Max CPU limit is 300m and ratio is 1.5, so max request is 200m, while
 		// recommendation is 250m
 		// Max memory limit is 1T and ratio is 2., so max request is 0.5T
-		InstallLimitRangeWithMax(f, cpuLimit, memLimit)
+		InstallLimitRangeWithMax(f, "300m", "1T")
 
 		ginkgo.By("Setting up a hamster deployment")
 		podList := startDeploymentPods(f, d)
@@ -154,6 +149,8 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", func() {
 		for _, pod := range podList.Items {
 			gomega.Expect(*pod.Spec.Containers[0].Resources.Requests.Cpu()).To(gomega.Equal(ParseQuantityOrDie("200m")))
 			gomega.Expect(*pod.Spec.Containers[0].Resources.Requests.Memory()).To(gomega.Equal(ParseQuantityOrDie("200Mi")))
+			gomega.Expect(pod.Spec.Containers[0].Resources.Limits.Cpu().MilliValue()).To(gomega.BeNumerically("<=", ParseQuantityOrDie("300m").MilliValue()))
+			gomega.Expect(pod.Spec.Containers[0].Resources.Limits.Memory().Value()).To(gomega.BeNumerically("<=", ParseQuantityOrDie("1T").Value()))
 			gomega.Expect(float64(pod.Spec.Containers[0].Resources.Limits.Cpu().MilliValue()) / float64(pod.Spec.Containers[0].Resources.Requests.Cpu().MilliValue())).To(gomega.BeNumerically("~", 1.5))
 			gomega.Expect(float64(pod.Spec.Containers[0].Resources.Limits.Memory().Value()) / float64(pod.Spec.Containers[0].Resources.Requests.Memory().Value())).To(gomega.BeNumerically("~", 2.))
 		}
@@ -177,13 +174,10 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", func() {
 		}
 		InstallVPA(f, vpaCRD)
 
-		cpuLimit := "75m"
-		memLimit := "250Mi"
-		ginkgo.By(fmt.Sprintf("Setting up LimitRange with min limits - CPU: %v, memory: %v", cpuLimit, memLimit))
 		// Min CPU limit is 75m and ratio is 1.5, so min request is 50m
 		// Min memory limit is 250Mi and ratio is 2., so min request is 125Mi, while
 		// recommendation is 100Mi.
-		InstallLimitRangeWithMin(f, cpuLimit, memLimit)
+		InstallLimitRangeWithMin(f, "75m", "250Mi")
 
 		ginkgo.By("Setting up a hamster deployment")
 		podList := startDeploymentPods(f, d)
@@ -195,6 +189,8 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", func() {
 		for _, pod := range podList.Items {
 			gomega.Expect(*pod.Spec.Containers[0].Resources.Requests.Cpu()).To(gomega.Equal(ParseQuantityOrDie("250m")))
 			gomega.Expect(*pod.Spec.Containers[0].Resources.Requests.Memory()).To(gomega.Equal(ParseQuantityOrDie("125Mi")))
+			gomega.Expect(pod.Spec.Containers[0].Resources.Limits.Cpu().MilliValue()).To(gomega.BeNumerically(">=", ParseQuantityOrDie("75m").MilliValue()))
+			gomega.Expect(pod.Spec.Containers[0].Resources.Limits.Memory().Value()).To(gomega.BeNumerically(">=", ParseQuantityOrDie("250Mi").Value()))
 			gomega.Expect(float64(pod.Spec.Containers[0].Resources.Limits.Cpu().MilliValue()) / float64(pod.Spec.Containers[0].Resources.Requests.Cpu().MilliValue())).To(gomega.BeNumerically("~", 1.5))
 			gomega.Expect(float64(pod.Spec.Containers[0].Resources.Limits.Memory().Value()) / float64(pod.Spec.Containers[0].Resources.Requests.Memory().Value())).To(gomega.BeNumerically("~", 2.))
 		}
diff --git a/vertical-pod-autoscaler/e2e/v1beta2/common.go b/vertical-pod-autoscaler/e2e/v1beta2/common.go
index bafc2618cd9f..b3980f7c9b83 100644
--- a/vertical-pod-autoscaler/e2e/v1beta2/common.go
+++ b/vertical-pod-autoscaler/e2e/v1beta2/common.go
@@ -389,13 +389,17 @@ func installLimitRange(f *framework.Framework, minCpuLimit, minMemoryLimit, maxC
 	gomega.Expect(err).NotTo(gomega.HaveOccurred())
 }
 
+// InstallLimitRangeWithMax installs a LimitRange with a maximum limit for CPU and memory.
 func InstallLimitRangeWithMax(f *framework.Framework, maxCpuLimit, maxMemoryLimit string) {
+	ginkgo.By(fmt.Sprintf("Setting up LimitRange with max limits - CPU: %v, memory: %v", maxCpuLimit, maxMemoryLimit))
 	maxCpuLimitQuantity := ParseQuantityOrDie(maxCpuLimit)
 	maxMemoryLimitQuantity := ParseQuantityOrDie(maxMemoryLimit)
 	installLimitRange(f, nil, nil, &maxCpuLimitQuantity, &maxMemoryLimitQuantity)
 }
 
+// InstallLimitRangeWithMin installs a LimitRange with a minimum limit for CPU and memory.
 func InstallLimitRangeWithMin(f *framework.Framework, minCpuLimit, minMemoryLimit string) {
+	ginkgo.By(fmt.Sprintf("Setting up LimitRange with min limits - CPU: %v, memory: %v", minCpuLimit, minMemoryLimit))
 	minCpuLimitQuantity := ParseQuantityOrDie(minCpuLimit)
 	minMemoryLimitQuantity := ParseQuantityOrDie(minMemoryLimit)
 	installLimitRange(f, &minCpuLimitQuantity, &minMemoryLimitQuantity, nil, nil)
diff --git a/vertical-pod-autoscaler/e2e/v1beta2/updater.go b/vertical-pod-autoscaler/e2e/v1beta2/updater.go
index 67f9edb202d6..89b8492a726b 100644
--- a/vertical-pod-autoscaler/e2e/v1beta2/updater.go
+++ b/vertical-pod-autoscaler/e2e/v1beta2/updater.go
@@ -119,6 +119,44 @@ var _ = UpdaterE2eDescribe("Updater", func() {
 		gomega.Expect(err).NotTo(gomega.HaveOccurred())
 		gomega.Expect(evictedCount >= permissiveMaxUnavailable).To(gomega.BeTrue())
 	})
+
+	ginkgo.It("observes max in LimitRange", func() {
+		ginkgo.By("Setting up a hamster deployment")
+		d := NewHamsterDeploymentWithResourcesAndLimits(f,
+			ParseQuantityOrDie("100m") /*cpu request*/, ParseQuantityOrDie("200Mi"), /*memory request*/
+			ParseQuantityOrDie("300m") /*cpu limit*/, ParseQuantityOrDie("400Mi") /*memory limit*/)
+		podList := startDeploymentPods(f, d)
+
+		ginkgo.By("Setting up a VPA CRD")
+		SetupVPA(f, "200m", vpa_types.UpdateModeAuto, hamsterTargetRef)
+
+		// Max CPU limit is 300m and ratio is 3., so max request is 100m, while
+		// recommendation is 200m
+		// Max memory limit is 1T and ratio is 2., so max request is 0.5T
+		InstallLimitRangeWithMax(f, "300m", "1T")
+
+		ginkgo.By(fmt.Sprintf("Waiting for pods to be evicted, hoping it won't happen, sleep for %s", VpaEvictionTimeout.String()))
+		CheckNoPodsEvicted(f, MakePodSet(podList))
+	})
+
+	ginkgo.It("observes min in LimitRange", func() {
+		ginkgo.By("Setting up a hamster deployment")
+		d := NewHamsterDeploymentWithResourcesAndLimits(f,
+			ParseQuantityOrDie("100m") /*cpu request*/, ParseQuantityOrDie("200Mi"), /*memory request*/
+			ParseQuantityOrDie("300m") /*cpu limit*/, ParseQuantityOrDie("400Mi") /*memory limit*/)
+		podList := startDeploymentPods(f, d)
+
+		ginkgo.By("Setting up a VPA CRD")
+		SetupVPA(f, "50m", vpa_types.UpdateModeAuto, hamsterTargetRef)
+
+		// Min CPU limit is 300m and ratio is 3., so min request is 100m, while
+		// recommendation is 50m
+		// Min memory limit is 0 and ratio is 2., so min request is 0
+		InstallLimitRangeWithMin(f, "300m", "0")
+
+		ginkgo.By(fmt.Sprintf("Waiting for pods to be evicted, hoping it won't happen, sleep for %s", VpaEvictionTimeout.String()))
+		CheckNoPodsEvicted(f, MakePodSet(podList))
+	})
 })
 
 func testEvictsPods(f *framework.Framework, controller *autoscaling.CrossVersionObjectReference) {
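Editor's note (not part of the patches above): the ratio arithmetic that these tests
assert can be worked through in isolation. The sketch below is a minimal, self-contained
illustration of the proportional capping the comments describe, using the numbers from
the "caps request according to max limit set in LimitRange" test (100m request, 150m
limit, 250m recommendation, 300m LimitRange max). The function names are hypothetical
and do not exist in the VPA codebase; this is not the admission controller's actual
implementation.

package main

import "fmt"

// scaledLimit keeps the original limit-to-request ratio when a new request is
// applied: newLimit = newRequest * origLimit / origRequest.
// All values are in milli-units, mirroring resource.Quantity.MilliValue().
func scaledLimit(origRequest, origLimit, newRequest int64) int64 {
	return newRequest * origLimit / origRequest
}

// capRequestByMaxLimit lowers a recommended request just enough that the
// proportionally scaled limit stays within the LimitRange max.
func capRequestByMaxLimit(origRequest, origLimit, recommended, maxLimit int64) int64 {
	if scaledLimit(origRequest, origLimit, recommended) <= maxLimit {
		return recommended
	}
	// Largest request whose scaled limit does not exceed maxLimit.
	return maxLimit * origRequest / origLimit
}

func main() {
	// 100m request, 150m limit (ratio 1.5), 250m recommended, 300m max limit.
	req := capRequestByMaxLimit(100, 150, 250, 300)
	fmt.Println(req, scaledLimit(100, 150, req)) // prints: 200 300
}

With these inputs the recommended 250m request would imply a 375m limit, so the request
is capped at 200m and the limit lands exactly on the 300m maximum - which is what the
e2e assertions (200m request, limit within the max, ratio still ~1.5) encode.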