diff --git a/vertical-pod-autoscaler/e2e/v1beta1/admission_controller.go b/vertical-pod-autoscaler/e2e/v1beta1/admission_controller.go
index dbbb4230d335..a250f3fdd5bb 100644
--- a/vertical-pod-autoscaler/e2e/v1beta1/admission_controller.go
+++ b/vertical-pod-autoscaler/e2e/v1beta1/admission_controller.go
@@ -59,17 +59,11 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", func() {
 		}
 	})
 
-	ginkgo.It("caps request to limit set by the user", func() {
-		d := NewHamsterDeploymentWithResources(f, ParseQuantityOrDie("100m") /*cpu*/, ParseQuantityOrDie("100Mi") /*memory*/)
-		d.Spec.Template.Spec.Containers[0].Resources.Limits = apiv1.ResourceList{
-			apiv1.ResourceCPU:    ParseQuantityOrDie("222m"),
-			apiv1.ResourceMemory: ParseQuantityOrDie("123Mi"),
-		}
+	ginkgo.It("keeps limits equal to request", func() {
+		d := NewHamsterDeploymentWithGuaranteedResources(f, ParseQuantityOrDie("100m") /*cpu*/, ParseQuantityOrDie("100Mi") /*memory*/)
 
 		ginkgo.By("Setting up a VPA CRD")
-		vpaCRD := NewVPA(f, "hamster-vpa", &metav1.LabelSelector{
-			MatchLabels: d.Spec.Template.Labels,
-		})
+		vpaCRD := NewVPA(f, "hamster-vpa", hamsterTargetRef)
 		vpaCRD.Status.Recommendation = &vpa_types.RecommendedPodResources{
 			ContainerRecommendations: []vpa_types.RecommendedContainerResources{{
 				ContainerName: "hamster",
@@ -85,11 +79,12 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", func() {
 		podList := startDeploymentPods(f, d)
 
 		// Originally Pods had 100m CPU, 100Mi of memory, but admission controller
-		// should change it to 222m CPU and 123Mi of memory (as this is the recommendation
-		// capped to the limit set by the user)
+		// should change it to 250m CPU and 200Mi of memory. Limits and requests should stay equal.
 		for _, pod := range podList.Items {
-			gomega.Expect(pod.Spec.Containers[0].Resources.Requests[apiv1.ResourceCPU]).To(gomega.Equal(ParseQuantityOrDie("222m")))
-			gomega.Expect(pod.Spec.Containers[0].Resources.Requests[apiv1.ResourceMemory]).To(gomega.Equal(ParseQuantityOrDie("123Mi")))
+			gomega.Expect(pod.Spec.Containers[0].Resources.Requests[apiv1.ResourceCPU]).To(gomega.Equal(ParseQuantityOrDie("250m")))
+			gomega.Expect(pod.Spec.Containers[0].Resources.Requests[apiv1.ResourceMemory]).To(gomega.Equal(ParseQuantityOrDie("200Mi")))
+			gomega.Expect(pod.Spec.Containers[0].Resources.Limits[apiv1.ResourceCPU]).To(gomega.Equal(ParseQuantityOrDie("250m")))
+			gomega.Expect(pod.Spec.Containers[0].Resources.Limits[apiv1.ResourceMemory]).To(gomega.Equal(ParseQuantityOrDie("200Mi")))
 		}
 	})
 
diff --git a/vertical-pod-autoscaler/e2e/v1beta1/common.go b/vertical-pod-autoscaler/e2e/v1beta1/common.go
index 902a3d6ae40b..98db1b4d0e08 100644
--- a/vertical-pod-autoscaler/e2e/v1beta1/common.go
+++ b/vertical-pod-autoscaler/e2e/v1beta1/common.go
@@ -123,6 +123,19 @@ func NewHamsterDeploymentWithResources(f *framework.Framework, cpuQuantity, memo
 	return d
 }
 
+// NewHamsterDeploymentWithGuaranteedResources creates a simple hamster deployment with specific
+// resource requests for e2e test purposes. Since the container in the pod specifies resource limits
+// but not resource requests, K8s will set requests equal to limits and the pod will have the
+// Guaranteed QoS class.
+func NewHamsterDeploymentWithGuaranteedResources(f *framework.Framework, cpuQuantity, memoryQuantity resource.Quantity) *appsv1.Deployment {
+	d := NewHamsterDeployment(f)
+	d.Spec.Template.Spec.Containers[0].Resources.Limits = apiv1.ResourceList{
+		apiv1.ResourceCPU:    cpuQuantity,
+		apiv1.ResourceMemory: memoryQuantity,
+	}
+	return d
+}
+
 // GetHamsterPods returns running hamster pods (matched by hamsterLabels)
 func GetHamsterPods(f *framework.Framework) (*apiv1.PodList, error) {
 	label := labels.SelectorFromSet(labels.Set(hamsterLabels))
diff --git a/vertical-pod-autoscaler/e2e/v1beta2/admission_controller.go b/vertical-pod-autoscaler/e2e/v1beta2/admission_controller.go
index f3bce9c7b393..cb7f4d92074f 100644
--- a/vertical-pod-autoscaler/e2e/v1beta2/admission_controller.go
+++ b/vertical-pod-autoscaler/e2e/v1beta2/admission_controller.go
@@ -56,12 +56,8 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", func() {
 		}
 	})
 
-	ginkgo.It("caps request to limit set by the user", func() {
-		d := NewHamsterDeploymentWithResources(f, ParseQuantityOrDie("100m") /*cpu*/, ParseQuantityOrDie("100Mi") /*memory*/)
-		d.Spec.Template.Spec.Containers[0].Resources.Limits = apiv1.ResourceList{
-			apiv1.ResourceCPU:    ParseQuantityOrDie("222m"),
-			apiv1.ResourceMemory: ParseQuantityOrDie("123Mi"),
-		}
+	ginkgo.It("keeps limits equal to request", func() {
+		d := NewHamsterDeploymentWithGuaranteedResources(f, ParseQuantityOrDie("100m") /*cpu*/, ParseQuantityOrDie("100Mi") /*memory*/)
 
 		ginkgo.By("Setting up a VPA CRD")
 		vpaCRD := NewVPA(f, "hamster-vpa", hamsterTargetRef)
@@ -80,11 +76,12 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", func() {
 		podList := startDeploymentPods(f, d)
 
 		// Originally Pods had 100m CPU, 100Mi of memory, but admission controller
-		// should change it to 222m CPU and 123Mi of memory (as this is the recommendation
-		// capped to the limit set by the user)
+		// should change it to 250m CPU and 200Mi of memory. Limits and requests should stay equal.
 		for _, pod := range podList.Items {
-			gomega.Expect(pod.Spec.Containers[0].Resources.Requests[apiv1.ResourceCPU]).To(gomega.Equal(ParseQuantityOrDie("222m")))
-			gomega.Expect(pod.Spec.Containers[0].Resources.Requests[apiv1.ResourceMemory]).To(gomega.Equal(ParseQuantityOrDie("123Mi")))
+			gomega.Expect(pod.Spec.Containers[0].Resources.Requests[apiv1.ResourceCPU]).To(gomega.Equal(ParseQuantityOrDie("250m")))
+			gomega.Expect(pod.Spec.Containers[0].Resources.Requests[apiv1.ResourceMemory]).To(gomega.Equal(ParseQuantityOrDie("200Mi")))
+			gomega.Expect(pod.Spec.Containers[0].Resources.Limits[apiv1.ResourceCPU]).To(gomega.Equal(ParseQuantityOrDie("250m")))
+			gomega.Expect(pod.Spec.Containers[0].Resources.Limits[apiv1.ResourceMemory]).To(gomega.Equal(ParseQuantityOrDie("200Mi")))
 		}
 	})
 
@@ -195,40 +192,6 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", func() {
 		}
 	})
 
-	ginkgo.It("keeps limits equal to request", func() {
-		d := NewHamsterDeploymentWithGuaranteedResources(f, ParseQuantityOrDie("100m") /*cpu*/, ParseQuantityOrDie("100Mi") /*memory*/)
-
-		ginkgo.By("Setting up a VPA CRD")
-		vpaCRD := NewVPA(f, "hamster-vpa", hamsterTargetRef)
-		vpaCRD.Status.Recommendation = &vpa_types.RecommendedPodResources{
-			ContainerRecommendations: []vpa_types.RecommendedContainerResources{{
-				ContainerName: "hamster",
-				Target: apiv1.ResourceList{
-					apiv1.ResourceCPU:    ParseQuantityOrDie("250m"),
-					apiv1.ResourceMemory: ParseQuantityOrDie("200Mi"),
-				},
-			}},
-		}
-		vpaCRD.Spec.ResourcePolicy = &vpa_types.PodResourcePolicy{
-			ContainerPolicies: []vpa_types.ContainerResourcePolicy{{
-				ContainerName: "hamster",
-			}},
-		}
-		InstallVPA(f, vpaCRD)
-
-		ginkgo.By("Setting up a hamster deployment")
-		podList := startDeploymentPods(f, d)
-
-		// Originally Pods had 100m CPU, 100Mi of memory, but admission controller
-		// should change it to 250m CPU and 200Mi of memory. Limits and requests should stay equal
-		for _, pod := range podList.Items {
-			gomega.Expect(pod.Spec.Containers[0].Resources.Requests[apiv1.ResourceCPU]).To(gomega.Equal(ParseQuantityOrDie("250m")))
-			gomega.Expect(pod.Spec.Containers[0].Resources.Requests[apiv1.ResourceMemory]).To(gomega.Equal(ParseQuantityOrDie("200Mi")))
-			gomega.Expect(pod.Spec.Containers[0].Resources.Limits[apiv1.ResourceCPU]).To(gomega.Equal(ParseQuantityOrDie("250m")))
-			gomega.Expect(pod.Spec.Containers[0].Resources.Limits[apiv1.ResourceMemory]).To(gomega.Equal(ParseQuantityOrDie("200Mi")))
-		}
-	})
-
 })
 
 func startDeploymentPods(f *framework.Framework, deployment *appsv1.Deployment) *apiv1.PodList {