diff --git a/vertical-pod-autoscaler/e2e/v1beta1/admission_controller.go b/vertical-pod-autoscaler/e2e/v1beta1/admission_controller.go
index deb5c3f967c6..3e9636388d1f 100644
--- a/vertical-pod-autoscaler/e2e/v1beta1/admission_controller.go
+++ b/vertical-pod-autoscaler/e2e/v1beta1/admission_controller.go
@@ -169,7 +169,7 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", func() {
 		}
 	})
 
-	ginkgo.It("caps request according to max limit set in LimitRange", func() {
+	ginkgo.It("caps request according to container max limit set in LimitRange", func() {
 		d := NewHamsterDeploymentWithResourcesAndLimits(f,
 			ParseQuantityOrDie("100m") /*cpu request*/, ParseQuantityOrDie("100Mi"), /*memory request*/
 			ParseQuantityOrDie("150m") /*cpu limit*/, ParseQuantityOrDie("200Mi") /*memory limit*/)
@@ -192,7 +192,7 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", func() {
 		// Max CPU limit is 300m and ratio is 1.5, so max request is 200m, while
 		// recommendation is 250m
 		// Max memory limit is 1Gi and ratio is 2., so max request is 0.5Gi
-		InstallLimitRangeWithMax(f, "300m", "1Gi")
+		InstallLimitRangeWithMax(f, "300m", "1Gi", apiv1.LimitTypeContainer)
 
 		ginkgo.By("Setting up a hamster deployment")
 		podList := startDeploymentPods(f, d)
@@ -211,7 +211,7 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", func() {
 		}
 	})
 
-	ginkgo.It("raises request according to min limit set in LimitRange", func() {
+	ginkgo.It("raises request according to container min limit set in LimitRange", func() {
 		d := NewHamsterDeploymentWithResourcesAndLimits(f,
 			ParseQuantityOrDie("100m") /*cpu request*/, ParseQuantityOrDie("200Mi"), /*memory request*/
 			ParseQuantityOrDie("150m") /*cpu limit*/, ParseQuantityOrDie("400Mi") /*memory limit*/)
@@ -231,10 +231,11 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", func() {
 		}
 		InstallVPA(f, vpaCRD)
 
-		// Min CPU limit is 75m and ratio is 1.5, so min request is 50m
-		// Min memory limit is 250Mi and ratio is 2., so min request is 125Mi, while
-		// recommendation is 100Mi.
-		InstallLimitRangeWithMin(f, "75m", "250Mi")
+		// Min CPU from limit range is 50m and ratio is 1.5. Min applies to both limit and request,
+		// so min request is 50m and min limit is 75m.
+		// Min memory limit is 250Mi and it applies to both limit and request. Recommendation is
+		// 100Mi, so it should be scaled up to 250Mi.
+		InstallLimitRangeWithMin(f, "50m", "250Mi", apiv1.LimitTypeContainer)
 
 		ginkgo.By("Setting up a hamster deployment")
 		podList := startDeploymentPods(f, d)
@@ -245,7 +246,112 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", func() {
 		// Limit to request ratio should stay unchanged.
 		for _, pod := range podList.Items {
 			gomega.Expect(*pod.Spec.Containers[0].Resources.Requests.Cpu()).To(gomega.Equal(ParseQuantityOrDie("250m")))
-			gomega.Expect(*pod.Spec.Containers[0].Resources.Requests.Memory()).To(gomega.Equal(ParseQuantityOrDie("125Mi")))
+			gomega.Expect(*pod.Spec.Containers[0].Resources.Requests.Memory()).To(gomega.Equal(ParseQuantityOrDie("250Mi")))
+			gomega.Expect(pod.Spec.Containers[0].Resources.Limits.Cpu().MilliValue()).To(gomega.BeNumerically(">=", 75))
+			gomega.Expect(pod.Spec.Containers[0].Resources.Limits.Memory().Value()).To(gomega.BeNumerically(">=", 250*1024*1024))
+			gomega.Expect(float64(pod.Spec.Containers[0].Resources.Limits.Cpu().MilliValue()) / float64(pod.Spec.Containers[0].Resources.Requests.Cpu().MilliValue())).To(gomega.BeNumerically("~", 1.5))
+			gomega.Expect(float64(pod.Spec.Containers[0].Resources.Limits.Memory().Value()) / float64(pod.Spec.Containers[0].Resources.Requests.Memory().Value())).To(gomega.BeNumerically("~", 2.))
+		}
+	})
+
+	ginkgo.It("caps request according to pod max limit set in LimitRange", func() {
+		d := NewHamsterDeploymentWithResourcesAndLimits(f,
+			ParseQuantityOrDie("100m") /*cpu request*/, ParseQuantityOrDie("100Mi"), /*memory request*/
+			ParseQuantityOrDie("150m") /*cpu limit*/, ParseQuantityOrDie("200Mi") /*memory limit*/)
+		d.Spec.Template.Spec.Containers = append(d.Spec.Template.Spec.Containers, d.Spec.Template.Spec.Containers[0])
+		d.Spec.Template.Spec.Containers[1].Name = "hamster2"
+		ginkgo.By("Setting up a VPA CRD")
+		vpaCRD := NewVPA(f, "hamster-vpa", &metav1.LabelSelector{
+			MatchLabels: d.Spec.Template.Labels,
+		})
+		vpaCRD.Status.Recommendation = &vpa_types.RecommendedPodResources{
+			ContainerRecommendations: []vpa_types.RecommendedContainerResources{
+				{
+					ContainerName: "hamster",
+					Target: apiv1.ResourceList{
+						apiv1.ResourceCPU:    ParseQuantityOrDie("250m"),
+						apiv1.ResourceMemory: ParseQuantityOrDie("200Mi"),
+					},
+				},
+				{
+					ContainerName: "hamster2",
+					Target: apiv1.ResourceList{
+						apiv1.ResourceCPU:    ParseQuantityOrDie("250m"),
+						apiv1.ResourceMemory: ParseQuantityOrDie("200Mi"),
+					},
+				},
+			},
+		}
+		InstallVPA(f, vpaCRD)
+
+		// Max CPU limit is 600m per pod, 300m per container and ratio is 1.5, so max request is 200m,
+		// while recommendation is 250m
+		// Max memory limit is 1Gi per pod, 512Mi per container and ratio is 2., so max request is 256Mi
+		InstallLimitRangeWithMax(f, "600m", "1Gi", apiv1.LimitTypePod)
+
+		ginkgo.By("Setting up a hamster deployment")
+		podList := startDeploymentPods(f, d)
+
+		// Originally Pods had 100m CPU, 100Mi of memory, but admission controller
+		// should change it to 200m CPU (as this is the recommendation
+		// capped according to max limit in LimitRange) and 200Mi of memory,
+		// which is uncapped. Limit to request ratio should stay unchanged.
+		for _, pod := range podList.Items {
+			gomega.Expect(*pod.Spec.Containers[0].Resources.Requests.Cpu()).To(gomega.Equal(ParseQuantityOrDie("200m")))
+			gomega.Expect(*pod.Spec.Containers[0].Resources.Requests.Memory()).To(gomega.Equal(ParseQuantityOrDie("200Mi")))
+			gomega.Expect(pod.Spec.Containers[0].Resources.Limits.Cpu().MilliValue()).To(gomega.BeNumerically("<=", 300))
+			gomega.Expect(pod.Spec.Containers[0].Resources.Limits.Memory().Value()).To(gomega.BeNumerically("<=", 1024*1024*1024))
+			gomega.Expect(float64(pod.Spec.Containers[0].Resources.Limits.Cpu().MilliValue()) / float64(pod.Spec.Containers[0].Resources.Requests.Cpu().MilliValue())).To(gomega.BeNumerically("~", 1.5))
+			gomega.Expect(float64(pod.Spec.Containers[0].Resources.Limits.Memory().Value()) / float64(pod.Spec.Containers[0].Resources.Requests.Memory().Value())).To(gomega.BeNumerically("~", 2.))
+		}
+	})
+
+	ginkgo.It("raises request according to pod min limit set in LimitRange", func() {
+		d := NewHamsterDeploymentWithResourcesAndLimits(f,
+			ParseQuantityOrDie("100m") /*cpu request*/, ParseQuantityOrDie("200Mi"), /*memory request*/
+			ParseQuantityOrDie("150m") /*cpu limit*/, ParseQuantityOrDie("400Mi") /*memory limit*/)
+		d.Spec.Template.Spec.Containers = append(d.Spec.Template.Spec.Containers, d.Spec.Template.Spec.Containers[0])
+		d.Spec.Template.Spec.Containers[1].Name = "hamster2"
+		ginkgo.By("Setting up a VPA CRD")
+		vpaCRD := NewVPA(f, "hamster-vpa", &metav1.LabelSelector{
+			MatchLabels: d.Spec.Template.Labels,
+		})
+		vpaCRD.Status.Recommendation = &vpa_types.RecommendedPodResources{
+			ContainerRecommendations: []vpa_types.RecommendedContainerResources{
+				{
+					ContainerName: "hamster",
+					Target: apiv1.ResourceList{
+						apiv1.ResourceCPU:    ParseQuantityOrDie("120m"),
+						apiv1.ResourceMemory: ParseQuantityOrDie("100Mi"), // memory is downscaled
+					},
+				},
+				{
+					ContainerName: "hamster2",
+					Target: apiv1.ResourceList{
+						apiv1.ResourceCPU:    ParseQuantityOrDie("120m"),
+						apiv1.ResourceMemory: ParseQuantityOrDie("100Mi"), // memory is downscaled
+					},
+				},
+			},
+		}
+		InstallVPA(f, vpaCRD)
+
+		// Min CPU from limit range is 100m per pod, 50m per container and ratio is 1.5. Min applies
+		// to both limit and request, so min request is 50m and min limit is 75m.
+		// Min memory limit is 500Mi per pod, 250Mi per container and it applies to both limit and
+		// request. Recommendation is 100Mi, so it should be scaled up to 250Mi.
+		InstallLimitRangeWithMin(f, "100m", "500Mi", apiv1.LimitTypePod)
+
+		ginkgo.By("Setting up a hamster deployment")
+		podList := startDeploymentPods(f, d)
+
+		// Originally Pods had 100m CPU, 200Mi of memory, but admission controller
+		// should change it to 120m CPU (following the recommendation) and 250Mi of
+		// memory, since this is the lowest request that the LimitRange allows.
+		// Limit to request ratio should stay unchanged.
+		for _, pod := range podList.Items {
+			gomega.Expect(*pod.Spec.Containers[0].Resources.Requests.Cpu()).To(gomega.Equal(ParseQuantityOrDie("120m")))
+			gomega.Expect(*pod.Spec.Containers[0].Resources.Requests.Memory()).To(gomega.Equal(ParseQuantityOrDie("250Mi")))
 			gomega.Expect(pod.Spec.Containers[0].Resources.Limits.Cpu().MilliValue()).To(gomega.BeNumerically(">=", 75))
 			gomega.Expect(pod.Spec.Containers[0].Resources.Limits.Memory().Value()).To(gomega.BeNumerically(">=", 250*1024*1024))
 			gomega.Expect(float64(pod.Spec.Containers[0].Resources.Limits.Cpu().MilliValue()) / float64(pod.Spec.Containers[0].Resources.Requests.Cpu().MilliValue())).To(gomega.BeNumerically("~", 1.5))
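All of the admission-controller tests above encode the same capping arithmetic: take the recommendation, clamp it so the scaled limit fits the LimitRange bound, and keep the original limit-to-request ratio intact. A minimal standalone sketch of that arithmetic, for reference; the capRequest helper is illustrative and not part of this patch:

package main

import "fmt"

// capRequest clamps a recommended request so that the resulting limit stays
// within a per-container max limit while preserving the limit-to-request ratio.
func capRequest(recommendedMilli, maxLimitMilli int64, ratio float64) (requestMilli, limitMilli int64) {
	// Largest request whose scaled limit still fits under the max limit.
	maxRequestMilli := int64(float64(maxLimitMilli) / ratio)
	requestMilli = recommendedMilli
	if requestMilli > maxRequestMilli {
		requestMilli = maxRequestMilli
	}
	// Keep the ratio unchanged when deriving the limit.
	limitMilli = int64(float64(requestMilli) * ratio)
	return requestMilli, limitMilli
}

func main() {
	// Mirrors the container max test: recommendation 250m, max limit 300m, ratio 1.5.
	req, lim := capRequest(250, 300, 1.5)
	fmt.Printf("request=%dm limit=%dm\n", req, lim) // request=200m limit=300m
}

Running it reproduces the container max case checked above: a 250m recommendation against a 300m max limit at ratio 1.5 lands on a 200m request and a 300m limit.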
diff --git a/vertical-pod-autoscaler/e2e/v1beta1/common.go b/vertical-pod-autoscaler/e2e/v1beta1/common.go
index 68c7569a9e2e..8c4351cc8f72 100644
--- a/vertical-pod-autoscaler/e2e/v1beta1/common.go
+++ b/vertical-pod-autoscaler/e2e/v1beta1/common.go
@@ -390,7 +390,7 @@ func WaitForConditionPresent(c *vpa_clientset.Clientset, vpa *vpa_types.Vertical
 	})
 }
 
-func installLimitRange(f *framework.Framework, minCpuLimit, minMemoryLimit, maxCpuLimit, maxMemoryLimit *resource.Quantity) {
+func installLimitRange(f *framework.Framework, minCpuLimit, minMemoryLimit, maxCpuLimit, maxMemoryLimit *resource.Quantity, lrType apiv1.LimitType) {
 	lr := &apiv1.LimitRange{
 		ObjectMeta: metav1.ObjectMeta{
 			Namespace: f.Namespace.Name,
@@ -403,7 +403,7 @@ func installLimitRange(f *framework.Framework, minCpuLimit, minMemoryLimit, maxC
 
 	if maxMemoryLimit != nil || maxCpuLimit != nil {
 		lrItem := apiv1.LimitRangeItem{
-			Type: apiv1.LimitTypeContainer,
+			Type: lrType,
 			Max:  apiv1.ResourceList{},
 		}
 		if maxCpuLimit != nil {
@@ -417,7 +417,7 @@ func installLimitRange(f *framework.Framework, minCpuLimit, minMemoryLimit, maxC
 
 	if minMemoryLimit != nil || minCpuLimit != nil {
 		lrItem := apiv1.LimitRangeItem{
-			Type: apiv1.LimitTypeContainer,
+			Type: lrType,
 			Min:  apiv1.ResourceList{},
 		}
 		if minCpuLimit != nil {
@@ -429,21 +429,50 @@ func installLimitRange(f *framework.Framework, minCpuLimit, minMemoryLimit, maxC
 		lr.Spec.Limits = append(lr.Spec.Limits, lrItem)
 	}
 	_, err := f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).Create(lr)
-	gomega.Expect(err).NotTo(gomega.HaveOccurred())
+	gomega.Expect(err).NotTo(gomega.HaveOccurred(), "unexpected error when creating limit range")
 }
 
 // InstallLimitRangeWithMax installs a LimitRange with a maximum limit for CPU and memory.
-func InstallLimitRangeWithMax(f *framework.Framework, maxCpuLimit, maxMemoryLimit string) {
+func InstallLimitRangeWithMax(f *framework.Framework, maxCpuLimit, maxMemoryLimit string, lrType apiv1.LimitType) {
 	ginkgo.By(fmt.Sprintf("Setting up LimitRange with max limits - CPU: %v, memory: %v", maxCpuLimit, maxMemoryLimit))
 	maxCpuLimitQuantity := ParseQuantityOrDie(maxCpuLimit)
 	maxMemoryLimitQuantity := ParseQuantityOrDie(maxMemoryLimit)
-	installLimitRange(f, nil, nil, &maxCpuLimitQuantity, &maxMemoryLimitQuantity)
+	installLimitRange(f, nil, nil, &maxCpuLimitQuantity, &maxMemoryLimitQuantity, lrType)
 }
 
 // InstallLimitRangeWithMin installs a LimitRange with a minimum limit for CPU and memory.
-func InstallLimitRangeWithMin(f *framework.Framework, minCpuLimit, minMemoryLimit string) {
+func InstallLimitRangeWithMin(f *framework.Framework, minCpuLimit, minMemoryLimit string, lrType apiv1.LimitType) {
 	ginkgo.By(fmt.Sprintf("Setting up LimitRange with min limits - CPU: %v, memory: %v", minCpuLimit, minMemoryLimit))
 	minCpuLimitQuantity := ParseQuantityOrDie(minCpuLimit)
 	minMemoryLimitQuantity := ParseQuantityOrDie(minMemoryLimit)
-	installLimitRange(f, &minCpuLimitQuantity, &minMemoryLimitQuantity, nil, nil)
+	installLimitRange(f, &minCpuLimitQuantity, &minMemoryLimitQuantity, nil, nil, lrType)
+}
+
+// SetupVPAForTwoHamsters creates and installs a VPA with recommendations for two hamster containers for e2e test purposes.
+func SetupVPAForTwoHamsters(f *framework.Framework, cpu string, mode vpa_types.UpdateMode) {
+	vpaCRD := NewVPA(f, "hamster-vpa", &metav1.LabelSelector{
+		MatchLabels: hamsterLabels,
+	})
+	vpaCRD.Spec.UpdatePolicy.UpdateMode = &mode
+
+	cpuQuantity := ParseQuantityOrDie(cpu)
+	resourceList := apiv1.ResourceList{apiv1.ResourceCPU: cpuQuantity}
+
+	vpaCRD.Status.Recommendation = &vpa_types.RecommendedPodResources{
+		ContainerRecommendations: []vpa_types.RecommendedContainerResources{
+			{
+				ContainerName: "hamster",
+				Target:        resourceList,
+				LowerBound:    resourceList,
+				UpperBound:    resourceList,
+			},
+			{
+				ContainerName: "hamster2",
+				Target:        resourceList,
+				LowerBound:    resourceList,
+				UpperBound:    resourceList,
+			},
+		},
+	}
+	InstallVPA(f, vpaCRD)
 }
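The common.go change threads a LimitType through to the created object. For reference, a minimal sketch of the LimitRange that installLimitRange would build when called with apiv1.LimitTypePod; the helper and the object name are illustrative, not taken from the patch:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// podLimitRange builds a LimitRange whose single item has type Pod, so its
// Max constrains the sum of resources over all containers in a pod.
func podLimitRange(namespace, maxCPU, maxMemory string) *corev1.LimitRange {
	return &corev1.LimitRange{
		ObjectMeta: metav1.ObjectMeta{Namespace: namespace, Name: "hamster-lr"}, // name is illustrative
		Spec: corev1.LimitRangeSpec{
			Limits: []corev1.LimitRangeItem{{
				Type: corev1.LimitTypePod,
				Max: corev1.ResourceList{
					corev1.ResourceCPU:    resource.MustParse(maxCPU),
					corev1.ResourceMemory: resource.MustParse(maxMemory),
				},
			}},
		},
	}
}

func main() {
	lr := podLimitRange("default", "600m", "1Gi")
	fmt.Println(lr.Spec.Limits[0].Type, lr.Spec.Limits[0].Max.Cpu(), lr.Spec.Limits[0].Max.Memory())
}

The design point: a Pod-type item constrains the pod-wide total rather than each container, which is why the tests install pod-level values that are exactly twice the per-container ones for the two identical hamster containers.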
diff --git a/vertical-pod-autoscaler/e2e/v1beta1/updater.go b/vertical-pod-autoscaler/e2e/v1beta1/updater.go
index d61949fd5ee3..ac69c130a448 100644
--- a/vertical-pod-autoscaler/e2e/v1beta1/updater.go
+++ b/vertical-pod-autoscaler/e2e/v1beta1/updater.go
@@ -99,7 +99,7 @@ var _ = UpdaterE2eDescribe("Updater", func() {
 		gomega.Expect(evictedCount >= permissiveMaxUnavailable).To(gomega.BeTrue())
 	})
 
-	ginkgo.It("observes max in LimitRange", func() {
+	ginkgo.It("observes container min in LimitRange", func() {
 		ginkgo.By("Setting up a hamster deployment")
 		d := NewHamsterDeploymentWithResourcesAndLimits(f,
 			ParseQuantityOrDie("100m") /*cpu request*/, ParseQuantityOrDie("200Mi"), /*memory request*/
@@ -107,31 +107,54 @@ var _ = UpdaterE2eDescribe("Updater", func() {
 		podList := startDeploymentPods(f, d)
 
 		ginkgo.By("Setting up a VPA CRD")
-		SetupVPA(f, "200m", vpa_types.UpdateModeAuto)
+		SetupVPA(f, "50m", vpa_types.UpdateModeAuto)
 
-		// Max CPU limit is 300m and ratio is 3., so max request is 100m, while
-		// recommendation is 200m
-		// Max memory limit is 1T and ratio is 2., so max request is 0.5T
-		InstallLimitRangeWithMax(f, "300m", "1T")
+		// Min CPU from limit range is 100m and ratio is 3. Min applies to both limit and request,
+		// so min request is 100m and min limit is 300m.
+		// Min memory limit is 0 and ratio is 2., so min request is 0
+		InstallLimitRangeWithMin(f, "100m", "0", apiv1.LimitTypeContainer)
 
 		ginkgo.By(fmt.Sprintf("Waiting for pods to be evicted, hoping it won't happen, sleep for %s", VpaEvictionTimeout.String()))
 		CheckNoPodsEvicted(f, MakePodSet(podList))
 	})
 
-	ginkgo.It("observes min in LimitRange", func() {
+	ginkgo.It("observes pod max in LimitRange", func() {
 		ginkgo.By("Setting up a hamster deployment")
 		d := NewHamsterDeploymentWithResourcesAndLimits(f,
 			ParseQuantityOrDie("100m") /*cpu request*/, ParseQuantityOrDie("200Mi"), /*memory request*/
 			ParseQuantityOrDie("300m") /*cpu limit*/, ParseQuantityOrDie("400Mi") /*memory limit*/)
+		d.Spec.Template.Spec.Containers = append(d.Spec.Template.Spec.Containers, d.Spec.Template.Spec.Containers[0])
+		d.Spec.Template.Spec.Containers[1].Name = "hamster2"
 		podList := startDeploymentPods(f, d)
 
 		ginkgo.By("Setting up a VPA CRD")
-		SetupVPA(f, "50m", vpa_types.UpdateModeAuto)
+		SetupVPAForTwoHamsters(f, "200m", vpa_types.UpdateModeAuto)
+
+		// Max CPU limit is 600m per pod, 300m per container and ratio is 3., so max request is 100m,
+		// while recommendation is 200m
+		// Max memory limit is 2T per pod, 1T per container and ratio is 2., so max request is 0.5T
+		InstallLimitRangeWithMax(f, "600m", "2T", apiv1.LimitTypePod)
+
+		ginkgo.By(fmt.Sprintf("Waiting for pods to be evicted, hoping it won't happen, sleep for %s", VpaEvictionTimeout.String()))
+		CheckNoPodsEvicted(f, MakePodSet(podList))
+	})
+
+	ginkgo.It("observes pod min in LimitRange", func() {
+		ginkgo.By("Setting up a hamster deployment")
+		d := NewHamsterDeploymentWithResourcesAndLimits(f,
+			ParseQuantityOrDie("100m") /*cpu request*/, ParseQuantityOrDie("200Mi"), /*memory request*/
+			ParseQuantityOrDie("300m") /*cpu limit*/, ParseQuantityOrDie("400Mi") /*memory limit*/)
+		d.Spec.Template.Spec.Containers = append(d.Spec.Template.Spec.Containers, d.Spec.Template.Spec.Containers[0])
+		d.Spec.Template.Spec.Containers[1].Name = "hamster2"
+		podList := startDeploymentPods(f, d)
+
+		ginkgo.By("Setting up a VPA CRD")
+		SetupVPAForTwoHamsters(f, "50m", vpa_types.UpdateModeAuto)
 
-		// Min CPU limit is 300m and ratio is 3., so min request is 100m, while
-		// recommendation is 200m
+		// Min CPU from limit range is 200m per pod, 100m per container and ratio is 3. Min applies
+		// to both limit and request, so min request is 100m and min limit is 300m.
 		// Min memory limit is 0 and ratio is 2., so min request is 0
-		InstallLimitRangeWithMin(f, "300m", "0")
+		InstallLimitRangeWithMin(f, "200m", "0", apiv1.LimitTypePod)
 
 		ginkgo.By(fmt.Sprintf("Waiting for pods to be evicted, hoping it won't happen, sleep for %s", VpaEvictionTimeout.String()))
 		CheckNoPodsEvicted(f, MakePodSet(podList))
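The pod min comments above rely on the pod-level minimum being spread across the two hamster containers. A tiny sketch of that arithmetic; the helper name and the even split are illustrative assumptions (the split is even here only because both containers carry identical recommendations), not VPA code:

package main

import "fmt"

// perContainerMin splits a pod-level LimitRange minimum across containers;
// the per-container share becomes the minimum request, and the
// limit-to-request ratio then fixes the minimum limit.
func perContainerMin(podMinMilli, containers int64, ratio float64) (minRequestMilli, minLimitMilli int64) {
	minRequestMilli = podMinMilli / containers
	minLimitMilli = int64(float64(minRequestMilli) * ratio)
	return minRequestMilli, minLimitMilli
}

func main() {
	// Mirrors "observes pod min in LimitRange": 200m per pod, 2 containers, ratio 3.
	req, lim := perContainerMin(200, 2, 3)
	fmt.Printf("min request=%dm min limit=%dm\n", req, lim) // min request=100m min limit=300m
}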