Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Bunch of e2e tests for making sure limit handling is correct in VPA #2068

Merged
merged 3 commits into from
May 29, 2019
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
121 changes: 120 additions & 1 deletion vertical-pod-autoscaler/e2e/v1beta1/admission_controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -63,7 +63,9 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", func() {
d := NewHamsterDeploymentWithGuaranteedResources(f, ParseQuantityOrDie("100m") /*cpu*/, ParseQuantityOrDie("100Mi") /*memory*/)

ginkgo.By("Setting up a VPA CRD")
vpaCRD := NewVPA(f, "hamster-vpa", hamsterTargetRef)
vpaCRD := NewVPA(f, "hamster-vpa", &metav1.LabelSelector{
MatchLabels: d.Spec.Template.Labels,
})
vpaCRD.Status.Recommendation = &vpa_types.RecommendedPodResources{
ContainerRecommendations: []vpa_types.RecommendedContainerResources{{
ContainerName: "hamster",
Expand All @@ -88,6 +90,123 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", func() {
}
})

ginkgo.It("keeps limits to request ratio constant", func() {
	// The deployment starts with requests 100m/100Mi and limits 150m/200Mi,
	// i.e. limit-to-request ratios of 1.5 (CPU) and 2.0 (memory).
	d := NewHamsterDeploymentWithResourcesAndLimits(f,
		ParseQuantityOrDie("100m") /*cpu request*/, ParseQuantityOrDie("100Mi"), /*memory request*/
		ParseQuantityOrDie("150m") /*cpu limit*/, ParseQuantityOrDie("200Mi") /*memory limit*/)

	ginkgo.By("Setting up a VPA CRD")
	vpaCRD := NewVPA(f, "hamster-vpa", &metav1.LabelSelector{
		MatchLabels: d.Spec.Template.Labels,
	})
	vpaCRD.Status.Recommendation = &vpa_types.RecommendedPodResources{
		ContainerRecommendations: []vpa_types.RecommendedContainerResources{{
			ContainerName: "hamster",
			Target: apiv1.ResourceList{
				apiv1.ResourceCPU:    ParseQuantityOrDie("250m"),
				apiv1.ResourceMemory: ParseQuantityOrDie("200Mi"),
			},
		}},
	}
	InstallVPA(f, vpaCRD)

	ginkgo.By("Setting up a hamster deployment")
	podList := startDeploymentPods(f, d)

	// The admission controller should rewrite requests to the recommendation
	// (250m CPU, 200Mi memory) while leaving the limit-to-request ratios intact.
	for _, pod := range podList.Items {
		resources := pod.Spec.Containers[0].Resources
		gomega.Expect(*resources.Requests.Cpu()).To(gomega.Equal(ParseQuantityOrDie("250m")))
		gomega.Expect(*resources.Requests.Memory()).To(gomega.Equal(ParseQuantityOrDie("200Mi")))
		cpuRatio := float64(resources.Limits.Cpu().MilliValue()) / float64(resources.Requests.Cpu().MilliValue())
		memRatio := float64(resources.Limits.Memory().Value()) / float64(resources.Requests.Memory().Value())
		gomega.Expect(cpuRatio).To(gomega.BeNumerically("~", 1.5))
		gomega.Expect(memRatio).To(gomega.BeNumerically("~", 2.))
	}
})

ginkgo.It("caps request according to max limit set in LimitRange", func() {
	// Deployment starts with requests 100m/100Mi and limits 150m/200Mi,
	// i.e. limit-to-request ratios of 1.5 (CPU) and 2.0 (memory).
	d := NewHamsterDeploymentWithResourcesAndLimits(f,
		ParseQuantityOrDie("100m") /*cpu request*/, ParseQuantityOrDie("100Mi"), /*memory request*/
		ParseQuantityOrDie("150m") /*cpu limit*/, ParseQuantityOrDie("200Mi") /*memory limit*/)

	ginkgo.By("Setting up a VPA CRD")
	vpaCRD := NewVPA(f, "hamster-vpa", &metav1.LabelSelector{
		MatchLabels: d.Spec.Template.Labels,
	})
	vpaCRD.Status.Recommendation = &vpa_types.RecommendedPodResources{
		ContainerRecommendations: []vpa_types.RecommendedContainerResources{{
			ContainerName: "hamster",
			Target: apiv1.ResourceList{
				apiv1.ResourceCPU:    ParseQuantityOrDie("250m"),
				apiv1.ResourceMemory: ParseQuantityOrDie("200Mi"),
			},
		}},
	}
	InstallVPA(f, vpaCRD)

	// Max CPU limit is 300m and ratio is 1.5, so max request is 200m, while
	// recommendation is 250m
	// Max memory limit is 1T and ratio is 2., so max request is 0.5T
	InstallLimitRangeWithMax(f, "300m", "1T")

	ginkgo.By("Setting up a hamster deployment")
	podList := startDeploymentPods(f, d)

	// Originally Pods had 100m CPU, 100Mi of memory, but admission controller
	// should change it to 200m CPU (as this is the recommendation
	// capped according to max limit in LimitRange) and 200Mi of memory,
	// which is uncapped. Limit to request ratio should stay unchanged.
	for _, pod := range podList.Items {
		gomega.Expect(*pod.Spec.Containers[0].Resources.Requests.Cpu()).To(gomega.Equal(ParseQuantityOrDie("200m")))
		gomega.Expect(*pod.Spec.Containers[0].Resources.Requests.Memory()).To(gomega.Equal(ParseQuantityOrDie("200Mi")))
		// BeNumerically cannot compare resource.Quantity values (it only accepts
		// numeric types), so compare raw numeric values: 300m == 300 milli-CPU,
		// 1T == 1e12 bytes.
		gomega.Expect(pod.Spec.Containers[0].Resources.Limits.Cpu().MilliValue()).To(gomega.BeNumerically("<=", 300))
		gomega.Expect(pod.Spec.Containers[0].Resources.Limits.Memory().Value()).To(gomega.BeNumerically("<=", int64(1e12)))
		gomega.Expect(float64(pod.Spec.Containers[0].Resources.Limits.Cpu().MilliValue()) / float64(pod.Spec.Containers[0].Resources.Requests.Cpu().MilliValue())).To(gomega.BeNumerically("~", 1.5))
		gomega.Expect(float64(pod.Spec.Containers[0].Resources.Limits.Memory().Value()) / float64(pod.Spec.Containers[0].Resources.Requests.Memory().Value())).To(gomega.BeNumerically("~", 2.))
	}
})

ginkgo.It("raises request according to min limit set in LimitRange", func() {
	// Deployment starts with requests 100m/200Mi and limits 150m/400Mi,
	// i.e. limit-to-request ratios of 1.5 (CPU) and 2.0 (memory).
	d := NewHamsterDeploymentWithResourcesAndLimits(f,
		ParseQuantityOrDie("100m") /*cpu request*/, ParseQuantityOrDie("200Mi"), /*memory request*/
		ParseQuantityOrDie("150m") /*cpu limit*/, ParseQuantityOrDie("400Mi") /*memory limit*/)

	ginkgo.By("Setting up a VPA CRD")
	vpaCRD := NewVPA(f, "hamster-vpa", &metav1.LabelSelector{
		MatchLabels: d.Spec.Template.Labels,
	})
	vpaCRD.Status.Recommendation = &vpa_types.RecommendedPodResources{
		ContainerRecommendations: []vpa_types.RecommendedContainerResources{{
			ContainerName: "hamster",
			Target: apiv1.ResourceList{
				apiv1.ResourceCPU:    ParseQuantityOrDie("250m"),
				apiv1.ResourceMemory: ParseQuantityOrDie("100Mi"), // memory is downscaled
			},
		}},
	}
	InstallVPA(f, vpaCRD)

	// Min CPU limit is 75m and ratio is 1.5, so min request is 50m
	// Min memory limit is 250Mi and ratio is 2., so min request is 125Mi, while
	// recommendation is 100Mi.
	InstallLimitRangeWithMin(f, "75m", "250Mi")

	ginkgo.By("Setting up a hamster deployment")
	podList := startDeploymentPods(f, d)

	// Originally Pods had 100m CPU, 200Mi of memory, but admission controller
	// should change it to 250m CPU and 125Mi of memory, since this is the lowest
	// request that limitrange allows.
	// Limit to request ratio should stay unchanged.
	for _, pod := range podList.Items {
		gomega.Expect(*pod.Spec.Containers[0].Resources.Requests.Cpu()).To(gomega.Equal(ParseQuantityOrDie("250m")))
		gomega.Expect(*pod.Spec.Containers[0].Resources.Requests.Memory()).To(gomega.Equal(ParseQuantityOrDie("125Mi")))
		// BeNumerically cannot compare resource.Quantity values (it only accepts
		// numeric types), so compare raw numeric values: 75m == 75 milli-CPU,
		// 250Mi == 250*1024*1024 bytes.
		gomega.Expect(pod.Spec.Containers[0].Resources.Limits.Cpu().MilliValue()).To(gomega.BeNumerically(">=", 75))
		gomega.Expect(pod.Spec.Containers[0].Resources.Limits.Memory().Value()).To(gomega.BeNumerically(">=", int64(250*1024*1024)))
		gomega.Expect(float64(pod.Spec.Containers[0].Resources.Limits.Cpu().MilliValue()) / float64(pod.Spec.Containers[0].Resources.Requests.Cpu().MilliValue())).To(gomega.BeNumerically("~", 1.5))
		gomega.Expect(float64(pod.Spec.Containers[0].Resources.Limits.Memory().Value()) / float64(pod.Spec.Containers[0].Resources.Requests.Memory().Value())).To(gomega.BeNumerically("~", 2.))
	}
})

ginkgo.It("caps request to max set in VPA", func() {
d := NewHamsterDeploymentWithResources(f, ParseQuantityOrDie("100m") /*cpu*/, ParseQuantityOrDie("100Mi") /*memory*/)

Expand Down
69 changes: 69 additions & 0 deletions vertical-pod-autoscaler/e2e/v1beta1/common.go
Original file line number Diff line number Diff line change
Expand Up @@ -136,6 +136,17 @@ func NewHamsterDeploymentWithGuaranteedResources(f *framework.Framework, cpuQuan
return d
}

// NewHamsterDeploymentWithResourcesAndLimits creates a simple hamster deployment with specific
// resource requests and limits for e2e test purposes.
// NewHamsterDeploymentWithResourcesAndLimits creates a simple hamster deployment with specific
// resource requests and limits for e2e test purposes. It builds on the request-only
// deployment helper and attaches a limits list to its single container.
func NewHamsterDeploymentWithResourcesAndLimits(f *framework.Framework, cpuQuantityRequest, memoryQuantityRequest, cpuQuantityLimit, memoryQuantityLimit resource.Quantity) *appsv1.Deployment {
	limits := apiv1.ResourceList{
		apiv1.ResourceCPU:    cpuQuantityLimit,
		apiv1.ResourceMemory: memoryQuantityLimit,
	}
	deployment := NewHamsterDeploymentWithResources(f, cpuQuantityRequest, memoryQuantityRequest)
	deployment.Spec.Template.Spec.Containers[0].Resources.Limits = limits
	return deployment
}

// GetHamsterPods returns running hamster pods (matched by hamsterLabels)
func GetHamsterPods(f *framework.Framework) (*apiv1.PodList, error) {
label := labels.SelectorFromSet(labels.Set(hamsterLabels))
Expand Down Expand Up @@ -342,3 +353,61 @@ func WaitForConditionPresent(c *vpa_clientset.Clientset, vpa *vpa_types.Vertical
return false
})
}

// installLimitRange creates a LimitRange named "hamster-lr" in the test
// namespace with container-level constraints built from the given optional
// quantities. A nil quantity is omitted from the corresponding constraint;
// when both quantities of a pair (min or max) are nil, no item is added for it.
// Fails the ginkgo spec if the API call returns an error.
func installLimitRange(f *framework.Framework, minCpuLimit, minMemoryLimit, maxCpuLimit, maxMemoryLimit *resource.Quantity) {
	lr := &apiv1.LimitRange{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: f.Namespace.Name,
			Name:      "hamster-lr",
		},
		Spec: apiv1.LimitRangeSpec{
			Limits: []apiv1.LimitRangeItem{},
		},
	}

	if maxMemoryLimit != nil || maxCpuLimit != nil {
		lrItem := apiv1.LimitRangeItem{
			Type: apiv1.LimitTypeContainer,
			Max:  apiv1.ResourceList{},
		}
		if maxCpuLimit != nil {
			lrItem.Max[apiv1.ResourceCPU] = *maxCpuLimit
		}
		if maxMemoryLimit != nil {
			lrItem.Max[apiv1.ResourceMemory] = *maxMemoryLimit
		}
		lr.Spec.Limits = append(lr.Spec.Limits, lrItem)
	}

	if minMemoryLimit != nil || minCpuLimit != nil {
		// Bug fix: this item previously initialized Max instead of Min,
		// leaving Min a nil map, so the writes below panicked.
		lrItem := apiv1.LimitRangeItem{
			Type: apiv1.LimitTypeContainer,
			Min:  apiv1.ResourceList{},
		}
		if minCpuLimit != nil {
			lrItem.Min[apiv1.ResourceCPU] = *minCpuLimit
		}
		if minMemoryLimit != nil {
			lrItem.Min[apiv1.ResourceMemory] = *minMemoryLimit
		}
		lr.Spec.Limits = append(lr.Spec.Limits, lrItem)
	}
	_, err := f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).Create(lr)
	gomega.Expect(err).NotTo(gomega.HaveOccurred())
}

// InstallLimitRangeWithMax installs a LimitRange with a maximum limit for CPU and memory.
// The limits are given as quantity strings (e.g. "300m", "1T") and parsed here.
func InstallLimitRangeWithMax(f *framework.Framework, maxCpuLimit, maxMemoryLimit string) {
	ginkgo.By(fmt.Sprintf("Setting up LimitRange with max limits - CPU: %v, memory: %v", maxCpuLimit, maxMemoryLimit))
	cpu := ParseQuantityOrDie(maxCpuLimit)
	memory := ParseQuantityOrDie(maxMemoryLimit)
	installLimitRange(f, nil, nil, &cpu, &memory)
}

// InstallLimitRangeWithMin installs a LimitRange with a minimum limit for CPU and memory.
// The limits are given as quantity strings (e.g. "75m", "250Mi") and parsed here.
func InstallLimitRangeWithMin(f *framework.Framework, minCpuLimit, minMemoryLimit string) {
	ginkgo.By(fmt.Sprintf("Setting up LimitRange with min limits - CPU: %v, memory: %v", minCpuLimit, minMemoryLimit))
	cpu := ParseQuantityOrDie(minCpuLimit)
	memory := ParseQuantityOrDie(minMemoryLimit)
	installLimitRange(f, &cpu, &memory, nil, nil)
}
38 changes: 38 additions & 0 deletions vertical-pod-autoscaler/e2e/v1beta1/updater.go
Original file line number Diff line number Diff line change
Expand Up @@ -98,6 +98,44 @@ var _ = UpdaterE2eDescribe("Updater", func() {
gomega.Expect(err).NotTo(gomega.HaveOccurred())
gomega.Expect(evictedCount >= permissiveMaxUnavailable).To(gomega.BeTrue())
})

ginkgo.It("observes max in LimitRange", func() {
	ginkgo.By("Setting up a hamster deployment")
	// Requests 100m/200Mi with limits 300m/400Mi give limit-to-request
	// ratios of 3.0 (CPU) and 2.0 (memory).
	hamsterDeployment := NewHamsterDeploymentWithResourcesAndLimits(f,
		ParseQuantityOrDie("100m") /*cpu request*/, ParseQuantityOrDie("200Mi"), /*memory request*/
		ParseQuantityOrDie("300m") /*cpu limit*/, ParseQuantityOrDie("400Mi") /*memory limit*/)
	initialPods := startDeploymentPods(f, hamsterDeployment)

	ginkgo.By("Setting up a VPA CRD")
	SetupVPA(f, "200m", vpa_types.UpdateModeAuto)

	// Max CPU limit is 300m and ratio is 3., so max request is 100m, while
	// recommendation is 200m
	// Max memory limit is 1T and ratio is 2., so max request is 0.5T
	InstallLimitRangeWithMax(f, "300m", "1T")

	ginkgo.By(fmt.Sprintf("Waiting for pods to be evicted, hoping it won't happen, sleep for %s", VpaEvictionTimeout.String()))
	CheckNoPodsEvicted(f, MakePodSet(initialPods))
})

ginkgo.It("observes min in LimitRange", func() {
	ginkgo.By("Setting up a hamster deployment")
	// Requests 100m/200Mi with limits 300m/400Mi give limit-to-request
	// ratios of 3.0 (CPU) and 2.0 (memory).
	hamsterDeployment := NewHamsterDeploymentWithResourcesAndLimits(f,
		ParseQuantityOrDie("100m") /*cpu request*/, ParseQuantityOrDie("200Mi"), /*memory request*/
		ParseQuantityOrDie("300m") /*cpu limit*/, ParseQuantityOrDie("400Mi") /*memory limit*/)
	initialPods := startDeploymentPods(f, hamsterDeployment)

	ginkgo.By("Setting up a VPA CRD")
	SetupVPA(f, "50m", vpa_types.UpdateModeAuto)

	// Min CPU limit is 300m and ratio is 3., so min request is 100m, while
	// recommendation is 200m
	// Min memory limit is 0 and ratio is 2., so min request is 0
	InstallLimitRangeWithMin(f, "300m", "0")

	ginkgo.By(fmt.Sprintf("Waiting for pods to be evicted, hoping it won't happen, sleep for %s", VpaEvictionTimeout.String()))
	CheckNoPodsEvicted(f, MakePodSet(initialPods))
})
})

func testEvictsPods(f *framework.Framework, controllerKind string) {
Expand Down
111 changes: 111 additions & 0 deletions vertical-pod-autoscaler/e2e/v1beta2/admission_controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -85,6 +85,117 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", func() {
}
})

ginkgo.It("keeps limits to request ratio constant", func() {
	// The deployment starts with requests 100m/100Mi and limits 150m/200Mi,
	// i.e. limit-to-request ratios of 1.5 (CPU) and 2.0 (memory).
	d := NewHamsterDeploymentWithResourcesAndLimits(f,
		ParseQuantityOrDie("100m") /*cpu request*/, ParseQuantityOrDie("100Mi"), /*memory request*/
		ParseQuantityOrDie("150m") /*cpu limit*/, ParseQuantityOrDie("200Mi") /*memory limit*/)

	ginkgo.By("Setting up a VPA CRD")
	vpaCRD := NewVPA(f, "hamster-vpa", hamsterTargetRef)
	vpaCRD.Status.Recommendation = &vpa_types.RecommendedPodResources{
		ContainerRecommendations: []vpa_types.RecommendedContainerResources{{
			ContainerName: "hamster",
			Target: apiv1.ResourceList{
				apiv1.ResourceCPU:    ParseQuantityOrDie("250m"),
				apiv1.ResourceMemory: ParseQuantityOrDie("200Mi"),
			},
		}},
	}
	InstallVPA(f, vpaCRD)

	ginkgo.By("Setting up a hamster deployment")
	podList := startDeploymentPods(f, d)

	// The admission controller should rewrite requests to the recommendation
	// (250m CPU, 200Mi memory) while leaving the limit-to-request ratios intact.
	for _, pod := range podList.Items {
		resources := pod.Spec.Containers[0].Resources
		gomega.Expect(*resources.Requests.Cpu()).To(gomega.Equal(ParseQuantityOrDie("250m")))
		gomega.Expect(*resources.Requests.Memory()).To(gomega.Equal(ParseQuantityOrDie("200Mi")))
		cpuRatio := float64(resources.Limits.Cpu().MilliValue()) / float64(resources.Requests.Cpu().MilliValue())
		memRatio := float64(resources.Limits.Memory().Value()) / float64(resources.Requests.Memory().Value())
		gomega.Expect(cpuRatio).To(gomega.BeNumerically("~", 1.5))
		gomega.Expect(memRatio).To(gomega.BeNumerically("~", 2.))
	}
})

ginkgo.It("caps request according to max limit set in LimitRange", func() {
	// Deployment starts with requests 100m/100Mi and limits 150m/200Mi,
	// i.e. limit-to-request ratios of 1.5 (CPU) and 2.0 (memory).
	d := NewHamsterDeploymentWithResourcesAndLimits(f,
		ParseQuantityOrDie("100m") /*cpu request*/, ParseQuantityOrDie("100Mi"), /*memory request*/
		ParseQuantityOrDie("150m") /*cpu limit*/, ParseQuantityOrDie("200Mi") /*memory limit*/)

	ginkgo.By("Setting up a VPA CRD")
	vpaCRD := NewVPA(f, "hamster-vpa", hamsterTargetRef)
	vpaCRD.Status.Recommendation = &vpa_types.RecommendedPodResources{
		ContainerRecommendations: []vpa_types.RecommendedContainerResources{{
			ContainerName: "hamster",
			Target: apiv1.ResourceList{
				apiv1.ResourceCPU:    ParseQuantityOrDie("250m"),
				apiv1.ResourceMemory: ParseQuantityOrDie("200Mi"),
			},
		}},
	}
	InstallVPA(f, vpaCRD)

	// Max CPU limit is 300m and ratio is 1.5, so max request is 200m, while
	// recommendation is 250m
	// Max memory limit is 1T and ratio is 2., so max request is 0.5T
	InstallLimitRangeWithMax(f, "300m", "1T")

	ginkgo.By("Setting up a hamster deployment")
	podList := startDeploymentPods(f, d)

	// Originally Pods had 100m CPU, 100Mi of memory, but admission controller
	// should change it to 200m CPU (as this is the recommendation
	// capped according to max limit in LimitRange) and 200Mi of memory,
	// which is uncapped. Limit to request ratio should stay unchanged.
	for _, pod := range podList.Items {
		gomega.Expect(*pod.Spec.Containers[0].Resources.Requests.Cpu()).To(gomega.Equal(ParseQuantityOrDie("200m")))
		gomega.Expect(*pod.Spec.Containers[0].Resources.Requests.Memory()).To(gomega.Equal(ParseQuantityOrDie("200Mi")))
		// BeNumerically cannot compare resource.Quantity values (it only accepts
		// numeric types), so compare raw numeric values: 300m == 300 milli-CPU,
		// 1T == 1e12 bytes.
		gomega.Expect(pod.Spec.Containers[0].Resources.Limits.Cpu().MilliValue()).To(gomega.BeNumerically("<=", 300))
		gomega.Expect(pod.Spec.Containers[0].Resources.Limits.Memory().Value()).To(gomega.BeNumerically("<=", int64(1e12)))
		gomega.Expect(float64(pod.Spec.Containers[0].Resources.Limits.Cpu().MilliValue()) / float64(pod.Spec.Containers[0].Resources.Requests.Cpu().MilliValue())).To(gomega.BeNumerically("~", 1.5))
		gomega.Expect(float64(pod.Spec.Containers[0].Resources.Limits.Memory().Value()) / float64(pod.Spec.Containers[0].Resources.Requests.Memory().Value())).To(gomega.BeNumerically("~", 2.))
	}
})

ginkgo.It("raises request according to min limit set in LimitRange", func() {
	// Deployment starts with requests 100m/200Mi and limits 150m/400Mi,
	// i.e. limit-to-request ratios of 1.5 (CPU) and 2.0 (memory).
	d := NewHamsterDeploymentWithResourcesAndLimits(f,
		ParseQuantityOrDie("100m") /*cpu request*/, ParseQuantityOrDie("200Mi"), /*memory request*/
		ParseQuantityOrDie("150m") /*cpu limit*/, ParseQuantityOrDie("400Mi") /*memory limit*/)

	ginkgo.By("Setting up a VPA CRD")
	vpaCRD := NewVPA(f, "hamster-vpa", hamsterTargetRef)
	vpaCRD.Status.Recommendation = &vpa_types.RecommendedPodResources{
		ContainerRecommendations: []vpa_types.RecommendedContainerResources{{
			ContainerName: "hamster",
			Target: apiv1.ResourceList{
				apiv1.ResourceCPU:    ParseQuantityOrDie("250m"),
				apiv1.ResourceMemory: ParseQuantityOrDie("100Mi"), // memory is downscaled
			},
		}},
	}
	InstallVPA(f, vpaCRD)

	// Min CPU limit is 75m and ratio is 1.5, so min request is 50m
	// Min memory limit is 250Mi and ratio is 2., so min request is 125Mi, while
	// recommendation is 100Mi.
	InstallLimitRangeWithMin(f, "75m", "250Mi")

	ginkgo.By("Setting up a hamster deployment")
	podList := startDeploymentPods(f, d)

	// Originally Pods had 100m CPU, 200Mi of memory, but admission controller
	// should change it to 250m CPU and 125Mi of memory, since this is the lowest
	// request that limitrange allows.
	// Limit to request ratio should stay unchanged.
	for _, pod := range podList.Items {
		gomega.Expect(*pod.Spec.Containers[0].Resources.Requests.Cpu()).To(gomega.Equal(ParseQuantityOrDie("250m")))
		gomega.Expect(*pod.Spec.Containers[0].Resources.Requests.Memory()).To(gomega.Equal(ParseQuantityOrDie("125Mi")))
		// BeNumerically cannot compare resource.Quantity values (it only accepts
		// numeric types), so compare raw numeric values: 75m == 75 milli-CPU,
		// 250Mi == 250*1024*1024 bytes.
		gomega.Expect(pod.Spec.Containers[0].Resources.Limits.Cpu().MilliValue()).To(gomega.BeNumerically(">=", 75))
		gomega.Expect(pod.Spec.Containers[0].Resources.Limits.Memory().Value()).To(gomega.BeNumerically(">=", int64(250*1024*1024)))
		gomega.Expect(float64(pod.Spec.Containers[0].Resources.Limits.Cpu().MilliValue()) / float64(pod.Spec.Containers[0].Resources.Requests.Cpu().MilliValue())).To(gomega.BeNumerically("~", 1.5))
		gomega.Expect(float64(pod.Spec.Containers[0].Resources.Limits.Memory().Value()) / float64(pod.Spec.Containers[0].Resources.Requests.Memory().Value())).To(gomega.BeNumerically("~", 2.))
	}
})

ginkgo.It("caps request to max set in VPA", func() {
d := NewHamsterDeploymentWithResources(f, ParseQuantityOrDie("100m") /*cpu*/, ParseQuantityOrDie("100Mi") /*memory*/)

Expand Down
Loading