Skip to content

Commit

Permalink
Merge pull request #2224 from jbartosik/bugfix-00
Browse files Browse the repository at this point in the history
Fix v1beta1 VPA limit range e2e tests
  • Loading branch information
k8s-ci-robot authored Jul 30, 2019
2 parents 55082d6 + e156ce9 commit 0a7bc88
Show file tree
Hide file tree
Showing 3 changed files with 186 additions and 27 deletions.
122 changes: 114 additions & 8 deletions vertical-pod-autoscaler/e2e/v1beta1/admission_controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -169,7 +169,7 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", func() {
}
})

ginkgo.It("caps request according to max limit set in LimitRange", func() {
ginkgo.It("caps request according to container max limit set in LimitRange", func() {
d := NewHamsterDeploymentWithResourcesAndLimits(f,
ParseQuantityOrDie("100m") /*cpu request*/, ParseQuantityOrDie("100Mi"), /*memory request*/
ParseQuantityOrDie("150m") /*cpu limit*/, ParseQuantityOrDie("200Mi") /*memory limit*/)
Expand All @@ -192,7 +192,7 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", func() {
// Max CPU limit is 300m and ratio is 1.5, so max request is 200m, while
// recommendation is 250m
// Max memory limit is 1Gi and ratio is 2., so max request is 0.5Gi
InstallLimitRangeWithMax(f, "300m", "1Gi")
InstallLimitRangeWithMax(f, "300m", "1Gi", apiv1.LimitTypeContainer)

ginkgo.By("Setting up a hamster deployment")
podList := startDeploymentPods(f, d)
Expand All @@ -211,7 +211,7 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", func() {
}
})

ginkgo.It("raises request according to min limit set in LimitRange", func() {
ginkgo.It("raises request according to container min limit set in LimitRange", func() {
d := NewHamsterDeploymentWithResourcesAndLimits(f,
ParseQuantityOrDie("100m") /*cpu request*/, ParseQuantityOrDie("200Mi"), /*memory request*/
ParseQuantityOrDie("150m") /*cpu limit*/, ParseQuantityOrDie("400Mi") /*memory limit*/)
Expand All @@ -231,10 +231,11 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", func() {
}
InstallVPA(f, vpaCRD)

// Min CPU limit is 75m and ratio is 1.5, so min request is 50m
// Min memory limit is 250Mi and ratio is 2., so min request is 125Mi, while
// recommendation is 100Mi.
InstallLimitRangeWithMin(f, "75m", "250Mi")
// Min CPU from limit range is 50m and ratio is 1.5. Min applies to both limit and request, so the
// min request is 50m and the min limit is 75m.
// Min memory limit is 250Mi and it applies to both limit and request. Recommendation is 100Mi.
// It should be scaled up to 250Mi.
InstallLimitRangeWithMin(f, "50m", "250Mi", apiv1.LimitTypeContainer)

ginkgo.By("Setting up a hamster deployment")
podList := startDeploymentPods(f, d)
Expand All @@ -245,7 +246,112 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", func() {
// Limit to request ratio should stay unchanged.
for _, pod := range podList.Items {
gomega.Expect(*pod.Spec.Containers[0].Resources.Requests.Cpu()).To(gomega.Equal(ParseQuantityOrDie("250m")))
gomega.Expect(*pod.Spec.Containers[0].Resources.Requests.Memory()).To(gomega.Equal(ParseQuantityOrDie("125Mi")))
gomega.Expect(*pod.Spec.Containers[0].Resources.Requests.Memory()).To(gomega.Equal(ParseQuantityOrDie("250Mi")))
gomega.Expect(pod.Spec.Containers[0].Resources.Limits.Cpu().MilliValue()).To(gomega.BeNumerically(">=", 75))
gomega.Expect(pod.Spec.Containers[0].Resources.Limits.Memory().Value()).To(gomega.BeNumerically(">=", 250*1024*1024))
gomega.Expect(float64(pod.Spec.Containers[0].Resources.Limits.Cpu().MilliValue()) / float64(pod.Spec.Containers[0].Resources.Requests.Cpu().MilliValue())).To(gomega.BeNumerically("~", 1.5))
gomega.Expect(float64(pod.Spec.Containers[0].Resources.Limits.Memory().Value()) / float64(pod.Spec.Containers[0].Resources.Requests.Memory().Value())).To(gomega.BeNumerically("~", 2.))
}
})

ginkgo.It("caps request according to pod max limit set in LimitRange", func() {
	// Two identical hamster containers, so that the pod-level LimitRange max
	// (which constrains the pod as a whole) is exercised across containers.
	d := NewHamsterDeploymentWithResourcesAndLimits(f,
		ParseQuantityOrDie("100m") /*cpu request*/, ParseQuantityOrDie("100Mi"), /*memory request*/
		ParseQuantityOrDie("150m") /*cpu limit*/, ParseQuantityOrDie("200Mi") /*memory limit*/)
	d.Spec.Template.Spec.Containers = append(d.Spec.Template.Spec.Containers, d.Spec.Template.Spec.Containers[0])
	d.Spec.Template.Spec.Containers[1].Name = "hamster2"
	ginkgo.By("Setting up a VPA CRD")
	vpaCRD := NewVPA(f, "hamster-vpa", &metav1.LabelSelector{
		MatchLabels: d.Spec.Template.Labels,
	})
	// Recommend 250m CPU / 200Mi memory for each of the two containers; the
	// CPU recommendation deliberately exceeds what the LimitRange will allow.
	vpaCRD.Status.Recommendation = &vpa_types.RecommendedPodResources{
		ContainerRecommendations: []vpa_types.RecommendedContainerResources{
			{
				ContainerName: "hamster",
				Target: apiv1.ResourceList{
					apiv1.ResourceCPU:    ParseQuantityOrDie("250m"),
					apiv1.ResourceMemory: ParseQuantityOrDie("200Mi"),
				},
			},
			{
				ContainerName: "hamster2",
				Target: apiv1.ResourceList{
					apiv1.ResourceCPU:    ParseQuantityOrDie("250m"),
					apiv1.ResourceMemory: ParseQuantityOrDie("200Mi"),
				},
			},
		},
	}
	InstallVPA(f, vpaCRD)

	// Max CPU limit is 600m for the pod, i.e. 300m per container, and the
	// limit-to-request ratio is 1.5, so the max request is 200m, while the
	// recommendation is 250m.
	// Max memory limit is 1Gi for the pod (presumably split evenly, 0.5Gi per
	// container) and the ratio is 2., so the 200Mi recommendation is uncapped.
	InstallLimitRangeWithMax(f, "600m", "1Gi", apiv1.LimitTypePod)

	ginkgo.By("Setting up a hamster deployment")
	podList := startDeploymentPods(f, d)

	// Originally Pods had 100m CPU, 100Mi of memory, but admission controller
	// should change it to 200m CPU (as this is the recommendation
	// capped according to max limit in LimitRange) and 200Mi of memory,
	// which is uncapped. Limit to request ratio should stay unchanged.
	for _, pod := range podList.Items {
		gomega.Expect(*pod.Spec.Containers[0].Resources.Requests.Cpu()).To(gomega.Equal(ParseQuantityOrDie("200m")))
		gomega.Expect(*pod.Spec.Containers[0].Resources.Requests.Memory()).To(gomega.Equal(ParseQuantityOrDie("200Mi")))
		gomega.Expect(pod.Spec.Containers[0].Resources.Limits.Cpu().MilliValue()).To(gomega.BeNumerically("<=", 300))
		gomega.Expect(pod.Spec.Containers[0].Resources.Limits.Memory().Value()).To(gomega.BeNumerically("<=", 1024*1024*1024))
		gomega.Expect(float64(pod.Spec.Containers[0].Resources.Limits.Cpu().MilliValue()) / float64(pod.Spec.Containers[0].Resources.Requests.Cpu().MilliValue())).To(gomega.BeNumerically("~", 1.5))
		gomega.Expect(float64(pod.Spec.Containers[0].Resources.Limits.Memory().Value()) / float64(pod.Spec.Containers[0].Resources.Requests.Memory().Value())).To(gomega.BeNumerically("~", 2.))
	}
})

ginkgo.It("raises request according to pod min limit set in LimitRange", func() {
d := NewHamsterDeploymentWithResourcesAndLimits(f,
ParseQuantityOrDie("100m") /*cpu request*/, ParseQuantityOrDie("200Mi"), /*memory request*/
ParseQuantityOrDie("150m") /*cpu limit*/, ParseQuantityOrDie("400Mi") /*memory limit*/)
d.Spec.Template.Spec.Containers = append(d.Spec.Template.Spec.Containers, d.Spec.Template.Spec.Containers[0])
d.Spec.Template.Spec.Containers[1].Name = "hamster2"
ginkgo.By("Setting up a VPA CRD")
vpaCRD := NewVPA(f, "hamster-vpa", &metav1.LabelSelector{
MatchLabels: d.Spec.Template.Labels,
})
vpaCRD.Status.Recommendation = &vpa_types.RecommendedPodResources{
ContainerRecommendations: []vpa_types.RecommendedContainerResources{
{
ContainerName: "hamster",
Target: apiv1.ResourceList{
apiv1.ResourceCPU: ParseQuantityOrDie("120m"),
apiv1.ResourceMemory: ParseQuantityOrDie("100Mi"), // memory is downscaled
},
},
{
ContainerName: "hamster2",
Target: apiv1.ResourceList{
apiv1.ResourceCPU: ParseQuantityOrDie("120m"),
apiv1.ResourceMemory: ParseQuantityOrDie("100Mi"), // memory is downscaled
},
},
},
}
InstallVPA(f, vpaCRD)

// Min CPU from limit range is 100m per pod, i.e. 50m per container, and ratio is 1.5. Min applies
// to both limit and request, so the min request is 50m and the min limit is 75m.
// Min memory limit is 500Mi per pod, 250 per container and it applies to both limit and request.
// Recommendation is 100Mi it should be scaled up to 250Mi.
InstallLimitRangeWithMin(f, "100m", "500Mi", apiv1.LimitTypePod)

ginkgo.By("Setting up a hamster deployment")
podList := startDeploymentPods(f, d)

// Originally Pods had 100m CPU, 200Mi of memory, but admission controller
// should change it to 120m CPU (the recommendation, already above the minimum) and 250Mi of
// memory, since 250Mi is the lowest request that the limit range allows per container.
// Limit to request ratio should stay unchanged.
for _, pod := range podList.Items {
gomega.Expect(*pod.Spec.Containers[0].Resources.Requests.Cpu()).To(gomega.Equal(ParseQuantityOrDie("120m")))
gomega.Expect(*pod.Spec.Containers[0].Resources.Requests.Memory()).To(gomega.Equal(ParseQuantityOrDie("250Mi")))
gomega.Expect(pod.Spec.Containers[0].Resources.Limits.Cpu().MilliValue()).To(gomega.BeNumerically(">=", 75))
gomega.Expect(pod.Spec.Containers[0].Resources.Limits.Memory().Value()).To(gomega.BeNumerically(">=", 250*1024*1024))
gomega.Expect(float64(pod.Spec.Containers[0].Resources.Limits.Cpu().MilliValue()) / float64(pod.Spec.Containers[0].Resources.Requests.Cpu().MilliValue())).To(gomega.BeNumerically("~", 1.5))
Expand Down
46 changes: 38 additions & 8 deletions vertical-pod-autoscaler/e2e/v1beta1/common.go
Original file line number Diff line number Diff line change
Expand Up @@ -33,6 +33,7 @@ import (
"k8s.io/apimachinery/pkg/util/wait"
vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta1"
vpa_clientset "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/client/clientset/versioned"
"k8s.io/kubernetes/pkg/apis/autoscaling"
"k8s.io/kubernetes/test/e2e/framework"
)

Expand Down Expand Up @@ -390,7 +391,7 @@ func WaitForConditionPresent(c *vpa_clientset.Clientset, vpa *vpa_types.Vertical
})
}

func installLimitRange(f *framework.Framework, minCpuLimit, minMemoryLimit, maxCpuLimit, maxMemoryLimit *resource.Quantity) {
func installLimitRange(f *framework.Framework, minCpuLimit, minMemoryLimit, maxCpuLimit, maxMemoryLimit *resource.Quantity, lrType apiv1.LimitType) {
lr := &apiv1.LimitRange{
ObjectMeta: metav1.ObjectMeta{
Namespace: f.Namespace.Name,
Expand All @@ -403,7 +404,7 @@ func installLimitRange(f *framework.Framework, minCpuLimit, minMemoryLimit, maxC

if maxMemoryLimit != nil || maxCpuLimit != nil {
lrItem := apiv1.LimitRangeItem{
Type: apiv1.LimitTypeContainer,
Type: lrType,
Max: apiv1.ResourceList{},
}
if maxCpuLimit != nil {
Expand All @@ -417,7 +418,7 @@ func installLimitRange(f *framework.Framework, minCpuLimit, minMemoryLimit, maxC

if minMemoryLimit != nil || minCpuLimit != nil {
lrItem := apiv1.LimitRangeItem{
Type: apiv1.LimitTypeContainer,
Type: lrType,
Min: apiv1.ResourceList{},
}
if minCpuLimit != nil {
Expand All @@ -429,21 +430,50 @@ func installLimitRange(f *framework.Framework, minCpuLimit, minMemoryLimit, maxC
lr.Spec.Limits = append(lr.Spec.Limits, lrItem)
}
_, err := f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).Create(lr)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "unexpected error when creating limit range")
}

// InstallLimitRangeWithMax installs a LimitRange with a maximum limit for CPU and memory.
func InstallLimitRangeWithMax(f *framework.Framework, maxCpuLimit, maxMemoryLimit string) {
func InstallLimitRangeWithMax(f *framework.Framework, maxCpuLimit, maxMemoryLimit string, lrType apiv1.LimitType) {
ginkgo.By(fmt.Sprintf("Setting up LimitRange with max limits - CPU: %v, memory: %v", maxCpuLimit, maxMemoryLimit))
maxCpuLimitQuantity := ParseQuantityOrDie(maxCpuLimit)
maxMemoryLimitQuantity := ParseQuantityOrDie(maxMemoryLimit)
installLimitRange(f, nil, nil, &maxCpuLimitQuantity, &maxMemoryLimitQuantity)
installLimitRange(f, nil, nil, &maxCpuLimitQuantity, &maxMemoryLimitQuantity, lrType)
}

// InstallLimitRangeWithMin installs a LimitRange with a minimum limit for CPU and memory.
func InstallLimitRangeWithMin(f *framework.Framework, minCpuLimit, minMemoryLimit string) {
func InstallLimitRangeWithMin(f *framework.Framework, minCpuLimit, minMemoryLimit string, lrType apiv1.LimitType) {
ginkgo.By(fmt.Sprintf("Setting up LimitRange with min limits - CPU: %v, memory: %v", minCpuLimit, minMemoryLimit))
minCpuLimitQuantity := ParseQuantityOrDie(minCpuLimit)
minMemoryLimitQuantity := ParseQuantityOrDie(minMemoryLimit)
installLimitRange(f, &minCpuLimitQuantity, &minMemoryLimitQuantity, nil, nil)
installLimitRange(f, &minCpuLimitQuantity, &minMemoryLimitQuantity, nil, nil, lrType)
}

// SetupVPAForTwoHamsters creates and installs a VPA object covering the two
// hamster containers ("hamster" and "hamster2") for e2e test purposes. The
// given CPU quantity is used as target, lower bound and upper bound of the
// recommendation for both containers, so the recommendation is fully pinned.
func SetupVPAForTwoHamsters(f *framework.Framework, cpu string, mode vpa_types.UpdateMode) {
	vpaCRD := NewVPA(f, "hamster-vpa", &metav1.LabelSelector{
		MatchLabels: hamsterLabels,
	})
	vpaCRD.Spec.UpdatePolicy.UpdateMode = &mode

	// One shared ResourceList: identical bounds leave the recommender no slack.
	cpuQuantity := ParseQuantityOrDie(cpu)
	resourceList := apiv1.ResourceList{apiv1.ResourceCPU: cpuQuantity}

	vpaCRD.Status.Recommendation = &vpa_types.RecommendedPodResources{
		ContainerRecommendations: []vpa_types.RecommendedContainerResources{
			{
				ContainerName: "hamster",
				Target:        resourceList,
				LowerBound:    resourceList,
				UpperBound:    resourceList,
			},
			{
				ContainerName: "hamster2",
				Target:        resourceList,
				LowerBound:    resourceList,
				UpperBound:    resourceList,
			},
		},
	}
	InstallVPA(f, vpaCRD)
}
45 changes: 34 additions & 11 deletions vertical-pod-autoscaler/e2e/v1beta1/updater.go
Original file line number Diff line number Diff line change
Expand Up @@ -99,39 +99,62 @@ var _ = UpdaterE2eDescribe("Updater", func() {
gomega.Expect(evictedCount >= permissiveMaxUnavailable).To(gomega.BeTrue())
})

ginkgo.It("observes max in LimitRange", func() {
ginkgo.It("observes container min in LimitRange", func() {
	ginkgo.By("Setting up a hamster deployment")
	d := NewHamsterDeploymentWithResourcesAndLimits(f,
		ParseQuantityOrDie("100m") /*cpu request*/, ParseQuantityOrDie("200Mi"), /*memory request*/
		ParseQuantityOrDie("300m") /*cpu limit*/, ParseQuantityOrDie("400Mi") /*memory limit*/)
	podList := startDeploymentPods(f, d)

	ginkgo.By("Setting up a VPA CRD")
	// Recommend 50m CPU — below what the LimitRange minimum will allow.
	SetupVPA(f, "50m", vpa_types.UpdateModeAuto)

	// Min CPU from limit range is 100m and ratio is 3., so the recommendation
	// is raised to a 100m request and a 300m limit — exactly the pods' current
	// resources. Min memory limit is 0 and ratio is 2., so min request is 0.
	InstallLimitRangeWithMin(f, "100m", "0", apiv1.LimitTypeContainer)

	// Since the capped recommendation matches what the pods already have,
	// the updater must not evict anything.
	ginkgo.By(fmt.Sprintf("Waiting for pods to be evicted, hoping it won't happen, sleep for %s", VpaEvictionTimeout.String()))
	CheckNoPodsEvicted(f, MakePodSet(podList))
})

ginkgo.It("observes min in LimitRange", func() {
ginkgo.It("observes pod max in LimitRange", func() {
	ginkgo.By("Setting up a hamster deployment")
	// Two identical hamster containers so the pod-scoped LimitRange max
	// applies across both.
	d := NewHamsterDeploymentWithResourcesAndLimits(f,
		ParseQuantityOrDie("100m") /*cpu request*/, ParseQuantityOrDie("200Mi"), /*memory request*/
		ParseQuantityOrDie("300m") /*cpu limit*/, ParseQuantityOrDie("400Mi") /*memory limit*/)
	d.Spec.Template.Spec.Containers = append(d.Spec.Template.Spec.Containers, d.Spec.Template.Spec.Containers[0])
	d.Spec.Template.Spec.Containers[1].Name = "hamster2"
	podList := startDeploymentPods(f, d)

	ginkgo.By("Setting up a VPA CRD")
	// Recommend 200m CPU per container — above what the LimitRange max allows.
	SetupVPAForTwoHamsters(f, "200m", vpa_types.UpdateModeAuto)

	// Max CPU limit is 600m per pod, 300m per container and ratio is 3., so max request is 100m,
	// while recommendation is 200m.
	// Max memory limit is 2T per pod, 1T per container and ratio is 2., so max request is 0.5T.
	// The capped recommendation equals the pods' current resources, so no eviction should happen.
	InstallLimitRangeWithMax(f, "600m", "2T", apiv1.LimitTypePod)

	ginkgo.By(fmt.Sprintf("Waiting for pods to be evicted, hoping it won't happen, sleep for %s", VpaEvictionTimeout.String()))
	CheckNoPodsEvicted(f, MakePodSet(podList))
})

ginkgo.It("observes pod min in LimitRange", func() {
ginkgo.By("Setting up a hamster deployment")
d := NewHamsterDeploymentWithResourcesAndLimits(f,
ParseQuantityOrDie("100m") /*cpu request*/, ParseQuantityOrDie("200Mi"), /*memory request*/
ParseQuantityOrDie("300m") /*cpu limit*/, ParseQuantityOrDie("400Mi") /*memory limit*/)
d.Spec.Template.Spec.Containers = append(d.Spec.Template.Spec.Containers, d.Spec.Template.Spec.Containers[0])
d.Spec.Template.Spec.Containers[1].Name = "hamster2"
podList := startDeploymentPods(f, d)

ginkgo.By("Setting up a VPA CRD")
SetupVPAForTwoHamsters(f, "50m", vpa_types.UpdateModeAuto)

// Min CPU limit is 300m and ratio is 3., so min request is 100m, while
// recommendation is 200m
// Min CPU from limit range is 200m per pod, 100m per container and ratio is 3. Min applies both
// to limit and request so min request is 100m request and 300m limit
// Min memory limit is 0 and ratio is 2., so min request is 0
InstallLimitRangeWithMin(f, "300m", "0")
InstallLimitRangeWithMin(f, "200m", "0", apiv1.LimitTypePod)

ginkgo.By(fmt.Sprintf("Waiting for pods to be evicted, hoping it won't happen, sleep for %s", VpaEvictionTimeout.String()))
CheckNoPodsEvicted(f, MakePodSet(podList))
Expand Down

0 comments on commit 0a7bc88

Please sign in to comment.