Commit
Replace test for capping recommendation
jbartosik committed Jun 7, 2019
1 parent b73499a commit 877002b
Showing 3 changed files with 28 additions and 57 deletions.
21 changes: 8 additions & 13 deletions vertical-pod-autoscaler/e2e/v1beta1/admission_controller.go
@@ -59,17 +59,11 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", func() {
 	}
 })

-	ginkgo.It("caps request to limit set by the user", func() {
-		d := NewHamsterDeploymentWithResources(f, ParseQuantityOrDie("100m") /*cpu*/, ParseQuantityOrDie("100Mi") /*memory*/)
-		d.Spec.Template.Spec.Containers[0].Resources.Limits = apiv1.ResourceList{
-			apiv1.ResourceCPU:    ParseQuantityOrDie("222m"),
-			apiv1.ResourceMemory: ParseQuantityOrDie("123Mi"),
-		}
+	ginkgo.It("keeps limits equal to request", func() {
+		d := NewHamsterDeploymentWithGuaranteedResources(f, ParseQuantityOrDie("100m") /*cpu*/, ParseQuantityOrDie("100Mi") /*memory*/)

 		ginkgo.By("Setting up a VPA CRD")
-		vpaCRD := NewVPA(f, "hamster-vpa", &metav1.LabelSelector{
-			MatchLabels: d.Spec.Template.Labels,
-		})
+		vpaCRD := NewVPA(f, "hamster-vpa", hamsterTargetRef)
 		vpaCRD.Status.Recommendation = &vpa_types.RecommendedPodResources{
 			ContainerRecommendations: []vpa_types.RecommendedContainerResources{{
 				ContainerName: "hamster",
@@ -85,11 +79,12 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", func() {
 		podList := startDeploymentPods(f, d)

 		// Originally Pods had 100m CPU, 100Mi of memory, but admission controller
-		// should change it to 222m CPU and 123Mi of memory (as this is the recommendation
-		// capped to the limit set by the user)
+		// should change it to 250m CPU and 200Mi of memory. Limits and requests should stay equal.
 		for _, pod := range podList.Items {
-			gomega.Expect(pod.Spec.Containers[0].Resources.Requests[apiv1.ResourceCPU]).To(gomega.Equal(ParseQuantityOrDie("222m")))
-			gomega.Expect(pod.Spec.Containers[0].Resources.Requests[apiv1.ResourceMemory]).To(gomega.Equal(ParseQuantityOrDie("123Mi")))
+			gomega.Expect(pod.Spec.Containers[0].Resources.Requests[apiv1.ResourceCPU]).To(gomega.Equal(ParseQuantityOrDie("250m")))
+			gomega.Expect(pod.Spec.Containers[0].Resources.Requests[apiv1.ResourceMemory]).To(gomega.Equal(ParseQuantityOrDie("200Mi")))
+			gomega.Expect(pod.Spec.Containers[0].Resources.Limits[apiv1.ResourceCPU]).To(gomega.Equal(ParseQuantityOrDie("250m")))
+			gomega.Expect(pod.Spec.Containers[0].Resources.Limits[apiv1.ResourceMemory]).To(gomega.Equal(ParseQuantityOrDie("200Mi")))
 		}
 	})

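The replaced test asserted only request values; its successor also asserts limits. A minimal sketch of the invariant the new test checks, using a hypothetical helper name (assertGuaranteedResources is not part of this commit) and the same apiv1/gomega imports as the test file:

    // assertGuaranteedResources verifies that, after the admission controller
    // mutates a pod, requests equal limits for CPU and memory, so the pod
    // keeps the Guaranteed QoS class.
    func assertGuaranteedResources(pod *apiv1.Pod) {
        for _, container := range pod.Spec.Containers {
            for _, name := range []apiv1.ResourceName{apiv1.ResourceCPU, apiv1.ResourceMemory} {
                gomega.Expect(container.Resources.Requests[name]).To(gomega.Equal(container.Resources.Limits[name]))
            }
        }
    }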
13 changes: 13 additions & 0 deletions vertical-pod-autoscaler/e2e/v1beta1/common.go
@@ -123,6 +123,19 @@ func NewHamsterDeploymentWithResources(f *framework.Framework, cpuQuantity, memo
 	return d
 }

+// NewHamsterDeploymentWithGuaranteedResources creates a simple hamster deployment with specific
+// resource requests for e2e test purposes. Since the container in the pod specifies resource limits
+// but not resource requests, K8s will set requests equal to limits and the pod will have guaranteed
+// QoS class.
+func NewHamsterDeploymentWithGuaranteedResources(f *framework.Framework, cpuQuantity, memoryQuantity resource.Quantity) *appsv1.Deployment {
+	d := NewHamsterDeployment(f)
+	d.Spec.Template.Spec.Containers[0].Resources.Limits = apiv1.ResourceList{
+		apiv1.ResourceCPU:    cpuQuantity,
+		apiv1.ResourceMemory: memoryQuantity,
+	}
+	return d
+}
+
 // GetHamsterPods returns running hamster pods (matched by hamsterLabels)
 func GetHamsterPods(f *framework.Framework) (*apiv1.PodList, error) {
 	label := labels.SelectorFromSet(labels.Set(hamsterLabels))
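The new helper relies on Kubernetes defaulting: when a container declares limits but no requests, the apiserver copies the limits into the requests, which gives the pod the Guaranteed QoS class. A minimal usage sketch, assuming no VPA object is installed so the admission controller leaves the values untouched:

    d := NewHamsterDeploymentWithGuaranteedResources(f, ParseQuantityOrDie("100m"), ParseQuantityOrDie("100Mi"))
    podList := startDeploymentPods(f, d)
    for _, pod := range podList.Items {
        res := pod.Spec.Containers[0].Resources
        // Requests were defaulted from the limits set by the helper.
        gomega.Expect(res.Requests[apiv1.ResourceCPU]).To(gomega.Equal(res.Limits[apiv1.ResourceCPU]))
        gomega.Expect(pod.Status.QOSClass).To(gomega.Equal(apiv1.PodQOSGuaranteed))
    }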
51 changes: 7 additions & 44 deletions vertical-pod-autoscaler/e2e/v1beta2/admission_controller.go
@@ -56,12 +56,8 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", func() {
 	}
 })

-	ginkgo.It("caps request to limit set by the user", func() {
-		d := NewHamsterDeploymentWithResources(f, ParseQuantityOrDie("100m") /*cpu*/, ParseQuantityOrDie("100Mi") /*memory*/)
-		d.Spec.Template.Spec.Containers[0].Resources.Limits = apiv1.ResourceList{
-			apiv1.ResourceCPU:    ParseQuantityOrDie("222m"),
-			apiv1.ResourceMemory: ParseQuantityOrDie("123Mi"),
-		}
+	ginkgo.It("keeps limits equal to request", func() {
+		d := NewHamsterDeploymentWithGuaranteedResources(f, ParseQuantityOrDie("100m") /*cpu*/, ParseQuantityOrDie("100Mi") /*memory*/)

 		ginkgo.By("Setting up a VPA CRD")
 		vpaCRD := NewVPA(f, "hamster-vpa", hamsterTargetRef)
@@ -80,11 +76,12 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", func() {
 		podList := startDeploymentPods(f, d)

 		// Originally Pods had 100m CPU, 100Mi of memory, but admission controller
-		// should change it to 222m CPU and 123Mi of memory (as this is the recommendation
-		// capped to the limit set by the user)
+		// should change it to 250m CPU and 200Mi of memory. Limits and requests should stay equal.
 		for _, pod := range podList.Items {
-			gomega.Expect(pod.Spec.Containers[0].Resources.Requests[apiv1.ResourceCPU]).To(gomega.Equal(ParseQuantityOrDie("222m")))
-			gomega.Expect(pod.Spec.Containers[0].Resources.Requests[apiv1.ResourceMemory]).To(gomega.Equal(ParseQuantityOrDie("123Mi")))
+			gomega.Expect(pod.Spec.Containers[0].Resources.Requests[apiv1.ResourceCPU]).To(gomega.Equal(ParseQuantityOrDie("250m")))
+			gomega.Expect(pod.Spec.Containers[0].Resources.Requests[apiv1.ResourceMemory]).To(gomega.Equal(ParseQuantityOrDie("200Mi")))
+			gomega.Expect(pod.Spec.Containers[0].Resources.Limits[apiv1.ResourceCPU]).To(gomega.Equal(ParseQuantityOrDie("250m")))
+			gomega.Expect(pod.Spec.Containers[0].Resources.Limits[apiv1.ResourceMemory]).To(gomega.Equal(ParseQuantityOrDie("200Mi")))
 		}
 	})

@@ -195,40 +192,6 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", func() {
 	}
 })

-	ginkgo.It("keeps limits equal to request", func() {
-		d := NewHamsterDeploymentWithGuaranteedResources(f, ParseQuantityOrDie("100m") /*cpu*/, ParseQuantityOrDie("100Mi") /*memory*/)
-
-		ginkgo.By("Setting up a VPA CRD")
-		vpaCRD := NewVPA(f, "hamster-vpa", hamsterTargetRef)
-		vpaCRD.Status.Recommendation = &vpa_types.RecommendedPodResources{
-			ContainerRecommendations: []vpa_types.RecommendedContainerResources{{
-				ContainerName: "hamster",
-				Target: apiv1.ResourceList{
-					apiv1.ResourceCPU:    ParseQuantityOrDie("250m"),
-					apiv1.ResourceMemory: ParseQuantityOrDie("200Mi"),
-				},
-			}},
-		}
-		vpaCRD.Spec.ResourcePolicy = &vpa_types.PodResourcePolicy{
-			ContainerPolicies: []vpa_types.ContainerResourcePolicy{{
-				ContainerName: "hamster",
-			}},
-		}
-		InstallVPA(f, vpaCRD)
-
-		ginkgo.By("Setting up a hamster deployment")
-		podList := startDeploymentPods(f, d)
-
-		// Originally Pods had 100m CPU, 100Mi of memory, but admission controller
-		// should change it to 250m CPU and 200Mi of memory. Limits and requests should stay equal
-		for _, pod := range podList.Items {
-			gomega.Expect(pod.Spec.Containers[0].Resources.Requests[apiv1.ResourceCPU]).To(gomega.Equal(ParseQuantityOrDie("250m")))
-			gomega.Expect(pod.Spec.Containers[0].Resources.Requests[apiv1.ResourceMemory]).To(gomega.Equal(ParseQuantityOrDie("200Mi")))
-			gomega.Expect(pod.Spec.Containers[0].Resources.Limits[apiv1.ResourceCPU]).To(gomega.Equal(ParseQuantityOrDie("250m")))
-			gomega.Expect(pod.Spec.Containers[0].Resources.Limits[apiv1.ResourceMemory]).To(gomega.Equal(ParseQuantityOrDie("200Mi")))
-		}
-	})
-
 })

 func startDeploymentPods(f *framework.Framework, deployment *appsv1.Deployment) *apiv1.PodList {
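With requests equal to limits the limit-to-request ratio is 1, so when the admission controller applies the 250m/200Mi recommendation it presumably scales the limits in step with the requests, rather than capping requests at the user-set limits as the removed test assumed; that is why all four assertions expect the same target values. A back-of-the-envelope check under that assumption:

    // Assumed behavior: the limit-to-request ratio is preserved.
    // ratio      = limit / request       = 100m / 100m = 1.0
    // newRequest = recommendation target = 250m
    // newLimit   = newRequest * ratio    = 250m (and 200Mi for memory)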
