Merge pull request #2105 from jbartosik/cherry-limit-05
Cherry limit 05
k8s-ci-robot authored Jun 7, 2019
2 parents 43dc566 + 14a589b commit 509f404
Showing 13 changed files with 991 additions and 243 deletions.
21 changes: 8 additions & 13 deletions vertical-pod-autoscaler/e2e/v1beta1/admission_controller.go
@@ -59,17 +59,11 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", func() {
 		}
 	})

-	ginkgo.It("caps request to limit set by the user", func() {
-		d := NewHamsterDeploymentWithResources(f, ParseQuantityOrDie("100m") /*cpu*/, ParseQuantityOrDie("100Mi") /*memory*/)
-		d.Spec.Template.Spec.Containers[0].Resources.Limits = apiv1.ResourceList{
-			apiv1.ResourceCPU:    ParseQuantityOrDie("222m"),
-			apiv1.ResourceMemory: ParseQuantityOrDie("123Mi"),
-		}
+	ginkgo.It("keeps limits equal to request", func() {
+		d := NewHamsterDeploymentWithGuaranteedResources(f, ParseQuantityOrDie("100m") /*cpu*/, ParseQuantityOrDie("100Mi") /*memory*/)

 		ginkgo.By("Setting up a VPA CRD")
-		vpaCRD := NewVPA(f, "hamster-vpa", &metav1.LabelSelector{
-			MatchLabels: d.Spec.Template.Labels,
-		})
+		vpaCRD := NewVPA(f, "hamster-vpa", hamsterTargetRef)
 		vpaCRD.Status.Recommendation = &vpa_types.RecommendedPodResources{
 			ContainerRecommendations: []vpa_types.RecommendedContainerResources{{
 				ContainerName: "hamster",
@@ -85,11 +79,12 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", func() {
 		podList := startDeploymentPods(f, d)

 		// Originally Pods had 100m CPU, 100Mi of memory, but admission controller
-		// should change it to 222m CPU and 123Mi of memory (as this is the recommendation
-		// capped to the limit set by the user)
+		// should change it to 250m CPU and 200Mi of memory. Limits and requests should stay equal.
 		for _, pod := range podList.Items {
-			gomega.Expect(pod.Spec.Containers[0].Resources.Requests[apiv1.ResourceCPU]).To(gomega.Equal(ParseQuantityOrDie("222m")))
-			gomega.Expect(pod.Spec.Containers[0].Resources.Requests[apiv1.ResourceMemory]).To(gomega.Equal(ParseQuantityOrDie("123Mi")))
+			gomega.Expect(pod.Spec.Containers[0].Resources.Requests[apiv1.ResourceCPU]).To(gomega.Equal(ParseQuantityOrDie("250m")))
+			gomega.Expect(pod.Spec.Containers[0].Resources.Requests[apiv1.ResourceMemory]).To(gomega.Equal(ParseQuantityOrDie("200Mi")))
+			gomega.Expect(pod.Spec.Containers[0].Resources.Limits[apiv1.ResourceCPU]).To(gomega.Equal(ParseQuantityOrDie("250m")))
+			gomega.Expect(pod.Spec.Containers[0].Resources.Limits[apiv1.ResourceMemory]).To(gomega.Equal(ParseQuantityOrDie("200Mi")))
 		}
 	})

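The updated test encodes the invariant that a container whose request and limit start out equal stays that way after the admission controller rewrites it. Below is a minimal standalone sketch of that expectation; the variable names are illustrative and not part of the e2e suite.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	// Guaranteed-QoS container: request == limit before the VPA acts.
	originalRequest := resource.MustParse("100m")
	originalLimit := resource.MustParse("100m")
	recommended := resource.MustParse("250m") // VPA target for the request

	// Because request and limit were equal, the admission controller is
	// expected to set both to the recommended value, preserving equality.
	newRequest, newLimit := recommended, recommended
	fmt.Println(originalRequest.String(), originalLimit.String(), "->",
		newRequest.String(), newLimit.String()) // 100m 100m -> 250m 250m
}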
13 changes: 13 additions & 0 deletions vertical-pod-autoscaler/e2e/v1beta1/common.go
@@ -123,6 +123,19 @@ func NewHamsterDeploymentWithResources(f *framework.Framework, cpuQuantity, memo
 	return d
 }

+// NewHamsterDeploymentWithGuaranteedResources creates a simple hamster deployment with specific
+// resource requests for e2e test purposes. Since the container in the pod specifies resource limits
+// but not resource requests K8s will set requests equal to limits and the pod will have guaranteed
+// QoS class.
+func NewHamsterDeploymentWithGuaranteedResources(f *framework.Framework, cpuQuantity, memoryQuantity resource.Quantity) *appsv1.Deployment {
+	d := NewHamsterDeployment(f)
+	d.Spec.Template.Spec.Containers[0].Resources.Limits = apiv1.ResourceList{
+		apiv1.ResourceCPU:    cpuQuantity,
+		apiv1.ResourceMemory: memoryQuantity,
+	}
+	return d
+}
+
 // GetHamsterPods returns running hamster pods (matched by hamsterLabels)
 func GetHamsterPods(f *framework.Framework) (*apiv1.PodList, error) {
 	label := labels.SelectorFromSet(labels.Set(hamsterLabels))
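For reference, the Kubernetes behavior the new helper's comment relies on: when a container declares limits but no requests, admission defaulting copies each limit into the missing request, which is what yields the Guaranteed QoS class. A hedged sketch of the resulting resource block, using plain client-go types rather than the e2e framework:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	// Limits only, exactly as NewHamsterDeploymentWithGuaranteedResources sets them.
	res := v1.ResourceRequirements{
		Limits: v1.ResourceList{
			v1.ResourceCPU:    resource.MustParse("100m"),
			v1.ResourceMemory: resource.MustParse("100Mi"),
		},
	}
	// Requests are left empty in the spec; on admission Kubernetes defaults
	// each missing request to the matching limit, so the pod comes up with
	// requests == limits and Guaranteed QoS.
	fmt.Println(res.Limits.Cpu().String(), res.Limits.Memory().String(), len(res.Requests))
}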
17 changes: 7 additions & 10 deletions vertical-pod-autoscaler/e2e/v1beta2/admission_controller.go
@@ -56,12 +56,8 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", func() {
 		}
 	})

-	ginkgo.It("caps request to limit set by the user", func() {
-		d := NewHamsterDeploymentWithResources(f, ParseQuantityOrDie("100m") /*cpu*/, ParseQuantityOrDie("100Mi") /*memory*/)
-		d.Spec.Template.Spec.Containers[0].Resources.Limits = apiv1.ResourceList{
-			apiv1.ResourceCPU:    ParseQuantityOrDie("222m"),
-			apiv1.ResourceMemory: ParseQuantityOrDie("123Mi"),
-		}
+	ginkgo.It("keeps limits equal to request", func() {
+		d := NewHamsterDeploymentWithGuaranteedResources(f, ParseQuantityOrDie("100m") /*cpu*/, ParseQuantityOrDie("100Mi") /*memory*/)

 		ginkgo.By("Setting up a VPA CRD")
 		vpaCRD := NewVPA(f, "hamster-vpa", hamsterTargetRef)
@@ -80,11 +76,12 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", func() {
 		podList := startDeploymentPods(f, d)

 		// Originally Pods had 100m CPU, 100Mi of memory, but admission controller
-		// should change it to 222m CPU and 123Mi of memory (as this is the recommendation
-		// capped to the limit set by the user)
+		// should change it to 250m CPU and 200Mi of memory. Limits and requests should stay equal.
 		for _, pod := range podList.Items {
-			gomega.Expect(pod.Spec.Containers[0].Resources.Requests[apiv1.ResourceCPU]).To(gomega.Equal(ParseQuantityOrDie("222m")))
-			gomega.Expect(pod.Spec.Containers[0].Resources.Requests[apiv1.ResourceMemory]).To(gomega.Equal(ParseQuantityOrDie("123Mi")))
+			gomega.Expect(pod.Spec.Containers[0].Resources.Requests[apiv1.ResourceCPU]).To(gomega.Equal(ParseQuantityOrDie("250m")))
+			gomega.Expect(pod.Spec.Containers[0].Resources.Requests[apiv1.ResourceMemory]).To(gomega.Equal(ParseQuantityOrDie("200Mi")))
+			gomega.Expect(pod.Spec.Containers[0].Resources.Limits[apiv1.ResourceCPU]).To(gomega.Equal(ParseQuantityOrDie("250m")))
+			gomega.Expect(pod.Spec.Containers[0].Resources.Limits[apiv1.ResourceMemory]).To(gomega.Equal(ParseQuantityOrDie("200Mi")))
 		}
 	})

13 changes: 13 additions & 0 deletions vertical-pod-autoscaler/e2e/v1beta2/common.go
@@ -130,6 +130,19 @@ func NewHamsterDeploymentWithResources(f *framework.Framework, cpuQuantity, memo
 	return d
 }

+// NewHamsterDeploymentWithGuaranteedResources creates a simple hamster deployment with specific
+// resource requests for e2e test purposes. Since the container in the pod specifies resource limits
+// but not resource requests K8s will set requests equal to limits and the pod will have guaranteed
+// QoS class.
+func NewHamsterDeploymentWithGuaranteedResources(f *framework.Framework, cpuQuantity, memoryQuantity resource.Quantity) *appsv1.Deployment {
+	d := NewHamsterDeployment(f)
+	d.Spec.Template.Spec.Containers[0].Resources.Limits = apiv1.ResourceList{
+		apiv1.ResourceCPU:    cpuQuantity,
+		apiv1.ResourceMemory: memoryQuantity,
+	}
+	return d
+}
+
 // GetHamsterPods returns running hamster pods (matched by hamsterLabels)
 func GetHamsterPods(f *framework.Framework) (*apiv1.PodList, error) {
 	label := labels.SelectorFromSet(labels.Set(hamsterLabels))
vertical-pod-autoscaler/pkg/admission-controller/logic/recommendation_provider.go
@@ -17,8 +17,12 @@ limitations under the License.
 package logic

 import (
-	v1 "k8s.io/api/core/v1"
+	"fmt"
+	"k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/resource"
 	"k8s.io/apimachinery/pkg/labels"
+	"math"
+	"math/big"

 	vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta2"
 	vpa_lister "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/client/listers/autoscaling.k8s.io/v1beta2"
@@ -29,11 +33,15 @@

 // ContainerResources holds resources request for container
 type ContainerResources struct {
+	Limits   v1.ResourceList
 	Requests v1.ResourceList
 }

 func newContainerResources() ContainerResources {
-	return ContainerResources{Requests: v1.ResourceList{}}
+	return ContainerResources{
+		Requests: v1.ResourceList{},
+		Limits:   v1.ResourceList{},
+	}
 }

 // RecommendationProvider gets current recommendation, annotations and vpaName for the given pod.
@@ -56,8 +64,38 @@ func NewRecommendationProvider(vpaLister vpa_lister.VerticalPodAutoscalerLister,
 	}
 }

-// getContainersResources returns the recommended resources for each container in the given pod in the same order they are specified in the pod.Spec.
-func getContainersResources(pod *v1.Pod, podRecommendation vpa_types.RecommendedPodResources) []ContainerResources {
+func getProportionalLimit(originalLimit, originalRequest, recommendedRequest *resource.Quantity) (limit *resource.Quantity, capped bool) {
+	// originalLimit not set, don't set limit.
+	if originalLimit == nil || originalLimit.Value() == 0 {
+		return nil, false
+	}
+	// originalLimit set but originalRequest not set - K8s will treat the pod as if they were equal,
+	// recommend limit equal to request
+	if originalRequest == nil || originalRequest.Value() == 0 {
+		result := *recommendedRequest
+		return &result, false
+	}
+	// originalLimit and originalRequest are set. If they are equal recommend limit equal to request.
+	if originalRequest.MilliValue() == originalLimit.MilliValue() {
+		result := *recommendedRequest
+		return &result, false
+	}
+
+	// Input and output milli values should fit in int64 but intermediate values might be bigger.
+	originalMilliRequest := big.NewInt(originalRequest.MilliValue())
+	originalMilliLimit := big.NewInt(originalLimit.MilliValue())
+	recommendedMilliRequest := big.NewInt(recommendedRequest.MilliValue())
+	var recommendedMilliLimit big.Int
+	recommendedMilliLimit.Mul(recommendedMilliRequest, originalMilliLimit)
+	recommendedMilliLimit.Div(&recommendedMilliLimit, originalMilliRequest)
+	if recommendedMilliLimit.IsInt64() {
+		return resource.NewMilliQuantity(recommendedMilliLimit.Int64(), recommendedRequest.Format), false
+	}
+	return resource.NewMilliQuantity(math.MaxInt64, recommendedRequest.Format), true
+}
+
+// GetContainersResources returns the recommended resources for each container in the given pod in the same order they are specified in the pod.Spec.
+func GetContainersResources(pod *v1.Pod, podRecommendation vpa_types.RecommendedPodResources, annotations vpa_api_util.ContainerToAnnotationsMap) []ContainerResources {
 	resources := make([]ContainerResources, len(pod.Spec.Containers))
 	for i, container := range pod.Spec.Containers {
 		resources[i] = newContainerResources()
@@ -68,6 +106,29 @@
 			continue
 		}
 		resources[i].Requests = recommendation.Target
+
+		cpuLimit, capped := getProportionalLimit(container.Resources.Limits.Cpu(), container.Resources.Requests.Cpu(), resources[i].Requests.Cpu())
+		if cpuLimit != nil {
+			resources[i].Limits[v1.ResourceCPU] = *cpuLimit
+		}
+		if capped {
+			annotations[container.Name] = append(
+				annotations[container.Name],
+				fmt.Sprintf(
+					"Failed to keep CPU limit to request proportion of %d to %d with recommended request of %d milliCPU; doesn't fit in int64. Capping limit to MaxInt64",
+					container.Resources.Limits.Cpu().MilliValue(), container.Resources.Requests.Cpu().MilliValue(), resources[i].Requests.Cpu().MilliValue()))
+		}
+		memLimit, capped := getProportionalLimit(container.Resources.Limits.Memory(), container.Resources.Requests.Memory(), resources[i].Requests.Memory())
+		if memLimit != nil {
+			resources[i].Limits[v1.ResourceMemory] = *memLimit
+		}
+		if capped {
+			annotations[container.Name] = append(
+				annotations[container.Name],
+				fmt.Sprintf(
+					"Failed to keep memory limit to request proportion of %d to %d with recommended request of %d milliBytes; doesn't fit in int64. Capping limit to MaxInt64",
+					container.Resources.Limits.Memory().MilliValue(), container.Resources.Requests.Memory().MilliValue(), resources[i].Requests.Memory().MilliValue()))
+		}
 	}
 	return resources
 }
@@ -122,6 +183,6 @@ func (p *recommendationProvider) GetContainersResourcesForPod(pod *v1.Pod) ([]Co
 			return nil, annotations, vpaConfig.Name, err
 		}
 	}
-	containerResources := getContainersResources(pod, *recommendedPodResources)
+	containerResources := GetContainersResources(pod, *recommendedPodResources, annotations)
 	return containerResources, annotations, vpaConfig.Name, nil
 }
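The core of the change is the proportional-limit arithmetic in getProportionalLimit: the limit-to-request ratio is preserved by computing recommendedRequest * originalLimit / originalRequest on milli-values in big.Int, because the intermediate product can overflow int64 even when the inputs and the result fit. A standalone sketch of the same math follows; the helper name and the example values are illustrative, not taken from the repository.

package main

import (
	"fmt"
	"math"
	"math/big"
)

// proportionalMilliLimit mirrors the big.Int arithmetic above: scale the
// original limit by recommendedRequest/originalRequest and report whether
// the result had to be capped at MaxInt64.
func proportionalMilliLimit(originalLimit, originalRequest, recommendedRequest int64) (int64, bool) {
	var result big.Int
	result.Mul(big.NewInt(recommendedRequest), big.NewInt(originalLimit))
	result.Div(&result, big.NewInt(originalRequest))
	if result.IsInt64() {
		return result.Int64(), false
	}
	return math.MaxInt64, true
}

func main() {
	// A 100m request with a 200m limit keeps a 1:2 ratio, so a recommended
	// request of 250m yields a 500m limit.
	limit, capped := proportionalMilliLimit(200, 100, 250)
	fmt.Println(limit, capped) // 500 false
}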