From 3c4098cd9e00cb0e7162a3f6cf9b43f1baf06306 Mon Sep 17 00:00:00 2001
From: Marco Bardelli
Date: Fri, 10 May 2019 14:59:42 +0200
Subject: [PATCH 01/25] Added LimitsChecker that provides hints for limits

To allow the admission controller to set limits on containers when needed,
because a LimitRange in the namespace defines default limits and a max
limit/request ratio. This feature has to be explicitly enabled by passing
the flag `--allow-to-adjust-limits`

Conflicts, resolved manually in (vpaPreprocessor is on master but not on
this branch):
  vertical-pod-autoscaler/pkg/admission-controller/logic/server.go
  vertical-pod-autoscaler/pkg/admission-controller/logic/server_test.go
  vertical-pod-autoscaler/pkg/admission-controller/main.go
---
 .../logic/limitrange_checker.go               | 249 ++++++++++++++++++
 .../logic/limitrange_checker_test.go          | 236 +++++++++++++++++
 .../pkg/admission-controller/logic/server.go  |  32 ++-
 .../admission-controller/logic/server_test.go |   6 +-
 .../pkg/admission-controller/main.go          |  23 +-
 5 files changed, 533 insertions(+), 13 deletions(-)
 create mode 100644 vertical-pod-autoscaler/pkg/admission-controller/logic/limitrange_checker.go
 create mode 100644 vertical-pod-autoscaler/pkg/admission-controller/logic/limitrange_checker_test.go

diff --git a/vertical-pod-autoscaler/pkg/admission-controller/logic/limitrange_checker.go b/vertical-pod-autoscaler/pkg/admission-controller/logic/limitrange_checker.go
new file mode 100644
index 000000000000..c2996b2495cf
--- /dev/null
+++ b/vertical-pod-autoscaler/pkg/admission-controller/logic/limitrange_checker.go
@@ -0,0 +1,249 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package logic + +import ( + "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/informers" + + v1_listers "k8s.io/client-go/listers/core/v1" +) + +// LimitsHints provides hinted limits that respect limit range max ratio +type LimitsHints interface { + IsNil() bool + RequestsExceedsRatio(indexOfContainer int, resourceName v1.ResourceName) bool + HintedLimit(indexOfContainer int, resourceName v1.ResourceName) resource.Quantity +} + +// LimitRangeHints implements LimitsHints interface +type LimitRangeHints struct { + requestsExceedsRatio []map[v1.ResourceName]bool + limitsRespectingRatio []v1.ResourceList +} + +var _ LimitsHints = &LimitRangeHints{} + +// LimitsChecker checks for LimitRange and if container needs limits to be set +type LimitsChecker interface { + NeedsLimits(*v1.Pod, []ContainerResources) LimitsHints +} + +// IsNil return true if there is no hints to set limits +func (lrh *LimitRangeHints) IsNil() bool { + return lrh == (*LimitRangeHints)(nil) +} + +// RequestsExceedsRatio return true if limits have to be set to respect limit range max ratio +func (lrh *LimitRangeHints) RequestsExceedsRatio(indexOfContainer int, resourceName v1.ResourceName) bool { + if !lrh.IsNil() { + yes, ok := lrh.requestsExceedsRatio[indexOfContainer][resourceName] + return ok && yes + } + return false +} + +// HintedLimit return the limit Quantity that respect the limit range max ration +func (lrh *LimitRangeHints) HintedLimit(indexOfContainer int, resourceName v1.ResourceName) resource.Quantity { + if !lrh.IsNil() { + limit, ok := lrh.limitsRespectingRatio[indexOfContainer][resourceName] + if ok { + return limit + } + return resource.Quantity{} + } + return resource.Quantity{} +} + +type neverNeedsLimitsChecker struct{} + +var _ LimitsChecker = &neverNeedsLimitsChecker{} + +func (lc *neverNeedsLimitsChecker) NeedsLimits(pod *v1.Pod, containersResources []ContainerResources) LimitsHints { + return LimitsHints((*LimitRangeHints)(nil)) +} + +type limitsChecker struct { + limitrangeLister v1_listers.LimitRangeLister +} + +var _ LimitsChecker = &limitsChecker{} + +// NewLimitsChecker creates a LimitsChecker +func NewLimitsChecker(f informers.SharedInformerFactory) LimitsChecker { + if f != nil { + limitrangeLister := f.Core().V1().LimitRanges().Lister() + stopCh := make(chan struct{}) + f.Start(stopCh) + for _, ok := range f.WaitForCacheSync(stopCh) { + if !ok { + if ok := f.Core().V1().LimitRanges().Informer().HasSynced(); !ok { + return &neverNeedsLimitsChecker{} + } + } + } + return &limitsChecker{limitrangeLister} + } + return &neverNeedsLimitsChecker{} +} + +type interestingData struct { + MaxLimitRequestRatio v1.ResourceList + Default v1.ResourceList +} + +func (id *interestingData) parse(lri *v1.LimitRangeItem) { + if value, hasCPU := lri.MaxLimitRequestRatio[v1.ResourceCPU]; hasCPU { + if id.MaxLimitRequestRatio == nil { + id.MaxLimitRequestRatio = make(v1.ResourceList) + } + if maxRatio, exists := id.MaxLimitRequestRatio[v1.ResourceCPU]; !exists || maxRatio.Cmp(value) > 0 { + id.MaxLimitRequestRatio[v1.ResourceCPU] = *value.Copy() + } + } + if value, hasMemory := lri.MaxLimitRequestRatio[v1.ResourceMemory]; hasMemory { + if id.MaxLimitRequestRatio == nil { + id.MaxLimitRequestRatio = make(v1.ResourceList) + } + if maxRatio, exists := id.MaxLimitRequestRatio[v1.ResourceMemory]; !exists || maxRatio.Cmp(value) > 0 { + id.MaxLimitRequestRatio[v1.ResourceMemory] = *value.Copy() + } + } + if value, hasCPU := 
lri.Default[v1.ResourceCPU]; hasCPU { + if id.Default == nil { + id.Default = make(v1.ResourceList) + } + if _, exists := id.Default[v1.ResourceCPU]; !exists { + id.Default[v1.ResourceCPU] = *value.Copy() + } + } + if value, hasMemory := lri.Default[v1.ResourceMemory]; hasMemory { + if id.Default == nil { + id.Default = make(v1.ResourceList) + } + if _, exists := id.Default[v1.ResourceMemory]; !exists { + id.Default[v1.ResourceMemory] = *value.Copy() + } + } +} + +func (lc *limitsChecker) getLimitRangeItem(pod *v1.Pod) (ret *v1.LimitRangeItem) { + ret = nil + limitranges, err := lc.limitrangeLister. + LimitRanges(pod.GetNamespace()). + List(labels.Everything()) + + if err != nil { + return ret + } + + id := &interestingData{} + foundInterstingData := false + for _, lr := range limitranges { + for _, lri := range lr.Spec.Limits { + if lri.Type != v1.LimitTypeContainer && lri.Type != v1.LimitTypePod { + continue + } + if lri.MaxLimitRequestRatio == nil && + lri.Default == nil { + continue + } + foundInterstingData = true + id.parse(&lri) + } + } + if foundInterstingData { + ret = &v1.LimitRangeItem{ + MaxLimitRequestRatio: id.MaxLimitRequestRatio, + Default: id.Default, + } + } + + return ret +} + +func (lc *limitsChecker) NeedsLimits(pod *v1.Pod, containersResources []ContainerResources) LimitsHints { + lri := lc.getLimitRangeItem(pod) + + if lri == (*v1.LimitRangeItem)(nil) { + return LimitsHints((*LimitRangeHints)(nil)) + } + + lrh := &LimitRangeHints{ + requestsExceedsRatio: make([]map[v1.ResourceName]bool, len(containersResources)), + limitsRespectingRatio: make([]v1.ResourceList, len(containersResources)), + } + needsLimits := false + + for i, cr := range containersResources { + lrh.requestsExceedsRatio[i] = make(map[v1.ResourceName]bool) + lrh.limitsRespectingRatio[i] = make(v1.ResourceList) + for name, value := range cr.Requests { + var ctrLimit *resource.Quantity + if pod.Spec.Containers[i].Resources.Limits != nil { + if q, hasLimit := pod.Spec.Containers[i].Resources.Limits[name]; hasLimit { + ctrLimit = &q + } + } + if q, hasDefault := lri.Default[name]; hasDefault && ctrLimit == nil { + ctrLimit = &q + } + if ctrLimit == nil { + // no limits for this container, neither default will be set + continue + } + + if ratio, hasRatio := lri.MaxLimitRequestRatio[name]; hasRatio { + dl := *ctrLimit + dlv := dl.Value() + vv := value.Value() + useMilli := false + if dlv <= resource.MaxMilliValue && + vv <= resource.MaxMilliValue && + name == v1.ResourceCPU { + dlv = dl.MilliValue() + vv = value.MilliValue() + useMilli = true + } + + futureRatio := float64(dlv) / float64(vv) + maxRatio := float64(ratio.Value()) + + if futureRatio > maxRatio { + needsLimits = true + lrh.requestsExceedsRatio[i][name] = true + l := int64(float64(vv) * maxRatio) + if useMilli { + if l > resource.MaxMilliValue { + l = resource.MaxMilliValue + } + lrh.limitsRespectingRatio[i][name] = *resource.NewMilliQuantity(l, value.Format) + } else { + lrh.limitsRespectingRatio[i][name] = *resource.NewQuantity(l, value.Format) + } + } + } + } + } + + if !needsLimits { + lrh = nil + } + return LimitsHints(lrh) +} diff --git a/vertical-pod-autoscaler/pkg/admission-controller/logic/limitrange_checker_test.go b/vertical-pod-autoscaler/pkg/admission-controller/logic/limitrange_checker_test.go new file mode 100644 index 000000000000..95298778c1a2 --- /dev/null +++ b/vertical-pod-autoscaler/pkg/admission-controller/logic/limitrange_checker_test.go @@ -0,0 +1,236 @@ +/* +Copyright 2019 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package logic + +import ( + "fmt" + "testing" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/informers" + "k8s.io/client-go/kubernetes/fake" + + "github.com/stretchr/testify/assert" + apiv1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/test" +) + +func TestUpdateResourceLimits(t *testing.T) { + type testCase struct { + pod *apiv1.Pod + containerResources []ContainerResources + limitRanges []runtime.Object + requestsExceedsRatioCPU bool + requestsExceedsRatioMemory bool + limitsRespectingRatioCPU resource.Quantity + limitsRespectingRatioMemory resource.Quantity + } + containerName := "container1" + vpaName := "vpa1" + labels := map[string]string{"app": "testingApp"} + + minRatio := test.Resources("5", "5") + + limitranges := []runtime.Object{ + &apiv1.LimitRange{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "limitRange-with-default-and-ratio", + }, + Spec: apiv1.LimitRangeSpec{ + Limits: []apiv1.LimitRangeItem{ + { + Type: apiv1.LimitTypeContainer, + Default: test.Resources("2000m", "2Gi"), + }, + { + Type: apiv1.LimitTypePod, + MaxLimitRequestRatio: test.Resources("10", "10"), + }, + }, + }, + }, + &apiv1.LimitRange{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "default", + Name: "limitRange-with-only-ratio", + }, + Spec: apiv1.LimitRangeSpec{ + Limits: []apiv1.LimitRangeItem{ + { + Type: apiv1.LimitTypePod, + MaxLimitRequestRatio: minRatio, + }, + }, + }, + }, + } + + uninitialized := test.Pod().WithName("test_uninitialized").AddContainer(test.BuildTestContainer(containerName, "", "")).Get() + uninitialized.ObjectMeta.Labels = labels + + initialized := test.Pod().WithName("test_initialized").AddContainer(test.BuildTestContainer(containerName, "1", "100Mi")).Get() + initialized.ObjectMeta.Labels = labels + + withLimits := test.Pod().WithName("test_initialized").AddContainer(test.BuildTestContainer(containerName, "1", "100Mi")).Get() + withLimits.ObjectMeta.Labels = labels + withLimits.Spec.Containers[0].Resources.Limits = test.Resources("1500m", "800Mi") + + withHugeMemLimits := test.Pod().WithName("test_initialized").AddContainer(test.BuildTestContainer(containerName, "1", "10Gi")).Get() + withHugeMemLimits.ObjectMeta.Labels = labels + withHugeMemLimits.Spec.Containers[0].Resources.Limits = test.Resources("1500m", "80Gi") + + vpaBuilder := test.VerticalPodAutoscaler(). + WithName(vpaName). + WithContainer(containerName). 
+ WithTarget("20m", "200Mi") + vpa := vpaBuilder.Get() + + vpaWithHighMemory := vpaBuilder.WithTarget("2", "3Gi").Get() + + // short circuit recommendation provider + vpaContainersResources := []ContainerResources{{ + Requests: vpa.Status.Recommendation.ContainerRecommendations[0].Target, + }} + vpaHighMemContainersResources := []ContainerResources{{ + Requests: vpaWithHighMemory.Status.Recommendation.ContainerRecommendations[0].Target, + }} + + expectedMemory := func(crs []ContainerResources, ratio apiv1.ResourceList) resource.Quantity { + return *resource.NewQuantity( + int64(float64( + crs[0].Requests.Memory().Value())*float64(ratio.Memory().Value())), + crs[0].Requests.Memory().Format) + } + expectedCPU := func(crs []ContainerResources, ratio apiv1.ResourceList) resource.Quantity { + return *resource.NewMilliQuantity( + int64(float64( + crs[0].Requests.Cpu().MilliValue())*float64(ratio.Cpu().Value())), + crs[0].Requests.Cpu().Format) + } + + testCases := []testCase{{ + pod: uninitialized, + containerResources: vpaContainersResources, + limitRanges: limitranges, + requestsExceedsRatioCPU: true, + requestsExceedsRatioMemory: true, + limitsRespectingRatioCPU: expectedCPU(vpaContainersResources, minRatio), + limitsRespectingRatioMemory: expectedMemory(vpaContainersResources, minRatio), + }, { + pod: initialized, + containerResources: vpaContainersResources, + limitRanges: limitranges, + requestsExceedsRatioCPU: true, + requestsExceedsRatioMemory: true, + limitsRespectingRatioCPU: expectedCPU(vpaContainersResources, minRatio), + limitsRespectingRatioMemory: expectedMemory(vpaContainersResources, minRatio), + }, { + pod: withLimits, + containerResources: vpaContainersResources, + limitRanges: limitranges, + requestsExceedsRatioCPU: true, + requestsExceedsRatioMemory: false, + limitsRespectingRatioCPU: expectedCPU(vpaContainersResources, minRatio), + limitsRespectingRatioMemory: resource.Quantity{}, + }, { + pod: withHugeMemLimits, + containerResources: vpaHighMemContainersResources, + limitRanges: limitranges, + requestsExceedsRatioCPU: false, + requestsExceedsRatioMemory: true, + limitsRespectingRatioCPU: resource.Quantity{}, + limitsRespectingRatioMemory: expectedMemory(vpaHighMemContainersResources, minRatio), + }} + + // if admission controller is not allowed to adjust limits + // the limits checher have to return always: + // - no needed limits + // - RequestsExceedsRatio always return false + t.Run("test case for neverNeedsLimitsChecker", func(t *testing.T) { + nlc := NewLimitsChecker(nil) + hints := nlc.NeedsLimits(uninitialized, vpaContainersResources) + hintsPtr, _ := hints.(*LimitRangeHints) + if hintsPtr != nil { + t.Errorf("%v NeedsLimits didn't not return nil: %v", nlc, hints) + } + if !hints.IsNil() { + t.Errorf("%v NeedsLimits returned a LimitsHints not nil: %v", nlc, hints) + } + if hints.RequestsExceedsRatio(0, apiv1.ResourceMemory) != false { + t.Errorf("%v RequestsExceedsRatio didn't not return false", hints) + } + hinted := hints.HintedLimit(0, apiv1.ResourceMemory) + if !(&hinted).IsZero() { + t.Errorf("%v RequestsExceedsRatio didn't not return zero quantity", hints) + } + }) + + t.Run("test case for no Limit Range", func(t *testing.T) { + cs := fake.NewSimpleClientset() + factory := informers.NewSharedInformerFactory(cs, 0) + lc := NewLimitsChecker(factory) + hints := lc.NeedsLimits(uninitialized, vpaContainersResources) + hintsPtr, _ := hints.(*LimitRangeHints) + if hintsPtr != nil { + t.Errorf("%v NeedsLimits didn't not return nil: %v", lc, hints) + } + if 
!hints.IsNil() { + t.Errorf("%v NeedsLimits returned a LimitsHints not nil: %v", lc, hints) + } + if hints.RequestsExceedsRatio(0, apiv1.ResourceMemory) != false { + t.Errorf("%v RequestsExceedsRatio didn't not return false", hints) + } + hinted := hints.HintedLimit(0, apiv1.ResourceMemory) + if !(&hinted).IsZero() { + t.Errorf("%v RequestsExceedsRatio didn't not return zero quantity", hints) + } + }) + + for i, tc := range testCases { + + t.Run(fmt.Sprintf("test case number: %d", i), func(t *testing.T) { + cs := fake.NewSimpleClientset(tc.limitRanges...) + factory := informers.NewSharedInformerFactory(cs, 0) + lc := NewLimitsChecker(factory) + resources := tc.containerResources + + hints := lc.NeedsLimits(tc.pod, resources) + assert.NotNil(t, hints, fmt.Sprintf("hints is: %+v", hints)) + + if tc.requestsExceedsRatioCPU { + assert.True(t, hints.RequestsExceedsRatio(0, apiv1.ResourceCPU)) + } else { + assert.False(t, hints.RequestsExceedsRatio(0, apiv1.ResourceCPU)) + } + + if tc.requestsExceedsRatioMemory { + assert.True(t, hints.RequestsExceedsRatio(0, apiv1.ResourceMemory)) + } else { + assert.False(t, hints.RequestsExceedsRatio(0, apiv1.ResourceMemory)) + } + + hintedCPULimits := hints.HintedLimit(0, apiv1.ResourceCPU) + hintedMemoryLimits := hints.HintedLimit(0, apiv1.ResourceMemory) + assert.EqualValues(t, tc.limitsRespectingRatioCPU.Value(), hintedCPULimits.Value(), fmt.Sprintf("cpu limits doesn't match: %v != %v\n", tc.limitsRespectingRatioCPU.Value(), hintedCPULimits.Value())) + assert.EqualValues(t, tc.limitsRespectingRatioMemory.Value(), hintedMemoryLimits.Value(), fmt.Sprintf("memory limits doesn't match: %v != %v\n", tc.limitsRespectingRatioMemory.Value(), hintedMemoryLimits.Value())) + }) + + } +} diff --git a/vertical-pod-autoscaler/pkg/admission-controller/logic/server.go b/vertical-pod-autoscaler/pkg/admission-controller/logic/server.go index 885dae569147..dbd4650d1dc9 100644 --- a/vertical-pod-autoscaler/pkg/admission-controller/logic/server.go +++ b/vertical-pod-autoscaler/pkg/admission-controller/logic/server.go @@ -38,11 +38,12 @@ import ( type AdmissionServer struct { recommendationProvider RecommendationProvider podPreProcessor PodPreProcessor + limitsChecker LimitsChecker } // NewAdmissionServer constructs new AdmissionServer -func NewAdmissionServer(recommendationProvider RecommendationProvider, podPreProcessor PodPreProcessor) *AdmissionServer { - return &AdmissionServer{recommendationProvider, podPreProcessor} +func NewAdmissionServer(recommendationProvider RecommendationProvider, podPreProcessor PodPreProcessor, limitsChecker LimitsChecker) *AdmissionServer { + return &AdmissionServer{recommendationProvider, podPreProcessor, limitsChecker} } type patchRecord struct { @@ -72,10 +73,13 @@ func (s *AdmissionServer) getPatchesForPodResourceRequest(raw []byte, namespace if annotationsPerContainer == nil { annotationsPerContainer = vpa_api_util.ContainerToAnnotationsMap{} } + + limitsHints := s.limitsChecker.NeedsLimits(&pod, containersResources) + patches := []patchRecord{} updatesAnnotation := []string{} for i, containerResources := range containersResources { - newPatches, newUpdatesAnnotation := s.getContainerPatch(pod, i, "requests", annotationsPerContainer, containerResources) + newPatches, newUpdatesAnnotation := s.getContainerPatch(pod, i, annotationsPerContainer, containerResources, limitsHints) patches = append(patches, newPatches...) 
updatesAnnotation = append(updatesAnnotation, newUpdatesAnnotation) } @@ -119,7 +123,7 @@ func getAddResourceRequirementValuePatch(i int, kind string, resource v1.Resourc Value: quantity.String()} } -func (s *AdmissionServer) getContainerPatch(pod v1.Pod, i int, patchKind string, annotationsPerContainer vpa_api_util.ContainerToAnnotationsMap, containerResources ContainerResources) ([]patchRecord, string) { +func (s *AdmissionServer) getContainerPatch(pod v1.Pod, i int, annotationsPerContainer vpa_api_util.ContainerToAnnotationsMap, containerResources ContainerResources, limitsHints LimitsHints) ([]patchRecord, string) { var patches []patchRecord // Add empty resources object if missing if pod.Spec.Containers[i].Resources.Limits == nil && @@ -132,6 +136,26 @@ func (s *AdmissionServer) getContainerPatch(pod v1.Pod, i int, patchKind string, annotations = make([]string, 0) } + if !limitsHints.IsNil() { + var resources v1.ResourceList + resourceNames := []v1.ResourceName{"cpu", "memory"} + for _, resource := range resourceNames { + if limitsHints.RequestsExceedsRatio(i, resource) { + // we need just to take care of max ratio + // setting limits to request*maxRatio, + // It's needed when we are lowering requests too much + limit := limitsHints.HintedLimit(i, resource) + if resources == nil { + resources = make(v1.ResourceList) + } + resources[resource] = limit + annotations = append(annotations, fmt.Sprintf("%s limit decreased to respect ratio", resource)) + } + } + if len(resources) > 0 { + containerResources.Limits = resources + } + } patches, annotations = appendPatchesAndAnnotations(patches, annotations, pod.Spec.Containers[i].Resources.Requests, i, containerResources.Requests, "requests", "request") patches, annotations = appendPatchesAndAnnotations(patches, annotations, pod.Spec.Containers[i].Resources.Limits, i, containerResources.Limits, "limits", "limit") diff --git a/vertical-pod-autoscaler/pkg/admission-controller/logic/server_test.go b/vertical-pod-autoscaler/pkg/admission-controller/logic/server_test.go index 35ed1e7ff9ce..adcf93f2f0c3 100644 --- a/vertical-pod-autoscaler/pkg/admission-controller/logic/server_test.go +++ b/vertical-pod-autoscaler/pkg/admission-controller/logic/server_test.go @@ -311,7 +311,8 @@ func TestGetPatchesForResourceRequest(t *testing.T) { t.Run(fmt.Sprintf("test case: %s", tc.name), func(t *testing.T) { fppp := fakePodPreProcessor{e: tc.preProcessorError} frp := fakeRecommendationProvider{tc.recommendResources, tc.recommendAnnotations, tc.recommendName, tc.recommendError} - s := NewAdmissionServer(&frp, &fppp) + lc := NewLimitsChecker(nil) + s := NewAdmissionServer(&frp, &fppp, lc) patches, err := s.getPatchesForPodResourceRequest(tc.podJson, tc.namespace) if tc.expectError == nil { assert.NoError(t, err) @@ -358,7 +359,8 @@ func TestGetPatchesForResourceRequest_TwoReplacementResources(t *testing.T) { }`) recommendAnnotations := vpa_api_util.ContainerToAnnotationsMap{} frp := fakeRecommendationProvider{recommendResources, recommendAnnotations, "name", nil} - s := NewAdmissionServer(&frp, &fppp) + lc := NewLimitsChecker(nil) + s := NewAdmissionServer(&frp, &fppp, lc) patches, err := s.getPatchesForPodResourceRequest(podJson, "default") assert.NoError(t, err) // Order of updates for cpu and unobtanium depends on order of iterating a map, both possible results are valid. 
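
[Editor's note, not part of the patch] The NeedsLimits logic added in this patch reduces to a max-ratio check: when the recommended request would make limit/request exceed the LimitRange MaxLimitRequestRatio, the hinted limit is request*maxRatio. A minimal sketch of that check, assuming a hypothetical helper name hintedLimit and only the milli-value path:

package limitsketch

import "k8s.io/apimachinery/pkg/api/resource"

// hintedLimit is a hypothetical helper (not in the patch) mirroring the
// max-ratio check in limitsChecker.NeedsLimits: if limit/request would exceed
// the LimitRange MaxLimitRequestRatio, cap the limit to request*maxRatio.
func hintedLimit(request, limit, maxRatio resource.Quantity) (resource.Quantity, bool) {
	if float64(limit.MilliValue())/float64(request.MilliValue()) <= float64(maxRatio.Value()) {
		// Ratio already respected; no hint needed.
		return resource.Quantity{}, false
	}
	capped := int64(float64(request.MilliValue()) * float64(maxRatio.Value()))
	if capped > resource.MaxMilliValue {
		capped = resource.MaxMilliValue
	}
	return *resource.NewMilliQuantity(capped, request.Format), true
}

With the values used in the tests below (a 20m CPU request against a max ratio of 5), this yields a 100m hinted limit.
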
diff --git a/vertical-pod-autoscaler/pkg/admission-controller/main.go b/vertical-pod-autoscaler/pkg/admission-controller/main.go index 61553925f7c5..ac6e063f3bb0 100644 --- a/vertical-pod-autoscaler/pkg/admission-controller/main.go +++ b/vertical-pod-autoscaler/pkg/admission-controller/main.go @@ -48,12 +48,15 @@ var ( tlsPrivateKey: flag.String("tls-private-key", "/etc/tls-certs/serverKey.pem", "Path to server certificate key PEM file."), } - port = flag.Int("port", 8000, "The port to listen on.") - address = flag.String("address", ":8944", "The address to expose Prometheus metrics.") - namespace = os.Getenv("NAMESPACE") - webhookAddress = flag.String("webhook-address", "", "Address under which webhook is registered. Used when registerByURL is set to true.") - webhookPort = flag.String("webhook-port", "", "Server Port for Webhook") - registerByURL = flag.Bool("register-by-url", false, "If set to true, admission webhook will be registered by URL (webhookAddress:webhookPort) instead of by service name") + port = flag.Int("port", 8000, "The port to listen on.") + address = flag.String("address", ":8944", "The address to expose Prometheus metrics.") + namespace = os.Getenv("NAMESPACE") + webhookAddress = flag.String("webhook-address", "", "Address under which webhook is registered. Used when registerByURL is set to true.") + webhookPort = flag.String("webhook-port", "", "Server Port for Webhook") + registerByURL = flag.Bool("register-by-url", false, "If set to true, admission webhook will be registered by URL (webhookAddress:webhookPort) instead of by service name") + allowToAdjustLimits = flag.Bool("allow-to-adjust-limits", false, "If set to true, admission webhook will set limits per container too if needed") + + factoryForLimitsChecker interface{} ) func main() { @@ -79,7 +82,13 @@ func main() { target.NewVpaTargetSelectorFetcher(config, kubeClient, factory), target.NewBeta1TargetSelectorFetcher(config), ) - as := logic.NewAdmissionServer(logic.NewRecommendationProvider(vpaLister, vpa_api_util.NewCappingRecommendationProcessor(), targetSelectorFetcher), logic.NewDefaultPodPreProcessor()) + recommendationProvider := logic.NewRecommendationProvider(vpaLister, vpa_api_util.NewCappingRecommendationProcessor(), targetSelectorFetcher) + podPreprocessor := logic.NewDefaultPodPreProcessor() + limitsChecker := logic.NewLimitsChecker(nil) + if *allowToAdjustLimits { + limitsChecker = logic.NewLimitsChecker(factory) + } + as := logic.NewAdmissionServer(recommendationProvider, podPreprocessor, limitsChecker) http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { as.Serve(w, r) healthCheck.UpdateLastActivity() From 77821a03ab524767722cc8b74767499ef41ae206 Mon Sep 17 00:00:00 2001 From: Joachim Bartosik Date: Mon, 20 May 2019 17:29:13 +0200 Subject: [PATCH 02/25] Address comments from original PR review I'm keeping original CL from https://github.com/kubernetes/autoscaler/pull/1813 and applying changes requested in the review in a separate CL to keep autoship information clean. 
Conflicts because master has VPA preprocessor, resolved manually: vertical-pod-autoscaler/pkg/admission-controller/logic/server_test.go vertical-pod-autoscaler/pkg/admission-controller/main.go --- .../logic/limitrange_checker.go | 78 +++++++------ .../logic/limitrange_checker_test.go | 107 +++++++++--------- .../pkg/admission-controller/logic/server.go | 12 +- .../admission-controller/logic/server_test.go | 4 +- .../pkg/admission-controller/main.go | 10 +- 5 files changed, 111 insertions(+), 100 deletions(-) diff --git a/vertical-pod-autoscaler/pkg/admission-controller/logic/limitrange_checker.go b/vertical-pod-autoscaler/pkg/admission-controller/logic/limitrange_checker.go index c2996b2495cf..88bc1097a9c1 100644 --- a/vertical-pod-autoscaler/pkg/admission-controller/logic/limitrange_checker.go +++ b/vertical-pod-autoscaler/pkg/admission-controller/logic/limitrange_checker.go @@ -17,6 +17,7 @@ limitations under the License. package logic import ( + "fmt" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/labels" @@ -27,14 +28,12 @@ import ( // LimitsHints provides hinted limits that respect limit range max ratio type LimitsHints interface { - IsNil() bool RequestsExceedsRatio(indexOfContainer int, resourceName v1.ResourceName) bool HintedLimit(indexOfContainer int, resourceName v1.ResourceName) resource.Quantity } // LimitRangeHints implements LimitsHints interface type LimitRangeHints struct { - requestsExceedsRatio []map[v1.ResourceName]bool limitsRespectingRatio []v1.ResourceList } @@ -42,26 +41,21 @@ var _ LimitsHints = &LimitRangeHints{} // LimitsChecker checks for LimitRange and if container needs limits to be set type LimitsChecker interface { - NeedsLimits(*v1.Pod, []ContainerResources) LimitsHints -} - -// IsNil return true if there is no hints to set limits -func (lrh *LimitRangeHints) IsNil() bool { - return lrh == (*LimitRangeHints)(nil) + NeedsLimits(*v1.Pod, []ContainerResources) (LimitsHints, error) } // RequestsExceedsRatio return true if limits have to be set to respect limit range max ratio func (lrh *LimitRangeHints) RequestsExceedsRatio(indexOfContainer int, resourceName v1.ResourceName) bool { - if !lrh.IsNil() { - yes, ok := lrh.requestsExceedsRatio[indexOfContainer][resourceName] - return ok && yes + if lrh != nil && indexOfContainer < len(lrh.limitsRespectingRatio) { + _, present := lrh.limitsRespectingRatio[indexOfContainer][resourceName] + return present } return false } // HintedLimit return the limit Quantity that respect the limit range max ration func (lrh *LimitRangeHints) HintedLimit(indexOfContainer int, resourceName v1.ResourceName) resource.Quantity { - if !lrh.IsNil() { + if lrh != nil && indexOfContainer < len(lrh.limitsRespectingRatio) { limit, ok := lrh.limitsRespectingRatio[indexOfContainer][resourceName] if ok { return limit @@ -75,8 +69,8 @@ type neverNeedsLimitsChecker struct{} var _ LimitsChecker = &neverNeedsLimitsChecker{} -func (lc *neverNeedsLimitsChecker) NeedsLimits(pod *v1.Pod, containersResources []ContainerResources) LimitsHints { - return LimitsHints((*LimitRangeHints)(nil)) +func (lc *neverNeedsLimitsChecker) NeedsLimits(pod *v1.Pod, containersResources []ContainerResources) (LimitsHints, error) { + return LimitsHints((*LimitRangeHints)(nil)), nil } type limitsChecker struct { @@ -85,21 +79,26 @@ type limitsChecker struct { var _ LimitsChecker = &limitsChecker{} -// NewLimitsChecker creates a LimitsChecker -func NewLimitsChecker(f informers.SharedInformerFactory) LimitsChecker { - if f != nil { - 
limitrangeLister := f.Core().V1().LimitRanges().Lister() - stopCh := make(chan struct{}) - f.Start(stopCh) - for _, ok := range f.WaitForCacheSync(stopCh) { - if !ok { - if ok := f.Core().V1().LimitRanges().Informer().HasSynced(); !ok { - return &neverNeedsLimitsChecker{} - } +// NewLimitsChecker returns a limitsChecker or an error it encountered when attempting to create it. +func NewLimitsChecker(f informers.SharedInformerFactory) (*limitsChecker, error) { + if f == nil { + return nil, fmt.Errorf("NewLimitsChecker requires a SharedInformerFactory but got nil") + } + limitrangeLister := f.Core().V1().LimitRanges().Lister() + stopCh := make(chan struct{}) + f.Start(stopCh) + for _, ok := range f.WaitForCacheSync(stopCh) { + if !ok { + if f.Core().V1().LimitRanges().Informer().HasSynced() { + return nil, fmt.Errorf("Informer did not sync") } } - return &limitsChecker{limitrangeLister} } + return &limitsChecker{limitrangeLister}, nil +} + +// NewNoopLimitsChecker returns a limit checker that +func NewNoopLimitsChecker() *neverNeedsLimitsChecker { return &neverNeedsLimitsChecker{} } @@ -143,14 +142,13 @@ func (id *interestingData) parse(lri *v1.LimitRangeItem) { } } -func (lc *limitsChecker) getLimitRangeItem(pod *v1.Pod) (ret *v1.LimitRangeItem) { - ret = nil +func (lc *limitsChecker) getLimitRangeItem(pod *v1.Pod) (*v1.LimitRangeItem, error) { limitranges, err := lc.limitrangeLister. LimitRanges(pod.GetNamespace()). List(labels.Everything()) if err != nil { - return ret + return nil, fmt.Errorf("error loading limit ranges: %s", err) } id := &interestingData{} @@ -164,35 +162,36 @@ func (lc *limitsChecker) getLimitRangeItem(pod *v1.Pod) (ret *v1.LimitRangeItem) lri.Default == nil { continue } + // TODO: handle multiple limit ranges matching a pod. foundInterstingData = true id.parse(&lri) } } if foundInterstingData { - ret = &v1.LimitRangeItem{ + return &v1.LimitRangeItem{ MaxLimitRequestRatio: id.MaxLimitRequestRatio, Default: id.Default, - } + }, nil } - - return ret + return nil, nil } -func (lc *limitsChecker) NeedsLimits(pod *v1.Pod, containersResources []ContainerResources) LimitsHints { - lri := lc.getLimitRangeItem(pod) +func (lc *limitsChecker) NeedsLimits(pod *v1.Pod, containersResources []ContainerResources) (LimitsHints, error) { + lri, err := lc.getLimitRangeItem(pod) + if err != nil { + return nil, fmt.Errorf("error getting limit range for pod: %s", err) + } if lri == (*v1.LimitRangeItem)(nil) { - return LimitsHints((*LimitRangeHints)(nil)) + return &LimitRangeHints{}, nil } lrh := &LimitRangeHints{ - requestsExceedsRatio: make([]map[v1.ResourceName]bool, len(containersResources)), limitsRespectingRatio: make([]v1.ResourceList, len(containersResources)), } needsLimits := false for i, cr := range containersResources { - lrh.requestsExceedsRatio[i] = make(map[v1.ResourceName]bool) lrh.limitsRespectingRatio[i] = make(v1.ResourceList) for name, value := range cr.Requests { var ctrLimit *resource.Quantity @@ -227,7 +226,6 @@ func (lc *limitsChecker) NeedsLimits(pod *v1.Pod, containersResources []Containe if futureRatio > maxRatio { needsLimits = true - lrh.requestsExceedsRatio[i][name] = true l := int64(float64(vv) * maxRatio) if useMilli { if l > resource.MaxMilliValue { @@ -245,5 +243,5 @@ func (lc *limitsChecker) NeedsLimits(pod *v1.Pod, containersResources []Containe if !needsLimits { lrh = nil } - return LimitsHints(lrh) + return LimitsHints(lrh), nil } diff --git a/vertical-pod-autoscaler/pkg/admission-controller/logic/limitrange_checker_test.go 
b/vertical-pod-autoscaler/pkg/admission-controller/logic/limitrange_checker_test.go index 95298778c1a2..4b1519720e69 100644 --- a/vertical-pod-autoscaler/pkg/admission-controller/logic/limitrange_checker_test.go +++ b/vertical-pod-autoscaler/pkg/admission-controller/logic/limitrange_checker_test.go @@ -164,42 +164,43 @@ func TestUpdateResourceLimits(t *testing.T) { // - no needed limits // - RequestsExceedsRatio always return false t.Run("test case for neverNeedsLimitsChecker", func(t *testing.T) { - nlc := NewLimitsChecker(nil) - hints := nlc.NeedsLimits(uninitialized, vpaContainersResources) - hintsPtr, _ := hints.(*LimitRangeHints) - if hintsPtr != nil { - t.Errorf("%v NeedsLimits didn't not return nil: %v", nlc, hints) - } - if !hints.IsNil() { - t.Errorf("%v NeedsLimits returned a LimitsHints not nil: %v", nlc, hints) - } - if hints.RequestsExceedsRatio(0, apiv1.ResourceMemory) != false { - t.Errorf("%v RequestsExceedsRatio didn't not return false", hints) - } - hinted := hints.HintedLimit(0, apiv1.ResourceMemory) - if !(&hinted).IsZero() { - t.Errorf("%v RequestsExceedsRatio didn't not return zero quantity", hints) + nlc := NewNoopLimitsChecker() + hints, err := nlc.NeedsLimits(uninitialized, vpaContainersResources) + if assert.NoError(t, err) { + hintsPtr, _ := hints.(*LimitRangeHints) + if hintsPtr != nil { + t.Errorf("%v NeedsLimits didn't not return nil: %v", nlc, hints) + } + assert.Nil(t, hints) + if hints.RequestsExceedsRatio(0, apiv1.ResourceMemory) != false { + t.Errorf("%v RequestsExceedsRatio didn't not return false", hints) + } + hinted := hints.HintedLimit(0, apiv1.ResourceMemory) + if !(&hinted).IsZero() { + t.Errorf("%v RequestsExceedsRatio didn't not return zero quantity", hints) + } } }) t.Run("test case for no Limit Range", func(t *testing.T) { cs := fake.NewSimpleClientset() factory := informers.NewSharedInformerFactory(cs, 0) - lc := NewLimitsChecker(factory) - hints := lc.NeedsLimits(uninitialized, vpaContainersResources) - hintsPtr, _ := hints.(*LimitRangeHints) - if hintsPtr != nil { - t.Errorf("%v NeedsLimits didn't not return nil: %v", lc, hints) - } - if !hints.IsNil() { - t.Errorf("%v NeedsLimits returned a LimitsHints not nil: %v", lc, hints) - } - if hints.RequestsExceedsRatio(0, apiv1.ResourceMemory) != false { - t.Errorf("%v RequestsExceedsRatio didn't not return false", hints) - } - hinted := hints.HintedLimit(0, apiv1.ResourceMemory) - if !(&hinted).IsZero() { - t.Errorf("%v RequestsExceedsRatio didn't not return zero quantity", hints) + lc, err := NewLimitsChecker(factory) + if assert.NoError(t, err) { + hints, err := lc.NeedsLimits(uninitialized, vpaContainersResources) + if assert.NoError(t, err) { + hintsPtr, _ := hints.(*LimitRangeHints) + if assert.NotNil(t, hintsPtr) { + assert.Empty(t, hintsPtr.limitsRespectingRatio) + } + if hints.RequestsExceedsRatio(0, apiv1.ResourceMemory) != false { + t.Errorf("%v RequestsExceedsRatio didn't not return false", hints) + } + hinted := hints.HintedLimit(0, apiv1.ResourceMemory) + if !(&hinted).IsZero() { + t.Errorf("%v RequestsExceedsRatio didn't not return zero quantity", hints) + } + } } }) @@ -208,28 +209,32 @@ func TestUpdateResourceLimits(t *testing.T) { t.Run(fmt.Sprintf("test case number: %d", i), func(t *testing.T) { cs := fake.NewSimpleClientset(tc.limitRanges...) 
factory := informers.NewSharedInformerFactory(cs, 0) - lc := NewLimitsChecker(factory) - resources := tc.containerResources - - hints := lc.NeedsLimits(tc.pod, resources) - assert.NotNil(t, hints, fmt.Sprintf("hints is: %+v", hints)) - - if tc.requestsExceedsRatioCPU { - assert.True(t, hints.RequestsExceedsRatio(0, apiv1.ResourceCPU)) - } else { - assert.False(t, hints.RequestsExceedsRatio(0, apiv1.ResourceCPU)) + lc, err := NewLimitsChecker(factory) + if assert.NoError(t, err) { + resources := tc.containerResources + + hints, err := lc.NeedsLimits(tc.pod, resources) + if assert.NoError(t, err) { + assert.NotNil(t, hints, fmt.Sprintf("hints is: %+v", hints)) + + if tc.requestsExceedsRatioCPU { + assert.True(t, hints.RequestsExceedsRatio(0, apiv1.ResourceCPU)) + } else { + assert.False(t, hints.RequestsExceedsRatio(0, apiv1.ResourceCPU)) + } + + if tc.requestsExceedsRatioMemory { + assert.True(t, hints.RequestsExceedsRatio(0, apiv1.ResourceMemory)) + } else { + assert.False(t, hints.RequestsExceedsRatio(0, apiv1.ResourceMemory)) + } + + hintedCPULimits := hints.HintedLimit(0, apiv1.ResourceCPU) + hintedMemoryLimits := hints.HintedLimit(0, apiv1.ResourceMemory) + assert.EqualValues(t, tc.limitsRespectingRatioCPU.Value(), hintedCPULimits.Value(), fmt.Sprintf("cpu limits doesn't match: %v != %v\n", tc.limitsRespectingRatioCPU.Value(), hintedCPULimits.Value())) + assert.EqualValues(t, tc.limitsRespectingRatioMemory.Value(), hintedMemoryLimits.Value(), fmt.Sprintf("memory limits doesn't match: %v != %v\n", tc.limitsRespectingRatioMemory.Value(), hintedMemoryLimits.Value())) + } } - - if tc.requestsExceedsRatioMemory { - assert.True(t, hints.RequestsExceedsRatio(0, apiv1.ResourceMemory)) - } else { - assert.False(t, hints.RequestsExceedsRatio(0, apiv1.ResourceMemory)) - } - - hintedCPULimits := hints.HintedLimit(0, apiv1.ResourceCPU) - hintedMemoryLimits := hints.HintedLimit(0, apiv1.ResourceMemory) - assert.EqualValues(t, tc.limitsRespectingRatioCPU.Value(), hintedCPULimits.Value(), fmt.Sprintf("cpu limits doesn't match: %v != %v\n", tc.limitsRespectingRatioCPU.Value(), hintedCPULimits.Value())) - assert.EqualValues(t, tc.limitsRespectingRatioMemory.Value(), hintedMemoryLimits.Value(), fmt.Sprintf("memory limits doesn't match: %v != %v\n", tc.limitsRespectingRatioMemory.Value(), hintedMemoryLimits.Value())) }) } diff --git a/vertical-pod-autoscaler/pkg/admission-controller/logic/server.go b/vertical-pod-autoscaler/pkg/admission-controller/logic/server.go index dbd4650d1dc9..eba2581f99a1 100644 --- a/vertical-pod-autoscaler/pkg/admission-controller/logic/server.go +++ b/vertical-pod-autoscaler/pkg/admission-controller/logic/server.go @@ -74,7 +74,10 @@ func (s *AdmissionServer) getPatchesForPodResourceRequest(raw []byte, namespace annotationsPerContainer = vpa_api_util.ContainerToAnnotationsMap{} } - limitsHints := s.limitsChecker.NeedsLimits(&pod, containersResources) + limitsHints, err := s.limitsChecker.NeedsLimits(&pod, containersResources) + if err != nil { + return nil, err + } patches := []patchRecord{} updatesAnnotation := []string{} @@ -136,14 +139,13 @@ func (s *AdmissionServer) getContainerPatch(pod v1.Pod, i int, annotationsPerCon annotations = make([]string, 0) } - if !limitsHints.IsNil() { + if limitsHints != nil { var resources v1.ResourceList resourceNames := []v1.ResourceName{"cpu", "memory"} for _, resource := range resourceNames { if limitsHints.RequestsExceedsRatio(i, resource) { - // we need just to take care of max ratio - // setting limits to request*maxRatio, - // It's 
needed when we are lowering requests too much + // LimitRange cannot specify min ratio: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.12/#limitrangeitem-v1-core + // If we exceed max ratio cap limit to request*maxRatio. limit := limitsHints.HintedLimit(i, resource) if resources == nil { resources = make(v1.ResourceList) diff --git a/vertical-pod-autoscaler/pkg/admission-controller/logic/server_test.go b/vertical-pod-autoscaler/pkg/admission-controller/logic/server_test.go index adcf93f2f0c3..6d641f76adce 100644 --- a/vertical-pod-autoscaler/pkg/admission-controller/logic/server_test.go +++ b/vertical-pod-autoscaler/pkg/admission-controller/logic/server_test.go @@ -311,7 +311,7 @@ func TestGetPatchesForResourceRequest(t *testing.T) { t.Run(fmt.Sprintf("test case: %s", tc.name), func(t *testing.T) { fppp := fakePodPreProcessor{e: tc.preProcessorError} frp := fakeRecommendationProvider{tc.recommendResources, tc.recommendAnnotations, tc.recommendName, tc.recommendError} - lc := NewLimitsChecker(nil) + lc := NewNoopLimitsChecker() s := NewAdmissionServer(&frp, &fppp, lc) patches, err := s.getPatchesForPodResourceRequest(tc.podJson, tc.namespace) if tc.expectError == nil { @@ -359,7 +359,7 @@ func TestGetPatchesForResourceRequest_TwoReplacementResources(t *testing.T) { }`) recommendAnnotations := vpa_api_util.ContainerToAnnotationsMap{} frp := fakeRecommendationProvider{recommendResources, recommendAnnotations, "name", nil} - lc := NewLimitsChecker(nil) + lc := NewNoopLimitsChecker() s := NewAdmissionServer(&frp, &fppp, lc) patches, err := s.getPatchesForPodResourceRequest(podJson, "default") assert.NoError(t, err) diff --git a/vertical-pod-autoscaler/pkg/admission-controller/main.go b/vertical-pod-autoscaler/pkg/admission-controller/main.go index ac6e063f3bb0..6afb57e0d825 100644 --- a/vertical-pod-autoscaler/pkg/admission-controller/main.go +++ b/vertical-pod-autoscaler/pkg/admission-controller/main.go @@ -84,9 +84,15 @@ func main() { ) recommendationProvider := logic.NewRecommendationProvider(vpaLister, vpa_api_util.NewCappingRecommendationProcessor(), targetSelectorFetcher) podPreprocessor := logic.NewDefaultPodPreProcessor() - limitsChecker := logic.NewLimitsChecker(nil) + var limitsChecker logic.LimitsChecker if *allowToAdjustLimits { - limitsChecker = logic.NewLimitsChecker(factory) + limitsChecker, err = logic.NewLimitsChecker(factory) + if err != nil { + klog.Errorf("Failed to create limitsChecker, falling back to not checking limits. Error message: %s", err) + limitsChecker = logic.NewNoopLimitsChecker() + } + } else { + limitsChecker = logic.NewNoopLimitsChecker() } as := logic.NewAdmissionServer(recommendationProvider, podPreprocessor, limitsChecker) http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { From de5d67fb0de80c45fa4e4b284d49c4bc1de05b0e Mon Sep 17 00:00:00 2001 From: Joachim Bartosik Date: Tue, 21 May 2019 17:43:32 +0200 Subject: [PATCH 03/25] Change logic for supporting limits. Keep limit/request user set (even if the limit comes from limit range). Cap limits to Max configured in the limit range. 
Conflicts (VPA preprocessor): vertical-pod-autoscaler/pkg/admission-controller/logic/server.go vertical-pod-autoscaler/pkg/admission-controller/logic/server_test.go vertical-pod-autoscaler/pkg/admission-controller/main.go --- .../logic/limitrange_checker.go | 214 ++------------ .../logic/limitrange_checker_test.go | 271 +++++------------- .../logic/recommendation_provider.go | 102 +++++-- .../logic/recommendation_provider_test.go | 91 +++++- .../pkg/admission-controller/logic/server.go | 32 +-- .../admission-controller/logic/server_test.go | 4 +- .../pkg/admission-controller/main.go | 13 +- .../pkg/utils/test/test_limit_range.go | 92 ++++++ 8 files changed, 361 insertions(+), 458 deletions(-) create mode 100644 vertical-pod-autoscaler/pkg/utils/test/test_limit_range.go diff --git a/vertical-pod-autoscaler/pkg/admission-controller/logic/limitrange_checker.go b/vertical-pod-autoscaler/pkg/admission-controller/logic/limitrange_checker.go index 88bc1097a9c1..0eff99d82228 100644 --- a/vertical-pod-autoscaler/pkg/admission-controller/logic/limitrange_checker.go +++ b/vertical-pod-autoscaler/pkg/admission-controller/logic/limitrange_checker.go @@ -19,229 +19,67 @@ package logic import ( "fmt" "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/labels" "k8s.io/client-go/informers" v1_listers "k8s.io/client-go/listers/core/v1" ) -// LimitsHints provides hinted limits that respect limit range max ratio -type LimitsHints interface { - RequestsExceedsRatio(indexOfContainer int, resourceName v1.ResourceName) bool - HintedLimit(indexOfContainer int, resourceName v1.ResourceName) resource.Quantity +// LimitsRangeCalculator checks for LimitRange and if container needs limits to be set +type LimitsRangeCalculator interface { + // GetContainerLimitRangeItem returns LimitRangeItem that describes limitation on container limits in the given namespace. 
+ GetContainerLimitRangeItem(namespace string) (*v1.LimitRangeItem, error) } -// LimitRangeHints implements LimitsHints interface -type LimitRangeHints struct { - limitsRespectingRatio []v1.ResourceList -} - -var _ LimitsHints = &LimitRangeHints{} - -// LimitsChecker checks for LimitRange and if container needs limits to be set -type LimitsChecker interface { - NeedsLimits(*v1.Pod, []ContainerResources) (LimitsHints, error) -} - -// RequestsExceedsRatio return true if limits have to be set to respect limit range max ratio -func (lrh *LimitRangeHints) RequestsExceedsRatio(indexOfContainer int, resourceName v1.ResourceName) bool { - if lrh != nil && indexOfContainer < len(lrh.limitsRespectingRatio) { - _, present := lrh.limitsRespectingRatio[indexOfContainer][resourceName] - return present - } - return false -} +type noopLimitsRangeCalculator struct{} -// HintedLimit return the limit Quantity that respect the limit range max ration -func (lrh *LimitRangeHints) HintedLimit(indexOfContainer int, resourceName v1.ResourceName) resource.Quantity { - if lrh != nil && indexOfContainer < len(lrh.limitsRespectingRatio) { - limit, ok := lrh.limitsRespectingRatio[indexOfContainer][resourceName] - if ok { - return limit - } - return resource.Quantity{} - } - return resource.Quantity{} -} - -type neverNeedsLimitsChecker struct{} - -var _ LimitsChecker = &neverNeedsLimitsChecker{} - -func (lc *neverNeedsLimitsChecker) NeedsLimits(pod *v1.Pod, containersResources []ContainerResources) (LimitsHints, error) { - return LimitsHints((*LimitRangeHints)(nil)), nil +func (lc *noopLimitsRangeCalculator) GetContainerLimitRangeItem(namespace string) (*v1.LimitRangeItem, error) { + return nil, nil } type limitsChecker struct { - limitrangeLister v1_listers.LimitRangeLister + limitRangeLister v1_listers.LimitRangeLister } -var _ LimitsChecker = &limitsChecker{} - -// NewLimitsChecker returns a limitsChecker or an error it encountered when attempting to create it. -func NewLimitsChecker(f informers.SharedInformerFactory) (*limitsChecker, error) { +// NewLimitsRangeCalculator returns a limitsChecker or an error it encountered when attempting to create it. 
+func NewLimitsRangeCalculator(f informers.SharedInformerFactory) (*limitsChecker, error) { if f == nil { - return nil, fmt.Errorf("NewLimitsChecker requires a SharedInformerFactory but got nil") + return nil, fmt.Errorf("NewLimitsRangeCalculator requires a SharedInformerFactory but got nil") } - limitrangeLister := f.Core().V1().LimitRanges().Lister() + limitRangeLister := f.Core().V1().LimitRanges().Lister() stopCh := make(chan struct{}) f.Start(stopCh) for _, ok := range f.WaitForCacheSync(stopCh) { if !ok { if f.Core().V1().LimitRanges().Informer().HasSynced() { - return nil, fmt.Errorf("Informer did not sync") + return nil, fmt.Errorf("informer did not sync") } } } - return &limitsChecker{limitrangeLister}, nil -} - -// NewNoopLimitsChecker returns a limit checker that -func NewNoopLimitsChecker() *neverNeedsLimitsChecker { - return &neverNeedsLimitsChecker{} + return &limitsChecker{limitRangeLister}, nil } -type interestingData struct { - MaxLimitRequestRatio v1.ResourceList - Default v1.ResourceList -} - -func (id *interestingData) parse(lri *v1.LimitRangeItem) { - if value, hasCPU := lri.MaxLimitRequestRatio[v1.ResourceCPU]; hasCPU { - if id.MaxLimitRequestRatio == nil { - id.MaxLimitRequestRatio = make(v1.ResourceList) - } - if maxRatio, exists := id.MaxLimitRequestRatio[v1.ResourceCPU]; !exists || maxRatio.Cmp(value) > 0 { - id.MaxLimitRequestRatio[v1.ResourceCPU] = *value.Copy() - } - } - if value, hasMemory := lri.MaxLimitRequestRatio[v1.ResourceMemory]; hasMemory { - if id.MaxLimitRequestRatio == nil { - id.MaxLimitRequestRatio = make(v1.ResourceList) - } - if maxRatio, exists := id.MaxLimitRequestRatio[v1.ResourceMemory]; !exists || maxRatio.Cmp(value) > 0 { - id.MaxLimitRequestRatio[v1.ResourceMemory] = *value.Copy() - } - } - if value, hasCPU := lri.Default[v1.ResourceCPU]; hasCPU { - if id.Default == nil { - id.Default = make(v1.ResourceList) - } - if _, exists := id.Default[v1.ResourceCPU]; !exists { - id.Default[v1.ResourceCPU] = *value.Copy() - } - } - if value, hasMemory := lri.Default[v1.ResourceMemory]; hasMemory { - if id.Default == nil { - id.Default = make(v1.ResourceList) - } - if _, exists := id.Default[v1.ResourceMemory]; !exists { - id.Default[v1.ResourceMemory] = *value.Copy() - } - } +// NewNoopLimitsCalculator returns a limit calculator that instantly returns no limits. +func NewNoopLimitsCalculator() *noopLimitsRangeCalculator { + return &noopLimitsRangeCalculator{} } -func (lc *limitsChecker) getLimitRangeItem(pod *v1.Pod) (*v1.LimitRangeItem, error) { - limitranges, err := lc.limitrangeLister. - LimitRanges(pod.GetNamespace()). - List(labels.Everything()) - +func (lc *limitsChecker) GetContainerLimitRangeItem(namespace string) (*v1.LimitRangeItem, error) { + limitRanges, err := lc.limitRangeLister.LimitRanges(namespace).List(labels.Everything()) if err != nil { return nil, fmt.Errorf("error loading limit ranges: %s", err) } - id := &interestingData{} - foundInterstingData := false - for _, lr := range limitranges { + for _, lr := range limitRanges { for _, lri := range lr.Spec.Limits { - if lri.Type != v1.LimitTypeContainer && lri.Type != v1.LimitTypePod { - continue - } - if lri.MaxLimitRequestRatio == nil && - lri.Default == nil { - continue + if lri.Type == v1.LimitTypeContainer && (lri.Max != nil || lri.Default != nil) { + // TODO: handle multiple limit ranges matching a pod. + return &v1.LimitRangeItem{ + Max: lri.Max.DeepCopy(), + Default: lri.Default.DeepCopy(), + }, nil } - // TODO: handle multiple limit ranges matching a pod. 
- foundInterstingData = true - id.parse(&lri) } } - if foundInterstingData { - return &v1.LimitRangeItem{ - MaxLimitRequestRatio: id.MaxLimitRequestRatio, - Default: id.Default, - }, nil - } return nil, nil } - -func (lc *limitsChecker) NeedsLimits(pod *v1.Pod, containersResources []ContainerResources) (LimitsHints, error) { - lri, err := lc.getLimitRangeItem(pod) - if err != nil { - return nil, fmt.Errorf("error getting limit range for pod: %s", err) - } - - if lri == (*v1.LimitRangeItem)(nil) { - return &LimitRangeHints{}, nil - } - - lrh := &LimitRangeHints{ - limitsRespectingRatio: make([]v1.ResourceList, len(containersResources)), - } - needsLimits := false - - for i, cr := range containersResources { - lrh.limitsRespectingRatio[i] = make(v1.ResourceList) - for name, value := range cr.Requests { - var ctrLimit *resource.Quantity - if pod.Spec.Containers[i].Resources.Limits != nil { - if q, hasLimit := pod.Spec.Containers[i].Resources.Limits[name]; hasLimit { - ctrLimit = &q - } - } - if q, hasDefault := lri.Default[name]; hasDefault && ctrLimit == nil { - ctrLimit = &q - } - if ctrLimit == nil { - // no limits for this container, neither default will be set - continue - } - - if ratio, hasRatio := lri.MaxLimitRequestRatio[name]; hasRatio { - dl := *ctrLimit - dlv := dl.Value() - vv := value.Value() - useMilli := false - if dlv <= resource.MaxMilliValue && - vv <= resource.MaxMilliValue && - name == v1.ResourceCPU { - dlv = dl.MilliValue() - vv = value.MilliValue() - useMilli = true - } - - futureRatio := float64(dlv) / float64(vv) - maxRatio := float64(ratio.Value()) - - if futureRatio > maxRatio { - needsLimits = true - l := int64(float64(vv) * maxRatio) - if useMilli { - if l > resource.MaxMilliValue { - l = resource.MaxMilliValue - } - lrh.limitsRespectingRatio[i][name] = *resource.NewMilliQuantity(l, value.Format) - } else { - lrh.limitsRespectingRatio[i][name] = *resource.NewQuantity(l, value.Format) - } - } - } - } - } - - if !needsLimits { - lrh = nil - } - return LimitsHints(lrh), nil -} diff --git a/vertical-pod-autoscaler/pkg/admission-controller/logic/limitrange_checker_test.go b/vertical-pod-autoscaler/pkg/admission-controller/logic/limitrange_checker_test.go index 4b1519720e69..91c464955d52 100644 --- a/vertical-pod-autoscaler/pkg/admission-controller/logic/limitrange_checker_test.go +++ b/vertical-pod-autoscaler/pkg/admission-controller/logic/limitrange_checker_test.go @@ -17,223 +17,106 @@ limitations under the License. 
package logic import ( - "fmt" - "testing" - + "github.com/stretchr/testify/assert" "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/informers" + + //"fmt" + "testing" + + //"k8s.io/apimachinery/pkg/runtime" + //"k8s.io/client-go/informers" "k8s.io/client-go/kubernetes/fake" - "github.com/stretchr/testify/assert" apiv1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + //"k8s.io/apimachinery/pkg/api/resource" "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/test" ) -func TestUpdateResourceLimits(t *testing.T) { - type testCase struct { - pod *apiv1.Pod - containerResources []ContainerResources - limitRanges []runtime.Object - requestsExceedsRatioCPU bool - requestsExceedsRatioMemory bool - limitsRespectingRatioCPU resource.Quantity - limitsRespectingRatioMemory resource.Quantity - } - containerName := "container1" - vpaName := "vpa1" +func getPod() *apiv1.Pod { labels := map[string]string{"app": "testingApp"} + return test.Pod().WithName("test_uninitialized").AddContainer(test.BuildTestContainer("container1", "", "")).WithLabels(labels).Get() +} - minRatio := test.Resources("5", "5") +func TestNewNoopLimitsChecker(t *testing.T) { + nlc := NewNoopLimitsCalculator() + limitRange, err := nlc.GetContainerLimitRangeItem(getPod().Namespace) + if assert.NoError(t, err) { + assert.Nil(t, limitRange) + } +} + +func TestNoLimitRange(t *testing.T) { + cs := fake.NewSimpleClientset() + factory := informers.NewSharedInformerFactory(cs, 0) + lc, err := NewLimitsRangeCalculator(factory) + + if assert.NoError(t, err) { + limitRange, err := lc.GetContainerLimitRangeItem(getPod().Namespace) + if assert.NoError(t, err) { + assert.Nil(t, limitRange) + } + } +} + +func TestUpdateResourceLimits(t *testing.T) { - limitranges := []runtime.Object{ - &apiv1.LimitRange{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: "default", - Name: "limitRange-with-default-and-ratio", + testCases := []struct { + name string + pod *apiv1.Pod + limitRanges []runtime.Object + expectErr error + expectLimits *apiv1.LimitRangeItem + }{ + { + name: "no matching limit ranges", + pod: getPod(), + limitRanges: []runtime.Object{ + test.LimitRange().WithName("different-namespace").WithNamespace("different").WithType(apiv1.LimitTypePod).WithMax(test.Resources("2", "2")).Get(), + test.LimitRange().WithName("differen-type").WithNamespace("default").WithType(apiv1.LimitTypePersistentVolumeClaim).WithMax(test.Resources("2", "2")).Get(), }, - Spec: apiv1.LimitRangeSpec{ - Limits: []apiv1.LimitRangeItem{ - { - Type: apiv1.LimitTypeContainer, - Default: test.Resources("2000m", "2Gi"), - }, - { - Type: apiv1.LimitTypePod, - MaxLimitRequestRatio: test.Resources("10", "10"), - }, - }, + expectErr: nil, + expectLimits: nil, + }, + { + name: "matching container limit range", + pod: getPod(), + limitRanges: []runtime.Object{ + test.LimitRange().WithName("default").WithNamespace("default").WithType(apiv1.LimitTypeContainer).WithMax(test.Resources("2", "2")).Get(), + }, + expectErr: nil, + expectLimits: &apiv1.LimitRangeItem{ + Max: test.Resources("2", "2"), }, }, - &apiv1.LimitRange{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: "default", - Name: "limitRange-with-only-ratio", + { + name: "with default value", + pod: getPod(), + limitRanges: []runtime.Object{ + test.LimitRange().WithName("default").WithNamespace("default").WithType(apiv1.LimitTypeContainer).WithDefault(test.Resources("2", "2")).Get(), }, - Spec: apiv1.LimitRangeSpec{ - Limits: []apiv1.LimitRangeItem{ - { - Type: 
apiv1.LimitTypePod, - MaxLimitRequestRatio: minRatio, - }, - }, + expectErr: nil, + expectLimits: &apiv1.LimitRangeItem{ + Default: test.Resources("2", "2"), }, }, } - uninitialized := test.Pod().WithName("test_uninitialized").AddContainer(test.BuildTestContainer(containerName, "", "")).Get() - uninitialized.ObjectMeta.Labels = labels - - initialized := test.Pod().WithName("test_initialized").AddContainer(test.BuildTestContainer(containerName, "1", "100Mi")).Get() - initialized.ObjectMeta.Labels = labels - - withLimits := test.Pod().WithName("test_initialized").AddContainer(test.BuildTestContainer(containerName, "1", "100Mi")).Get() - withLimits.ObjectMeta.Labels = labels - withLimits.Spec.Containers[0].Resources.Limits = test.Resources("1500m", "800Mi") - - withHugeMemLimits := test.Pod().WithName("test_initialized").AddContainer(test.BuildTestContainer(containerName, "1", "10Gi")).Get() - withHugeMemLimits.ObjectMeta.Labels = labels - withHugeMemLimits.Spec.Containers[0].Resources.Limits = test.Resources("1500m", "80Gi") - - vpaBuilder := test.VerticalPodAutoscaler(). - WithName(vpaName). - WithContainer(containerName). - WithTarget("20m", "200Mi") - vpa := vpaBuilder.Get() - - vpaWithHighMemory := vpaBuilder.WithTarget("2", "3Gi").Get() - - // short circuit recommendation provider - vpaContainersResources := []ContainerResources{{ - Requests: vpa.Status.Recommendation.ContainerRecommendations[0].Target, - }} - vpaHighMemContainersResources := []ContainerResources{{ - Requests: vpaWithHighMemory.Status.Recommendation.ContainerRecommendations[0].Target, - }} - - expectedMemory := func(crs []ContainerResources, ratio apiv1.ResourceList) resource.Quantity { - return *resource.NewQuantity( - int64(float64( - crs[0].Requests.Memory().Value())*float64(ratio.Memory().Value())), - crs[0].Requests.Memory().Format) - } - expectedCPU := func(crs []ContainerResources, ratio apiv1.ResourceList) resource.Quantity { - return *resource.NewMilliQuantity( - int64(float64( - crs[0].Requests.Cpu().MilliValue())*float64(ratio.Cpu().Value())), - crs[0].Requests.Cpu().Format) - } - - testCases := []testCase{{ - pod: uninitialized, - containerResources: vpaContainersResources, - limitRanges: limitranges, - requestsExceedsRatioCPU: true, - requestsExceedsRatioMemory: true, - limitsRespectingRatioCPU: expectedCPU(vpaContainersResources, minRatio), - limitsRespectingRatioMemory: expectedMemory(vpaContainersResources, minRatio), - }, { - pod: initialized, - containerResources: vpaContainersResources, - limitRanges: limitranges, - requestsExceedsRatioCPU: true, - requestsExceedsRatioMemory: true, - limitsRespectingRatioCPU: expectedCPU(vpaContainersResources, minRatio), - limitsRespectingRatioMemory: expectedMemory(vpaContainersResources, minRatio), - }, { - pod: withLimits, - containerResources: vpaContainersResources, - limitRanges: limitranges, - requestsExceedsRatioCPU: true, - requestsExceedsRatioMemory: false, - limitsRespectingRatioCPU: expectedCPU(vpaContainersResources, minRatio), - limitsRespectingRatioMemory: resource.Quantity{}, - }, { - pod: withHugeMemLimits, - containerResources: vpaHighMemContainersResources, - limitRanges: limitranges, - requestsExceedsRatioCPU: false, - requestsExceedsRatioMemory: true, - limitsRespectingRatioCPU: resource.Quantity{}, - limitsRespectingRatioMemory: expectedMemory(vpaHighMemContainersResources, minRatio), - }} - - // if admission controller is not allowed to adjust limits - // the limits checher have to return always: - // - no needed limits - // - 
RequestsExceedsRatio always return false - t.Run("test case for neverNeedsLimitsChecker", func(t *testing.T) { - nlc := NewNoopLimitsChecker() - hints, err := nlc.NeedsLimits(uninitialized, vpaContainersResources) - if assert.NoError(t, err) { - hintsPtr, _ := hints.(*LimitRangeHints) - if hintsPtr != nil { - t.Errorf("%v NeedsLimits didn't not return nil: %v", nlc, hints) - } - assert.Nil(t, hints) - if hints.RequestsExceedsRatio(0, apiv1.ResourceMemory) != false { - t.Errorf("%v RequestsExceedsRatio didn't not return false", hints) - } - hinted := hints.HintedLimit(0, apiv1.ResourceMemory) - if !(&hinted).IsZero() { - t.Errorf("%v RequestsExceedsRatio didn't not return zero quantity", hints) - } - } - }) - - t.Run("test case for no Limit Range", func(t *testing.T) { - cs := fake.NewSimpleClientset() - factory := informers.NewSharedInformerFactory(cs, 0) - lc, err := NewLimitsChecker(factory) - if assert.NoError(t, err) { - hints, err := lc.NeedsLimits(uninitialized, vpaContainersResources) - if assert.NoError(t, err) { - hintsPtr, _ := hints.(*LimitRangeHints) - if assert.NotNil(t, hintsPtr) { - assert.Empty(t, hintsPtr.limitsRespectingRatio) - } - if hints.RequestsExceedsRatio(0, apiv1.ResourceMemory) != false { - t.Errorf("%v RequestsExceedsRatio didn't not return false", hints) - } - hinted := hints.HintedLimit(0, apiv1.ResourceMemory) - if !(&hinted).IsZero() { - t.Errorf("%v RequestsExceedsRatio didn't not return zero quantity", hints) - } - } - } - }) - - for i, tc := range testCases { - - t.Run(fmt.Sprintf("test case number: %d", i), func(t *testing.T) { + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { cs := fake.NewSimpleClientset(tc.limitRanges...) factory := informers.NewSharedInformerFactory(cs, 0) - lc, err := NewLimitsChecker(factory) + lc, err := NewLimitsRangeCalculator(factory) if assert.NoError(t, err) { - resources := tc.containerResources - - hints, err := lc.NeedsLimits(tc.pod, resources) - if assert.NoError(t, err) { - assert.NotNil(t, hints, fmt.Sprintf("hints is: %+v", hints)) - - if tc.requestsExceedsRatioCPU { - assert.True(t, hints.RequestsExceedsRatio(0, apiv1.ResourceCPU)) - } else { - assert.False(t, hints.RequestsExceedsRatio(0, apiv1.ResourceCPU)) - } - - if tc.requestsExceedsRatioMemory { - assert.True(t, hints.RequestsExceedsRatio(0, apiv1.ResourceMemory)) - } else { - assert.False(t, hints.RequestsExceedsRatio(0, apiv1.ResourceMemory)) - } - - hintedCPULimits := hints.HintedLimit(0, apiv1.ResourceCPU) - hintedMemoryLimits := hints.HintedLimit(0, apiv1.ResourceMemory) - assert.EqualValues(t, tc.limitsRespectingRatioCPU.Value(), hintedCPULimits.Value(), fmt.Sprintf("cpu limits doesn't match: %v != %v\n", tc.limitsRespectingRatioCPU.Value(), hintedCPULimits.Value())) - assert.EqualValues(t, tc.limitsRespectingRatioMemory.Value(), hintedMemoryLimits.Value(), fmt.Sprintf("memory limits doesn't match: %v != %v\n", tc.limitsRespectingRatioMemory.Value(), hintedMemoryLimits.Value())) + labels := map[string]string{"app": "testingApp"} + pod := test.Pod().WithName("test_uninitialized").AddContainer(test.BuildTestContainer("container1", "", "")).WithLabels(labels).Get() + limitRange, err := lc.GetContainerLimitRangeItem(pod.Namespace) + if tc.expectErr == nil { + assert.NoError(t, err) + } else { + assert.Error(t, err) } + assert.Equal(t, tc.expectLimits, limitRange) } }) diff --git a/vertical-pod-autoscaler/pkg/admission-controller/logic/recommendation_provider.go 
b/vertical-pod-autoscaler/pkg/admission-controller/logic/recommendation_provider.go index ec1d490a96d8..3c0c4b24dc08 100644 --- a/vertical-pod-autoscaler/pkg/admission-controller/logic/recommendation_provider.go +++ b/vertical-pod-autoscaler/pkg/admission-controller/logic/recommendation_provider.go @@ -50,21 +50,42 @@ type RecommendationProvider interface { } type recommendationProvider struct { - vpaLister vpa_lister.VerticalPodAutoscalerLister + limitsRangeCalculator LimitsRangeCalculator recommendationProcessor vpa_api_util.RecommendationProcessor selectorFetcher target.VpaTargetSelectorFetcher + vpaLister vpa_lister.VerticalPodAutoscalerLister } // NewRecommendationProvider constructs the recommendation provider that list VPAs and can be used to determine recommendations for pods. -func NewRecommendationProvider(vpaLister vpa_lister.VerticalPodAutoscalerLister, recommendationProcessor vpa_api_util.RecommendationProcessor, selectorFetcher target.VpaTargetSelectorFetcher) *recommendationProvider { +func NewRecommendationProvider(calculator LimitsRangeCalculator, recommendationProcessor vpa_api_util.RecommendationProcessor, + selectorFetcher target.VpaTargetSelectorFetcher, vpaLister vpa_lister.VerticalPodAutoscalerLister) *recommendationProvider { return &recommendationProvider{ - vpaLister: vpaLister, + limitsRangeCalculator: calculator, recommendationProcessor: recommendationProcessor, selectorFetcher: selectorFetcher, + vpaLister: vpaLister, } } -func getProportionalLimit(originalLimit, originalRequest, recommendedRequest *resource.Quantity) (limit *resource.Quantity, capped bool) { +// scaleQuantityProportionally returns value which has the same proportion to scaledQuantity as scaleResult has to scaleBase +// It also returns a bool indicating if it had to cap result to MaxInt64 milliunits. +func scaleQuantityProportionally(scaledQuantity, scaleBase, scaleResult *resource.Quantity) (*resource.Quantity, bool) { + originalMilli := big.NewInt(scaledQuantity.MilliValue()) + scaleBaseMilli := big.NewInt(scaleBase.MilliValue()) + scaleResultMilli := big.NewInt(scaleResult.MilliValue()) + var scaledOriginal big.Int + scaledOriginal.Mul(originalMilli, scaleResultMilli) + scaledOriginal.Div(&scaledOriginal, scaleBaseMilli) + if scaledOriginal.IsInt64() { + return resource.NewMilliQuantity(scaledOriginal.Int64(), scaledQuantity.Format), false + } + return resource.NewMilliQuantity(math.MaxInt64, scaledQuantity.Format), true +} + +func getProportionalLimit(originalLimit, originalRequest, recommendedRequest, defaultLimit *resource.Quantity) (limit *resource.Quantity, capped bool) { + if originalLimit == nil || originalLimit.Value() == 0 && defaultLimit != nil { + originalLimit = defaultLimit + } // originalLimit not set, don't set limit. if originalLimit == nil || originalLimit.Value() == 0 { return nil, false @@ -82,53 +103,75 @@ func getProportionalLimit(originalLimit, originalRequest, recommendedRequest *re } // Input and output milli values should fit in int64 but intermediate values might be bigger. 
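// Illustrative sketch, not part of the patch: a standalone check of the
// proportional-scaling step above. It mirrors the big.Int arithmetic of
// scaleQuantityProportionally; the demoScale name and the values are made up.
package main

import (
	"fmt"
	"math"
	"math/big"

	"k8s.io/apimachinery/pkg/api/resource"
)

// demoScale returns a quantity with the same proportion to scaledQuantity as
// scaleResult has to scaleBase, capping at MaxInt64 milliunits on overflow.
func demoScale(scaledQuantity, scaleBase, scaleResult *resource.Quantity) (*resource.Quantity, bool) {
	var out big.Int
	out.Mul(big.NewInt(scaledQuantity.MilliValue()), big.NewInt(scaleResult.MilliValue()))
	out.Div(&out, big.NewInt(scaleBase.MilliValue()))
	if out.IsInt64() {
		return resource.NewMilliQuantity(out.Int64(), scaledQuantity.Format), false
	}
	return resource.NewMilliQuantity(math.MaxInt64, scaledQuantity.Format), true
}

func main() {
	// Original limit 2000m over original request 1000m is a 2:1 ratio, so a
	// recommended request of 500m scales the limit down to 1000m.
	limit := resource.MustParse("2000m")
	request := resource.MustParse("1000m")
	recommended := resource.MustParse("500m")
	scaled, capped := demoScale(&limit, &request, &recommended)
	fmt.Println(scaled.String(), capped) // "1" (i.e. 1000m) false
}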
- originalMilliRequest := big.NewInt(originalRequest.MilliValue()) - originalMilliLimit := big.NewInt(originalLimit.MilliValue()) - recommendedMilliRequest := big.NewInt(recommendedRequest.MilliValue()) - var recommendedMilliLimit big.Int - recommendedMilliLimit.Mul(recommendedMilliRequest, originalMilliLimit) - recommendedMilliLimit.Div(&recommendedMilliLimit, originalMilliRequest) - if recommendedMilliLimit.IsInt64() { - return resource.NewMilliQuantity(recommendedMilliLimit.Int64(), recommendedRequest.Format), false - } - return resource.NewMilliQuantity(math.MaxInt64, recommendedRequest.Format), true + return scaleQuantityProportionally( /*scaledQuantity=*/ originalLimit /*scaleBase=*/, originalRequest /*scaleResult=*/, recommendedRequest) +} + +func proportionallyCapLimitToMax(recommendedRequest, recommendedLimit, maxLimit *resource.Quantity) (request, limit *resource.Quantity) { + if recommendedLimit == nil || maxLimit == nil || maxLimit.IsZero() { + return recommendedRequest, recommendedLimit + } + if recommendedLimit.Cmp(*maxLimit) <= 0 { + return recommendedRequest, recommendedLimit + } + scaledRequest, _ := scaleQuantityProportionally(recommendedRequest, recommendedLimit, maxLimit) + return scaledRequest, maxLimit +} + +func proportionallyCapLimitsToMax(recommendedRequests v1.ResourceList, cpuLimit, memLimit, maxCpuLimit, maxMemLimit *resource.Quantity) ContainerResources { + scaledCpuRequest, scaledCpuLimit := proportionallyCapLimitToMax(recommendedRequests.Cpu(), cpuLimit, maxCpuLimit) + scaledMemRequest, scaledMemLimit := proportionallyCapLimitToMax(recommendedRequests.Memory(), memLimit, maxMemLimit) + result := newContainerResources() + if scaledCpuRequest != nil { + result.Requests[v1.ResourceCPU] = *scaledCpuRequest + } + if scaledCpuLimit != nil { + result.Limits[v1.ResourceCPU] = *scaledCpuLimit + } + if scaledMemRequest != nil { + result.Requests[v1.ResourceMemory] = *scaledMemRequest + } + if scaledMemLimit != nil { + result.Limits[v1.ResourceMemory] = *scaledMemLimit + } + return result } // GetContainersResources returns the recommended resources for each container in the given pod in the same order they are specified in the pod.Spec. 
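// Illustrative sketch, not part of the patch: a quick numeric check of the
// capping rule in proportionallyCapLimitToMax above, leaving out the big.Int
// overflow handling. The capToMax name and the values are made up.
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

// capToMax clamps the limit to maxLimit and shrinks the request by the same
// factor, so the limit/request proportion is preserved.
func capToMax(request, limit, maxLimit resource.Quantity) (resource.Quantity, resource.Quantity) {
	if maxLimit.IsZero() || limit.Cmp(maxLimit) <= 0 {
		return request, limit
	}
	scaledRequest := resource.NewMilliQuantity(
		request.MilliValue()*maxLimit.MilliValue()/limit.MilliValue(), request.Format)
	return *scaledRequest, maxLimit
}

func main() {
	// Recommended request 1 CPU with a proportional limit of 4 CPU, but the
	// LimitRange max is 2 CPU: the limit is clamped to 2 and the request
	// shrinks by the same 2x factor to 500m.
	req, lim, max := resource.MustParse("1"), resource.MustParse("4"), resource.MustParse("2")
	r, l := capToMax(req, lim, max)
	fmt.Println(r.String(), l.String()) // 500m 2
}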
-func GetContainersResources(pod *v1.Pod, podRecommendation vpa_types.RecommendedPodResources, annotations vpa_api_util.ContainerToAnnotationsMap) []ContainerResources { +func GetContainersResources(pod *v1.Pod, podRecommendation vpa_types.RecommendedPodResources, limitRange *v1.LimitRangeItem, + annotations vpa_api_util.ContainerToAnnotationsMap) []ContainerResources { resources := make([]ContainerResources, len(pod.Spec.Containers)) for i, container := range pod.Spec.Containers { - resources[i] = newContainerResources() recommendation := vpa_api_util.GetRecommendationForContainer(container.Name, &podRecommendation) if recommendation == nil { klog.V(2).Infof("no matching recommendation found for container %s", container.Name) continue } - resources[i].Requests = recommendation.Target - cpuLimit, capped := getProportionalLimit(container.Resources.Limits.Cpu(), container.Resources.Requests.Cpu(), resources[i].Requests.Cpu()) - if cpuLimit != nil { - resources[i].Limits[v1.ResourceCPU] = *cpuLimit + var defaultCpu, defaultMem, maxCpuLimit, maxMemLimit *resource.Quantity + if limitRange != nil { + defaultCpu = limitRange.Default.Cpu() + defaultMem = limitRange.Default.Memory() + maxCpuLimit = limitRange.Max.Cpu() + maxMemLimit = limitRange.Max.Memory() } + cpuLimit, capped := getProportionalLimit(container.Resources.Limits.Cpu(), container.Resources.Requests.Cpu(), recommendation.Target.Cpu(), defaultCpu) if capped { annotations[container.Name] = append( annotations[container.Name], fmt.Sprintf( "Failed to keep CPU limit to request proportion of %d to %d with recommended request of %d milliCPU; doesn't fit in int64. Capping limit to MaxInt64", - container.Resources.Limits.Cpu().MilliValue(), container.Resources.Requests.Cpu().MilliValue(), resources[i].Requests.Cpu().MilliValue())) - } - memLimit, capped := getProportionalLimit(container.Resources.Limits.Memory(), container.Resources.Requests.Memory(), resources[i].Requests.Memory()) - if memLimit != nil { - resources[i].Limits[v1.ResourceMemory] = *memLimit + container.Resources.Limits.Cpu().MilliValue(), container.Resources.Requests.Cpu().MilliValue(), recommendation.Target.Cpu().MilliValue())) } + memLimit, capped := getProportionalLimit(container.Resources.Limits.Memory(), container.Resources.Requests.Memory(), recommendation.Target.Memory(), defaultMem) if capped { annotations[container.Name] = append( annotations[container.Name], fmt.Sprintf( "Failed to keep memory limit to request proportion of %d to %d with recommended request of %d milliBytes; doesn't fit in int64. Capping limit to MaxInt64", - container.Resources.Limits.Memory().MilliValue(), container.Resources.Requests.Memory().MilliValue(), resources[i].Requests.Memory().MilliValue())) + container.Resources.Limits.Memory().MilliValue(), container.Resources.Requests.Memory().MilliValue(), recommendation.Target.Memory().MilliValue())) } + resources[i] = proportionallyCapLimitsToMax(recommendation.Target, cpuLimit, memLimit, maxCpuLimit, maxMemLimit) } return resources } @@ -183,6 +226,11 @@ func (p *recommendationProvider) GetContainersResourcesForPod(pod *v1.Pod) ([]Co return nil, annotations, vpaConfig.Name, err } } - containerResources := GetContainersResources(pod, *recommendedPodResources, annotations) + podLimitRange, err := p.limitsRangeCalculator.GetContainerLimitRangeItem(pod.Namespace) + // TODO: Support limit range on pod level. 
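// Illustrative sketch, not part of the patch: the shape of the container-level
// LimitRangeItem the calculator hands back, and how its Default/Max lists read
// when an entry is missing. Values are made up for the example.
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	item := corev1.LimitRangeItem{
		Type:    corev1.LimitTypeContainer,
		Default: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("2")},
		Max:     corev1.ResourceList{corev1.ResourceMemory: resource.MustParse("1Gi")},
	}
	// The provider only reads Default and Max; an absent entry comes back as a
	// zero quantity, which the proportional-limit code treats as "not set".
	fmt.Println(item.Default.Cpu().String())    // 2
	fmt.Println(item.Default.Memory().String()) // 0
	fmt.Println(item.Max.Memory().String())     // 1Gi
}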
+ if err != nil { + return nil, nil, "", fmt.Errorf("error getting podLimitRange: %s", err) + } + containerResources := GetContainersResources(pod, *recommendedPodResources, podLimitRange, annotations) return containerResources, annotations, vpaConfig.Name, nil } diff --git a/vertical-pod-autoscaler/pkg/admission-controller/logic/recommendation_provider_test.go b/vertical-pod-autoscaler/pkg/admission-controller/logic/recommendation_provider_test.go index fa0555d867a8..1624ef118e18 100644 --- a/vertical-pod-autoscaler/pkg/admission-controller/logic/recommendation_provider_test.go +++ b/vertical-pod-autoscaler/pkg/admission-controller/logic/recommendation_provider_test.go @@ -30,7 +30,7 @@ import ( vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta2" target_mock "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/target/mock" "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/test" - api "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/vpa" + "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/vpa" vpa_api_util "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/vpa" ) @@ -45,6 +45,15 @@ func mustParseResourcePointer(val string) *resource.Quantity { return &q } +type fakeLimitRangeCalculator struct { + limitRange *apiv1.LimitRangeItem + err error +} + +func (nlrc *fakeLimitRangeCalculator) GetContainerLimitRangeItem(namespace string) (*apiv1.LimitRangeItem, error) { + return nlrc.limitRange, nlrc.err +} + func TestUpdateResourceRequests(t *testing.T) { containerName := "container1" vpaName := "vpa1" @@ -62,7 +71,7 @@ func TestUpdateResourceRequests(t *testing.T) { WithLabels(labels).Get() initializedContainer := test.Container().WithName(containerName). - WithCPURequest(resource.MustParse("1")).WithMemRequest(resource.MustParse("100Mi")).Get() + WithCPURequest(resource.MustParse("1")).WithCPURequest(resource.MustParse("2")).WithMemRequest(resource.MustParse("100Mi")).Get() initialized := test.Pod().WithName("test_initialized"). 
AddContainer(initializedContainer).WithLabels(labels).Get() @@ -102,16 +111,19 @@ func TestUpdateResourceRequests(t *testing.T) { vpaWithNilRecommendation.Status.Recommendation = nil testCases := []struct { - name string - pod *apiv1.Pod - vpas []*vpa_types.VerticalPodAutoscaler - expectedAction bool - expectedMem resource.Quantity - expectedCPU resource.Quantity - expectedCPULimit *resource.Quantity - expectedMemLimit *resource.Quantity - annotations vpa_api_util.ContainerToAnnotationsMap - labelSelector string + name string + pod *apiv1.Pod + vpas []*vpa_types.VerticalPodAutoscaler + expectedAction bool + expectedError error + expectedMem resource.Quantity + expectedCPU resource.Quantity + expectedCPULimit *resource.Quantity + expectedMemLimit *resource.Quantity + limitRange *apiv1.LimitRangeItem + limitRangeCalcErr error + annotations vpa_api_util.ContainerToAnnotationsMap + labelSelector string }{ { name: "uninitialized pod", @@ -254,7 +266,52 @@ func TestUpdateResourceRequests(t *testing.T) { }, }, }, + { + name: "limit range calculation error", + pod: initialized, + vpas: []*vpa_types.VerticalPodAutoscaler{vpa}, + limitRangeCalcErr: fmt.Errorf("oh no"), + expectedAction: false, + expectedError: fmt.Errorf("error getting podLimitRange: oh no"), + }, + { + name: "proportional limit from default", + pod: initialized, + vpas: []*vpa_types.VerticalPodAutoscaler{vpa}, + expectedAction: true, + expectedCPU: resource.MustParse("2"), + expectedMem: resource.MustParse("200Mi"), + expectedCPULimit: mustParseResourcePointer("2"), + expectedMemLimit: mustParseResourcePointer("200Mi"), + labelSelector: "app = testingApp", + limitRange: &apiv1.LimitRangeItem{ + Type: apiv1.LimitTypeContainer, + Default: apiv1.ResourceList{ + apiv1.ResourceCPU: resource.MustParse("2"), + apiv1.ResourceMemory: resource.MustParse("100Mi"), + }, + }, + }, + { + name: "cap limits to max", + pod: limitsMatchRequestsPod, + vpas: []*vpa_types.VerticalPodAutoscaler{vpa}, + expectedAction: true, + expectedCPU: resource.MustParse("1.5"), + expectedMem: resource.MustParse("150Mi"), + expectedCPULimit: mustParseResourcePointer("1.5"), + expectedMemLimit: mustParseResourcePointer("150Mi"), + labelSelector: "app = testingApp", + limitRange: &apiv1.LimitRangeItem{ + Type: apiv1.LimitTypeContainer, + Max: apiv1.ResourceList{ + apiv1.ResourceCPU: resource.MustParse("1.5"), + apiv1.ResourceMemory: resource.MustParse("150Mi"), + }, + }, + }, } + for _, tc := range testCases { t.Run(fmt.Sprintf(tc.name), func(t *testing.T) { ctrl := gomock.NewController(t) @@ -274,6 +331,10 @@ func TestUpdateResourceRequests(t *testing.T) { vpaLister: vpaLister, recommendationProcessor: api.NewCappingRecommendationProcessor(), selectorFetcher: mockSelectorFetcher, + limitsRangeCalculator: &fakeLimitRangeCalculator{ + tc.limitRange, + tc.limitRangeCalcErr, + }, } resources, annotations, name, err := recommendationProvider.GetContainersResourcesForPod(tc.pod) @@ -320,6 +381,12 @@ func TestUpdateResourceRequests(t *testing.T) { } } else { assert.Empty(t, resources) + if tc.expectedError != nil { + assert.Error(t, err) + assert.Equal(t, tc.expectedError.Error(), err.Error()) + } else { + assert.NoError(t, err) + } } }) diff --git a/vertical-pod-autoscaler/pkg/admission-controller/logic/server.go b/vertical-pod-autoscaler/pkg/admission-controller/logic/server.go index eba2581f99a1..9e7b191ea379 100644 --- a/vertical-pod-autoscaler/pkg/admission-controller/logic/server.go +++ b/vertical-pod-autoscaler/pkg/admission-controller/logic/server.go @@ -38,11 +38,11 
@@ import ( type AdmissionServer struct { recommendationProvider RecommendationProvider podPreProcessor PodPreProcessor - limitsChecker LimitsChecker + limitsChecker LimitsRangeCalculator } // NewAdmissionServer constructs new AdmissionServer -func NewAdmissionServer(recommendationProvider RecommendationProvider, podPreProcessor PodPreProcessor, limitsChecker LimitsChecker) *AdmissionServer { +func NewAdmissionServer(recommendationProvider RecommendationProvider, podPreProcessor PodPreProcessor, limitsChecker LimitsRangeCalculator) *AdmissionServer { return &AdmissionServer{recommendationProvider, podPreProcessor, limitsChecker} } @@ -74,15 +74,10 @@ func (s *AdmissionServer) getPatchesForPodResourceRequest(raw []byte, namespace annotationsPerContainer = vpa_api_util.ContainerToAnnotationsMap{} } - limitsHints, err := s.limitsChecker.NeedsLimits(&pod, containersResources) - if err != nil { - return nil, err - } - patches := []patchRecord{} updatesAnnotation := []string{} for i, containerResources := range containersResources { - newPatches, newUpdatesAnnotation := s.getContainerPatch(pod, i, annotationsPerContainer, containerResources, limitsHints) + newPatches, newUpdatesAnnotation := s.getContainerPatch(pod, i, annotationsPerContainer, containerResources) patches = append(patches, newPatches...) updatesAnnotation = append(updatesAnnotation, newUpdatesAnnotation) } @@ -126,7 +121,7 @@ func getAddResourceRequirementValuePatch(i int, kind string, resource v1.Resourc Value: quantity.String()} } -func (s *AdmissionServer) getContainerPatch(pod v1.Pod, i int, annotationsPerContainer vpa_api_util.ContainerToAnnotationsMap, containerResources ContainerResources, limitsHints LimitsHints) ([]patchRecord, string) { +func (s *AdmissionServer) getContainerPatch(pod v1.Pod, i int, annotationsPerContainer vpa_api_util.ContainerToAnnotationsMap, containerResources ContainerResources) ([]patchRecord, string) { var patches []patchRecord // Add empty resources object if missing if pod.Spec.Containers[i].Resources.Limits == nil && @@ -139,25 +134,6 @@ func (s *AdmissionServer) getContainerPatch(pod v1.Pod, i int, annotationsPerCon annotations = make([]string, 0) } - if limitsHints != nil { - var resources v1.ResourceList - resourceNames := []v1.ResourceName{"cpu", "memory"} - for _, resource := range resourceNames { - if limitsHints.RequestsExceedsRatio(i, resource) { - // LimitRange cannot specify min ratio: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.12/#limitrangeitem-v1-core - // If we exceed max ratio cap limit to request*maxRatio. 
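// Illustrative sketch, not part of the patch: the numeric effect of the hint
// logic being removed here. With a MaxLimitRequestRatio of 5 on memory and a
// recommended request of 200Mi, the hinted limit was request * ratio = 1000Mi.
// Values are made up for the example.
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	request := resource.MustParse("200Mi")
	maxRatio := int64(5)
	hinted := resource.NewQuantity(request.Value()*maxRatio, request.Format)
	fmt.Println(hinted.String()) // 1000Mi
}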
- limit := limitsHints.HintedLimit(i, resource) - if resources == nil { - resources = make(v1.ResourceList) - } - resources[resource] = limit - annotations = append(annotations, fmt.Sprintf("%s limit decreased to respect ratio", resource)) - } - } - if len(resources) > 0 { - containerResources.Limits = resources - } - } patches, annotations = appendPatchesAndAnnotations(patches, annotations, pod.Spec.Containers[i].Resources.Requests, i, containerResources.Requests, "requests", "request") patches, annotations = appendPatchesAndAnnotations(patches, annotations, pod.Spec.Containers[i].Resources.Limits, i, containerResources.Limits, "limits", "limit") diff --git a/vertical-pod-autoscaler/pkg/admission-controller/logic/server_test.go b/vertical-pod-autoscaler/pkg/admission-controller/logic/server_test.go index 6d641f76adce..3cd4ad9d3568 100644 --- a/vertical-pod-autoscaler/pkg/admission-controller/logic/server_test.go +++ b/vertical-pod-autoscaler/pkg/admission-controller/logic/server_test.go @@ -311,7 +311,7 @@ func TestGetPatchesForResourceRequest(t *testing.T) { t.Run(fmt.Sprintf("test case: %s", tc.name), func(t *testing.T) { fppp := fakePodPreProcessor{e: tc.preProcessorError} frp := fakeRecommendationProvider{tc.recommendResources, tc.recommendAnnotations, tc.recommendName, tc.recommendError} - lc := NewNoopLimitsChecker() + lc := NewNoopLimitsCalculator() s := NewAdmissionServer(&frp, &fppp, lc) patches, err := s.getPatchesForPodResourceRequest(tc.podJson, tc.namespace) if tc.expectError == nil { @@ -359,7 +359,7 @@ func TestGetPatchesForResourceRequest_TwoReplacementResources(t *testing.T) { }`) recommendAnnotations := vpa_api_util.ContainerToAnnotationsMap{} frp := fakeRecommendationProvider{recommendResources, recommendAnnotations, "name", nil} - lc := NewNoopLimitsChecker() + lc := NewNoopLimitsCalculator() s := NewAdmissionServer(&frp, &fppp, lc) patches, err := s.getPatchesForPodResourceRequest(podJson, "default") assert.NoError(t, err) diff --git a/vertical-pod-autoscaler/pkg/admission-controller/main.go b/vertical-pod-autoscaler/pkg/admission-controller/main.go index 6afb57e0d825..f41c604cdeb0 100644 --- a/vertical-pod-autoscaler/pkg/admission-controller/main.go +++ b/vertical-pod-autoscaler/pkg/admission-controller/main.go @@ -55,8 +55,6 @@ var ( webhookPort = flag.String("webhook-port", "", "Server Port for Webhook") registerByURL = flag.Bool("register-by-url", false, "If set to true, admission webhook will be registered by URL (webhookAddress:webhookPort) instead of by service name") allowToAdjustLimits = flag.Bool("allow-to-adjust-limits", false, "If set to true, admission webhook will set limits per container too if needed") - - factoryForLimitsChecker interface{} ) func main() { @@ -82,18 +80,19 @@ func main() { target.NewVpaTargetSelectorFetcher(config, kubeClient, factory), target.NewBeta1TargetSelectorFetcher(config), ) - recommendationProvider := logic.NewRecommendationProvider(vpaLister, vpa_api_util.NewCappingRecommendationProcessor(), targetSelectorFetcher) podPreprocessor := logic.NewDefaultPodPreProcessor() - var limitsChecker logic.LimitsChecker + var limitsChecker logic.LimitsRangeCalculator if *allowToAdjustLimits { - limitsChecker, err = logic.NewLimitsChecker(factory) + limitsChecker, err = logic.NewLimitsRangeCalculator(factory) if err != nil { klog.Errorf("Failed to create limitsChecker, falling back to not checking limits. 
Error message: %s", err) - limitsChecker = logic.NewNoopLimitsChecker() + limitsChecker = logic.NewNoopLimitsCalculator() } } else { - limitsChecker = logic.NewNoopLimitsChecker() + limitsChecker = logic.NewNoopLimitsCalculator() } + recommendationProvider := logic.NewRecommendationProvider(limitsChecker, vpa_api_util.NewCappingRecommendationProcessor(), targetSelectorFetcher, vpaLister) + as := logic.NewAdmissionServer(recommendationProvider, podPreprocessor, limitsChecker) http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { as.Serve(w, r) diff --git a/vertical-pod-autoscaler/pkg/utils/test/test_limit_range.go b/vertical-pod-autoscaler/pkg/utils/test/test_limit_range.go new file mode 100644 index 000000000000..e62efe059bfe --- /dev/null +++ b/vertical-pod-autoscaler/pkg/utils/test/test_limit_range.go @@ -0,0 +1,92 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package test + +import ( + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// LimitRange returns an object that helps build a LimitRangeItem object for tests. +func LimitRange() *limitRangeBuilder { + return &limitRangeBuilder{} +} + +type limitRangeBuilder struct { + namespace string + name string + rangeType v1.LimitType + defaultValues *v1.ResourceList + max *v1.ResourceList +} + +func (lrb *limitRangeBuilder) WithName(name string) *limitRangeBuilder { + result := *lrb + result.name = name + return &result +} + +func (lrb *limitRangeBuilder) WithNamespace(namespace string) *limitRangeBuilder { + result := *lrb + result.namespace = namespace + return &result +} + +func (lrb *limitRangeBuilder) WithType(rangeType v1.LimitType) *limitRangeBuilder { + result := *lrb + result.rangeType = rangeType + return &result +} + +func (lrb *limitRangeBuilder) WithDefault(defaultValues v1.ResourceList) *limitRangeBuilder { + result := *lrb + result.defaultValues = &defaultValues + return &result +} + +func (lrb *limitRangeBuilder) WithMax(max v1.ResourceList) *limitRangeBuilder { + result := *lrb + result.max = &max + return &result +} + +func (lrb *limitRangeBuilder) Get() *v1.LimitRange { + result := v1.LimitRange{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: lrb.namespace, + Name: lrb.name, + }, + } + if lrb.defaultValues != nil || lrb.max != nil { + result.Spec = v1.LimitRangeSpec{ + Limits: []v1.LimitRangeItem{}, + } + } + if lrb.defaultValues != nil { + result.Spec.Limits = append(result.Spec.Limits, v1.LimitRangeItem{ + Type: lrb.rangeType, + Default: *lrb.defaultValues, + }) + } + if lrb.max != nil { + result.Spec.Limits = append(result.Spec.Limits, v1.LimitRangeItem{ + Type: lrb.rangeType, + Max: *lrb.max, + }) + } + return &result +} From c837a4129c1412740547a4cdc676f356268195b2 Mon Sep 17 00:00:00 2001 From: Joachim Bartosik Date: Fri, 24 May 2019 17:44:14 +0200 Subject: [PATCH 04/25] Rename limit_range_checker files to limit_range_calculator --- .../logic/{limitrange_checker.go => limit_range_calculator.go} | 0 ...{limitrange_checker_test.go => 
limit_range_calculator_test.go} | 0 2 files changed, 0 insertions(+), 0 deletions(-) rename vertical-pod-autoscaler/pkg/admission-controller/logic/{limitrange_checker.go => limit_range_calculator.go} (100%) rename vertical-pod-autoscaler/pkg/admission-controller/logic/{limitrange_checker_test.go => limit_range_calculator_test.go} (100%) diff --git a/vertical-pod-autoscaler/pkg/admission-controller/logic/limitrange_checker.go b/vertical-pod-autoscaler/pkg/admission-controller/logic/limit_range_calculator.go similarity index 100% rename from vertical-pod-autoscaler/pkg/admission-controller/logic/limitrange_checker.go rename to vertical-pod-autoscaler/pkg/admission-controller/logic/limit_range_calculator.go diff --git a/vertical-pod-autoscaler/pkg/admission-controller/logic/limitrange_checker_test.go b/vertical-pod-autoscaler/pkg/admission-controller/logic/limit_range_calculator_test.go similarity index 100% rename from vertical-pod-autoscaler/pkg/admission-controller/logic/limitrange_checker_test.go rename to vertical-pod-autoscaler/pkg/admission-controller/logic/limit_range_calculator_test.go From c5edfcf8c63a922569aacd6cfdd640a6dd77ed1f Mon Sep 17 00:00:00 2001 From: Joachim Bartosik Date: Tue, 28 May 2019 16:08:34 +0200 Subject: [PATCH 05/25] Address review comments I want to squash this commit before merging but have it separate to make review clearer. --- .../logic/limit_range_calculator.go | 15 ++-- .../logic/limit_range_calculator_test.go | 83 +++++++------------ .../logic/recommendation_provider.go | 13 ++- 3 files changed, 43 insertions(+), 68 deletions(-) diff --git a/vertical-pod-autoscaler/pkg/admission-controller/logic/limit_range_calculator.go b/vertical-pod-autoscaler/pkg/admission-controller/logic/limit_range_calculator.go index 0eff99d82228..1a20264fe34a 100644 --- a/vertical-pod-autoscaler/pkg/admission-controller/logic/limit_range_calculator.go +++ b/vertical-pod-autoscaler/pkg/admission-controller/logic/limit_range_calculator.go @@ -18,14 +18,14 @@ package logic import ( "fmt" + "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/client-go/informers" - - v1_listers "k8s.io/client-go/listers/core/v1" + listers "k8s.io/client-go/listers/core/v1" ) -// LimitsRangeCalculator checks for LimitRange and if container needs limits to be set +// LimitsRangeCalculator calculates limit range items that has the same effect as all limit range items present in the cluster. type LimitsRangeCalculator interface { // GetContainerLimitRangeItem returns LimitRangeItem that describes limitation on container limits in the given namespace. GetContainerLimitRangeItem(namespace string) (*v1.LimitRangeItem, error) @@ -38,7 +38,7 @@ func (lc *noopLimitsRangeCalculator) GetContainerLimitRangeItem(namespace string } type limitsChecker struct { - limitRangeLister v1_listers.LimitRangeLister + limitRangeLister listers.LimitRangeLister } // NewLimitsRangeCalculator returns a limitsChecker or an error it encountered when attempting to create it. 
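// Illustrative sketch, not part of the patch: the informer start-and-sync
// pattern the constructor relies on, shown against a fake clientset so it is
// runnable on its own. The startAndSync name is made up.
package main

import (
	"fmt"

	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes/fake"
)

func startAndSync(factory informers.SharedInformerFactory) error {
	stopCh := make(chan struct{})
	// Touching the lister registers the LimitRange informer before Start.
	factory.Core().V1().LimitRanges().Lister()
	factory.Start(stopCh)
	// Every informer reported by WaitForCacheSync must have synced; otherwise
	// the factory cannot be trusted to serve LimitRange lookups.
	for informerType, ok := range factory.WaitForCacheSync(stopCh) {
		if !ok {
			return fmt.Errorf("informer %v did not sync", informerType)
		}
	}
	return nil
}

func main() {
	factory := informers.NewSharedInformerFactory(fake.NewSimpleClientset(), 0)
	fmt.Println(startAndSync(factory)) // <nil>
}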
@@ -51,7 +51,7 @@ func NewLimitsRangeCalculator(f informers.SharedInformerFactory) (*limitsChecker f.Start(stopCh) for _, ok := range f.WaitForCacheSync(stopCh) { if !ok { - if f.Core().V1().LimitRanges().Informer().HasSynced() { + if !f.Core().V1().LimitRanges().Informer().HasSynced() { return nil, fmt.Errorf("informer did not sync") } } @@ -74,10 +74,7 @@ func (lc *limitsChecker) GetContainerLimitRangeItem(namespace string) (*v1.Limit for _, lri := range lr.Spec.Limits { if lri.Type == v1.LimitTypeContainer && (lri.Max != nil || lri.Default != nil) { // TODO: handle multiple limit ranges matching a pod. - return &v1.LimitRangeItem{ - Max: lri.Max.DeepCopy(), - Default: lri.Default.DeepCopy(), - }, nil + return &lri, nil } } } diff --git a/vertical-pod-autoscaler/pkg/admission-controller/logic/limit_range_calculator_test.go b/vertical-pod-autoscaler/pkg/admission-controller/logic/limit_range_calculator_test.go index 91c464955d52..c7f6a979db8f 100644 --- a/vertical-pod-autoscaler/pkg/admission-controller/logic/limit_range_calculator_test.go +++ b/vertical-pod-autoscaler/pkg/admission-controller/logic/limit_range_calculator_test.go @@ -17,33 +17,24 @@ limitations under the License. package logic import ( - "github.com/stretchr/testify/assert" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/client-go/informers" - - //"fmt" "testing" - //"k8s.io/apimachinery/pkg/runtime" - //"k8s.io/client-go/informers" - "k8s.io/client-go/kubernetes/fake" - apiv1 "k8s.io/api/core/v1" - //"k8s.io/apimachinery/pkg/api/resource" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/test" + "k8s.io/client-go/informers" + "k8s.io/client-go/kubernetes/fake" + + "github.com/stretchr/testify/assert" ) -func getPod() *apiv1.Pod { - labels := map[string]string{"app": "testingApp"} - return test.Pod().WithName("test_uninitialized").AddContainer(test.BuildTestContainer("container1", "", "")).WithLabels(labels).Get() -} +const defaultNamespace = "default" func TestNewNoopLimitsChecker(t *testing.T) { nlc := NewNoopLimitsCalculator() - limitRange, err := nlc.GetContainerLimitRangeItem(getPod().Namespace) - if assert.NoError(t, err) { - assert.Nil(t, limitRange) - } + limitRange, err := nlc.GetContainerLimitRangeItem(defaultNamespace) + assert.NoError(t, err) + assert.Nil(t, limitRange) } func TestNoLimitRange(t *testing.T) { @@ -52,53 +43,45 @@ func TestNoLimitRange(t *testing.T) { lc, err := NewLimitsRangeCalculator(factory) if assert.NoError(t, err) { - limitRange, err := lc.GetContainerLimitRangeItem(getPod().Namespace) - if assert.NoError(t, err) { - assert.Nil(t, limitRange) - } + limitRange, err := lc.GetContainerLimitRangeItem(defaultNamespace) + assert.NoError(t, err) + assert.Nil(t, limitRange) } } -func TestUpdateResourceLimits(t *testing.T) { - +func TestGetContainerLimitRangeItem(t *testing.T) { + containerLimitRangeWithMax := test.LimitRange().WithName("default").WithNamespace(defaultNamespace).WithType(apiv1.LimitTypeContainer).WithMax(test.Resources("2", "2")).Get() + containerLimitRangeWithDefault := test.LimitRange().WithName("default").WithNamespace(defaultNamespace).WithType(apiv1.LimitTypeContainer).WithDefault(test.Resources("2", "2")).Get() testCases := []struct { - name string - pod *apiv1.Pod - limitRanges []runtime.Object - expectErr error - expectLimits *apiv1.LimitRangeItem + name string + limitRanges []runtime.Object + expectedErr error + expectedLimits *apiv1.LimitRangeItem }{ { name: "no matching limit ranges", - pod: getPod(), limitRanges: []runtime.Object{ - 
test.LimitRange().WithName("different-namespace").WithNamespace("different").WithType(apiv1.LimitTypePod).WithMax(test.Resources("2", "2")).Get(), - test.LimitRange().WithName("differen-type").WithNamespace("default").WithType(apiv1.LimitTypePersistentVolumeClaim).WithMax(test.Resources("2", "2")).Get(), + test.LimitRange().WithName("different-namespace").WithNamespace("different").WithType(apiv1.LimitTypeContainer).WithMax(test.Resources("2", "2")).Get(), + test.LimitRange().WithName("differen-type").WithNamespace(defaultNamespace).WithType(apiv1.LimitTypePersistentVolumeClaim).WithMax(test.Resources("2", "2")).Get(), }, - expectErr: nil, - expectLimits: nil, + expectedErr: nil, + expectedLimits: nil, }, { name: "matching container limit range", - pod: getPod(), limitRanges: []runtime.Object{ - test.LimitRange().WithName("default").WithNamespace("default").WithType(apiv1.LimitTypeContainer).WithMax(test.Resources("2", "2")).Get(), - }, - expectErr: nil, - expectLimits: &apiv1.LimitRangeItem{ - Max: test.Resources("2", "2"), + containerLimitRangeWithMax, }, + expectedErr: nil, + expectedLimits: &containerLimitRangeWithMax.Spec.Limits[0], }, { name: "with default value", - pod: getPod(), limitRanges: []runtime.Object{ - test.LimitRange().WithName("default").WithNamespace("default").WithType(apiv1.LimitTypeContainer).WithDefault(test.Resources("2", "2")).Get(), - }, - expectErr: nil, - expectLimits: &apiv1.LimitRangeItem{ - Default: test.Resources("2", "2"), + containerLimitRangeWithDefault, }, + expectedErr: nil, + expectedLimits: &containerLimitRangeWithDefault.Spec.Limits[0], }, } @@ -108,15 +91,13 @@ func TestUpdateResourceLimits(t *testing.T) { factory := informers.NewSharedInformerFactory(cs, 0) lc, err := NewLimitsRangeCalculator(factory) if assert.NoError(t, err) { - labels := map[string]string{"app": "testingApp"} - pod := test.Pod().WithName("test_uninitialized").AddContainer(test.BuildTestContainer("container1", "", "")).WithLabels(labels).Get() - limitRange, err := lc.GetContainerLimitRangeItem(pod.Namespace) - if tc.expectErr == nil { + limitRange, err := lc.GetContainerLimitRangeItem(defaultNamespace) + if tc.expectedErr == nil { assert.NoError(t, err) } else { assert.Error(t, err) } - assert.Equal(t, tc.expectLimits, limitRange) + assert.Equal(t, tc.expectedLimits, limitRange) } }) diff --git a/vertical-pod-autoscaler/pkg/admission-controller/logic/recommendation_provider.go b/vertical-pod-autoscaler/pkg/admission-controller/logic/recommendation_provider.go index 3c0c4b24dc08..51b87c3d30e4 100644 --- a/vertical-pod-autoscaler/pkg/admission-controller/logic/recommendation_provider.go +++ b/vertical-pod-autoscaler/pkg/admission-controller/logic/recommendation_provider.go @@ -117,19 +117,16 @@ func proportionallyCapLimitToMax(recommendedRequest, recommendedLimit, maxLimit return scaledRequest, maxLimit } -func proportionallyCapLimitsToMax(recommendedRequests v1.ResourceList, cpuLimit, memLimit, maxCpuLimit, maxMemLimit *resource.Quantity) ContainerResources { +func proportionallyCapResourcesToMaxLimit(recommendedRequests v1.ResourceList, cpuLimit, memLimit, maxCpuLimit, maxMemLimit *resource.Quantity) ContainerResources { scaledCpuRequest, scaledCpuLimit := proportionallyCapLimitToMax(recommendedRequests.Cpu(), cpuLimit, maxCpuLimit) scaledMemRequest, scaledMemLimit := proportionallyCapLimitToMax(recommendedRequests.Memory(), memLimit, maxMemLimit) result := newContainerResources() - if scaledCpuRequest != nil { - result.Requests[v1.ResourceCPU] = *scaledCpuRequest - } + + 
result.Requests[v1.ResourceCPU] = *scaledCpuRequest + result.Requests[v1.ResourceMemory] = *scaledMemRequest if scaledCpuLimit != nil { result.Limits[v1.ResourceCPU] = *scaledCpuLimit } - if scaledMemRequest != nil { - result.Requests[v1.ResourceMemory] = *scaledMemRequest - } if scaledMemLimit != nil { result.Limits[v1.ResourceMemory] = *scaledMemLimit } @@ -171,7 +168,7 @@ func GetContainersResources(pod *v1.Pod, podRecommendation vpa_types.Recommended "Failed to keep memory limit to request proportion of %d to %d with recommended request of %d milliBytes; doesn't fit in int64. Capping limit to MaxInt64", container.Resources.Limits.Memory().MilliValue(), container.Resources.Requests.Memory().MilliValue(), recommendation.Target.Memory().MilliValue())) } - resources[i] = proportionallyCapLimitsToMax(recommendation.Target, cpuLimit, memLimit, maxCpuLimit, maxMemLimit) + resources[i] = proportionallyCapResourcesToMaxLimit(recommendation.Target, cpuLimit, memLimit, maxCpuLimit, maxMemLimit) } return resources } From c891835da5c35dc72b6bfd9a23a00873b1164f96 Mon Sep 17 00:00:00 2001 From: Joachim Bartosik Date: Tue, 28 May 2019 18:09:28 +0200 Subject: [PATCH 06/25] Extract code to ba shared between updater and admission controller To be squashed Conflicts: vertical-pod-autoscaler/pkg/admission-controller/logic/server.go vertical-pod-autoscaler/pkg/admission-controller/logic/server_test.go vertical-pod-autoscaler/pkg/admission-controller/main.go --- .../logic/limit_range_calculator.go | 4 +- .../logic/recommendation_provider.go | 131 +++--------------- .../logic/recommendation_provider_test.go | 4 +- .../pkg/admission-controller/logic/server.go | 6 +- .../admission-controller/logic/server_test.go | 22 +-- .../pkg/admission-controller/main.go | 25 ++-- .../utils/vpa/limit_and_request_scaling.go | 110 +++++++++++++++ .../vpa/limit_and_request_scaling_test.go | 96 +++++++++++++ .../utils/vpa/limit_proportion_maintaining.go | 45 ++++++ .../pkg/utils/vpa/recommendation_processor.go | 4 +- 10 files changed, 301 insertions(+), 146 deletions(-) create mode 100644 vertical-pod-autoscaler/pkg/utils/vpa/limit_and_request_scaling.go create mode 100644 vertical-pod-autoscaler/pkg/utils/vpa/limit_and_request_scaling_test.go create mode 100644 vertical-pod-autoscaler/pkg/utils/vpa/limit_proportion_maintaining.go diff --git a/vertical-pod-autoscaler/pkg/admission-controller/logic/limit_range_calculator.go b/vertical-pod-autoscaler/pkg/admission-controller/logic/limit_range_calculator.go index 1a20264fe34a..add40387fb7f 100644 --- a/vertical-pod-autoscaler/pkg/admission-controller/logic/limit_range_calculator.go +++ b/vertical-pod-autoscaler/pkg/admission-controller/logic/limit_range_calculator.go @@ -25,8 +25,8 @@ import ( listers "k8s.io/client-go/listers/core/v1" ) -// LimitsRangeCalculator calculates limit range items that has the same effect as all limit range items present in the cluster. -type LimitsRangeCalculator interface { +// LimitRangeCalculator calculates limit range items that has the same effect as all limit range items present in the cluster. +type LimitRangeCalculator interface { // GetContainerLimitRangeItem returns LimitRangeItem that describes limitation on container limits in the given namespace. 
GetContainerLimitRangeItem(namespace string) (*v1.LimitRangeItem, error) } diff --git a/vertical-pod-autoscaler/pkg/admission-controller/logic/recommendation_provider.go b/vertical-pod-autoscaler/pkg/admission-controller/logic/recommendation_provider.go index 51b87c3d30e4..558ae148d77a 100644 --- a/vertical-pod-autoscaler/pkg/admission-controller/logic/recommendation_provider.go +++ b/vertical-pod-autoscaler/pkg/admission-controller/logic/recommendation_provider.go @@ -21,8 +21,6 @@ import ( "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/labels" - "math" - "math/big" vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta2" vpa_lister "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/client/listers/autoscaling.k8s.io/v1beta2" @@ -31,33 +29,20 @@ import ( "k8s.io/klog" ) -// ContainerResources holds resources request for container -type ContainerResources struct { - Limits v1.ResourceList - Requests v1.ResourceList -} - -func newContainerResources() ContainerResources { - return ContainerResources{ - Requests: v1.ResourceList{}, - Limits: v1.ResourceList{}, - } -} - // RecommendationProvider gets current recommendation, annotations and vpaName for the given pod. type RecommendationProvider interface { - GetContainersResourcesForPod(pod *v1.Pod) ([]ContainerResources, vpa_api_util.ContainerToAnnotationsMap, string, error) + GetContainersResourcesForPod(pod *v1.Pod) ([]vpa_api_util.ContainerResources, vpa_api_util.ContainerToAnnotationsMap, string, error) } type recommendationProvider struct { - limitsRangeCalculator LimitsRangeCalculator + limitsRangeCalculator LimitRangeCalculator recommendationProcessor vpa_api_util.RecommendationProcessor selectorFetcher target.VpaTargetSelectorFetcher vpaLister vpa_lister.VerticalPodAutoscalerLister } // NewRecommendationProvider constructs the recommendation provider that list VPAs and can be used to determine recommendations for pods. -func NewRecommendationProvider(calculator LimitsRangeCalculator, recommendationProcessor vpa_api_util.RecommendationProcessor, +func NewRecommendationProvider(calculator LimitRangeCalculator, recommendationProcessor vpa_api_util.RecommendationProcessor, selectorFetcher target.VpaTargetSelectorFetcher, vpaLister vpa_lister.VerticalPodAutoscalerLister) *recommendationProvider { return &recommendationProvider{ limitsRangeCalculator: calculator, @@ -67,108 +52,32 @@ func NewRecommendationProvider(calculator LimitsRangeCalculator, recommendationP } } -// scaleQuantityProportionally returns value which has the same proportion to scaledQuantity as scaleResult has to scaleBase -// It also returns a bool indicating if it had to cap result to MaxInt64 milliunits. 
-func scaleQuantityProportionally(scaledQuantity, scaleBase, scaleResult *resource.Quantity) (*resource.Quantity, bool) { - originalMilli := big.NewInt(scaledQuantity.MilliValue()) - scaleBaseMilli := big.NewInt(scaleBase.MilliValue()) - scaleResultMilli := big.NewInt(scaleResult.MilliValue()) - var scaledOriginal big.Int - scaledOriginal.Mul(originalMilli, scaleResultMilli) - scaledOriginal.Div(&scaledOriginal, scaleBaseMilli) - if scaledOriginal.IsInt64() { - return resource.NewMilliQuantity(scaledOriginal.Int64(), scaledQuantity.Format), false - } - return resource.NewMilliQuantity(math.MaxInt64, scaledQuantity.Format), true -} - -func getProportionalLimit(originalLimit, originalRequest, recommendedRequest, defaultLimit *resource.Quantity) (limit *resource.Quantity, capped bool) { - if originalLimit == nil || originalLimit.Value() == 0 && defaultLimit != nil { - originalLimit = defaultLimit - } - // originalLimit not set, don't set limit. - if originalLimit == nil || originalLimit.Value() == 0 { - return nil, false - } - // originalLimit set but originalRequest not set - K8s will treat the pod as if they were equal, - // recommend limit equal to request - if originalRequest == nil || originalRequest.Value() == 0 { - result := *recommendedRequest - return &result, false - } - // originalLimit and originalRequest are set. If they are equal recommend limit equal to request. - if originalRequest.MilliValue() == originalLimit.MilliValue() { - result := *recommendedRequest - return &result, false - } - - // Input and output milli values should fit in int64 but intermediate values might be bigger. - return scaleQuantityProportionally( /*scaledQuantity=*/ originalLimit /*scaleBase=*/, originalRequest /*scaleResult=*/, recommendedRequest) -} - -func proportionallyCapLimitToMax(recommendedRequest, recommendedLimit, maxLimit *resource.Quantity) (request, limit *resource.Quantity) { - if recommendedLimit == nil || maxLimit == nil || maxLimit.IsZero() { - return recommendedRequest, recommendedLimit - } - if recommendedLimit.Cmp(*maxLimit) <= 0 { - return recommendedRequest, recommendedLimit - } - scaledRequest, _ := scaleQuantityProportionally(recommendedRequest, recommendedLimit, maxLimit) - return scaledRequest, maxLimit -} - -func proportionallyCapResourcesToMaxLimit(recommendedRequests v1.ResourceList, cpuLimit, memLimit, maxCpuLimit, maxMemLimit *resource.Quantity) ContainerResources { - scaledCpuRequest, scaledCpuLimit := proportionallyCapLimitToMax(recommendedRequests.Cpu(), cpuLimit, maxCpuLimit) - scaledMemRequest, scaledMemLimit := proportionallyCapLimitToMax(recommendedRequests.Memory(), memLimit, maxMemLimit) - result := newContainerResources() - - result.Requests[v1.ResourceCPU] = *scaledCpuRequest - result.Requests[v1.ResourceMemory] = *scaledMemRequest - if scaledCpuLimit != nil { - result.Limits[v1.ResourceCPU] = *scaledCpuLimit - } - if scaledMemLimit != nil { - result.Limits[v1.ResourceMemory] = *scaledMemLimit - } - return result -} - // GetContainersResources returns the recommended resources for each container in the given pod in the same order they are specified in the pod.Spec. 
func GetContainersResources(pod *v1.Pod, podRecommendation vpa_types.RecommendedPodResources, limitRange *v1.LimitRangeItem, - annotations vpa_api_util.ContainerToAnnotationsMap) []ContainerResources { - resources := make([]ContainerResources, len(pod.Spec.Containers)) + annotations vpa_api_util.ContainerToAnnotationsMap) []vpa_api_util.ContainerResources { + resources := make([]vpa_api_util.ContainerResources, len(pod.Spec.Containers)) + var defaultCpu, defaultMem, maxCpuLimit, maxMemLimit *resource.Quantity + if limitRange != nil { + defaultCpu = limitRange.Default.Cpu() + defaultMem = limitRange.Default.Memory() + maxCpuLimit = limitRange.Max.Cpu() + maxMemLimit = limitRange.Max.Memory() + } for i, container := range pod.Spec.Containers { - recommendation := vpa_api_util.GetRecommendationForContainer(container.Name, &podRecommendation) if recommendation == nil { klog.V(2).Infof("no matching recommendation found for container %s", container.Name) continue } - - var defaultCpu, defaultMem, maxCpuLimit, maxMemLimit *resource.Quantity - if limitRange != nil { - defaultCpu = limitRange.Default.Cpu() - defaultMem = limitRange.Default.Memory() - maxCpuLimit = limitRange.Max.Cpu() - maxMemLimit = limitRange.Max.Memory() - } - cpuLimit, capped := getProportionalLimit(container.Resources.Limits.Cpu(), container.Resources.Requests.Cpu(), recommendation.Target.Cpu(), defaultCpu) - if capped { - annotations[container.Name] = append( - annotations[container.Name], - fmt.Sprintf( - "Failed to keep CPU limit to request proportion of %d to %d with recommended request of %d milliCPU; doesn't fit in int64. Capping limit to MaxInt64", - container.Resources.Limits.Cpu().MilliValue(), container.Resources.Requests.Cpu().MilliValue(), recommendation.Target.Cpu().MilliValue())) + cpuLimit, annotation := vpa_api_util.GetProportionalLimit(container.Resources.Limits.Cpu(), container.Resources.Requests.Cpu(), recommendation.Target.Cpu(), defaultCpu) + if annotation != "" { + annotations[container.Name] = append(annotations[container.Name], fmt.Sprintf("CPU: %s", annotation)) } - memLimit, capped := getProportionalLimit(container.Resources.Limits.Memory(), container.Resources.Requests.Memory(), recommendation.Target.Memory(), defaultMem) - if capped { - annotations[container.Name] = append( - annotations[container.Name], - fmt.Sprintf( - "Failed to keep memory limit to request proportion of %d to %d with recommended request of %d milliBytes; doesn't fit in int64. Capping limit to MaxInt64", - container.Resources.Limits.Memory().MilliValue(), container.Resources.Requests.Memory().MilliValue(), recommendation.Target.Memory().MilliValue())) + memLimit, annotation := vpa_api_util.GetProportionalLimit(container.Resources.Limits.Memory(), container.Resources.Requests.Memory(), recommendation.Target.Memory(), defaultMem) + if annotation != "" { + annotations[container.Name] = append(annotations[container.Name], fmt.Sprintf("memory: %s", annotation)) } - resources[i] = proportionallyCapResourcesToMaxLimit(recommendation.Target, cpuLimit, memLimit, maxCpuLimit, maxMemLimit) + resources[i] = vpa_api_util.ProportionallyCapResourcesToMaxLimit(recommendation.Target, cpuLimit, memLimit, maxCpuLimit, maxMemLimit) } return resources } @@ -204,7 +113,7 @@ func (p *recommendationProvider) getMatchingVPA(pod *v1.Pod) *vpa_types.Vertical // GetContainersResourcesForPod returns recommended request for a given pod, annotations and name of controlling VPA. // The returned slice corresponds 1-1 to containers in the Pod. 
-func (p *recommendationProvider) GetContainersResourcesForPod(pod *v1.Pod) ([]ContainerResources, vpa_api_util.ContainerToAnnotationsMap, string, error) { +func (p *recommendationProvider) GetContainersResourcesForPod(pod *v1.Pod) ([]vpa_api_util.ContainerResources, vpa_api_util.ContainerToAnnotationsMap, string, error) { klog.V(2).Infof("updating requirements for pod %s.", pod.Name) vpaConfig := p.getMatchingVPA(pod) if vpaConfig == nil { diff --git a/vertical-pod-autoscaler/pkg/admission-controller/logic/recommendation_provider_test.go b/vertical-pod-autoscaler/pkg/admission-controller/logic/recommendation_provider_test.go index 1624ef118e18..aa9015dc0fed 100644 --- a/vertical-pod-autoscaler/pkg/admission-controller/logic/recommendation_provider_test.go +++ b/vertical-pod-autoscaler/pkg/admission-controller/logic/recommendation_provider_test.go @@ -261,8 +261,8 @@ func TestUpdateResourceRequests(t *testing.T) { labelSelector: "app = testingApp", annotations: vpa_api_util.ContainerToAnnotationsMap{ containerName: []string{ - "Failed to keep CPU limit to request proportion of 10000 to 1000 with recommended request of -9223372036854775808 milliCPU; doesn't fit in int64. Capping limit to MaxInt64", - "Failed to keep memory limit to request proportion of 1048576000000 to 104857600000 with recommended request of -9223372036854775808 milliBytes; doesn't fit in int64. Capping limit to MaxInt64", + "CPU: failed to keep limit to request proportion of 10 to 1 with recommended request of 1Ei; doesn't fit in int64. Capping limit to MaxInt64 milliunits", + "memory: failed to keep limit to request proportion of 1000Mi to 100Mi with recommended request of 1Ei; doesn't fit in int64. Capping limit to MaxInt64 milliunits", }, }, }, diff --git a/vertical-pod-autoscaler/pkg/admission-controller/logic/server.go b/vertical-pod-autoscaler/pkg/admission-controller/logic/server.go index 9e7b191ea379..5ceb29a025aa 100644 --- a/vertical-pod-autoscaler/pkg/admission-controller/logic/server.go +++ b/vertical-pod-autoscaler/pkg/admission-controller/logic/server.go @@ -38,11 +38,11 @@ import ( type AdmissionServer struct { recommendationProvider RecommendationProvider podPreProcessor PodPreProcessor - limitsChecker LimitsRangeCalculator + limitsChecker LimitRangeCalculator } // NewAdmissionServer constructs new AdmissionServer -func NewAdmissionServer(recommendationProvider RecommendationProvider, podPreProcessor PodPreProcessor, limitsChecker LimitsRangeCalculator) *AdmissionServer { +func NewAdmissionServer(recommendationProvider RecommendationProvider, podPreProcessor PodPreProcessor, limitsChecker LimitRangeCalculator) *AdmissionServer { return &AdmissionServer{recommendationProvider, podPreProcessor, limitsChecker} } @@ -121,7 +121,7 @@ func getAddResourceRequirementValuePatch(i int, kind string, resource v1.Resourc Value: quantity.String()} } -func (s *AdmissionServer) getContainerPatch(pod v1.Pod, i int, annotationsPerContainer vpa_api_util.ContainerToAnnotationsMap, containerResources ContainerResources) ([]patchRecord, string) { +func (s *AdmissionServer) getContainerPatch(pod v1.Pod, i int, annotationsPerContainer vpa_api_util.ContainerToAnnotationsMap, containerResources vpa_api_util.ContainerResources) ([]patchRecord, string) { var patches []patchRecord // Add empty resources object if missing if pod.Spec.Containers[i].Resources.Limits == nil && diff --git a/vertical-pod-autoscaler/pkg/admission-controller/logic/server_test.go b/vertical-pod-autoscaler/pkg/admission-controller/logic/server_test.go index 
3cd4ad9d3568..18e30f326128 100644 --- a/vertical-pod-autoscaler/pkg/admission-controller/logic/server_test.go +++ b/vertical-pod-autoscaler/pkg/admission-controller/logic/server_test.go @@ -45,13 +45,13 @@ func (fpp *fakePodPreProcessor) Process(pod apiv1.Pod) (apiv1.Pod, error) { } type fakeRecommendationProvider struct { - resources []ContainerResources + resources []vpa_api_util.ContainerResources containerToAnnotations vpa_api_util.ContainerToAnnotationsMap name string e error } -func (frp *fakeRecommendationProvider) GetContainersResourcesForPod(pod *apiv1.Pod) ([]ContainerResources, vpa_api_util.ContainerToAnnotationsMap, string, error) { +func (frp *fakeRecommendationProvider) GetContainersResourcesForPod(pod *apiv1.Pod) ([]vpa_api_util.ContainerResources, vpa_api_util.ContainerToAnnotationsMap, string, error) { return frp.resources, frp.containerToAnnotations, frp.name, frp.e } @@ -127,7 +127,7 @@ func TestGetPatchesForResourceRequest(t *testing.T) { podJson []byte namespace string preProcessorError error - recommendResources []ContainerResources + recommendResources []vpa_api_util.ContainerResources recommendAnnotations vpa_api_util.ContainerToAnnotationsMap recommendName string recommendError error @@ -139,7 +139,7 @@ func TestGetPatchesForResourceRequest(t *testing.T) { podJson: []byte("{"), namespace: "default", preProcessorError: nil, - recommendResources: []ContainerResources{}, + recommendResources: []vpa_api_util.ContainerResources{}, recommendAnnotations: vpa_api_util.ContainerToAnnotationsMap{}, recommendName: "name", expectError: fmt.Errorf("unexpected end of JSON input"), @@ -149,7 +149,7 @@ func TestGetPatchesForResourceRequest(t *testing.T) { podJson: []byte("{}"), namespace: "default", preProcessorError: fmt.Errorf("bad pod"), - recommendResources: []ContainerResources{}, + recommendResources: []vpa_api_util.ContainerResources{}, recommendAnnotations: vpa_api_util.ContainerToAnnotationsMap{}, recommendName: "name", expectError: fmt.Errorf("bad pod"), @@ -163,7 +163,7 @@ func TestGetPatchesForResourceRequest(t *testing.T) { } }`), namespace: "default", - recommendResources: []ContainerResources{ + recommendResources: []vpa_api_util.ContainerResources{ { Requests: apiv1.ResourceList{ cpu: resource.MustParse("1"), @@ -196,7 +196,7 @@ func TestGetPatchesForResourceRequest(t *testing.T) { } }`), namespace: "default", - recommendResources: []ContainerResources{ + recommendResources: []vpa_api_util.ContainerResources{ { Requests: apiv1.ResourceList{ cpu: resource.MustParse("1"), @@ -228,7 +228,7 @@ func TestGetPatchesForResourceRequest(t *testing.T) { } }`), namespace: "default", - recommendResources: []ContainerResources{ + recommendResources: []vpa_api_util.ContainerResources{ { Requests: apiv1.ResourceList{ cpu: resource.MustParse("1"), @@ -259,7 +259,7 @@ func TestGetPatchesForResourceRequest(t *testing.T) { } }`), namespace: "default", - recommendResources: []ContainerResources{ + recommendResources: []vpa_api_util.ContainerResources{ { Limits: apiv1.ResourceList{ cpu: resource.MustParse("1"), @@ -292,7 +292,7 @@ func TestGetPatchesForResourceRequest(t *testing.T) { } }`), namespace: "default", - recommendResources: []ContainerResources{ + recommendResources: []vpa_api_util.ContainerResources{ { Limits: apiv1.ResourceList{ cpu: resource.MustParse("1"), @@ -335,7 +335,7 @@ func TestGetPatchesForResourceRequest(t *testing.T) { func TestGetPatchesForResourceRequest_TwoReplacementResources(t *testing.T) { fppp := fakePodPreProcessor{} - recommendResources := 
[]ContainerResources{ + recommendResources := []vpa_api_util.ContainerResources{ { Requests: apiv1.ResourceList{ cpu: resource.MustParse("1"), diff --git a/vertical-pod-autoscaler/pkg/admission-controller/main.go b/vertical-pod-autoscaler/pkg/admission-controller/main.go index f41c604cdeb0..7eefcd8f8f13 100644 --- a/vertical-pod-autoscaler/pkg/admission-controller/main.go +++ b/vertical-pod-autoscaler/pkg/admission-controller/main.go @@ -48,13 +48,12 @@ var ( tlsPrivateKey: flag.String("tls-private-key", "/etc/tls-certs/serverKey.pem", "Path to server certificate key PEM file."), } - port = flag.Int("port", 8000, "The port to listen on.") - address = flag.String("address", ":8944", "The address to expose Prometheus metrics.") - namespace = os.Getenv("NAMESPACE") - webhookAddress = flag.String("webhook-address", "", "Address under which webhook is registered. Used when registerByURL is set to true.") - webhookPort = flag.String("webhook-port", "", "Server Port for Webhook") - registerByURL = flag.Bool("register-by-url", false, "If set to true, admission webhook will be registered by URL (webhookAddress:webhookPort) instead of by service name") - allowToAdjustLimits = flag.Bool("allow-to-adjust-limits", false, "If set to true, admission webhook will set limits per container too if needed") + port = flag.Int("port", 8000, "The port to listen on.") + address = flag.String("address", ":8944", "The address to expose Prometheus metrics.") + namespace = os.Getenv("NAMESPACE") + webhookAddress = flag.String("webhook-address", "", "Address under which webhook is registered. Used when registerByURL is set to true.") + webhookPort = flag.String("webhook-port", "", "Server Port for Webhook") + registerByURL = flag.Bool("register-by-url", false, "If set to true, admission webhook will be registered by URL (webhookAddress:webhookPort) instead of by service name") ) func main() { @@ -81,14 +80,10 @@ func main() { target.NewBeta1TargetSelectorFetcher(config), ) podPreprocessor := logic.NewDefaultPodPreProcessor() - var limitsChecker logic.LimitsRangeCalculator - if *allowToAdjustLimits { - limitsChecker, err = logic.NewLimitsRangeCalculator(factory) - if err != nil { - klog.Errorf("Failed to create limitsChecker, falling back to not checking limits. Error message: %s", err) - limitsChecker = logic.NewNoopLimitsCalculator() - } - } else { + var limitsChecker logic.LimitRangeCalculator + limitsChecker, err = logic.NewLimitsRangeCalculator(factory) + if err != nil { + klog.Errorf("Failed to create limitsChecker, falling back to not checking limits. Error message: %s", err) limitsChecker = logic.NewNoopLimitsCalculator() } recommendationProvider := logic.NewRecommendationProvider(limitsChecker, vpa_api_util.NewCappingRecommendationProcessor(), targetSelectorFetcher, vpaLister) diff --git a/vertical-pod-autoscaler/pkg/utils/vpa/limit_and_request_scaling.go b/vertical-pod-autoscaler/pkg/utils/vpa/limit_and_request_scaling.go new file mode 100644 index 000000000000..5f2e19e74e75 --- /dev/null +++ b/vertical-pod-autoscaler/pkg/utils/vpa/limit_and_request_scaling.go @@ -0,0 +1,110 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package api + +import ( + "fmt" + "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + "math" + "math/big" +) + +// ContainerResources holds resources request for container +type ContainerResources struct { + Limits v1.ResourceList + Requests v1.ResourceList +} + +func newContainerResources() ContainerResources { + return ContainerResources{ + Requests: v1.ResourceList{}, + Limits: v1.ResourceList{}, + } +} + +// GetProportionalLimit returns limit that will be in the same proportion to recommended request as original limit had to original request. +func GetProportionalLimit(originalLimit, originalRequest, recommendedRequest, defaultLimit *resource.Quantity) (*resource.Quantity, string) { + if originalLimit == nil || originalLimit.Value() == 0 && defaultLimit != nil { + originalLimit = defaultLimit + } + // originalLimit not set, don't set limit. + if originalLimit == nil || originalLimit.Value() == 0 { + return nil, "" + } + // originalLimit set but originalRequest not set - K8s will treat the pod as if they were equal, + // recommend limit equal to request + if originalRequest == nil || originalRequest.Value() == 0 { + result := *recommendedRequest + return &result, "" + } + // originalLimit and originalRequest are set. If they are equal recommend limit equal to request. + if originalRequest.MilliValue() == originalLimit.MilliValue() { + result := *recommendedRequest + return &result, "" + } + result, capped := scaleQuantityProportionally( /*scaledQuantity=*/ originalLimit /*scaleBase=*/, originalRequest /*scaleResult=*/, recommendedRequest) + if capped { + return result, "" + } + return result, fmt.Sprintf( + "failed to keep limit to request proportion of %s to %s with recommended request of %s; doesn't fit in int64. Capping limit to MaxInt64 milliunits", + originalLimit, originalRequest, recommendedRequest) +} + +// scaleQuantityProportionally returns value which has the same proportion to scaledQuantity as scaleResult has to scaleBase +// It also returns a bool indicating if it had to cap result to MaxInt64 milliunits. 
+func scaleQuantityProportionally(scaledQuantity, scaleBase, scaleResult *resource.Quantity) (*resource.Quantity, bool) { + originalMilli := big.NewInt(scaledQuantity.MilliValue()) + scaleBaseMilli := big.NewInt(scaleBase.MilliValue()) + scaleResultMilli := big.NewInt(scaleResult.MilliValue()) + var scaledOriginal big.Int + scaledOriginal.Mul(originalMilli, scaleResultMilli) + scaledOriginal.Div(&scaledOriginal, scaleBaseMilli) + if scaledOriginal.IsInt64() { + return resource.NewMilliQuantity(scaledOriginal.Int64(), scaledQuantity.Format), false + } + return resource.NewMilliQuantity(math.MaxInt64, scaledQuantity.Format), true +} + +func proportionallyCapLimitToMax(recommendedRequest, recommendedLimit, maxLimit *resource.Quantity) (request, limit *resource.Quantity) { + if recommendedLimit == nil || maxLimit == nil || maxLimit.IsZero() { + return recommendedRequest, recommendedLimit + } + if recommendedLimit.Cmp(*maxLimit) <= 0 { + return recommendedRequest, recommendedLimit + } + scaledRequest, _ := scaleQuantityProportionally(recommendedRequest, recommendedLimit, maxLimit) + return scaledRequest, maxLimit +} + +// ProportionallyCapResourcesToMaxLimit caps CPU and memory limit to maximu and scales requests to maintain limit/request ratio. +func ProportionallyCapResourcesToMaxLimit(recommendedRequests v1.ResourceList, cpuLimit, memLimit, maxCpuLimit, maxMemLimit *resource.Quantity) ContainerResources { + scaledCpuRequest, scaledCpuLimit := proportionallyCapLimitToMax(recommendedRequests.Cpu(), cpuLimit, maxCpuLimit) + scaledMemRequest, scaledMemLimit := proportionallyCapLimitToMax(recommendedRequests.Memory(), memLimit, maxMemLimit) + result := newContainerResources() + + result.Requests[v1.ResourceCPU] = *scaledCpuRequest + result.Requests[v1.ResourceMemory] = *scaledMemRequest + if scaledCpuLimit != nil { + result.Limits[v1.ResourceCPU] = *scaledCpuLimit + } + if scaledMemLimit != nil { + result.Limits[v1.ResourceMemory] = *scaledMemLimit + } + return result +} diff --git a/vertical-pod-autoscaler/pkg/utils/vpa/limit_and_request_scaling_test.go b/vertical-pod-autoscaler/pkg/utils/vpa/limit_and_request_scaling_test.go new file mode 100644 index 000000000000..151706f29cb4 --- /dev/null +++ b/vertical-pod-autoscaler/pkg/utils/vpa/limit_and_request_scaling_test.go @@ -0,0 +1,96 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
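As a quick illustration of the proportional-scaling arithmetic above, here is a standalone sketch (illustrative only, not part of this patch); it assumes only k8s.io/apimachinery and mirrors scaleQuantityProportionally, including the MaxInt64 cap:

package main

import (
	"fmt"
	"math"
	"math/big"

	"k8s.io/apimachinery/pkg/api/resource"
)

// scaleProportionally returns a quantity with the same proportion to scaleResult as
// scaledQuantity has to scaleBase, computed in milli-units and capped at MaxInt64.
func scaleProportionally(scaledQuantity, scaleBase, scaleResult *resource.Quantity) (*resource.Quantity, bool) {
	scaled := new(big.Int).Mul(big.NewInt(scaledQuantity.MilliValue()), big.NewInt(scaleResult.MilliValue()))
	scaled.Div(scaled, big.NewInt(scaleBase.MilliValue()))
	if scaled.IsInt64() {
		return resource.NewMilliQuantity(scaled.Int64(), scaledQuantity.Format), false
	}
	return resource.NewMilliQuantity(math.MaxInt64, scaledQuantity.Format), true
}

func main() {
	limit, request, recommended := resource.MustParse("2"), resource.MustParse("1"), resource.MustParse("10")
	newLimit, capped := scaleProportionally(&limit, &request, &recommended)
	fmt.Println(newLimit, capped) // 20 false: the original 2:1 limit/request ratio is preserved

	hugeLimit, tinyRequest, bigRecommendation := resource.MustParse("10G"), resource.MustParse("1m"), resource.MustParse("10G")
	_, overflowed := scaleProportionally(&hugeLimit, &tinyRequest, &bigRecommendation)
	fmt.Println(overflowed) // true: 10G * 10G / 1m does not fit in int64 milli-units, so the result is capped
}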
+*/ + +package api + +import ( + "github.com/stretchr/testify/assert" + "k8s.io/apimachinery/pkg/api/resource" + "math" + "testing" +) + +func mustParseToPointer(str string) *resource.Quantity { + val := resource.MustParse(str) + return &val +} + +func TestGetProportionalLimit(t *testing.T) { + tests := []struct { + name string + originalLimit *resource.Quantity + originalRequest *resource.Quantity + recommendedRequest *resource.Quantity + defaultLimit *resource.Quantity + expectLimit *resource.Quantity + expectAnnotation bool + }{ + { + name: "scale proportionally", + originalLimit: mustParseToPointer("2"), + originalRequest: mustParseToPointer("1"), + recommendedRequest: mustParseToPointer("10"), + expectLimit: mustParseToPointer("20"), + }, + { + name: "scale proportionally with default", + originalRequest: mustParseToPointer("1"), + recommendedRequest: mustParseToPointer("10"), + defaultLimit: mustParseToPointer("2"), + expectLimit: mustParseToPointer("20"), + }, + { + name: "no original limit", + originalRequest: mustParseToPointer("1"), + recommendedRequest: mustParseToPointer("10"), + expectLimit: nil, + }, + { + name: "no original request", + originalLimit: mustParseToPointer("2"), + recommendedRequest: mustParseToPointer("10"), + expectLimit: mustParseToPointer("10"), + }, + { + name: "limit equal to request", + originalLimit: mustParseToPointer("1"), + originalRequest: mustParseToPointer("1"), + recommendedRequest: mustParseToPointer("10"), + expectLimit: mustParseToPointer("10"), + }, + { + name: "go over milli cap", + originalLimit: mustParseToPointer("10G"), + originalRequest: mustParseToPointer("1m"), + recommendedRequest: mustParseToPointer("10G"), + expectLimit: resource.NewMilliQuantity(math.MaxInt64, resource.DecimalExponent), + expectAnnotation: true, + }, + } + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + gotLimit, gotAnnotation := GetProportionalLimit(tc.originalLimit, tc.originalRequest, tc.recommendedRequest, tc.defaultLimit) + if tc.expectLimit == nil { + assert.Nil(t, gotLimit) + } else { + if assert.NotNil(t, gotLimit) { + assert.Equal(t, gotLimit.MilliValue(), tc.expectLimit.MilliValue()) + } + } + assert.Equal(t, gotAnnotation != "", tc.expectAnnotation) + }) + } +} diff --git a/vertical-pod-autoscaler/pkg/utils/vpa/limit_proportion_maintaining.go b/vertical-pod-autoscaler/pkg/utils/vpa/limit_proportion_maintaining.go new file mode 100644 index 000000000000..20a842d0e650 --- /dev/null +++ b/vertical-pod-autoscaler/pkg/utils/vpa/limit_proportion_maintaining.go @@ -0,0 +1,45 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
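And a rough usage sketch of ProportionallyCapResourcesToMaxLimit from the file above (illustrative only; the vpa_api_util alias matches the import used elsewhere in this series, and the concrete quantities are made up):

package main

import (
	"fmt"

	apiv1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	vpa_api_util "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/vpa"
)

func main() {
	recommended := apiv1.ResourceList{
		apiv1.ResourceCPU:    resource.MustParse("500m"),
		apiv1.ResourceMemory: resource.MustParse("500Mi"),
	}
	cpuLimit := resource.MustParse("2")   // proportional CPU limit (original limit/request ratio was 4)
	memLimit := resource.MustParse("1Gi") // proportional memory limit
	maxCPU := resource.MustParse("1")     // LimitRange max for CPU; no max for memory in this sketch

	capped := vpa_api_util.ProportionallyCapResourcesToMaxLimit(recommended, &cpuLimit, &memLimit, &maxCPU, nil)
	fmt.Println(capped.Requests.Cpu(), capped.Limits.Cpu())       // 250m and 1: limit capped to the max, request scaled to keep the 4:1 ratio
	fmt.Println(capped.Requests.Memory(), capped.Limits.Memory()) // 500Mi and 1Gi: unchanged, since no max memory limit was given
}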
+*/ + +package api + +import ( + "fmt" + + apiv1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta2" + + "k8s.io/klog" +) + +// NewLimitProportionMaintainingRecommendationProcessor constructs new RecommendationsProcessor that adjusts recommendation +// for given pod to maintain proportion to limit and restrictions on limits +func NewLimitProportionMaintainingRecommendationProcessor() RecommendationProcessor { + return &limitProportionMaintainingRecommendationProcessor{} +} + +type limitProportionMaintainingRecommendationProcessor struct { + limitsRangeCalculator LimitRangeCalculator +} + +// Apply returns a recommendation for the given pod, adjusted to obey policy and limits. +func (c *limitProportionMaintainingRecommendationProcessor) Apply( + podRecommendation *vpa_types.RecommendedPodResources, + policy *vpa_types.PodResourcePolicy, + conditions []vpa_types.VerticalPodAutoscalerCondition, + pod *apiv1.Pod) (*vpa_types.RecommendedPodResources, ContainerToAnnotationsMap, error) { +} diff --git a/vertical-pod-autoscaler/pkg/utils/vpa/recommendation_processor.go b/vertical-pod-autoscaler/pkg/utils/vpa/recommendation_processor.go index c697ee735551..5c2b332a3a74 100644 --- a/vertical-pod-autoscaler/pkg/utils/vpa/recommendation_processor.go +++ b/vertical-pod-autoscaler/pkg/utils/vpa/recommendation_processor.go @@ -30,7 +30,7 @@ type RecommendationProcessor interface { // VPA policy and possibly other internal RecommendationProcessor context. // Must return a non-nil pointer to RecommendedPodResources or error. Apply(podRecommendation *vpa_types.RecommendedPodResources, - policy *vpa_types.PodResourcePolicy, - conditions []vpa_types.VerticalPodAutoscalerCondition, + _ *vpa_types.PodResourcePolicy, + _ []vpa_types.VerticalPodAutoscalerCondition, pod *v1.Pod) (*vpa_types.RecommendedPodResources, ContainerToAnnotationsMap, error) } From 2f54583f9d35704e2cf4b703931c84b389637401 Mon Sep 17 00:00:00 2001 From: Joachim Bartosik Date: Wed, 29 May 2019 11:58:12 +0200 Subject: [PATCH 07/25] Move limit range calc to be squashed before merging --- .../logic => utils/limitrange}/limit_range_calculator.go | 0 .../logic => utils/limitrange}/limit_range_calculator_test.go | 0 2 files changed, 0 insertions(+), 0 deletions(-) rename vertical-pod-autoscaler/pkg/{admission-controller/logic => utils/limitrange}/limit_range_calculator.go (100%) rename vertical-pod-autoscaler/pkg/{admission-controller/logic => utils/limitrange}/limit_range_calculator_test.go (100%) diff --git a/vertical-pod-autoscaler/pkg/admission-controller/logic/limit_range_calculator.go b/vertical-pod-autoscaler/pkg/utils/limitrange/limit_range_calculator.go similarity index 100% rename from vertical-pod-autoscaler/pkg/admission-controller/logic/limit_range_calculator.go rename to vertical-pod-autoscaler/pkg/utils/limitrange/limit_range_calculator.go diff --git a/vertical-pod-autoscaler/pkg/admission-controller/logic/limit_range_calculator_test.go b/vertical-pod-autoscaler/pkg/utils/limitrange/limit_range_calculator_test.go similarity index 100% rename from vertical-pod-autoscaler/pkg/admission-controller/logic/limit_range_calculator_test.go rename to vertical-pod-autoscaler/pkg/utils/limitrange/limit_range_calculator_test.go From 279e11552088a37bbde397274e21e00eb0ad935d Mon Sep 17 00:00:00 2001 From: Joachim Bartosik Date: Wed, 29 May 2019 13:42:22 +0200 Subject: [PATCH 08/25] Support scaling request to maintain ratio with limit in updater To 
be squashed before submit Conflicts: vertical-pod-autoscaler/pkg/admission-controller/logic/server.go vertical-pod-autoscaler/pkg/admission-controller/logic/server_test.go vertical-pod-autoscaler/pkg/admission-controller/main.go --- .../logic/recommendation_provider.go | 5 +- .../logic/recommendation_provider_test.go | 3 +- .../pkg/admission-controller/logic/server.go | 5 +- .../admission-controller/logic/server_test.go | 5 +- .../pkg/admission-controller/main.go | 13 +-- vertical-pod-autoscaler/pkg/updater/main.go | 9 ++- .../limitrange/limit_range_calculator.go | 2 +- .../limitrange/limit_range_calculator_test.go | 2 +- .../pkg/utils/vpa/capping.go | 65 +++++++++++++-- .../pkg/utils/vpa/capping_test.go | 80 ++++++++++++++++++- .../utils/vpa/limit_and_request_scaling.go | 4 +- .../utils/vpa/limit_proportion_maintaining.go | 45 ----------- .../pkg/utils/vpa/recommendation_processor.go | 4 +- 13 files changed, 168 insertions(+), 74 deletions(-) delete mode 100644 vertical-pod-autoscaler/pkg/utils/vpa/limit_proportion_maintaining.go diff --git a/vertical-pod-autoscaler/pkg/admission-controller/logic/recommendation_provider.go b/vertical-pod-autoscaler/pkg/admission-controller/logic/recommendation_provider.go index 558ae148d77a..77b43d7b6a16 100644 --- a/vertical-pod-autoscaler/pkg/admission-controller/logic/recommendation_provider.go +++ b/vertical-pod-autoscaler/pkg/admission-controller/logic/recommendation_provider.go @@ -21,6 +21,7 @@ import ( "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/labels" + "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/limitrange" vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta2" vpa_lister "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/client/listers/autoscaling.k8s.io/v1beta2" @@ -35,14 +36,14 @@ type RecommendationProvider interface { } type recommendationProvider struct { - limitsRangeCalculator LimitRangeCalculator + limitsRangeCalculator limitrange.LimitRangeCalculator recommendationProcessor vpa_api_util.RecommendationProcessor selectorFetcher target.VpaTargetSelectorFetcher vpaLister vpa_lister.VerticalPodAutoscalerLister } // NewRecommendationProvider constructs the recommendation provider that list VPAs and can be used to determine recommendations for pods. 
-func NewRecommendationProvider(calculator LimitRangeCalculator, recommendationProcessor vpa_api_util.RecommendationProcessor, +func NewRecommendationProvider(calculator limitrange.LimitRangeCalculator, recommendationProcessor vpa_api_util.RecommendationProcessor, selectorFetcher target.VpaTargetSelectorFetcher, vpaLister vpa_lister.VerticalPodAutoscalerLister) *recommendationProvider { return &recommendationProvider{ limitsRangeCalculator: calculator, diff --git a/vertical-pod-autoscaler/pkg/admission-controller/logic/recommendation_provider_test.go b/vertical-pod-autoscaler/pkg/admission-controller/logic/recommendation_provider_test.go index aa9015dc0fed..218a42fdea87 100644 --- a/vertical-pod-autoscaler/pkg/admission-controller/logic/recommendation_provider_test.go +++ b/vertical-pod-autoscaler/pkg/admission-controller/logic/recommendation_provider_test.go @@ -18,6 +18,7 @@ package logic import ( "fmt" + "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/limitrange" "math" "testing" @@ -329,7 +330,7 @@ func TestUpdateResourceRequests(t *testing.T) { recommendationProvider := &recommendationProvider{ vpaLister: vpaLister, - recommendationProcessor: api.NewCappingRecommendationProcessor(), + recommendationProcessor: api.NewCappingRecommendationProcessor(limitrange.NewNoopLimitsCalculator()), selectorFetcher: mockSelectorFetcher, limitsRangeCalculator: &fakeLimitRangeCalculator{ tc.limitRange, diff --git a/vertical-pod-autoscaler/pkg/admission-controller/logic/server.go b/vertical-pod-autoscaler/pkg/admission-controller/logic/server.go index 5ceb29a025aa..57c9542d068c 100644 --- a/vertical-pod-autoscaler/pkg/admission-controller/logic/server.go +++ b/vertical-pod-autoscaler/pkg/admission-controller/logic/server.go @@ -20,6 +20,7 @@ import ( "encoding/json" "fmt" "io/ioutil" + "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/limitrange" "net/http" "strings" @@ -38,11 +39,11 @@ import ( type AdmissionServer struct { recommendationProvider RecommendationProvider podPreProcessor PodPreProcessor - limitsChecker LimitRangeCalculator + limitsChecker limitrange.LimitRangeCalculator } // NewAdmissionServer constructs new AdmissionServer -func NewAdmissionServer(recommendationProvider RecommendationProvider, podPreProcessor PodPreProcessor, limitsChecker LimitRangeCalculator) *AdmissionServer { +func NewAdmissionServer(recommendationProvider RecommendationProvider, podPreProcessor PodPreProcessor, limitsChecker limitrange.LimitRangeCalculator) *AdmissionServer { return &AdmissionServer{recommendationProvider, podPreProcessor, limitsChecker} } diff --git a/vertical-pod-autoscaler/pkg/admission-controller/logic/server_test.go b/vertical-pod-autoscaler/pkg/admission-controller/logic/server_test.go index 18e30f326128..dc994bf8bf2f 100644 --- a/vertical-pod-autoscaler/pkg/admission-controller/logic/server_test.go +++ b/vertical-pod-autoscaler/pkg/admission-controller/logic/server_test.go @@ -20,6 +20,7 @@ import ( "encoding/json" "fmt" "github.com/stretchr/testify/assert" + "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/limitrange" "strings" "testing" @@ -311,7 +312,7 @@ func TestGetPatchesForResourceRequest(t *testing.T) { t.Run(fmt.Sprintf("test case: %s", tc.name), func(t *testing.T) { fppp := fakePodPreProcessor{e: tc.preProcessorError} frp := fakeRecommendationProvider{tc.recommendResources, tc.recommendAnnotations, tc.recommendName, tc.recommendError} - lc := NewNoopLimitsCalculator() + lc := limitrange.NewNoopLimitsCalculator() s := NewAdmissionServer(&frp, &fppp, lc) patches, err := 
s.getPatchesForPodResourceRequest(tc.podJson, tc.namespace) if tc.expectError == nil { @@ -359,7 +360,7 @@ func TestGetPatchesForResourceRequest_TwoReplacementResources(t *testing.T) { }`) recommendAnnotations := vpa_api_util.ContainerToAnnotationsMap{} frp := fakeRecommendationProvider{recommendResources, recommendAnnotations, "name", nil} - lc := NewNoopLimitsCalculator() + lc := limitrange.NewNoopLimitsCalculator() s := NewAdmissionServer(&frp, &fppp, lc) patches, err := s.getPatchesForPodResourceRequest(podJson, "default") assert.NoError(t, err) diff --git a/vertical-pod-autoscaler/pkg/admission-controller/main.go b/vertical-pod-autoscaler/pkg/admission-controller/main.go index 7eefcd8f8f13..80e0ad7dd4a0 100644 --- a/vertical-pod-autoscaler/pkg/admission-controller/main.go +++ b/vertical-pod-autoscaler/pkg/admission-controller/main.go @@ -19,6 +19,7 @@ package main import ( "flag" "fmt" + "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/limitrange" "net/http" "os" "time" @@ -80,15 +81,15 @@ func main() { target.NewBeta1TargetSelectorFetcher(config), ) podPreprocessor := logic.NewDefaultPodPreProcessor() - var limitsChecker logic.LimitRangeCalculator - limitsChecker, err = logic.NewLimitsRangeCalculator(factory) + var limitRangeCalculator limitrange.LimitRangeCalculator + limitRangeCalculator, err = limitrange.NewLimitsRangeCalculator(factory) if err != nil { - klog.Errorf("Failed to create limitsChecker, falling back to not checking limits. Error message: %s", err) - limitsChecker = logic.NewNoopLimitsCalculator() + klog.Errorf("Failed to create limitRangeCalculator, falling back to not checking limits. Error message: %s", err) + limitRangeCalculator = limitrange.NewNoopLimitsCalculator() } - recommendationProvider := logic.NewRecommendationProvider(limitsChecker, vpa_api_util.NewCappingRecommendationProcessor(), targetSelectorFetcher, vpaLister) + recommendationProvider := logic.NewRecommendationProvider(limitRangeCalculator, vpa_api_util.NewCappingRecommendationProcessor(limitRangeCalculator), targetSelectorFetcher, vpaLister) - as := logic.NewAdmissionServer(recommendationProvider, podPreprocessor, limitsChecker) + as := logic.NewAdmissionServer(recommendationProvider, podPreprocessor, limitRangeCalculator) http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { as.Serve(w, r) healthCheck.UpdateLastActivity() diff --git a/vertical-pod-autoscaler/pkg/updater/main.go b/vertical-pod-autoscaler/pkg/updater/main.go index f82eb96c294b..5826e0d1b832 100644 --- a/vertical-pod-autoscaler/pkg/updater/main.go +++ b/vertical-pod-autoscaler/pkg/updater/main.go @@ -18,6 +18,7 @@ package main import ( "flag" + "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/limitrange" "time" "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/target" @@ -71,8 +72,14 @@ func main() { target.NewVpaTargetSelectorFetcher(config, kubeClient, factory), target.NewBeta1TargetSelectorFetcher(config), ) + var limitRangeCalculator limitrange.LimitRangeCalculator + limitRangeCalculator, err = limitrange.NewLimitsRangeCalculator(factory) + if err != nil { + klog.Errorf("Failed to create limitRangeCalculator, falling back to not checking limits. 
Error message: %s", err) + limitRangeCalculator = limitrange.NewNoopLimitsCalculator() + } // TODO: use SharedInformerFactory in updater - updater, err := updater.NewUpdater(kubeClient, vpaClient, *minReplicas, *evictionToleranceFraction, vpa_api_util.NewCappingRecommendationProcessor(), nil, targetSelectorFetcher) + updater, err := updater.NewUpdater(kubeClient, vpaClient, *minReplicas, *evictionToleranceFraction, vpa_api_util.NewCappingRecommendationProcessor(limitRangeCalculator), nil, targetSelectorFetcher) if err != nil { klog.Fatalf("Failed to create updater: %v", err) } diff --git a/vertical-pod-autoscaler/pkg/utils/limitrange/limit_range_calculator.go b/vertical-pod-autoscaler/pkg/utils/limitrange/limit_range_calculator.go index add40387fb7f..cbc184f8796f 100644 --- a/vertical-pod-autoscaler/pkg/utils/limitrange/limit_range_calculator.go +++ b/vertical-pod-autoscaler/pkg/utils/limitrange/limit_range_calculator.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package logic +package limitrange import ( "fmt" diff --git a/vertical-pod-autoscaler/pkg/utils/limitrange/limit_range_calculator_test.go b/vertical-pod-autoscaler/pkg/utils/limitrange/limit_range_calculator_test.go index c7f6a979db8f..d7e65e4d0f6c 100644 --- a/vertical-pod-autoscaler/pkg/utils/limitrange/limit_range_calculator_test.go +++ b/vertical-pod-autoscaler/pkg/utils/limitrange/limit_range_calculator_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package logic +package limitrange import ( "testing" diff --git a/vertical-pod-autoscaler/pkg/utils/vpa/capping.go b/vertical-pod-autoscaler/pkg/utils/vpa/capping.go index 0703e90123d3..a8b8bfed3953 100644 --- a/vertical-pod-autoscaler/pkg/utils/vpa/capping.go +++ b/vertical-pod-autoscaler/pkg/utils/vpa/capping.go @@ -18,18 +18,17 @@ package api import ( "fmt" - apiv1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta2" - + "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/limitrange" "k8s.io/klog" ) // NewCappingRecommendationProcessor constructs new RecommendationsProcessor that adjusts recommendation // for given pod to obey VPA resources policy and container limits -func NewCappingRecommendationProcessor() RecommendationProcessor { - return &cappingRecommendationProcessor{} +func NewCappingRecommendationProcessor(limitsRangeCalculator limitrange.LimitRangeCalculator) RecommendationProcessor { + return &cappingRecommendationProcessor{limitsRangeCalculator: limitsRangeCalculator} } type cappingAction string @@ -43,7 +42,9 @@ func toCappingAnnotation(resourceName apiv1.ResourceName, action cappingAction) return fmt.Sprintf("%s %s", resourceName, action) } -type cappingRecommendationProcessor struct{} +type cappingRecommendationProcessor struct { + limitsRangeCalculator limitrange.LimitRangeCalculator +} // Apply returns a recommendation for the given pod, adjusted to obey policy and limits. func (c *cappingRecommendationProcessor) Apply( @@ -51,6 +52,7 @@ func (c *cappingRecommendationProcessor) Apply( policy *vpa_types.PodResourcePolicy, conditions []vpa_types.VerticalPodAutoscalerCondition, pod *apiv1.Pod) (*vpa_types.RecommendedPodResources, ContainerToAnnotationsMap, error) { + // TODO: Annotate if request enforced by maintaining proportion with limit and allowed limit range is in conflict with policy. 
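	// For orientation, a rough end-to-end usage sketch of this processor from a caller's point
	// of view (the constructors are the ones shown in this series; factory, vpa and pod are
	// placeholders, not names from the patch):
	//
	//	limitRangeCalculator, err := limitrange.NewLimitsRangeCalculator(factory)
	//	if err != nil {
	//		// same fallback as main.go and the updater above
	//		limitRangeCalculator = limitrange.NewNoopLimitsCalculator()
	//	}
	//	processor := vpa_api_util.NewCappingRecommendationProcessor(limitRangeCalculator)
	//	cappedRecommendation, annotations, err := processor.Apply(
	//		vpa.Status.Recommendation, vpa.Spec.ResourcePolicy, vpa.Status.Conditions, pod)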
if podRecommendation == nil && policy == nil { // If there is no recommendation and no policies have been defined then no recommendation can be computed. @@ -62,7 +64,11 @@ func (c *cappingRecommendationProcessor) Apply( } updatedRecommendations := []vpa_types.RecommendedContainerResources{} containerToAnnotationsMap := ContainerToAnnotationsMap{} - for _, containerRecommendation := range podRecommendation.ContainerRecommendations { + limitAdjustedRecommendation, err := c.capProportionallyToMaxLimit(podRecommendation, pod, containerToAnnotationsMap) + if err != nil { + return nil, nil, err + } + for _, containerRecommendation := range limitAdjustedRecommendation { container := getContainer(containerRecommendation.ContainerName, pod) if container == nil { @@ -84,6 +90,53 @@ func (c *cappingRecommendationProcessor) Apply( return &vpa_types.RecommendedPodResources{ContainerRecommendations: updatedRecommendations}, containerToAnnotationsMap, nil } +func capSingleRecommendationProportionallyToMaxLimit(recommendation apiv1.ResourceList, container apiv1.Container, podLimitRange *apiv1.LimitRangeItem, containerToAnnotationsMap ContainerToAnnotationsMap) apiv1.ResourceList { + defaultCpu := podLimitRange.Default.Cpu() + defaultMem := podLimitRange.Default.Memory() + maxCpuLimit := podLimitRange.Max.Cpu() + maxMemLimit := podLimitRange.Max.Memory() + cpuLimit, _ := GetProportionalLimit(container.Resources.Limits.Cpu(), container.Resources.Requests.Cpu(), recommendation.Cpu(), defaultCpu) + memLimit, _ := GetProportionalLimit(container.Resources.Limits.Memory(), container.Resources.Requests.Memory(), recommendation.Memory(), defaultMem) + capped := ProportionallyCapResourcesToMaxLimit(recommendation, cpuLimit, memLimit, maxCpuLimit, maxMemLimit) + return apiv1.ResourceList{ + apiv1.ResourceCPU: *capped.Requests.Cpu(), + apiv1.ResourceMemory: *capped.Requests.Memory(), + } +} + +func (c *cappingRecommendationProcessor) capProportionallyToMaxLimit(podRecommendation *vpa_types.RecommendedPodResources, pod *apiv1.Pod, containerToAnnotationsMap ContainerToAnnotationsMap) ([]vpa_types.RecommendedContainerResources, error) { + podLimitRange, err := c.limitsRangeCalculator.GetContainerLimitRangeItem(pod.Namespace) + if err != nil { + return nil, fmt.Errorf("error obtaining limit range: %s", err) + } + if podLimitRange == nil { + return podRecommendation.ContainerRecommendations, nil + } + + updatedRecommendations := []vpa_types.RecommendedContainerResources{} + + for _, container := range pod.Spec.Containers { + recommendation := GetRecommendationForContainer(container.Name, podRecommendation) + if recommendation == nil { + klog.V(2).Infof("no matching recommendation found for container %s", container.Name) + continue + } + + scaledRecommendation := recommendation.DeepCopy() + scaledRecommendation.LowerBound = capSingleRecommendationProportionallyToMaxLimit(recommendation.LowerBound, container, podLimitRange, containerToAnnotationsMap) + scaledRecommendation.Target = capSingleRecommendationProportionallyToMaxLimit(recommendation.Target, container, podLimitRange, containerToAnnotationsMap) + scaledRecommendation.UpperBound = capSingleRecommendationProportionallyToMaxLimit(recommendation.UpperBound, container, podLimitRange, containerToAnnotationsMap) + if scaledRecommendation.Target.Cpu().MilliValue() != recommendation.Target.Cpu().MilliValue() { + containerToAnnotationsMap[container.Name] = append(containerToAnnotationsMap[container.Name], "changed CPU limit to fit within limit range") + } + if 
scaledRecommendation.Target.Memory().Value() != recommendation.Target.Memory().Value() { + containerToAnnotationsMap[container.Name] = append(containerToAnnotationsMap[container.Name], "changed memory limit to fit within limit range") + } + updatedRecommendations = append(updatedRecommendations, *scaledRecommendation) + } + return updatedRecommendations, nil +} + // getCappedRecommendationForContainer returns a recommendation for the given container, adjusted to obey policy and limits. func getCappedRecommendationForContainer( container apiv1.Container, diff --git a/vertical-pod-autoscaler/pkg/utils/vpa/capping_test.go b/vertical-pod-autoscaler/pkg/utils/vpa/capping_test.go index ad80dff1f5dc..6fa52d1f6257 100644 --- a/vertical-pod-autoscaler/pkg/utils/vpa/capping_test.go +++ b/vertical-pod-autoscaler/pkg/utils/vpa/capping_test.go @@ -41,7 +41,7 @@ func TestRecommendationNotAvailable(t *testing.T) { } policy := vpa_types.PodResourcePolicy{} - res, annotations, err := NewCappingRecommendationProcessor().Apply(&podRecommendation, &policy, nil, pod) + res, annotations, err := NewCappingRecommendationProcessor(&fakeLimitRangeCalculator{}).Apply(&podRecommendation, &policy, nil, pod) assert.Nil(t, err) assert.Empty(t, annotations) assert.Empty(t, res.ContainerRecommendations) @@ -84,7 +84,7 @@ func TestRecommendationCappedToMinMaxPolicy(t *testing.T) { }, } - res, annotations, err := NewCappingRecommendationProcessor().Apply(&podRecommendation, &policy, nil, pod) + res, annotations, err := NewCappingRecommendationProcessor(&fakeLimitRangeCalculator{}).Apply(&podRecommendation, &policy, nil, pod) assert.Nil(t, err) assert.Equal(t, apiv1.ResourceList{ apiv1.ResourceCPU: *resource.NewScaledQuantity(40, 1), @@ -146,7 +146,7 @@ func TestApply(t *testing.T) { pod := test.Pod().WithName("pod1").AddContainer(test.BuildTestContainer("ctr-name", "", "")).Get() for _, testCase := range applyTestCases { - res, _, err := NewCappingRecommendationProcessor().Apply( + res, _, err := NewCappingRecommendationProcessor(&fakeLimitRangeCalculator{}).Apply( testCase.PodRecommendation, testCase.Policy, nil, pod) assert.Equal(t, testCase.ExpectedPodRecommendation, res) assert.Equal(t, testCase.ExpectedError, err) @@ -215,3 +215,77 @@ func TestApplyVpa(t *testing.T) { apiv1.ResourceMemory: *resource.NewScaledQuantity(4500, 1), }, res.ContainerRecommendations[0].UpperBound) } + +type fakeLimitRangeCalculator struct { + limitRange apiv1.LimitRangeItem +} + +func (nlrc *fakeLimitRangeCalculator) GetContainerLimitRangeItem(namespace string) (*apiv1.LimitRangeItem, error) { + return &nlrc.limitRange, nil +} + +func TestApplyCapsToLimitRange(t *testing.T) { + limitRange := apiv1.LimitRangeItem{ + Type: apiv1.LimitTypeContainer, + Max: apiv1.ResourceList{ + apiv1.ResourceCPU: resource.MustParse("1"), + apiv1.ResourceMemory: resource.MustParse("1G"), + }, + } + recommendation := vpa_types.RecommendedPodResources{ + ContainerRecommendations: []vpa_types.RecommendedContainerResources{ + { + ContainerName: "container", + Target: apiv1.ResourceList{ + apiv1.ResourceCPU: resource.MustParse("2"), + apiv1.ResourceMemory: resource.MustParse("10G"), + }, + }, + }, + } + pod := apiv1.Pod{ + Spec: apiv1.PodSpec{ + Containers: []apiv1.Container{ + { + Name: "container", + Resources: apiv1.ResourceRequirements{ + Requests: apiv1.ResourceList{ + apiv1.ResourceCPU: resource.MustParse("1"), + apiv1.ResourceMemory: resource.MustParse("1G"), + }, + Limits: apiv1.ResourceList{ + apiv1.ResourceCPU: resource.MustParse("1"), + apiv1.ResourceMemory: 
resource.MustParse("1G"), + }, + }, + }, + }, + }, + } + expectedRecommendation := vpa_types.RecommendedPodResources{ + ContainerRecommendations: []vpa_types.RecommendedContainerResources{ + { + ContainerName: "container", + LowerBound: apiv1.ResourceList{ + apiv1.ResourceCPU: *resource.NewQuantity(0, resource.DecimalSI), + apiv1.ResourceMemory: *resource.NewQuantity(0, resource.BinarySI), + }, + Target: apiv1.ResourceList{ + apiv1.ResourceCPU: resource.MustParse("1000m"), + apiv1.ResourceMemory: resource.MustParse("1000000000000m"), + }, + UpperBound: apiv1.ResourceList{ + apiv1.ResourceCPU: *resource.NewQuantity(0, resource.DecimalSI), + apiv1.ResourceMemory: *resource.NewQuantity(0, resource.BinarySI), + }, + }, + }, + } + + calculator := fakeLimitRangeCalculator{limitRange} + processor := NewCappingRecommendationProcessor(&calculator) + processedRecommendation, annotations, err := processor.Apply(&recommendation, nil, nil, &pod) + assert.NoError(t, err) + assert.Equal(t, map[string][]string{"container": {"changed CPU limit to fit within limit range", "changed memory limit to fit within limit range"}}, annotations) + assert.Equal(t, expectedRecommendation, *processedRecommendation) +} diff --git a/vertical-pod-autoscaler/pkg/utils/vpa/limit_and_request_scaling.go b/vertical-pod-autoscaler/pkg/utils/vpa/limit_and_request_scaling.go index 5f2e19e74e75..2d4c5396aaf6 100644 --- a/vertical-pod-autoscaler/pkg/utils/vpa/limit_and_request_scaling.go +++ b/vertical-pod-autoscaler/pkg/utils/vpa/limit_and_request_scaling.go @@ -58,7 +58,7 @@ func GetProportionalLimit(originalLimit, originalRequest, recommendedRequest, de return &result, "" } result, capped := scaleQuantityProportionally( /*scaledQuantity=*/ originalLimit /*scaleBase=*/, originalRequest /*scaleResult=*/, recommendedRequest) - if capped { + if !capped { return result, "" } return result, fmt.Sprintf( @@ -92,7 +92,7 @@ func proportionallyCapLimitToMax(recommendedRequest, recommendedLimit, maxLimit return scaledRequest, maxLimit } -// ProportionallyCapResourcesToMaxLimit caps CPU and memory limit to maximu and scales requests to maintain limit/request ratio. +// ProportionallyCapResourcesToMaxLimit caps CPU and memory limit to maximum and scales requests to maintain limit/request ratio. func ProportionallyCapResourcesToMaxLimit(recommendedRequests v1.ResourceList, cpuLimit, memLimit, maxCpuLimit, maxMemLimit *resource.Quantity) ContainerResources { scaledCpuRequest, scaledCpuLimit := proportionallyCapLimitToMax(recommendedRequests.Cpu(), cpuLimit, maxCpuLimit) scaledMemRequest, scaledMemLimit := proportionallyCapLimitToMax(recommendedRequests.Memory(), memLimit, maxMemLimit) diff --git a/vertical-pod-autoscaler/pkg/utils/vpa/limit_proportion_maintaining.go b/vertical-pod-autoscaler/pkg/utils/vpa/limit_proportion_maintaining.go deleted file mode 100644 index 20a842d0e650..000000000000 --- a/vertical-pod-autoscaler/pkg/utils/vpa/limit_proportion_maintaining.go +++ /dev/null @@ -1,45 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package api - -import ( - "fmt" - - apiv1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/resource" - vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta2" - - "k8s.io/klog" -) - -// NewLimitProportionMaintainingRecommendationProcessor constructs new RecommendationsProcessor that adjusts recommendation -// for given pod to maintain proportion to limit and restrictions on limits -func NewLimitProportionMaintainingRecommendationProcessor() RecommendationProcessor { - return &limitProportionMaintainingRecommendationProcessor{} -} - -type limitProportionMaintainingRecommendationProcessor struct { - limitsRangeCalculator LimitRangeCalculator -} - -// Apply returns a recommendation for the given pod, adjusted to obey policy and limits. -func (c *limitProportionMaintainingRecommendationProcessor) Apply( - podRecommendation *vpa_types.RecommendedPodResources, - policy *vpa_types.PodResourcePolicy, - conditions []vpa_types.VerticalPodAutoscalerCondition, - pod *apiv1.Pod) (*vpa_types.RecommendedPodResources, ContainerToAnnotationsMap, error) { -} diff --git a/vertical-pod-autoscaler/pkg/utils/vpa/recommendation_processor.go b/vertical-pod-autoscaler/pkg/utils/vpa/recommendation_processor.go index 5c2b332a3a74..c697ee735551 100644 --- a/vertical-pod-autoscaler/pkg/utils/vpa/recommendation_processor.go +++ b/vertical-pod-autoscaler/pkg/utils/vpa/recommendation_processor.go @@ -30,7 +30,7 @@ type RecommendationProcessor interface { // VPA policy and possibly other internal RecommendationProcessor context. // Must return a non-nil pointer to RecommendedPodResources or error. Apply(podRecommendation *vpa_types.RecommendedPodResources, - _ *vpa_types.PodResourcePolicy, - _ []vpa_types.VerticalPodAutoscalerCondition, + policy *vpa_types.PodResourcePolicy, + conditions []vpa_types.VerticalPodAutoscalerCondition, pod *v1.Pod) (*vpa_types.RecommendedPodResources, ContainerToAnnotationsMap, error) } From ad852e0bd7aec65d41b7445ffd0607fa308a6a14 Mon Sep 17 00:00:00 2001 From: Beata Skiba Date: Wed, 29 May 2019 10:49:31 +0200 Subject: [PATCH 09/25] e2e for VPA keeping limit to request ratio constant --- .../e2e/v1beta1/admission_controller.go | 37 ++++++++++++++++++- vertical-pod-autoscaler/e2e/v1beta1/common.go | 11 ++++++ .../e2e/v1beta2/admission_controller.go | 31 ++++++++++++++++ vertical-pod-autoscaler/e2e/v1beta2/common.go | 11 ++++++ 4 files changed, 89 insertions(+), 1 deletion(-) diff --git a/vertical-pod-autoscaler/e2e/v1beta1/admission_controller.go b/vertical-pod-autoscaler/e2e/v1beta1/admission_controller.go index a250f3fdd5bb..28df6187af9a 100644 --- a/vertical-pod-autoscaler/e2e/v1beta1/admission_controller.go +++ b/vertical-pod-autoscaler/e2e/v1beta1/admission_controller.go @@ -63,7 +63,9 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", func() { d := NewHamsterDeploymentWithGuaranteedResources(f, ParseQuantityOrDie("100m") /*cpu*/, ParseQuantityOrDie("100Mi") /*memory*/) ginkgo.By("Setting up a VPA CRD") - vpaCRD := NewVPA(f, "hamster-vpa", hamsterTargetRef) + vpaCRD := NewVPA(f, "hamster-vpa", &metav1.LabelSelector{ + MatchLabels: d.Spec.Template.Labels, + }) vpaCRD.Status.Recommendation = &vpa_types.RecommendedPodResources{ ContainerRecommendations: []vpa_types.RecommendedContainerResources{{ ContainerName: "hamster", @@ -88,6 +90,39 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", func() { } }) + ginkgo.It("keeps 
limits to request ratio constant", func() { + d := NewHamsterDeploymentWithResourcesAndLimits(f, + ParseQuantityOrDie("100m") /*cpu request*/, ParseQuantityOrDie("100Mi"), /*memory request*/ + ParseQuantityOrDie("150m") /*cpu limit*/, ParseQuantityOrDie("200Mi") /*memory limit*/) + + ginkgo.By("Setting up a VPA CRD") + vpaCRD := NewVPA(f, "hamster-vpa", &metav1.LabelSelector{ + MatchLabels: d.Spec.Template.Labels, + }) + vpaCRD.Status.Recommendation = &vpa_types.RecommendedPodResources{ + ContainerRecommendations: []vpa_types.RecommendedContainerResources{{ + ContainerName: "hamster", + Target: apiv1.ResourceList{ + apiv1.ResourceCPU: ParseQuantityOrDie("250m"), + apiv1.ResourceMemory: ParseQuantityOrDie("200Mi"), + }, + }}, + } + InstallVPA(f, vpaCRD) + + ginkgo.By("Setting up a hamster deployment") + podList := startDeploymentPods(f, d) + + // Originally Pods had 100m CPU, 100Mi of memory, but admission controller + // should change it to 250m CPU and 200Mi of memory. Limits to request ratio should stay unchanged. + for _, pod := range podList.Items { + gomega.Expect(*pod.Spec.Containers[0].Resources.Requests.Cpu()).To(gomega.Equal(ParseQuantityOrDie("250m"))) + gomega.Expect(*pod.Spec.Containers[0].Resources.Requests.Memory()).To(gomega.Equal(ParseQuantityOrDie("200Mi"))) + gomega.Expect(float64(pod.Spec.Containers[0].Resources.Limits.Cpu().MilliValue()) / float64(pod.Spec.Containers[0].Resources.Requests.Cpu().MilliValue())).To(gomega.BeNumerically("~", 1.5)) + gomega.Expect(float64(pod.Spec.Containers[0].Resources.Limits.Memory().Value()) / float64(pod.Spec.Containers[0].Resources.Requests.Memory().Value())).To(gomega.BeNumerically("~", 2.)) + } + }) + ginkgo.It("caps request to max set in VPA", func() { d := NewHamsterDeploymentWithResources(f, ParseQuantityOrDie("100m") /*cpu*/, ParseQuantityOrDie("100Mi") /*memory*/) diff --git a/vertical-pod-autoscaler/e2e/v1beta1/common.go b/vertical-pod-autoscaler/e2e/v1beta1/common.go index 98db1b4d0e08..40cbdafd41de 100644 --- a/vertical-pod-autoscaler/e2e/v1beta1/common.go +++ b/vertical-pod-autoscaler/e2e/v1beta1/common.go @@ -136,6 +136,17 @@ func NewHamsterDeploymentWithGuaranteedResources(f *framework.Framework, cpuQuan return d } +// NewHamsterDeploymentWithResourcesAndLimits creates a simple hamster deployment with specific +// resource requests and limits for e2e test purposes. 
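	// (Worked numbers for the ratio test above: the original CPU ratio is 150m/100m = 1.5, so the
	// 250m recommended request implies a 375m limit; the memory ratio is 200Mi/100Mi = 2, so the
	// 200Mi request implies a 400Mi limit. The assertions check the preserved ratios rather than
	// the absolute limits, hence BeNumerically("~", 1.5) and BeNumerically("~", 2.).)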
+func NewHamsterDeploymentWithResourcesAndLimits(f *framework.Framework, cpuQuantityRequest, memoryQuantityRequest, cpuQuantityLimit, memoryQuantityLimit resource.Quantity) *appsv1.Deployment { + d := NewHamsterDeploymentWithResources(f, cpuQuantityRequest, memoryQuantityRequest) + d.Spec.Template.Spec.Containers[0].Resources.Limits = apiv1.ResourceList{ + apiv1.ResourceCPU: cpuQuantityLimit, + apiv1.ResourceMemory: memoryQuantityLimit, + } + return d +} + // GetHamsterPods returns running hamster pods (matched by hamsterLabels) func GetHamsterPods(f *framework.Framework) (*apiv1.PodList, error) { label := labels.SelectorFromSet(labels.Set(hamsterLabels)) diff --git a/vertical-pod-autoscaler/e2e/v1beta2/admission_controller.go b/vertical-pod-autoscaler/e2e/v1beta2/admission_controller.go index cb7f4d92074f..a51562add069 100644 --- a/vertical-pod-autoscaler/e2e/v1beta2/admission_controller.go +++ b/vertical-pod-autoscaler/e2e/v1beta2/admission_controller.go @@ -85,6 +85,37 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", func() { } }) + ginkgo.It("keeps limits to request ratio constant", func() { + d := NewHamsterDeploymentWithResourcesAndLimits(f, + ParseQuantityOrDie("100m") /*cpu request*/, ParseQuantityOrDie("100Mi"), /*memory request*/ + ParseQuantityOrDie("150m") /*cpu limit*/, ParseQuantityOrDie("200Mi") /*memory limit*/) + + ginkgo.By("Setting up a VPA CRD") + vpaCRD := NewVPA(f, "hamster-vpa", hamsterTargetRef) + vpaCRD.Status.Recommendation = &vpa_types.RecommendedPodResources{ + ContainerRecommendations: []vpa_types.RecommendedContainerResources{{ + ContainerName: "hamster", + Target: apiv1.ResourceList{ + apiv1.ResourceCPU: ParseQuantityOrDie("250m"), + apiv1.ResourceMemory: ParseQuantityOrDie("200Mi"), + }, + }}, + } + InstallVPA(f, vpaCRD) + + ginkgo.By("Setting up a hamster deployment") + podList := startDeploymentPods(f, d) + + // Originally Pods had 100m CPU, 100Mi of memory, but admission controller + // should change it to 250m CPU and 200Mi of memory. Limits to request ratio should stay unchanged. + for _, pod := range podList.Items { + gomega.Expect(*pod.Spec.Containers[0].Resources.Requests.Cpu()).To(gomega.Equal(ParseQuantityOrDie("250m"))) + gomega.Expect(*pod.Spec.Containers[0].Resources.Requests.Memory()).To(gomega.Equal(ParseQuantityOrDie("200Mi"))) + gomega.Expect(float64(pod.Spec.Containers[0].Resources.Limits.Cpu().MilliValue()) / float64(pod.Spec.Containers[0].Resources.Requests.Cpu().MilliValue())).To(gomega.BeNumerically("~", 1.5)) + gomega.Expect(float64(pod.Spec.Containers[0].Resources.Limits.Memory().Value()) / float64(pod.Spec.Containers[0].Resources.Requests.Memory().Value())).To(gomega.BeNumerically("~", 2.)) + } + }) + ginkgo.It("caps request to max set in VPA", func() { d := NewHamsterDeploymentWithResources(f, ParseQuantityOrDie("100m") /*cpu*/, ParseQuantityOrDie("100Mi") /*memory*/) diff --git a/vertical-pod-autoscaler/e2e/v1beta2/common.go b/vertical-pod-autoscaler/e2e/v1beta2/common.go index 93d33906e311..0c58690b4835 100644 --- a/vertical-pod-autoscaler/e2e/v1beta2/common.go +++ b/vertical-pod-autoscaler/e2e/v1beta2/common.go @@ -143,6 +143,17 @@ func NewHamsterDeploymentWithGuaranteedResources(f *framework.Framework, cpuQuan return d } +// NewHamsterDeploymentWithResourcesAndLimits creates a simple hamster deployment with specific +// resource requests and limits for e2e test purposes. 
+func NewHamsterDeploymentWithResourcesAndLimits(f *framework.Framework, cpuQuantityRequest, memoryQuantityRequest, cpuQuantityLimit, memoryQuantityLimit resource.Quantity) *appsv1.Deployment { + d := NewHamsterDeploymentWithResources(f, cpuQuantityRequest, memoryQuantityRequest) + d.Spec.Template.Spec.Containers[0].Resources.Limits = apiv1.ResourceList{ + apiv1.ResourceCPU: cpuQuantityLimit, + apiv1.ResourceMemory: memoryQuantityLimit, + } + return d +} + // GetHamsterPods returns running hamster pods (matched by hamsterLabels) func GetHamsterPods(f *framework.Framework) (*apiv1.PodList, error) { label := labels.SelectorFromSet(labels.Set(hamsterLabels)) From 246e8c2cd5151cacdcc08e0d4847ec1d2b1d6eb7 Mon Sep 17 00:00:00 2001 From: Beata Skiba Date: Wed, 29 May 2019 12:06:35 +0200 Subject: [PATCH 10/25] e2e test for VPA respecting LimitRange min and max --- .../e2e/v1beta1/admission_controller.go | 88 +++++++++++++++++++ vertical-pod-autoscaler/e2e/v1beta1/common.go | 54 ++++++++++++ .../e2e/v1beta2/admission_controller.go | 84 ++++++++++++++++++ vertical-pod-autoscaler/e2e/v1beta2/common.go | 54 ++++++++++++ 4 files changed, 280 insertions(+) diff --git a/vertical-pod-autoscaler/e2e/v1beta1/admission_controller.go b/vertical-pod-autoscaler/e2e/v1beta1/admission_controller.go index 28df6187af9a..31a736168c90 100644 --- a/vertical-pod-autoscaler/e2e/v1beta1/admission_controller.go +++ b/vertical-pod-autoscaler/e2e/v1beta1/admission_controller.go @@ -17,6 +17,8 @@ limitations under the License. package autoscaling import ( + "fmt" + appsv1 "k8s.io/api/apps/v1" apiv1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -123,6 +125,92 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", func() { } }) + ginkgo.It("caps request according to max limit set in LimitRange", func() { + d := NewHamsterDeploymentWithResourcesAndLimits(f, + ParseQuantityOrDie("100m") /*cpu request*/, ParseQuantityOrDie("100Mi"), /*memory request*/ + ParseQuantityOrDie("150m") /*cpu limit*/, ParseQuantityOrDie("200Mi") /*memory limit*/) + + ginkgo.By("Setting up a VPA CRD") + vpaCRD := NewVPA(f, "hamster-vpa", &metav1.LabelSelector{ + MatchLabels: d.Spec.Template.Labels, + }) + vpaCRD.Status.Recommendation = &vpa_types.RecommendedPodResources{ + ContainerRecommendations: []vpa_types.RecommendedContainerResources{{ + ContainerName: "hamster", + Target: apiv1.ResourceList{ + apiv1.ResourceCPU: ParseQuantityOrDie("250m"), + apiv1.ResourceMemory: ParseQuantityOrDie("200Mi"), + }, + }}, + } + InstallVPA(f, vpaCRD) + + cpuLimit := "300m" + memLimit := "1T" + ginkgo.By(fmt.Sprintf("Setting up LimitRange with max limits - CPU: %v, memory: %v", cpuLimit, memLimit)) + // Max CPU limit is 300m and ratio is 1.5, so max request is 200m, while + // recommendation is 250m + // Max memory limit is 1T and ratio is 2., so max request is 0.5T + InstallLimitRangeWithMax(f, cpuLimit, memLimit) + + ginkgo.By("Setting up a hamster deployment") + podList := startDeploymentPods(f, d) + + // Originally Pods had 100m CPU, 100Mi of memory, but admission controller + // should change it to 200m CPU (as this is the recommendation + // capped according to max limit in LimitRange) and 200Mi of memory, + // which is uncapped. Limit to request ratio should stay unchanged. 
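	// (With the ratio preserved, the resulting limits are 200m * 1.5 = 300m CPU, i.e. exactly the
	// LimitRange max, and 200Mi * 2 = 400Mi memory, well below the 1T max.)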
+ for _, pod := range podList.Items { + gomega.Expect(*pod.Spec.Containers[0].Resources.Requests.Cpu()).To(gomega.Equal(ParseQuantityOrDie("200m"))) + gomega.Expect(*pod.Spec.Containers[0].Resources.Requests.Memory()).To(gomega.Equal(ParseQuantityOrDie("200Mi"))) + gomega.Expect(float64(pod.Spec.Containers[0].Resources.Limits.Cpu().MilliValue()) / float64(pod.Spec.Containers[0].Resources.Requests.Cpu().MilliValue())).To(gomega.BeNumerically("~", 1.5)) + gomega.Expect(float64(pod.Spec.Containers[0].Resources.Limits.Memory().Value()) / float64(pod.Spec.Containers[0].Resources.Requests.Memory().Value())).To(gomega.BeNumerically("~", 2.)) + } + }) + + ginkgo.It("raises request according to min limit set in LimitRange", func() { + d := NewHamsterDeploymentWithResourcesAndLimits(f, + ParseQuantityOrDie("100m") /*cpu request*/, ParseQuantityOrDie("200Mi"), /*memory request*/ + ParseQuantityOrDie("150m") /*cpu limit*/, ParseQuantityOrDie("400Mi") /*memory limit*/) + + ginkgo.By("Setting up a VPA CRD") + vpaCRD := NewVPA(f, "hamster-vpa", &metav1.LabelSelector{ + MatchLabels: d.Spec.Template.Labels, + }) + vpaCRD.Status.Recommendation = &vpa_types.RecommendedPodResources{ + ContainerRecommendations: []vpa_types.RecommendedContainerResources{{ + ContainerName: "hamster", + Target: apiv1.ResourceList{ + apiv1.ResourceCPU: ParseQuantityOrDie("250m"), + apiv1.ResourceMemory: ParseQuantityOrDie("100Mi"), // memory is downscaled + }, + }}, + } + InstallVPA(f, vpaCRD) + + cpuLimit := "75m" + memLimit := "250Mi" + ginkgo.By(fmt.Sprintf("Setting up LimitRange with min limits - CPU: %v, memory: %v", cpuLimit, memLimit)) + // Min CPU limit is 75m and ratio is 1.5, so min request is 50m + // Min memory limit is 250Mi and ratio is 2., so min request is 125Mi, while + // recommendation is 100Mi. + InstallLimitRangeWithMin(f, cpuLimit, memLimit) + + ginkgo.By("Setting up a hamster deployment") + podList := startDeploymentPods(f, d) + + // Originally Pods had 100m CPU, 200Mi of memory, but admission controller + // should change it to 250m CPU and 125Mi of memory, since this is the lowest + // request that limitrange allows. + // Limit to request ratio should stay unchanged. 
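	// (With the ratio preserved, the resulting limits are 250m * 1.5 = 375m CPU and
	// 125Mi * 2 = 250Mi memory, the latter landing exactly on the LimitRange minimum.)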
+ for _, pod := range podList.Items { + gomega.Expect(*pod.Spec.Containers[0].Resources.Requests.Cpu()).To(gomega.Equal(ParseQuantityOrDie("250m"))) + gomega.Expect(*pod.Spec.Containers[0].Resources.Requests.Memory()).To(gomega.Equal(ParseQuantityOrDie("125Mi"))) + gomega.Expect(float64(pod.Spec.Containers[0].Resources.Limits.Cpu().MilliValue()) / float64(pod.Spec.Containers[0].Resources.Requests.Cpu().MilliValue())).To(gomega.BeNumerically("~", 1.5)) + gomega.Expect(float64(pod.Spec.Containers[0].Resources.Limits.Memory().Value()) / float64(pod.Spec.Containers[0].Resources.Requests.Memory().Value())).To(gomega.BeNumerically("~", 2.)) + } + }) + ginkgo.It("caps request to max set in VPA", func() { d := NewHamsterDeploymentWithResources(f, ParseQuantityOrDie("100m") /*cpu*/, ParseQuantityOrDie("100Mi") /*memory*/) diff --git a/vertical-pod-autoscaler/e2e/v1beta1/common.go b/vertical-pod-autoscaler/e2e/v1beta1/common.go index 40cbdafd41de..a01b0d69e992 100644 --- a/vertical-pod-autoscaler/e2e/v1beta1/common.go +++ b/vertical-pod-autoscaler/e2e/v1beta1/common.go @@ -353,3 +353,57 @@ func WaitForConditionPresent(c *vpa_clientset.Clientset, vpa *vpa_types.Vertical return false }) } + +func installLimitRange(f *framework.Framework, minCpuLimit, minMemoryLimit, maxCpuLimit, maxMemoryLimit *resource.Quantity) { + lr := &apiv1.LimitRange{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: f.Namespace.Name, + Name: "hamster-lr", + }, + Spec: apiv1.LimitRangeSpec{ + Limits: []apiv1.LimitRangeItem{}, + }, + } + + if maxMemoryLimit != nil || maxCpuLimit != nil { + lrItem := apiv1.LimitRangeItem{ + Type: apiv1.LimitTypeContainer, + Max: apiv1.ResourceList{}, + } + if maxCpuLimit != nil { + lrItem.Max[apiv1.ResourceCPU] = *maxCpuLimit + } + if maxMemoryLimit != nil { + lrItem.Max[apiv1.ResourceMemory] = *maxMemoryLimit + } + lr.Spec.Limits = append(lr.Spec.Limits, lrItem) + } + + if minMemoryLimit != nil || minCpuLimit != nil { + lrItem := apiv1.LimitRangeItem{ + Type: apiv1.LimitTypeContainer, + Max: apiv1.ResourceList{}, + } + if minCpuLimit != nil { + lrItem.Min[apiv1.ResourceCPU] = *minCpuLimit + } + if minMemoryLimit != nil { + lrItem.Min[apiv1.ResourceMemory] = *minMemoryLimit + } + lr.Spec.Limits = append(lr.Spec.Limits, lrItem) + } + _, err := f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).Create(lr) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) +} + +func InstallLimitRangeWithMax(f *framework.Framework, maxCpuLimit, maxMemoryLimit string) { + maxCpuLimitQuantity := ParseQuantityOrDie(maxCpuLimit) + maxMemoryLimitQuantity := ParseQuantityOrDie(maxMemoryLimit) + installLimitRange(f, nil, nil, &maxCpuLimitQuantity, &maxMemoryLimitQuantity) +} + +func InstallLimitRangeWithMin(f *framework.Framework, minCpuLimit, minMemoryLimit string) { + minCpuLimitQuantity := ParseQuantityOrDie(minCpuLimit) + minMemoryLimitQuantity := ParseQuantityOrDie(minMemoryLimit) + installLimitRange(f, &minCpuLimitQuantity, &minMemoryLimitQuantity, nil, nil) +} diff --git a/vertical-pod-autoscaler/e2e/v1beta2/admission_controller.go b/vertical-pod-autoscaler/e2e/v1beta2/admission_controller.go index a51562add069..e218869e02e1 100644 --- a/vertical-pod-autoscaler/e2e/v1beta2/admission_controller.go +++ b/vertical-pod-autoscaler/e2e/v1beta2/admission_controller.go @@ -17,6 +17,8 @@ limitations under the License. 
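One note on the installLimitRange helper above: a LimitRangeItem's resource maps must be non-nil before keys are assigned into them, since writing to a nil map panics at runtime, so the min branch needs its Min list initialized before the assignments (as written it initializes Max instead). A sketch of the intended min item, reusing the helper's own names:

	lrItem := apiv1.LimitRangeItem{
		Type: apiv1.LimitTypeContainer,
		Min:  apiv1.ResourceList{}, // initialize Min before the assignments below
	}
	if minCpuLimit != nil {
		lrItem.Min[apiv1.ResourceCPU] = *minCpuLimit
	}
	if minMemoryLimit != nil {
		lrItem.Min[apiv1.ResourceMemory] = *minMemoryLimit
	}
	lr.Spec.Limits = append(lr.Spec.Limits, lrItem)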
package autoscaling import ( + "fmt" + appsv1 "k8s.io/api/apps/v1" apiv1 "k8s.io/api/core/v1" vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta2" @@ -116,6 +118,88 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", func() { } }) + ginkgo.It("caps request according to max limit set in LimitRange", func() { + d := NewHamsterDeploymentWithResourcesAndLimits(f, + ParseQuantityOrDie("100m") /*cpu request*/, ParseQuantityOrDie("100Mi"), /*memory request*/ + ParseQuantityOrDie("150m") /*cpu limit*/, ParseQuantityOrDie("200Mi") /*memory limit*/) + + ginkgo.By("Setting up a VPA CRD") + vpaCRD := NewVPA(f, "hamster-vpa", hamsterTargetRef) + vpaCRD.Status.Recommendation = &vpa_types.RecommendedPodResources{ + ContainerRecommendations: []vpa_types.RecommendedContainerResources{{ + ContainerName: "hamster", + Target: apiv1.ResourceList{ + apiv1.ResourceCPU: ParseQuantityOrDie("250m"), + apiv1.ResourceMemory: ParseQuantityOrDie("200Mi"), + }, + }}, + } + InstallVPA(f, vpaCRD) + + cpuLimit := "300m" + memLimit := "1T" + ginkgo.By(fmt.Sprintf("Setting up LimitRange with max limits - CPU: %v, memory: %v", cpuLimit, memLimit)) + // Max CPU limit is 300m and ratio is 1.5, so max request is 200m, while + // recommendation is 250m + // Max memory limit is 1T and ratio is 2., so max request is 0.5T + InstallLimitRangeWithMax(f, cpuLimit, memLimit) + + ginkgo.By("Setting up a hamster deployment") + podList := startDeploymentPods(f, d) + + // Originally Pods had 100m CPU, 100Mi of memory, but admission controller + // should change it to 200m CPU (as this is the recommendation + // capped according to max limit in LimitRange) and 200Mi of memory, + // which is uncapped. Limit to request ratio should stay unchanged. + for _, pod := range podList.Items { + gomega.Expect(*pod.Spec.Containers[0].Resources.Requests.Cpu()).To(gomega.Equal(ParseQuantityOrDie("200m"))) + gomega.Expect(*pod.Spec.Containers[0].Resources.Requests.Memory()).To(gomega.Equal(ParseQuantityOrDie("200Mi"))) + gomega.Expect(float64(pod.Spec.Containers[0].Resources.Limits.Cpu().MilliValue()) / float64(pod.Spec.Containers[0].Resources.Requests.Cpu().MilliValue())).To(gomega.BeNumerically("~", 1.5)) + gomega.Expect(float64(pod.Spec.Containers[0].Resources.Limits.Memory().Value()) / float64(pod.Spec.Containers[0].Resources.Requests.Memory().Value())).To(gomega.BeNumerically("~", 2.)) + } + }) + + ginkgo.It("raises request according to min limit set in LimitRange", func() { + d := NewHamsterDeploymentWithResourcesAndLimits(f, + ParseQuantityOrDie("100m") /*cpu request*/, ParseQuantityOrDie("200Mi"), /*memory request*/ + ParseQuantityOrDie("150m") /*cpu limit*/, ParseQuantityOrDie("400Mi") /*memory limit*/) + + ginkgo.By("Setting up a VPA CRD") + vpaCRD := NewVPA(f, "hamster-vpa", hamsterTargetRef) + vpaCRD.Status.Recommendation = &vpa_types.RecommendedPodResources{ + ContainerRecommendations: []vpa_types.RecommendedContainerResources{{ + ContainerName: "hamster", + Target: apiv1.ResourceList{ + apiv1.ResourceCPU: ParseQuantityOrDie("250m"), + apiv1.ResourceMemory: ParseQuantityOrDie("100Mi"), // memory is downscaled + }, + }}, + } + InstallVPA(f, vpaCRD) + + cpuLimit := "75m" + memLimit := "250Mi" + ginkgo.By(fmt.Sprintf("Setting up LimitRange with min limits - CPU: %v, memory: %v", cpuLimit, memLimit)) + // Min CPU limit is 75m and ratio is 1.5, so min request is 50m + // Min memory limit is 250Mi and ratio is 2., so min request is 125Mi, while + // recommendation is 100Mi. 
+ InstallLimitRangeWithMin(f, cpuLimit, memLimit) + + ginkgo.By("Setting up a hamster deployment") + podList := startDeploymentPods(f, d) + + // Originally Pods had 100m CPU, 200Mi of memory, but admission controller + // should change it to 250m CPU and 125Mi of memory, since this is the lowest + // request that limitrange allows. + // Limit to request ratio should stay unchanged. + for _, pod := range podList.Items { + gomega.Expect(*pod.Spec.Containers[0].Resources.Requests.Cpu()).To(gomega.Equal(ParseQuantityOrDie("250m"))) + gomega.Expect(*pod.Spec.Containers[0].Resources.Requests.Memory()).To(gomega.Equal(ParseQuantityOrDie("125Mi"))) + gomega.Expect(float64(pod.Spec.Containers[0].Resources.Limits.Cpu().MilliValue()) / float64(pod.Spec.Containers[0].Resources.Requests.Cpu().MilliValue())).To(gomega.BeNumerically("~", 1.5)) + gomega.Expect(float64(pod.Spec.Containers[0].Resources.Limits.Memory().Value()) / float64(pod.Spec.Containers[0].Resources.Requests.Memory().Value())).To(gomega.BeNumerically("~", 2.)) + } + }) + ginkgo.It("caps request to max set in VPA", func() { d := NewHamsterDeploymentWithResources(f, ParseQuantityOrDie("100m") /*cpu*/, ParseQuantityOrDie("100Mi") /*memory*/) diff --git a/vertical-pod-autoscaler/e2e/v1beta2/common.go b/vertical-pod-autoscaler/e2e/v1beta2/common.go index 0c58690b4835..bafc2618cd9f 100644 --- a/vertical-pod-autoscaler/e2e/v1beta2/common.go +++ b/vertical-pod-autoscaler/e2e/v1beta2/common.go @@ -346,3 +346,57 @@ func WaitForRecommendationPresent(c *vpa_clientset.Clientset, vpa *vpa_types.Ver return vpa.Status.Recommendation != nil && len(vpa.Status.Recommendation.ContainerRecommendations) != 0 }) } + +func installLimitRange(f *framework.Framework, minCpuLimit, minMemoryLimit, maxCpuLimit, maxMemoryLimit *resource.Quantity) { + lr := &apiv1.LimitRange{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: f.Namespace.Name, + Name: "hamster-lr", + }, + Spec: apiv1.LimitRangeSpec{ + Limits: []apiv1.LimitRangeItem{}, + }, + } + + if maxMemoryLimit != nil || maxCpuLimit != nil { + lrItem := apiv1.LimitRangeItem{ + Type: apiv1.LimitTypeContainer, + Max: apiv1.ResourceList{}, + } + if maxCpuLimit != nil { + lrItem.Max[apiv1.ResourceCPU] = *maxCpuLimit + } + if maxMemoryLimit != nil { + lrItem.Max[apiv1.ResourceMemory] = *maxMemoryLimit + } + lr.Spec.Limits = append(lr.Spec.Limits, lrItem) + } + + if minMemoryLimit != nil || minCpuLimit != nil { + lrItem := apiv1.LimitRangeItem{ + Type: apiv1.LimitTypeContainer, + Max: apiv1.ResourceList{}, + } + if minCpuLimit != nil { + lrItem.Min[apiv1.ResourceCPU] = *minCpuLimit + } + if minMemoryLimit != nil { + lrItem.Min[apiv1.ResourceMemory] = *minMemoryLimit + } + lr.Spec.Limits = append(lr.Spec.Limits, lrItem) + } + _, err := f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).Create(lr) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) +} + +func InstallLimitRangeWithMax(f *framework.Framework, maxCpuLimit, maxMemoryLimit string) { + maxCpuLimitQuantity := ParseQuantityOrDie(maxCpuLimit) + maxMemoryLimitQuantity := ParseQuantityOrDie(maxMemoryLimit) + installLimitRange(f, nil, nil, &maxCpuLimitQuantity, &maxMemoryLimitQuantity) +} + +func InstallLimitRangeWithMin(f *framework.Framework, minCpuLimit, minMemoryLimit string) { + minCpuLimitQuantity := ParseQuantityOrDie(minCpuLimit) + minMemoryLimitQuantity := ParseQuantityOrDie(minMemoryLimit) + installLimitRange(f, &minCpuLimitQuantity, &minMemoryLimitQuantity, nil, nil) +} From ae49381c36e542de5c37c77f90e4131ed56828dd Mon Sep 17 00:00:00 2001 From: Beata Skiba Date: 
Wed, 29 May 2019 13:29:38 +0200 Subject: [PATCH 11/25] e2e test for VPA updater observing limit range --- .../e2e/v1beta1/admission_controller.go | 16 +++----- vertical-pod-autoscaler/e2e/v1beta1/common.go | 4 ++ .../e2e/v1beta1/updater.go | 38 +++++++++++++++++++ .../e2e/v1beta2/admission_controller.go | 16 +++----- vertical-pod-autoscaler/e2e/v1beta2/common.go | 4 ++ .../e2e/v1beta2/updater.go | 38 +++++++++++++++++++ 6 files changed, 96 insertions(+), 20 deletions(-) diff --git a/vertical-pod-autoscaler/e2e/v1beta1/admission_controller.go b/vertical-pod-autoscaler/e2e/v1beta1/admission_controller.go index 31a736168c90..d40687bb7fca 100644 --- a/vertical-pod-autoscaler/e2e/v1beta1/admission_controller.go +++ b/vertical-pod-autoscaler/e2e/v1beta1/admission_controller.go @@ -17,8 +17,6 @@ limitations under the License. package autoscaling import ( - "fmt" - appsv1 "k8s.io/api/apps/v1" apiv1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -145,13 +143,10 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", func() { } InstallVPA(f, vpaCRD) - cpuLimit := "300m" - memLimit := "1T" - ginkgo.By(fmt.Sprintf("Setting up LimitRange with max limits - CPU: %v, memory: %v", cpuLimit, memLimit)) // Max CPU limit is 300m and ratio is 1.5, so max request is 200m, while // recommendation is 250m // Max memory limit is 1T and ratio is 2., so max request is 0.5T - InstallLimitRangeWithMax(f, cpuLimit, memLimit) + InstallLimitRangeWithMax(f, "300m", "1T") ginkgo.By("Setting up a hamster deployment") podList := startDeploymentPods(f, d) @@ -163,6 +158,8 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", func() { for _, pod := range podList.Items { gomega.Expect(*pod.Spec.Containers[0].Resources.Requests.Cpu()).To(gomega.Equal(ParseQuantityOrDie("200m"))) gomega.Expect(*pod.Spec.Containers[0].Resources.Requests.Memory()).To(gomega.Equal(ParseQuantityOrDie("200Mi"))) + gomega.Expect(*pod.Spec.Containers[0].Resources.Limits.Cpu()).To(gomega.BeNumerically("<=", ParseQuantityOrDie("300m"))) + gomega.Expect(*pod.Spec.Containers[0].Resources.Limits.Memory()).To(gomega.BeNumerically("<=", ParseQuantityOrDie("1T"))) gomega.Expect(float64(pod.Spec.Containers[0].Resources.Limits.Cpu().MilliValue()) / float64(pod.Spec.Containers[0].Resources.Requests.Cpu().MilliValue())).To(gomega.BeNumerically("~", 1.5)) gomega.Expect(float64(pod.Spec.Containers[0].Resources.Limits.Memory().Value()) / float64(pod.Spec.Containers[0].Resources.Requests.Memory().Value())).To(gomega.BeNumerically("~", 2.)) } @@ -188,13 +185,10 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", func() { } InstallVPA(f, vpaCRD) - cpuLimit := "75m" - memLimit := "250Mi" - ginkgo.By(fmt.Sprintf("Setting up LimitRange with min limits - CPU: %v, memory: %v", cpuLimit, memLimit)) // Min CPU limit is 75m and ratio is 1.5, so min request is 50m // Min memory limit is 250Mi and ratio is 2., so min request is 125Mi, while // recommendation is 100Mi. 
- InstallLimitRangeWithMin(f, cpuLimit, memLimit) + InstallLimitRangeWithMin(f, "75m", "250Mi") ginkgo.By("Setting up a hamster deployment") podList := startDeploymentPods(f, d) @@ -206,6 +200,8 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", func() { for _, pod := range podList.Items { gomega.Expect(*pod.Spec.Containers[0].Resources.Requests.Cpu()).To(gomega.Equal(ParseQuantityOrDie("250m"))) gomega.Expect(*pod.Spec.Containers[0].Resources.Requests.Memory()).To(gomega.Equal(ParseQuantityOrDie("125Mi"))) + gomega.Expect(*pod.Spec.Containers[0].Resources.Limits.Cpu()).To(gomega.BeNumerically(">=", ParseQuantityOrDie("75m"))) + gomega.Expect(*pod.Spec.Containers[0].Resources.Limits.Memory()).To(gomega.BeNumerically(">=", ParseQuantityOrDie("250Mi"))) gomega.Expect(float64(pod.Spec.Containers[0].Resources.Limits.Cpu().MilliValue()) / float64(pod.Spec.Containers[0].Resources.Requests.Cpu().MilliValue())).To(gomega.BeNumerically("~", 1.5)) gomega.Expect(float64(pod.Spec.Containers[0].Resources.Limits.Memory().Value()) / float64(pod.Spec.Containers[0].Resources.Requests.Memory().Value())).To(gomega.BeNumerically("~", 2.)) } diff --git a/vertical-pod-autoscaler/e2e/v1beta1/common.go b/vertical-pod-autoscaler/e2e/v1beta1/common.go index a01b0d69e992..4780be450db4 100644 --- a/vertical-pod-autoscaler/e2e/v1beta1/common.go +++ b/vertical-pod-autoscaler/e2e/v1beta1/common.go @@ -396,13 +396,17 @@ func installLimitRange(f *framework.Framework, minCpuLimit, minMemoryLimit, maxC gomega.Expect(err).NotTo(gomega.HaveOccurred()) } +// InstallLimitRangeWithMax installs a LimitRange with a maximum limit for CPU and memory. func InstallLimitRangeWithMax(f *framework.Framework, maxCpuLimit, maxMemoryLimit string) { + ginkgo.By(fmt.Sprintf("Setting up LimitRange with max limits - CPU: %v, memory: %v", maxCpuLimit, maxMemoryLimit)) maxCpuLimitQuantity := ParseQuantityOrDie(maxCpuLimit) maxMemoryLimitQuantity := ParseQuantityOrDie(maxMemoryLimit) installLimitRange(f, nil, nil, &maxCpuLimitQuantity, &maxMemoryLimitQuantity) } +// InstallLimitRangeWithMin installs a LimitRange with a minimum limit for CPU and memory. 
func InstallLimitRangeWithMin(f *framework.Framework, minCpuLimit, minMemoryLimit string) { + ginkgo.By(fmt.Sprintf("Setting up LimitRange with min limits - CPU: %v, memory: %v", minCpuLimit, minMemoryLimit)) minCpuLimitQuantity := ParseQuantityOrDie(minCpuLimit) minMemoryLimitQuantity := ParseQuantityOrDie(minMemoryLimit) installLimitRange(f, &minCpuLimitQuantity, &minMemoryLimitQuantity, nil, nil) diff --git a/vertical-pod-autoscaler/e2e/v1beta1/updater.go b/vertical-pod-autoscaler/e2e/v1beta1/updater.go index 33b66e1d49d2..d61949fd5ee3 100644 --- a/vertical-pod-autoscaler/e2e/v1beta1/updater.go +++ b/vertical-pod-autoscaler/e2e/v1beta1/updater.go @@ -98,6 +98,44 @@ var _ = UpdaterE2eDescribe("Updater", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Expect(evictedCount >= permissiveMaxUnavailable).To(gomega.BeTrue()) }) + + ginkgo.It("observes max in LimitRange", func() { + ginkgo.By("Setting up a hamster deployment") + d := NewHamsterDeploymentWithResourcesAndLimits(f, + ParseQuantityOrDie("100m") /*cpu request*/, ParseQuantityOrDie("200Mi"), /*memory request*/ + ParseQuantityOrDie("300m") /*cpu limit*/, ParseQuantityOrDie("400Mi") /*memory limit*/) + podList := startDeploymentPods(f, d) + + ginkgo.By("Setting up a VPA CRD") + SetupVPA(f, "200m", vpa_types.UpdateModeAuto) + + // Max CPU limit is 300m and ratio is 3., so max request is 100m, while + // recommendation is 200m + // Max memory limit is 1T and ratio is 2., so max request is 0.5T + InstallLimitRangeWithMax(f, "300m", "1T") + + ginkgo.By(fmt.Sprintf("Waiting for pods to be evicted, hoping it won't happen, sleep for %s", VpaEvictionTimeout.String())) + CheckNoPodsEvicted(f, MakePodSet(podList)) + }) + + ginkgo.It("observes min in LimitRange", func() { + ginkgo.By("Setting up a hamster deployment") + d := NewHamsterDeploymentWithResourcesAndLimits(f, + ParseQuantityOrDie("100m") /*cpu request*/, ParseQuantityOrDie("200Mi"), /*memory request*/ + ParseQuantityOrDie("300m") /*cpu limit*/, ParseQuantityOrDie("400Mi") /*memory limit*/) + podList := startDeploymentPods(f, d) + + ginkgo.By("Setting up a VPA CRD") + SetupVPA(f, "50m", vpa_types.UpdateModeAuto) + + // Min CPU limit is 300m and ratio is 3., so min request is 100m, while + // recommendation is 200m + // Min memory limit is 0 and ratio is 2., so min request is 0 + InstallLimitRangeWithMin(f, "300m", "0") + + ginkgo.By(fmt.Sprintf("Waiting for pods to be evicted, hoping it won't happen, sleep for %s", VpaEvictionTimeout.String())) + CheckNoPodsEvicted(f, MakePodSet(podList)) + }) }) func testEvictsPods(f *framework.Framework, controllerKind string) { diff --git a/vertical-pod-autoscaler/e2e/v1beta2/admission_controller.go b/vertical-pod-autoscaler/e2e/v1beta2/admission_controller.go index e218869e02e1..98d2306c4a78 100644 --- a/vertical-pod-autoscaler/e2e/v1beta2/admission_controller.go +++ b/vertical-pod-autoscaler/e2e/v1beta2/admission_controller.go @@ -17,8 +17,6 @@ limitations under the License. 
package autoscaling import ( - "fmt" - appsv1 "k8s.io/api/apps/v1" apiv1 "k8s.io/api/core/v1" vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta2" @@ -136,13 +134,10 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", func() { } InstallVPA(f, vpaCRD) - cpuLimit := "300m" - memLimit := "1T" - ginkgo.By(fmt.Sprintf("Setting up LimitRange with max limits - CPU: %v, memory: %v", cpuLimit, memLimit)) // Max CPU limit is 300m and ratio is 1.5, so max request is 200m, while // recommendation is 250m // Max memory limit is 1T and ratio is 2., so max request is 0.5T - InstallLimitRangeWithMax(f, cpuLimit, memLimit) + InstallLimitRangeWithMax(f, "300m", "1T") ginkgo.By("Setting up a hamster deployment") podList := startDeploymentPods(f, d) @@ -154,6 +149,8 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", func() { for _, pod := range podList.Items { gomega.Expect(*pod.Spec.Containers[0].Resources.Requests.Cpu()).To(gomega.Equal(ParseQuantityOrDie("200m"))) gomega.Expect(*pod.Spec.Containers[0].Resources.Requests.Memory()).To(gomega.Equal(ParseQuantityOrDie("200Mi"))) + gomega.Expect(*pod.Spec.Containers[0].Resources.Limits.Cpu()).To(gomega.BeNumerically("<=", ParseQuantityOrDie("300m"))) + gomega.Expect(*pod.Spec.Containers[0].Resources.Limits.Memory()).To(gomega.BeNumerically("<=", ParseQuantityOrDie("1T"))) gomega.Expect(float64(pod.Spec.Containers[0].Resources.Limits.Cpu().MilliValue()) / float64(pod.Spec.Containers[0].Resources.Requests.Cpu().MilliValue())).To(gomega.BeNumerically("~", 1.5)) gomega.Expect(float64(pod.Spec.Containers[0].Resources.Limits.Memory().Value()) / float64(pod.Spec.Containers[0].Resources.Requests.Memory().Value())).To(gomega.BeNumerically("~", 2.)) } @@ -177,13 +174,10 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", func() { } InstallVPA(f, vpaCRD) - cpuLimit := "75m" - memLimit := "250Mi" - ginkgo.By(fmt.Sprintf("Setting up LimitRange with min limits - CPU: %v, memory: %v", cpuLimit, memLimit)) // Min CPU limit is 75m and ratio is 1.5, so min request is 50m // Min memory limit is 250Mi and ratio is 2., so min request is 125Mi, while // recommendation is 100Mi. 
- InstallLimitRangeWithMin(f, cpuLimit, memLimit) + InstallLimitRangeWithMin(f, "75m", "250Mi") ginkgo.By("Setting up a hamster deployment") podList := startDeploymentPods(f, d) @@ -195,6 +189,8 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", func() { for _, pod := range podList.Items { gomega.Expect(*pod.Spec.Containers[0].Resources.Requests.Cpu()).To(gomega.Equal(ParseQuantityOrDie("250m"))) gomega.Expect(*pod.Spec.Containers[0].Resources.Requests.Memory()).To(gomega.Equal(ParseQuantityOrDie("125Mi"))) + gomega.Expect(*pod.Spec.Containers[0].Resources.Limits.Cpu()).To(gomega.BeNumerically(">=", ParseQuantityOrDie("75m"))) + gomega.Expect(*pod.Spec.Containers[0].Resources.Limits.Memory()).To(gomega.BeNumerically(">=", ParseQuantityOrDie("250Mi"))) gomega.Expect(float64(pod.Spec.Containers[0].Resources.Limits.Cpu().MilliValue()) / float64(pod.Spec.Containers[0].Resources.Requests.Cpu().MilliValue())).To(gomega.BeNumerically("~", 1.5)) gomega.Expect(float64(pod.Spec.Containers[0].Resources.Limits.Memory().Value()) / float64(pod.Spec.Containers[0].Resources.Requests.Memory().Value())).To(gomega.BeNumerically("~", 2.)) } diff --git a/vertical-pod-autoscaler/e2e/v1beta2/common.go b/vertical-pod-autoscaler/e2e/v1beta2/common.go index bafc2618cd9f..b3980f7c9b83 100644 --- a/vertical-pod-autoscaler/e2e/v1beta2/common.go +++ b/vertical-pod-autoscaler/e2e/v1beta2/common.go @@ -389,13 +389,17 @@ func installLimitRange(f *framework.Framework, minCpuLimit, minMemoryLimit, maxC gomega.Expect(err).NotTo(gomega.HaveOccurred()) } +// InstallLimitRangeWithMax installs a LimitRange with a maximum limit for CPU and memory. func InstallLimitRangeWithMax(f *framework.Framework, maxCpuLimit, maxMemoryLimit string) { + ginkgo.By(fmt.Sprintf("Setting up LimitRange with max limits - CPU: %v, memory: %v", maxCpuLimit, maxMemoryLimit)) maxCpuLimitQuantity := ParseQuantityOrDie(maxCpuLimit) maxMemoryLimitQuantity := ParseQuantityOrDie(maxMemoryLimit) installLimitRange(f, nil, nil, &maxCpuLimitQuantity, &maxMemoryLimitQuantity) } +// InstallLimitRangeWithMin installs a LimitRange with a minimum limit for CPU and memory. 
func InstallLimitRangeWithMin(f *framework.Framework, minCpuLimit, minMemoryLimit string) { + ginkgo.By(fmt.Sprintf("Setting up LimitRange with min limits - CPU: %v, memory: %v", minCpuLimit, minMemoryLimit)) minCpuLimitQuantity := ParseQuantityOrDie(minCpuLimit) minMemoryLimitQuantity := ParseQuantityOrDie(minMemoryLimit) installLimitRange(f, &minCpuLimitQuantity, &minMemoryLimitQuantity, nil, nil) diff --git a/vertical-pod-autoscaler/e2e/v1beta2/updater.go b/vertical-pod-autoscaler/e2e/v1beta2/updater.go index 67f9edb202d6..89b8492a726b 100644 --- a/vertical-pod-autoscaler/e2e/v1beta2/updater.go +++ b/vertical-pod-autoscaler/e2e/v1beta2/updater.go @@ -119,6 +119,44 @@ var _ = UpdaterE2eDescribe("Updater", func() { gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Expect(evictedCount >= permissiveMaxUnavailable).To(gomega.BeTrue()) }) + + ginkgo.It("observes max in LimitRange", func() { + ginkgo.By("Setting up a hamster deployment") + d := NewHamsterDeploymentWithResourcesAndLimits(f, + ParseQuantityOrDie("100m") /*cpu request*/, ParseQuantityOrDie("200Mi"), /*memory request*/ + ParseQuantityOrDie("300m") /*cpu limit*/, ParseQuantityOrDie("400Mi") /*memory limit*/) + podList := startDeploymentPods(f, d) + + ginkgo.By("Setting up a VPA CRD") + SetupVPA(f, "200m", vpa_types.UpdateModeAuto, hamsterTargetRef) + + // Max CPU limit is 300m and ratio is 3., so max request is 100m, while + // recommendation is 200m + // Max memory limit is 1T and ratio is 2., so max request is 0.5T + InstallLimitRangeWithMax(f, "300m", "1T") + + ginkgo.By(fmt.Sprintf("Waiting for pods to be evicted, hoping it won't happen, sleep for %s", VpaEvictionTimeout.String())) + CheckNoPodsEvicted(f, MakePodSet(podList)) + }) + + ginkgo.It("observes min in LimitRange", func() { + ginkgo.By("Setting up a hamster deployment") + d := NewHamsterDeploymentWithResourcesAndLimits(f, + ParseQuantityOrDie("100m") /*cpu request*/, ParseQuantityOrDie("200Mi"), /*memory request*/ + ParseQuantityOrDie("300m") /*cpu limit*/, ParseQuantityOrDie("400Mi") /*memory limit*/) + podList := startDeploymentPods(f, d) + + ginkgo.By("Setting up a VPA CRD") + SetupVPA(f, "50m", vpa_types.UpdateModeAuto, hamsterTargetRef) + + // Min CPU limit is 300m and ratio is 3., so min request is 100m, while + // recommendation is 200m + // Min memory limit is 0 and ratio is 2., so min request is 0 + InstallLimitRangeWithMin(f, "300m", "0") + + ginkgo.By(fmt.Sprintf("Waiting for pods to be evicted, hoping it won't happen, sleep for %s", VpaEvictionTimeout.String())) + CheckNoPodsEvicted(f, MakePodSet(podList)) + }) }) func testEvictsPods(f *framework.Framework, controller *autoscaling.CrossVersionObjectReference) { From 30c2f8ce9d66df4a7c1d20985212d776b6070c92 Mon Sep 17 00:00:00 2001 From: Beata Skiba Date: Thu, 30 May 2019 09:50:25 +0200 Subject: [PATCH 12/25] Fix initializing min in limitrange --- vertical-pod-autoscaler/e2e/v1beta1/common.go | 2 +- vertical-pod-autoscaler/e2e/v1beta2/common.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/vertical-pod-autoscaler/e2e/v1beta1/common.go b/vertical-pod-autoscaler/e2e/v1beta1/common.go index 4780be450db4..fe516f4a77bf 100644 --- a/vertical-pod-autoscaler/e2e/v1beta1/common.go +++ b/vertical-pod-autoscaler/e2e/v1beta1/common.go @@ -382,7 +382,7 @@ func installLimitRange(f *framework.Framework, minCpuLimit, minMemoryLimit, maxC if minMemoryLimit != nil || minCpuLimit != nil { lrItem := apiv1.LimitRangeItem{ Type: apiv1.LimitTypeContainer, - Max: apiv1.ResourceList{}, + Min: 
apiv1.ResourceList{}, } if minCpuLimit != nil { lrItem.Min[apiv1.ResourceCPU] = *minCpuLimit diff --git a/vertical-pod-autoscaler/e2e/v1beta2/common.go b/vertical-pod-autoscaler/e2e/v1beta2/common.go index b3980f7c9b83..c5015eaba183 100644 --- a/vertical-pod-autoscaler/e2e/v1beta2/common.go +++ b/vertical-pod-autoscaler/e2e/v1beta2/common.go @@ -375,7 +375,7 @@ func installLimitRange(f *framework.Framework, minCpuLimit, minMemoryLimit, maxC if minMemoryLimit != nil || minCpuLimit != nil { lrItem := apiv1.LimitRangeItem{ Type: apiv1.LimitTypeContainer, - Max: apiv1.ResourceList{}, + Min: apiv1.ResourceList{}, } if minCpuLimit != nil { lrItem.Min[apiv1.ResourceCPU] = *minCpuLimit From 44b277c6ec3afb4853727428036023a251a37fd5 Mon Sep 17 00:00:00 2001 From: Beata Skiba Date: Thu, 30 May 2019 09:46:02 +0200 Subject: [PATCH 13/25] Support multiple limitrange items --- .../limitrange/limit_range_calculator.go | 55 ++++++++++++++--- .../limitrange/limit_range_calculator_test.go | 61 ++++++++++++++++--- .../pkg/utils/test/test_limit_range.go | 57 ++++++++++------- 3 files changed, 137 insertions(+), 36 deletions(-) diff --git a/vertical-pod-autoscaler/pkg/utils/limitrange/limit_range_calculator.go b/vertical-pod-autoscaler/pkg/utils/limitrange/limit_range_calculator.go index cbc184f8796f..7b666c83e7ff 100644 --- a/vertical-pod-autoscaler/pkg/utils/limitrange/limit_range_calculator.go +++ b/vertical-pod-autoscaler/pkg/utils/limitrange/limit_range_calculator.go @@ -19,7 +19,8 @@ package limitrange import ( "fmt" - "k8s.io/api/core/v1" + core "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/labels" "k8s.io/client-go/informers" listers "k8s.io/client-go/listers/core/v1" @@ -28,12 +29,12 @@ import ( // LimitRangeCalculator calculates limit range items that has the same effect as all limit range items present in the cluster. type LimitRangeCalculator interface { // GetContainerLimitRangeItem returns LimitRangeItem that describes limitation on container limits in the given namespace. 
- GetContainerLimitRangeItem(namespace string) (*v1.LimitRangeItem, error) + GetContainerLimitRangeItem(namespace string) (*core.LimitRangeItem, error) } type noopLimitsRangeCalculator struct{} -func (lc *noopLimitsRangeCalculator) GetContainerLimitRangeItem(namespace string) (*v1.LimitRangeItem, error) { +func (lc *noopLimitsRangeCalculator) GetContainerLimitRangeItem(namespace string) (*core.LimitRangeItem, error) { return nil, nil } @@ -64,19 +65,59 @@ func NewNoopLimitsCalculator() *noopLimitsRangeCalculator { return &noopLimitsRangeCalculator{} } -func (lc *limitsChecker) GetContainerLimitRangeItem(namespace string) (*v1.LimitRangeItem, error) { +func (lc *limitsChecker) GetContainerLimitRangeItem(namespace string) (*core.LimitRangeItem, error) { limitRanges, err := lc.limitRangeLister.LimitRanges(namespace).List(labels.Everything()) if err != nil { return nil, fmt.Errorf("error loading limit ranges: %s", err) } + updatedResult := func(result core.ResourceList, lrItem core.ResourceList, + resourceName core.ResourceName, picker func(q1, q2 resource.Quantity) resource.Quantity) core.ResourceList { + if lrItem == nil { + return result + } + if result == nil { + return lrItem.DeepCopy() + } + if lrResource, lrHas := lrItem[resourceName]; lrHas { + resultResource, resultHas := result[resourceName] + if !resultHas { + result[resourceName] = lrResource.DeepCopy() + } else { + result[resourceName] = picker(resultResource, lrResource) + } + } + return result + } + pickLowerMax := func(q1, q2 resource.Quantity) resource.Quantity { + if q1.Cmp(q2) < 0 { + return q1 + } + return q2 + } + chooseHigherMin := func(q1, q2 resource.Quantity) resource.Quantity { + if q1.Cmp(q2) > 0 { + return q1 + } + return q2 + } + + result := &core.LimitRangeItem{Type: core.LimitTypeContainer} for _, lr := range limitRanges { for _, lri := range lr.Spec.Limits { - if lri.Type == v1.LimitTypeContainer && (lri.Max != nil || lri.Default != nil) { - // TODO: handle multiple limit ranges matching a pod. 
- return &lri, nil + if lri.Type == core.LimitTypeContainer && (lri.Max != nil || lri.Default != nil || lri.Min != nil) { + if lri.Default != nil { + result.Default = lri.Default + } + result.Max = updatedResult(result.Max, lri.Max, core.ResourceCPU, pickLowerMax) + result.Max = updatedResult(result.Max, lri.Max, core.ResourceMemory, pickLowerMax) + result.Min = updatedResult(result.Min, lri.Min, core.ResourceCPU, chooseHigherMin) + result.Min = updatedResult(result.Min, lri.Min, core.ResourceMemory, chooseHigherMin) } } } + if result.Min != nil || result.Max != nil || result.Default != nil { + return result, nil + } return nil, nil } diff --git a/vertical-pod-autoscaler/pkg/utils/limitrange/limit_range_calculator_test.go b/vertical-pod-autoscaler/pkg/utils/limitrange/limit_range_calculator_test.go index d7e65e4d0f6c..9e06f5c99938 100644 --- a/vertical-pod-autoscaler/pkg/utils/limitrange/limit_range_calculator_test.go +++ b/vertical-pod-autoscaler/pkg/utils/limitrange/limit_range_calculator_test.go @@ -20,6 +20,7 @@ import ( "testing" apiv1 "k8s.io/api/core/v1" + core "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/test" "k8s.io/client-go/informers" @@ -28,11 +29,11 @@ import ( "github.com/stretchr/testify/assert" ) -const defaultNamespace = "default" +const testNamespace = "test-namespace" func TestNewNoopLimitsChecker(t *testing.T) { nlc := NewNoopLimitsCalculator() - limitRange, err := nlc.GetContainerLimitRangeItem(defaultNamespace) + limitRange, err := nlc.GetContainerLimitRangeItem(testNamespace) assert.NoError(t, err) assert.Nil(t, limitRange) } @@ -43,15 +44,17 @@ func TestNoLimitRange(t *testing.T) { lc, err := NewLimitsRangeCalculator(factory) if assert.NoError(t, err) { - limitRange, err := lc.GetContainerLimitRangeItem(defaultNamespace) + limitRange, err := lc.GetContainerLimitRangeItem(testNamespace) assert.NoError(t, err) assert.Nil(t, limitRange) } } func TestGetContainerLimitRangeItem(t *testing.T) { - containerLimitRangeWithMax := test.LimitRange().WithName("default").WithNamespace(defaultNamespace).WithType(apiv1.LimitTypeContainer).WithMax(test.Resources("2", "2")).Get() - containerLimitRangeWithDefault := test.LimitRange().WithName("default").WithNamespace(defaultNamespace).WithType(apiv1.LimitTypeContainer).WithDefault(test.Resources("2", "2")).Get() + baseContainerLimitRange := test.LimitRange().WithName("test-lr").WithNamespace(testNamespace).WithType(apiv1.LimitTypeContainer) + containerLimitRangeWithMax := baseContainerLimitRange.WithMax(test.Resources("2", "2")).Get() + containerLimitRangeWithDefault := baseContainerLimitRange.WithDefault(test.Resources("2", "2")).Get() + containerLimitRangeWithMin := baseContainerLimitRange.WithMin(test.Resources("2", "2")).Get() testCases := []struct { name string limitRanges []runtime.Object @@ -62,7 +65,7 @@ func TestGetContainerLimitRangeItem(t *testing.T) { name: "no matching limit ranges", limitRanges: []runtime.Object{ test.LimitRange().WithName("different-namespace").WithNamespace("different").WithType(apiv1.LimitTypeContainer).WithMax(test.Resources("2", "2")).Get(), - test.LimitRange().WithName("differen-type").WithNamespace(defaultNamespace).WithType(apiv1.LimitTypePersistentVolumeClaim).WithMax(test.Resources("2", "2")).Get(), + test.LimitRange().WithName("different-type").WithNamespace(testNamespace).WithType(apiv1.LimitTypePersistentVolumeClaim).WithMax(test.Resources("2", "2")).Get(), }, expectedErr: nil, expectedLimits: nil, @@ -83,6 +86,50 @@ func 
TestGetContainerLimitRangeItem(t *testing.T) { expectedErr: nil, expectedLimits: &containerLimitRangeWithDefault.Spec.Limits[0], }, + { + name: "respects min", + limitRanges: []runtime.Object{ + containerLimitRangeWithMin, + }, + expectedErr: nil, + expectedLimits: &containerLimitRangeWithMin.Spec.Limits[0], + }, + { + name: "multiple items", + limitRanges: []runtime.Object{ + baseContainerLimitRange.WithMax(test.Resources("2", "2")).WithDefault(test.Resources("1.5", "1.5")). + WithMin(test.Resources("1", "1")).Get(), + }, + expectedErr: nil, + expectedLimits: &core.LimitRangeItem{ + Type: core.LimitTypeContainer, + Min: test.Resources("1", "1"), + Max: test.Resources("2", "2"), + Default: test.Resources("1.5", "1.5"), + }, + }, + { + name: "takes lowest max", + limitRanges: []runtime.Object{ + baseContainerLimitRange.WithMax(test.Resources("1.5", "1.5")).WithMax(test.Resources("2.", "2.")).Get(), + }, + expectedErr: nil, + expectedLimits: &core.LimitRangeItem{ + Type: core.LimitTypeContainer, + Max: test.Resources("1.5", "1.5"), + }, + }, + { + name: "takes highest min", + limitRanges: []runtime.Object{ + baseContainerLimitRange.WithMin(test.Resources("1.5", "1.5")).WithMin(test.Resources("1.", "1.")).Get(), + }, + expectedErr: nil, + expectedLimits: &core.LimitRangeItem{ + Type: core.LimitTypeContainer, + Min: test.Resources("1.5", "1.5"), + }, + }, } for _, tc := range testCases { @@ -91,7 +138,7 @@ func TestGetContainerLimitRangeItem(t *testing.T) { factory := informers.NewSharedInformerFactory(cs, 0) lc, err := NewLimitsRangeCalculator(factory) if assert.NoError(t, err) { - limitRange, err := lc.GetContainerLimitRangeItem(defaultNamespace) + limitRange, err := lc.GetContainerLimitRangeItem(testNamespace) if tc.expectedErr == nil { assert.NoError(t, err) } else { diff --git a/vertical-pod-autoscaler/pkg/utils/test/test_limit_range.go b/vertical-pod-autoscaler/pkg/utils/test/test_limit_range.go index e62efe059bfe..52cbc0e271dc 100644 --- a/vertical-pod-autoscaler/pkg/utils/test/test_limit_range.go +++ b/vertical-pod-autoscaler/pkg/utils/test/test_limit_range.go @@ -17,8 +17,8 @@ limitations under the License. package test import ( - "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + core "k8s.io/api/core/v1" + meta "k8s.io/apimachinery/pkg/apis/meta/v1" ) // LimitRange returns an object that helps build a LimitRangeItem object for tests. 
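[Editor's note, not part of the patch] The test cases above ("takes lowest max", "takes highest min") exercise the merge rule that PATCH 13/25 introduces in limit_range_calculator.go: when several container-type LimitRangeItems apply in one namespace, the calculator keeps the tightest bound in each direction, i.e. the lowest Max and the highest Min, using resource.Quantity comparisons. The following is a minimal standalone sketch of that rule only; it mirrors the pickLowerMax/chooseHigherMin helpers shown in the hunk above but is illustrative, not code from the patch.

package main

import (
	"fmt"

	core "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

// pickLowerMax keeps the tighter (lower) of two Max bounds.
func pickLowerMax(q1, q2 resource.Quantity) resource.Quantity {
	if q1.Cmp(q2) < 0 {
		return q1
	}
	return q2
}

// chooseHigherMin keeps the tighter (higher) of two Min bounds.
func chooseHigherMin(q1, q2 resource.Quantity) resource.Quantity {
	if q1.Cmp(q2) > 0 {
		return q1
	}
	return q2
}

func main() {
	// Two container-type items constraining CPU, as two LimitRanges in the same namespace might.
	a := core.LimitRangeItem{
		Type: core.LimitTypeContainer,
		Max:  core.ResourceList{core.ResourceCPU: resource.MustParse("2")},
		Min:  core.ResourceList{core.ResourceCPU: resource.MustParse("1")},
	}
	b := core.LimitRangeItem{
		Type: core.LimitTypeContainer,
		Max:  core.ResourceList{core.ResourceCPU: resource.MustParse("1500m")},
		Min:  core.ResourceList{core.ResourceCPU: resource.MustParse("1200m")},
	}

	mergedMax := pickLowerMax(a.Max[core.ResourceCPU], b.Max[core.ResourceCPU])
	mergedMin := chooseHigherMin(a.Min[core.ResourceCPU], b.Min[core.ResourceCPU])
	// Prints: merged container bounds: min=1200m max=1500m
	fmt.Printf("merged container bounds: min=%s max=%s\n", mergedMin.String(), mergedMax.String())
}

Under this rule, each of the "takes lowest max" and "takes highest min" test cases above reduces to a single per-resource Cmp comparison, which is why the expected LimitRangeItem in those cases carries only the tighter of the two configured values.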
@@ -29,9 +29,10 @@ func LimitRange() *limitRangeBuilder { type limitRangeBuilder struct { namespace string name string - rangeType v1.LimitType - defaultValues *v1.ResourceList - max *v1.ResourceList + rangeType core.LimitType + defaultValues []*core.ResourceList + maxValues []*core.ResourceList + minValues []*core.ResourceList } func (lrb *limitRangeBuilder) WithName(name string) *limitRangeBuilder { @@ -46,46 +47,58 @@ func (lrb *limitRangeBuilder) WithNamespace(namespace string) *limitRangeBuilder return &result } -func (lrb *limitRangeBuilder) WithType(rangeType v1.LimitType) *limitRangeBuilder { +func (lrb *limitRangeBuilder) WithType(rangeType core.LimitType) *limitRangeBuilder { result := *lrb result.rangeType = rangeType return &result } -func (lrb *limitRangeBuilder) WithDefault(defaultValues v1.ResourceList) *limitRangeBuilder { +func (lrb *limitRangeBuilder) WithDefault(defaultValues core.ResourceList) *limitRangeBuilder { result := *lrb - result.defaultValues = &defaultValues + result.defaultValues = append(result.defaultValues, &defaultValues) return &result } -func (lrb *limitRangeBuilder) WithMax(max v1.ResourceList) *limitRangeBuilder { +func (lrb *limitRangeBuilder) WithMax(max core.ResourceList) *limitRangeBuilder { result := *lrb - result.max = &max + result.maxValues = append(result.maxValues, &max) return &result } -func (lrb *limitRangeBuilder) Get() *v1.LimitRange { - result := v1.LimitRange{ - ObjectMeta: metav1.ObjectMeta{ +func (lrb *limitRangeBuilder) WithMin(min core.ResourceList) *limitRangeBuilder { + result := *lrb + result.minValues = append(result.minValues, &min) + return &result +} + +func (lrb *limitRangeBuilder) Get() *core.LimitRange { + result := core.LimitRange{ + ObjectMeta: meta.ObjectMeta{ Namespace: lrb.namespace, Name: lrb.name, }, } - if lrb.defaultValues != nil || lrb.max != nil { - result.Spec = v1.LimitRangeSpec{ - Limits: []v1.LimitRangeItem{}, + if len(lrb.defaultValues) > 0 || len(lrb.maxValues) > 0 || len(lrb.minValues) > 0 { + result.Spec = core.LimitRangeSpec{ + Limits: []core.LimitRangeItem{}, } } - if lrb.defaultValues != nil { - result.Spec.Limits = append(result.Spec.Limits, v1.LimitRangeItem{ + for _, v := range lrb.defaultValues { + result.Spec.Limits = append(result.Spec.Limits, core.LimitRangeItem{ Type: lrb.rangeType, - Default: *lrb.defaultValues, + Default: *v, + }) + } + for _, v := range lrb.maxValues { + result.Spec.Limits = append(result.Spec.Limits, core.LimitRangeItem{ + Type: lrb.rangeType, + Max: *v, }) } - if lrb.max != nil { - result.Spec.Limits = append(result.Spec.Limits, v1.LimitRangeItem{ + for _, v := range lrb.minValues { + result.Spec.Limits = append(result.Spec.Limits, core.LimitRangeItem{ Type: lrb.rangeType, - Max: *lrb.max, + Min: *v, }) } return &result From 3b82ab5393e8416e4cf31a834400fd57038a23f0 Mon Sep 17 00:00:00 2001 From: Beata Skiba Date: Thu, 30 May 2019 12:06:55 +0200 Subject: [PATCH 14/25] Add LimitRange to VPA rbac Conflicts: vertical-pod-autoscaler/deploy/vpa-rbac.yaml --- vertical-pod-autoscaler/deploy/vpa-rbac.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/vertical-pod-autoscaler/deploy/vpa-rbac.yaml b/vertical-pod-autoscaler/deploy/vpa-rbac.yaml index ff1fe8690a83..6aa52d167b2d 100644 --- a/vertical-pod-autoscaler/deploy/vpa-rbac.yaml +++ b/vertical-pod-autoscaler/deploy/vpa-rbac.yaml @@ -22,6 +22,7 @@ rules: resources: - pods - nodes + - limitranges verbs: - get - list @@ -231,6 +232,7 @@ rules: - pods - configmaps - nodes + - limitranges verbs: - get - list From 
90530982359115a3837063fe221e9593fc0d8112 Mon Sep 17 00:00:00 2001 From: Beata Skiba Date: Thu, 30 May 2019 13:24:56 +0200 Subject: [PATCH 15/25] Fix number comparison for limits --- .../e2e/v1beta1/admission_controller.go | 12 ++++++------ .../e2e/v1beta2/admission_controller.go | 12 ++++++------ 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/vertical-pod-autoscaler/e2e/v1beta1/admission_controller.go b/vertical-pod-autoscaler/e2e/v1beta1/admission_controller.go index d40687bb7fca..cef71e89445e 100644 --- a/vertical-pod-autoscaler/e2e/v1beta1/admission_controller.go +++ b/vertical-pod-autoscaler/e2e/v1beta1/admission_controller.go @@ -145,8 +145,8 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", func() { // Max CPU limit is 300m and ratio is 1.5, so max request is 200m, while // recommendation is 250m - // Max memory limit is 1T and ratio is 2., so max request is 0.5T - InstallLimitRangeWithMax(f, "300m", "1T") + // Max memory limit is 1Gi and ratio is 2., so max request is 0.5Gi + InstallLimitRangeWithMax(f, "300m", "1Gi") ginkgo.By("Setting up a hamster deployment") podList := startDeploymentPods(f, d) @@ -158,8 +158,8 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", func() { for _, pod := range podList.Items { gomega.Expect(*pod.Spec.Containers[0].Resources.Requests.Cpu()).To(gomega.Equal(ParseQuantityOrDie("200m"))) gomega.Expect(*pod.Spec.Containers[0].Resources.Requests.Memory()).To(gomega.Equal(ParseQuantityOrDie("200Mi"))) - gomega.Expect(*pod.Spec.Containers[0].Resources.Limits.Cpu()).To(gomega.BeNumerically("<=", ParseQuantityOrDie("300m"))) - gomega.Expect(*pod.Spec.Containers[0].Resources.Limits.Memory()).To(gomega.BeNumerically("<=", ParseQuantityOrDie("1T"))) + gomega.Expect(pod.Spec.Containers[0].Resources.Limits.Cpu().MilliValue()).To(gomega.BeNumerically("<=", 300)) + gomega.Expect(pod.Spec.Containers[0].Resources.Limits.Memory().Value()).To(gomega.BeNumerically("<=", 1024*1024*1024)) gomega.Expect(float64(pod.Spec.Containers[0].Resources.Limits.Cpu().MilliValue()) / float64(pod.Spec.Containers[0].Resources.Requests.Cpu().MilliValue())).To(gomega.BeNumerically("~", 1.5)) gomega.Expect(float64(pod.Spec.Containers[0].Resources.Limits.Memory().Value()) / float64(pod.Spec.Containers[0].Resources.Requests.Memory().Value())).To(gomega.BeNumerically("~", 2.)) } @@ -200,8 +200,8 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", func() { for _, pod := range podList.Items { gomega.Expect(*pod.Spec.Containers[0].Resources.Requests.Cpu()).To(gomega.Equal(ParseQuantityOrDie("250m"))) gomega.Expect(*pod.Spec.Containers[0].Resources.Requests.Memory()).To(gomega.Equal(ParseQuantityOrDie("125Mi"))) - gomega.Expect(*pod.Spec.Containers[0].Resources.Limits.Cpu()).To(gomega.BeNumerically(">=", ParseQuantityOrDie("75m"))) - gomega.Expect(*pod.Spec.Containers[0].Resources.Limits.Memory()).To(gomega.BeNumerically(">=", ParseQuantityOrDie("250Mi"))) + gomega.Expect(pod.Spec.Containers[0].Resources.Limits.Cpu().MilliValue()).To(gomega.BeNumerically(">=", 75)) + gomega.Expect(pod.Spec.Containers[0].Resources.Limits.Memory().Value()).To(gomega.BeNumerically(">=", 250*1024*1024)) gomega.Expect(float64(pod.Spec.Containers[0].Resources.Limits.Cpu().MilliValue()) / float64(pod.Spec.Containers[0].Resources.Requests.Cpu().MilliValue())).To(gomega.BeNumerically("~", 1.5)) gomega.Expect(float64(pod.Spec.Containers[0].Resources.Limits.Memory().Value()) / 
float64(pod.Spec.Containers[0].Resources.Requests.Memory().Value())).To(gomega.BeNumerically("~", 2.)) } diff --git a/vertical-pod-autoscaler/e2e/v1beta2/admission_controller.go b/vertical-pod-autoscaler/e2e/v1beta2/admission_controller.go index 98d2306c4a78..4ac3c25a9b87 100644 --- a/vertical-pod-autoscaler/e2e/v1beta2/admission_controller.go +++ b/vertical-pod-autoscaler/e2e/v1beta2/admission_controller.go @@ -136,8 +136,8 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", func() { // Max CPU limit is 300m and ratio is 1.5, so max request is 200m, while // recommendation is 250m - // Max memory limit is 1T and ratio is 2., so max request is 0.5T - InstallLimitRangeWithMax(f, "300m", "1T") + // Max memory limit is 1Gi and ratio is 2., so max request is 0.5Gi + InstallLimitRangeWithMax(f, "300m", "1Gi") ginkgo.By("Setting up a hamster deployment") podList := startDeploymentPods(f, d) @@ -149,8 +149,8 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", func() { for _, pod := range podList.Items { gomega.Expect(*pod.Spec.Containers[0].Resources.Requests.Cpu()).To(gomega.Equal(ParseQuantityOrDie("200m"))) gomega.Expect(*pod.Spec.Containers[0].Resources.Requests.Memory()).To(gomega.Equal(ParseQuantityOrDie("200Mi"))) - gomega.Expect(*pod.Spec.Containers[0].Resources.Limits.Cpu()).To(gomega.BeNumerically("<=", ParseQuantityOrDie("300m"))) - gomega.Expect(*pod.Spec.Containers[0].Resources.Limits.Memory()).To(gomega.BeNumerically("<=", ParseQuantityOrDie("1T"))) + gomega.Expect(pod.Spec.Containers[0].Resources.Limits.Cpu().MilliValue()).To(gomega.BeNumerically("<=", 300)) + gomega.Expect(pod.Spec.Containers[0].Resources.Limits.Memory().Value()).To(gomega.BeNumerically("<=", 1024*1024*1024)) gomega.Expect(float64(pod.Spec.Containers[0].Resources.Limits.Cpu().MilliValue()) / float64(pod.Spec.Containers[0].Resources.Requests.Cpu().MilliValue())).To(gomega.BeNumerically("~", 1.5)) gomega.Expect(float64(pod.Spec.Containers[0].Resources.Limits.Memory().Value()) / float64(pod.Spec.Containers[0].Resources.Requests.Memory().Value())).To(gomega.BeNumerically("~", 2.)) } @@ -189,8 +189,8 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", func() { for _, pod := range podList.Items { gomega.Expect(*pod.Spec.Containers[0].Resources.Requests.Cpu()).To(gomega.Equal(ParseQuantityOrDie("250m"))) gomega.Expect(*pod.Spec.Containers[0].Resources.Requests.Memory()).To(gomega.Equal(ParseQuantityOrDie("125Mi"))) - gomega.Expect(*pod.Spec.Containers[0].Resources.Limits.Cpu()).To(gomega.BeNumerically(">=", ParseQuantityOrDie("75m"))) - gomega.Expect(*pod.Spec.Containers[0].Resources.Limits.Memory()).To(gomega.BeNumerically(">=", ParseQuantityOrDie("250Mi"))) + gomega.Expect(pod.Spec.Containers[0].Resources.Limits.Cpu().MilliValue()).To(gomega.BeNumerically(">=", 75)) + gomega.Expect(pod.Spec.Containers[0].Resources.Limits.Memory().Value()).To(gomega.BeNumerically(">=", 250*1024*1024)) gomega.Expect(float64(pod.Spec.Containers[0].Resources.Limits.Cpu().MilliValue()) / float64(pod.Spec.Containers[0].Resources.Requests.Cpu().MilliValue())).To(gomega.BeNumerically("~", 1.5)) gomega.Expect(float64(pod.Spec.Containers[0].Resources.Limits.Memory().Value()) / float64(pod.Spec.Containers[0].Resources.Requests.Memory().Value())).To(gomega.BeNumerically("~", 2.)) } From cde0bf00edbfb02454827f7e61397ba7d88b93cb Mon Sep 17 00:00:00 2001 From: Beata Skiba Date: Thu, 30 May 2019 13:19:18 +0200 Subject: [PATCH 16/25] Account for min Limit from LimitRange. 
Refactoring to make the code consistent. Removed applying to upper and lower bound - I was mistaken that it was needed >.<. Note that since the recommendation is already capped to max/min limit, there is no additional need for capping in the GetContainerResources method. --- .../logic/recommendation_provider.go | 39 ++--- .../logic/recommendation_provider_test.go | 34 +--- .../pkg/utils/vpa/capping.go | 160 ++++++++++-------- .../pkg/utils/vpa/capping_test.go | 21 +-- .../utils/vpa/limit_and_request_scaling.go | 87 ++++++---- .../vpa/limit_and_request_scaling_test.go | 11 +- 6 files changed, 181 insertions(+), 171 deletions(-) diff --git a/vertical-pod-autoscaler/pkg/admission-controller/logic/recommendation_provider.go b/vertical-pod-autoscaler/pkg/admission-controller/logic/recommendation_provider.go index 77b43d7b6a16..52487dd6c6c4 100644 --- a/vertical-pod-autoscaler/pkg/admission-controller/logic/recommendation_provider.go +++ b/vertical-pod-autoscaler/pkg/admission-controller/logic/recommendation_provider.go @@ -18,21 +18,20 @@ package logic import ( "fmt" - "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/resource" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/limitrange" + core "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/labels" vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta2" vpa_lister "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/client/listers/autoscaling.k8s.io/v1beta2" "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/target" + "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/limitrange" vpa_api_util "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/vpa" "k8s.io/klog" ) // RecommendationProvider gets current recommendation, annotations and vpaName for the given pod. type RecommendationProvider interface { - GetContainersResourcesForPod(pod *v1.Pod) ([]vpa_api_util.ContainerResources, vpa_api_util.ContainerToAnnotationsMap, string, error) + GetContainersResourcesForPod(pod *core.Pod) ([]vpa_api_util.ContainerResources, vpa_api_util.ContainerToAnnotationsMap, string, error) } type recommendationProvider struct { @@ -54,36 +53,32 @@ func NewRecommendationProvider(calculator limitrange.LimitRangeCalculator, recom } // GetContainersResources returns the recommended resources for each container in the given pod in the same order they are specified in the pod.Spec. 
-func GetContainersResources(pod *v1.Pod, podRecommendation vpa_types.RecommendedPodResources, limitRange *v1.LimitRangeItem, +func GetContainersResources(pod *core.Pod, podRecommendation vpa_types.RecommendedPodResources, limitRange *core.LimitRangeItem, annotations vpa_api_util.ContainerToAnnotationsMap) []vpa_api_util.ContainerResources { resources := make([]vpa_api_util.ContainerResources, len(pod.Spec.Containers)) - var defaultCpu, defaultMem, maxCpuLimit, maxMemLimit *resource.Quantity - if limitRange != nil { - defaultCpu = limitRange.Default.Cpu() - defaultMem = limitRange.Default.Memory() - maxCpuLimit = limitRange.Max.Cpu() - maxMemLimit = limitRange.Max.Memory() - } for i, container := range pod.Spec.Containers { recommendation := vpa_api_util.GetRecommendationForContainer(container.Name, &podRecommendation) if recommendation == nil { klog.V(2).Infof("no matching recommendation found for container %s", container.Name) continue } - cpuLimit, annotation := vpa_api_util.GetProportionalLimit(container.Resources.Limits.Cpu(), container.Resources.Requests.Cpu(), recommendation.Target.Cpu(), defaultCpu) - if annotation != "" { - annotations[container.Name] = append(annotations[container.Name], fmt.Sprintf("CPU: %s", annotation)) + resources[i].Requests = recommendation.Target + defaultLimit := core.ResourceList{} + if limitRange != nil { + defaultLimit = limitRange.Default } - memLimit, annotation := vpa_api_util.GetProportionalLimit(container.Resources.Limits.Memory(), container.Resources.Requests.Memory(), recommendation.Target.Memory(), defaultMem) - if annotation != "" { - annotations[container.Name] = append(annotations[container.Name], fmt.Sprintf("memory: %s", annotation)) + proportionalLimits, limitAnnotations := vpa_api_util.GetProportionalLimit(container.Resources.Limits, container.Resources.Requests, recommendation.Target, defaultLimit) + if proportionalLimits != nil { + resources[i].Limits = proportionalLimits + if len(limitAnnotations) > 0 { + annotations[container.Name] = append(annotations[container.Name], limitAnnotations...) + } } - resources[i] = vpa_api_util.ProportionallyCapResourcesToMaxLimit(recommendation.Target, cpuLimit, memLimit, maxCpuLimit, maxMemLimit) } return resources } -func (p *recommendationProvider) getMatchingVPA(pod *v1.Pod) *vpa_types.VerticalPodAutoscaler { +func (p *recommendationProvider) getMatchingVPA(pod *core.Pod) *vpa_types.VerticalPodAutoscaler { configs, err := p.vpaLister.VerticalPodAutoscalers(pod.Namespace).List(labels.Everything()) if err != nil { klog.Errorf("failed to get vpa configs: %v", err) @@ -114,7 +109,7 @@ func (p *recommendationProvider) getMatchingVPA(pod *v1.Pod) *vpa_types.Vertical // GetContainersResourcesForPod returns recommended request for a given pod, annotations and name of controlling VPA. // The returned slice corresponds 1-1 to containers in the Pod. 
-func (p *recommendationProvider) GetContainersResourcesForPod(pod *v1.Pod) ([]vpa_api_util.ContainerResources, vpa_api_util.ContainerToAnnotationsMap, string, error) { +func (p *recommendationProvider) GetContainersResourcesForPod(pod *core.Pod) ([]vpa_api_util.ContainerResources, vpa_api_util.ContainerToAnnotationsMap, string, error) { klog.V(2).Infof("updating requirements for pod %s.", pod.Name) vpaConfig := p.getMatchingVPA(pod) if vpaConfig == nil { diff --git a/vertical-pod-autoscaler/pkg/admission-controller/logic/recommendation_provider_test.go b/vertical-pod-autoscaler/pkg/admission-controller/logic/recommendation_provider_test.go index 218a42fdea87..9b32bccbee48 100644 --- a/vertical-pod-autoscaler/pkg/admission-controller/logic/recommendation_provider_test.go +++ b/vertical-pod-autoscaler/pkg/admission-controller/logic/recommendation_provider_test.go @@ -18,21 +18,21 @@ package logic import ( "fmt" - "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/limitrange" "math" "testing" - "github.com/golang/mock/gomock" - "github.com/stretchr/testify/assert" apiv1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta2" target_mock "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/target/mock" + "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/limitrange" "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/test" - "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/vpa" vpa_api_util "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/vpa" + + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/assert" ) func parseLabelSelector(selector string) labels.Selector { @@ -262,8 +262,8 @@ func TestUpdateResourceRequests(t *testing.T) { labelSelector: "app = testingApp", annotations: vpa_api_util.ContainerToAnnotationsMap{ containerName: []string{ - "CPU: failed to keep limit to request proportion of 10 to 1 with recommended request of 1Ei; doesn't fit in int64. Capping limit to MaxInt64 milliunits", - "memory: failed to keep limit to request proportion of 1000Mi to 100Mi with recommended request of 1Ei; doesn't fit in int64. 
Capping limit to MaxInt64 milliunits", + "cpu: failed to keep limit to request ratio; capping limit to int64", + "memory: failed to keep limit to request ratio; capping limit to int64", }, }, }, @@ -293,24 +293,6 @@ func TestUpdateResourceRequests(t *testing.T) { }, }, }, - { - name: "cap limits to max", - pod: limitsMatchRequestsPod, - vpas: []*vpa_types.VerticalPodAutoscaler{vpa}, - expectedAction: true, - expectedCPU: resource.MustParse("1.5"), - expectedMem: resource.MustParse("150Mi"), - expectedCPULimit: mustParseResourcePointer("1.5"), - expectedMemLimit: mustParseResourcePointer("150Mi"), - labelSelector: "app = testingApp", - limitRange: &apiv1.LimitRangeItem{ - Type: apiv1.LimitTypeContainer, - Max: apiv1.ResourceList{ - apiv1.ResourceCPU: resource.MustParse("1.5"), - apiv1.ResourceMemory: resource.MustParse("150Mi"), - }, - }, - }, } for _, tc := range testCases { @@ -330,7 +312,7 @@ func TestUpdateResourceRequests(t *testing.T) { recommendationProvider := &recommendationProvider{ vpaLister: vpaLister, - recommendationProcessor: api.NewCappingRecommendationProcessor(limitrange.NewNoopLimitsCalculator()), + recommendationProcessor: vpa_api_util.NewCappingRecommendationProcessor(limitrange.NewNoopLimitsCalculator()), selectorFetcher: mockSelectorFetcher, limitsRangeCalculator: &fakeLimitRangeCalculator{ tc.limitRange, @@ -366,7 +348,7 @@ func TestUpdateResourceRequests(t *testing.T) { if tc.expectedMemLimit == nil { assert.False(t, memLimitPresent, "expected no memory limit, got %s", memLimit.String()) } else { - if assert.True(t, memLimitPresent, "expected cpu limit, but it's missing") { + if assert.True(t, memLimitPresent, "expected memory limit, but it's missing") { assert.Equal(t, tc.expectedMemLimit.MilliValue(), memLimit.MilliValue(), "memory limit doesn't match") } } diff --git a/vertical-pod-autoscaler/pkg/utils/vpa/capping.go b/vertical-pod-autoscaler/pkg/utils/vpa/capping.go index a8b8bfed3953..b10fbacc19a4 100644 --- a/vertical-pod-autoscaler/pkg/utils/vpa/capping.go +++ b/vertical-pod-autoscaler/pkg/utils/vpa/capping.go @@ -18,6 +18,7 @@ package api import ( "fmt" + apiv1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta2" @@ -34,8 +35,10 @@ func NewCappingRecommendationProcessor(limitsRangeCalculator limitrange.LimitRan type cappingAction string var ( - cappedToMinAllowed cappingAction = "capped to minAllowed" - cappedToMaxAllowed cappingAction = "capped to maxAllowed" + cappedToMinAllowed cappingAction = "capped to minAllowed" + cappedToMaxAllowed cappingAction = "capped to maxAllowed" + cappedProportionallyToMaxLimit cappingAction = "capped to fit Max in container LimitRange" + cappedProportionallyToMinLimit cappingAction = "capped to fit Min in container LimitRange" ) func toCappingAnnotation(resourceName apiv1.ResourceName, action cappingAction) string { @@ -64,19 +67,20 @@ func (c *cappingRecommendationProcessor) Apply( } updatedRecommendations := []vpa_types.RecommendedContainerResources{} containerToAnnotationsMap := ContainerToAnnotationsMap{} - limitAdjustedRecommendation, err := c.capProportionallyToMaxLimit(podRecommendation, pod, containerToAnnotationsMap) - if err != nil { - return nil, nil, err - } - for _, containerRecommendation := range limitAdjustedRecommendation { + for _, containerRecommendation := range podRecommendation.ContainerRecommendations { container := getContainer(containerRecommendation.ContainerName, pod) if container == nil { 
klog.V(2).Infof("no matching Container found for recommendation %s", containerRecommendation.ContainerName) continue } + + containerLimitRange, err := c.limitsRangeCalculator.GetContainerLimitRangeItem(pod.Namespace) + if err != nil { + klog.Warningf("failed to fetch LimitRange for %v namespace", pod.Namespace) + } updatedContainerResources, containerAnnotations, err := getCappedRecommendationForContainer( - *container, &containerRecommendation, policy) + *container, &containerRecommendation, policy, containerLimitRange) if len(containerAnnotations) != 0 { containerToAnnotationsMap[containerRecommendation.ContainerName] = containerAnnotations @@ -90,58 +94,11 @@ func (c *cappingRecommendationProcessor) Apply( return &vpa_types.RecommendedPodResources{ContainerRecommendations: updatedRecommendations}, containerToAnnotationsMap, nil } -func capSingleRecommendationProportionallyToMaxLimit(recommendation apiv1.ResourceList, container apiv1.Container, podLimitRange *apiv1.LimitRangeItem, containerToAnnotationsMap ContainerToAnnotationsMap) apiv1.ResourceList { - defaultCpu := podLimitRange.Default.Cpu() - defaultMem := podLimitRange.Default.Memory() - maxCpuLimit := podLimitRange.Max.Cpu() - maxMemLimit := podLimitRange.Max.Memory() - cpuLimit, _ := GetProportionalLimit(container.Resources.Limits.Cpu(), container.Resources.Requests.Cpu(), recommendation.Cpu(), defaultCpu) - memLimit, _ := GetProportionalLimit(container.Resources.Limits.Memory(), container.Resources.Requests.Memory(), recommendation.Memory(), defaultMem) - capped := ProportionallyCapResourcesToMaxLimit(recommendation, cpuLimit, memLimit, maxCpuLimit, maxMemLimit) - return apiv1.ResourceList{ - apiv1.ResourceCPU: *capped.Requests.Cpu(), - apiv1.ResourceMemory: *capped.Requests.Memory(), - } -} - -func (c *cappingRecommendationProcessor) capProportionallyToMaxLimit(podRecommendation *vpa_types.RecommendedPodResources, pod *apiv1.Pod, containerToAnnotationsMap ContainerToAnnotationsMap) ([]vpa_types.RecommendedContainerResources, error) { - podLimitRange, err := c.limitsRangeCalculator.GetContainerLimitRangeItem(pod.Namespace) - if err != nil { - return nil, fmt.Errorf("error obtaining limit range: %s", err) - } - if podLimitRange == nil { - return podRecommendation.ContainerRecommendations, nil - } - - updatedRecommendations := []vpa_types.RecommendedContainerResources{} - - for _, container := range pod.Spec.Containers { - recommendation := GetRecommendationForContainer(container.Name, podRecommendation) - if recommendation == nil { - klog.V(2).Infof("no matching recommendation found for container %s", container.Name) - continue - } - - scaledRecommendation := recommendation.DeepCopy() - scaledRecommendation.LowerBound = capSingleRecommendationProportionallyToMaxLimit(recommendation.LowerBound, container, podLimitRange, containerToAnnotationsMap) - scaledRecommendation.Target = capSingleRecommendationProportionallyToMaxLimit(recommendation.Target, container, podLimitRange, containerToAnnotationsMap) - scaledRecommendation.UpperBound = capSingleRecommendationProportionallyToMaxLimit(recommendation.UpperBound, container, podLimitRange, containerToAnnotationsMap) - if scaledRecommendation.Target.Cpu().MilliValue() != recommendation.Target.Cpu().MilliValue() { - containerToAnnotationsMap[container.Name] = append(containerToAnnotationsMap[container.Name], "changed CPU limit to fit within limit range") - } - if scaledRecommendation.Target.Memory().Value() != recommendation.Target.Memory().Value() { - 
containerToAnnotationsMap[container.Name] = append(containerToAnnotationsMap[container.Name], "changed memory limit to fit within limit range")
-		}
-		updatedRecommendations = append(updatedRecommendations, *scaledRecommendation)
-	}
-	return updatedRecommendations, nil
-}
-
 // getCappedRecommendationForContainer returns a recommendation for the given container, adjusted to obey policy and limits.
 func getCappedRecommendationForContainer(
 	container apiv1.Container,
 	containerRecommendation *vpa_types.RecommendedContainerResources,
-	policy *vpa_types.PodResourcePolicy) (*vpa_types.RecommendedContainerResources, []string, error) {
+	policy *vpa_types.PodResourcePolicy, limitRange *apiv1.LimitRangeItem) (*vpa_types.RecommendedContainerResources, []string, error) {
 	if containerRecommendation == nil {
 		return nil, nil, fmt.Errorf("no recommendation available for container name %v", container.Name)
 	}
@@ -153,8 +110,11 @@ func getCappedRecommendationForContainer(
 	cappingAnnotations := make([]string, 0)
 	process := func(recommendation apiv1.ResourceList, genAnnotations bool) {
+		// TODO: Add annotation if limitRange is conflicting with VPA policy.
+		limitAnnotations := applyContainerLimitRange(recommendation, container, limitRange)
 		annotations := applyVPAPolicy(recommendation, containerPolicy)
 		if genAnnotations {
+			cappingAnnotations = append(cappingAnnotations, limitAnnotations...)
 			cappingAnnotations = append(cappingAnnotations, annotations...)
 		}
 	}
@@ -173,12 +133,12 @@ func applyVPAPolicy(recommendation apiv1.ResourceList, policy *vpa_types.Contain
 	}
 	annotations := make([]string, 0)
 	for resourceName, recommended := range recommendation {
-		cappedToMin, isCapped := maybeCapToMin(recommended, resourceName, policy)
+		cappedToMin, isCapped := maybeCapToPolicyMin(recommended, resourceName, policy)
 		recommendation[resourceName] = cappedToMin
 		if isCapped {
 			annotations = append(annotations, toCappingAnnotation(resourceName, cappedToMinAllowed))
 		}
-		cappedToMax, isCapped := maybeCapToMax(cappedToMin, resourceName, policy)
+		cappedToMax, isCapped := maybeCapToPolicyMax(cappedToMin, resourceName, policy)
 		recommendation[resourceName] = cappedToMax
 		if isCapped {
 			annotations = append(annotations, toCappingAnnotation(resourceName, cappedToMaxAllowed))
 		}
@@ -202,9 +162,9 @@ func applyVPAPolicyForContainer(containerName string,
 	process := func(recommendation apiv1.ResourceList) {
 		for resourceName, recommended := range recommendation {
-			cappedToMin, _ := maybeCapToMin(recommended, resourceName, containerPolicy)
+			cappedToMin, _ := maybeCapToPolicyMin(recommended, resourceName, containerPolicy)
 			recommendation[resourceName] = cappedToMin
-			cappedToMax, _ := maybeCapToMax(cappedToMin, resourceName, containerPolicy)
+			cappedToMax, _ := maybeCapToPolicyMax(cappedToMin, resourceName, containerPolicy)
 			recommendation[resourceName] = cappedToMax
 		}
 	}
@@ -216,20 +176,30 @@ func applyVPAPolicyForContainer(containerName string,
 	return cappedRecommendations, nil
 }
-func maybeCapToMin(recommended resource.Quantity, resourceName apiv1.ResourceName,
+func maybeCapToPolicyMin(recommended resource.Quantity, resourceName apiv1.ResourceName,
 	containerPolicy *vpa_types.ContainerResourcePolicy) (resource.Quantity, bool) {
-	min, found := containerPolicy.MinAllowed[resourceName]
-	if found && !min.IsZero() && recommended.Cmp(min) < 0 {
-		return min, true
+	return maybeCapToMin(recommended, resourceName, containerPolicy.MinAllowed)
+}
+
+func maybeCapToPolicyMax(recommended resource.Quantity, resourceName apiv1.ResourceName,
+	containerPolicy *vpa_types.ContainerResourcePolicy) (resource.Quantity, bool) {
+	return maybeCapToMax(recommended, resourceName, containerPolicy.MaxAllowed)
+}
+
+func maybeCapToMax(recommended resource.Quantity, resourceName apiv1.ResourceName,
+	max apiv1.ResourceList) (resource.Quantity, bool) {
+	maxResource, found := max[resourceName]
+	if found && !maxResource.IsZero() && recommended.Cmp(maxResource) > 0 {
+		return maxResource, true
 	}
 	return recommended, false
 }
-func maybeCapToMax(recommended resource.Quantity, resourceName apiv1.ResourceName,
-	containerPolicy *vpa_types.ContainerResourcePolicy) (resource.Quantity, bool) {
-	max, found := containerPolicy.MaxAllowed[resourceName]
-	if found && !max.IsZero() && recommended.Cmp(max) > 0 {
-		return max, true
+func maybeCapToMin(recommended resource.Quantity, resourceName apiv1.ResourceName,
+	min apiv1.ResourceList) (resource.Quantity, bool) {
+	minResource, found := min[resourceName]
+	if found && !minResource.IsZero() && recommended.Cmp(minResource) < 0 {
+		return minResource, true
 	}
 	return recommended, false
 }
@@ -278,3 +248,55 @@ func getContainer(containerName string, pod *apiv1.Pod) *apiv1.Container {
 	}
 	return nil
 }
+
+// applyContainerLimitRange updates the recommendation if the recommended resources are outside the limits defined in the container LimitRange item
+func applyContainerLimitRange(recommendation apiv1.ResourceList, container apiv1.Container, limitRange *apiv1.LimitRangeItem) []string {
+	annotations := make([]string, 0)
+	if limitRange == nil {
+		return annotations
+	}
+	maxAllowedRecommendation := getMaxAllowedRecommendation(recommendation, container, limitRange)
+	minAllowedRecommendation := getMinAllowedRecommendation(recommendation, container, limitRange)
+	for resourceName, recommended := range recommendation {
+		cappedToMin, isCapped := maybeCapToMin(recommended, resourceName, minAllowedRecommendation)
+		recommendation[resourceName] = cappedToMin
+		if isCapped {
+			annotations = append(annotations, toCappingAnnotation(resourceName, cappedProportionallyToMinLimit))
+		}
+		cappedToMax, isCapped := maybeCapToMax(cappedToMin, resourceName, maxAllowedRecommendation)
+		recommendation[resourceName] = cappedToMax
+		if isCapped {
+			annotations = append(annotations, toCappingAnnotation(resourceName, cappedProportionallyToMaxLimit))
+		}
+	}
+	return annotations
+}
+
+func getMaxAllowedRecommendation(recommendation apiv1.ResourceList, container apiv1.Container,
+	podLimitRange *apiv1.LimitRangeItem) apiv1.ResourceList {
+	if podLimitRange == nil {
+		return apiv1.ResourceList{}
+	}
+	return getBoundaryRecommendation(recommendation, container, podLimitRange.Max, podLimitRange.Default)
+}
+
+func getMinAllowedRecommendation(recommendation apiv1.ResourceList, container apiv1.Container,
+	podLimitRange *apiv1.LimitRangeItem) apiv1.ResourceList {
+	if podLimitRange == nil {
+		return apiv1.ResourceList{}
+	}
+	return getBoundaryRecommendation(recommendation, container, podLimitRange.Min, podLimitRange.Default)
+}
+
+func getBoundaryRecommendation(recommendation apiv1.ResourceList, container apiv1.Container,
+	boundaryLimit, defaultLimit apiv1.ResourceList) apiv1.ResourceList {
+	if boundaryLimit == nil {
+		return apiv1.ResourceList{}
+	}
+	cpuMaxRequest := GetBoundaryRequest(container.Resources.Requests.Cpu(), container.Resources.Limits.Cpu(), boundaryLimit.Cpu(), defaultLimit.Cpu())
+	memMaxRequest := GetBoundaryRequest(container.Resources.Requests.Memory(), container.Resources.Limits.Memory(), boundaryLimit.Memory(), defaultLimit.Memory())
+	return
apiv1.ResourceList{ + apiv1.ResourceCPU: *cpuMaxRequest, + apiv1.ResourceMemory: *memMaxRequest, + } +} diff --git a/vertical-pod-autoscaler/pkg/utils/vpa/capping_test.go b/vertical-pod-autoscaler/pkg/utils/vpa/capping_test.go index 6fa52d1f6257..efbf4b06a44c 100644 --- a/vertical-pod-autoscaler/pkg/utils/vpa/capping_test.go +++ b/vertical-pod-autoscaler/pkg/utils/vpa/capping_test.go @@ -228,8 +228,10 @@ func TestApplyCapsToLimitRange(t *testing.T) { limitRange := apiv1.LimitRangeItem{ Type: apiv1.LimitTypeContainer, Max: apiv1.ResourceList{ - apiv1.ResourceCPU: resource.MustParse("1"), - apiv1.ResourceMemory: resource.MustParse("1G"), + apiv1.ResourceCPU: resource.MustParse("1"), + }, + Min: apiv1.ResourceList{ + apiv1.ResourceMemory: resource.MustParse("500M"), }, } recommendation := vpa_types.RecommendedPodResources{ @@ -238,7 +240,7 @@ func TestApplyCapsToLimitRange(t *testing.T) { ContainerName: "container", Target: apiv1.ResourceList{ apiv1.ResourceCPU: resource.MustParse("2"), - apiv1.ResourceMemory: resource.MustParse("10G"), + apiv1.ResourceMemory: resource.MustParse("200M"), }, }, }, @@ -266,17 +268,9 @@ func TestApplyCapsToLimitRange(t *testing.T) { ContainerRecommendations: []vpa_types.RecommendedContainerResources{ { ContainerName: "container", - LowerBound: apiv1.ResourceList{ - apiv1.ResourceCPU: *resource.NewQuantity(0, resource.DecimalSI), - apiv1.ResourceMemory: *resource.NewQuantity(0, resource.BinarySI), - }, Target: apiv1.ResourceList{ apiv1.ResourceCPU: resource.MustParse("1000m"), - apiv1.ResourceMemory: resource.MustParse("1000000000000m"), - }, - UpperBound: apiv1.ResourceList{ - apiv1.ResourceCPU: *resource.NewQuantity(0, resource.DecimalSI), - apiv1.ResourceMemory: *resource.NewQuantity(0, resource.BinarySI), + apiv1.ResourceMemory: resource.MustParse("500000000000m"), }, }, }, @@ -286,6 +280,7 @@ func TestApplyCapsToLimitRange(t *testing.T) { processor := NewCappingRecommendationProcessor(&calculator) processedRecommendation, annotations, err := processor.Apply(&recommendation, nil, nil, &pod) assert.NoError(t, err) - assert.Equal(t, map[string][]string{"container": {"changed CPU limit to fit within limit range", "changed memory limit to fit within limit range"}}, annotations) + assert.Contains(t, annotations, "container") + assert.ElementsMatch(t, []string{"cpu capped to fit Max in container LimitRange", "memory capped to fit Min in container LimitRange"}, annotations["container"]) assert.Equal(t, expectedRecommendation, *processedRecommendation) } diff --git a/vertical-pod-autoscaler/pkg/utils/vpa/limit_and_request_scaling.go b/vertical-pod-autoscaler/pkg/utils/vpa/limit_and_request_scaling.go index 2d4c5396aaf6..bbd9cf2c9851 100644 --- a/vertical-pod-autoscaler/pkg/utils/vpa/limit_and_request_scaling.go +++ b/vertical-pod-autoscaler/pkg/utils/vpa/limit_and_request_scaling.go @@ -18,27 +18,51 @@ package api import ( "fmt" - "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/resource" "math" "math/big" + + core "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" ) // ContainerResources holds resources request for container type ContainerResources struct { - Limits v1.ResourceList - Requests v1.ResourceList + Limits core.ResourceList + Requests core.ResourceList } func newContainerResources() ContainerResources { return ContainerResources{ - Requests: v1.ResourceList{}, - Limits: v1.ResourceList{}, + Requests: core.ResourceList{}, + Limits: core.ResourceList{}, } } // GetProportionalLimit returns limit that will be in the same proportion to 
recommended request as original limit had to original request. -func GetProportionalLimit(originalLimit, originalRequest, recommendedRequest, defaultLimit *resource.Quantity) (*resource.Quantity, string) { +func GetProportionalLimit(originalLimit, originalRequest, recommendation, defaultLimit core.ResourceList) (core.ResourceList, []string) { + annotations := []string{} + cpuLimit, annotation := getProportionalResourceLimit(core.ResourceCPU, originalLimit.Cpu(), originalRequest.Cpu(), recommendation.Cpu(), defaultLimit.Cpu()) + if annotation != "" { + annotations = append(annotations, annotation) + } + memLimit, annotation := getProportionalResourceLimit(core.ResourceMemory, originalLimit.Memory(), originalRequest.Memory(), recommendation.Memory(), defaultLimit.Memory()) + if annotation != "" { + annotations = append(annotations, annotation) + } + if memLimit == nil && cpuLimit == nil { + return nil, []string{} + } + result := core.ResourceList{} + if cpuLimit != nil { + result[core.ResourceCPU] = *cpuLimit + } + if memLimit != nil { + result[core.ResourceMemory] = *memLimit + } + return result, annotations +} + +func getProportionalResourceLimit(resourceName core.ResourceName, originalLimit, originalRequest, recommendedRequest, defaultLimit *resource.Quantity) (*resource.Quantity, string) { if originalLimit == nil || originalLimit.Value() == 0 && defaultLimit != nil { originalLimit = defaultLimit } @@ -62,8 +86,25 @@ func GetProportionalLimit(originalLimit, originalRequest, recommendedRequest, de return result, "" } return result, fmt.Sprintf( - "failed to keep limit to request proportion of %s to %s with recommended request of %s; doesn't fit in int64. Capping limit to MaxInt64 milliunits", - originalLimit, originalRequest, recommendedRequest) + "%v: failed to keep limit to request ratio; capping limit to int64", resourceName) +} + +// GetBoundaryRequest returns the boundary (min/max) request that can be specified with +// preserving the original limit to request ratio. 
Returns nil if no boundary exists +func GetBoundaryRequest(originalRequest, originalLimit, boundaryLimit, defaultLimit *resource.Quantity) *resource.Quantity { + if originalLimit == nil || originalLimit.Value() == 0 && defaultLimit != nil { + originalLimit = defaultLimit + } + // originalLimit not set, no boundary + if originalLimit == nil || originalLimit.Value() == 0 { + return nil + } + // originalLimit set but originalRequest not set - K8s will treat the pod as if they were equal + if originalRequest == nil || originalRequest.Value() == 0 { + return boundaryLimit + } + result, _ := scaleQuantityProportionally(originalRequest /* scaledQuantity */, originalLimit /*scaleBase*/, boundaryLimit /*scaleResult*/) + return result } // scaleQuantityProportionally returns value which has the same proportion to scaledQuantity as scaleResult has to scaleBase @@ -80,31 +121,3 @@ func scaleQuantityProportionally(scaledQuantity, scaleBase, scaleResult *resourc } return resource.NewMilliQuantity(math.MaxInt64, scaledQuantity.Format), true } - -func proportionallyCapLimitToMax(recommendedRequest, recommendedLimit, maxLimit *resource.Quantity) (request, limit *resource.Quantity) { - if recommendedLimit == nil || maxLimit == nil || maxLimit.IsZero() { - return recommendedRequest, recommendedLimit - } - if recommendedLimit.Cmp(*maxLimit) <= 0 { - return recommendedRequest, recommendedLimit - } - scaledRequest, _ := scaleQuantityProportionally(recommendedRequest, recommendedLimit, maxLimit) - return scaledRequest, maxLimit -} - -// ProportionallyCapResourcesToMaxLimit caps CPU and memory limit to maximum and scales requests to maintain limit/request ratio. -func ProportionallyCapResourcesToMaxLimit(recommendedRequests v1.ResourceList, cpuLimit, memLimit, maxCpuLimit, maxMemLimit *resource.Quantity) ContainerResources { - scaledCpuRequest, scaledCpuLimit := proportionallyCapLimitToMax(recommendedRequests.Cpu(), cpuLimit, maxCpuLimit) - scaledMemRequest, scaledMemLimit := proportionallyCapLimitToMax(recommendedRequests.Memory(), memLimit, maxMemLimit) - result := newContainerResources() - - result.Requests[v1.ResourceCPU] = *scaledCpuRequest - result.Requests[v1.ResourceMemory] = *scaledMemRequest - if scaledCpuLimit != nil { - result.Limits[v1.ResourceCPU] = *scaledCpuLimit - } - if scaledMemLimit != nil { - result.Limits[v1.ResourceMemory] = *scaledMemLimit - } - return result -} diff --git a/vertical-pod-autoscaler/pkg/utils/vpa/limit_and_request_scaling_test.go b/vertical-pod-autoscaler/pkg/utils/vpa/limit_and_request_scaling_test.go index 151706f29cb4..c334279c6a4a 100644 --- a/vertical-pod-autoscaler/pkg/utils/vpa/limit_and_request_scaling_test.go +++ b/vertical-pod-autoscaler/pkg/utils/vpa/limit_and_request_scaling_test.go @@ -17,10 +17,13 @@ limitations under the License. 
package api import ( - "github.com/stretchr/testify/assert" - "k8s.io/apimachinery/pkg/api/resource" "math" "testing" + + core "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + + "github.com/stretchr/testify/assert" ) func mustParseToPointer(str string) *resource.Quantity { @@ -28,7 +31,7 @@ func mustParseToPointer(str string) *resource.Quantity { return &val } -func TestGetProportionalLimit(t *testing.T) { +func TestGetProportionalResourceLimit(t *testing.T) { tests := []struct { name string originalLimit *resource.Quantity @@ -82,7 +85,7 @@ func TestGetProportionalLimit(t *testing.T) { } for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { - gotLimit, gotAnnotation := GetProportionalLimit(tc.originalLimit, tc.originalRequest, tc.recommendedRequest, tc.defaultLimit) + gotLimit, gotAnnotation := getProportionalResourceLimit(core.ResourceCPU, tc.originalLimit, tc.originalRequest, tc.recommendedRequest, tc.defaultLimit) if tc.expectLimit == nil { assert.Nil(t, gotLimit) } else { From c7653b371708a94e35235d355c54458acc899547 Mon Sep 17 00:00:00 2001 From: Joachim Bartosik Date: Thu, 30 May 2019 13:33:50 +0200 Subject: [PATCH 17/25] Add method for getting pod limit range --- .../logic/recommendation_provider_test.go | 16 +++++++++++----- .../utils/limitrange/limit_range_calculator.go | 18 ++++++++++++++++-- .../pkg/utils/vpa/capping_test.go | 11 ++++++++--- 3 files changed, 35 insertions(+), 10 deletions(-) diff --git a/vertical-pod-autoscaler/pkg/admission-controller/logic/recommendation_provider_test.go b/vertical-pod-autoscaler/pkg/admission-controller/logic/recommendation_provider_test.go index 9b32bccbee48..f422fe9fe598 100644 --- a/vertical-pod-autoscaler/pkg/admission-controller/logic/recommendation_provider_test.go +++ b/vertical-pod-autoscaler/pkg/admission-controller/logic/recommendation_provider_test.go @@ -47,12 +47,18 @@ func mustParseResourcePointer(val string) *resource.Quantity { } type fakeLimitRangeCalculator struct { - limitRange *apiv1.LimitRangeItem - err error + containerLimitRange *apiv1.LimitRangeItem + containerErr error + podLimitRange *apiv1.LimitRangeItem + podErr error } func (nlrc *fakeLimitRangeCalculator) GetContainerLimitRangeItem(namespace string) (*apiv1.LimitRangeItem, error) { - return nlrc.limitRange, nlrc.err + return nlrc.containerLimitRange, nlrc.containerErr +} + +func (nlrc *fakeLimitRangeCalculator) GetPodLimitRangeItem(namespace string) (*apiv1.LimitRangeItem, error) { + return nlrc.podLimitRange, nlrc.podErr } func TestUpdateResourceRequests(t *testing.T) { @@ -315,8 +321,8 @@ func TestUpdateResourceRequests(t *testing.T) { recommendationProcessor: vpa_api_util.NewCappingRecommendationProcessor(limitrange.NewNoopLimitsCalculator()), selectorFetcher: mockSelectorFetcher, limitsRangeCalculator: &fakeLimitRangeCalculator{ - tc.limitRange, - tc.limitRangeCalcErr, + containerLimitRange: tc.limitRange, + containerErr: tc.limitRangeCalcErr, }, } diff --git a/vertical-pod-autoscaler/pkg/utils/limitrange/limit_range_calculator.go b/vertical-pod-autoscaler/pkg/utils/limitrange/limit_range_calculator.go index 7b666c83e7ff..77dc394979cc 100644 --- a/vertical-pod-autoscaler/pkg/utils/limitrange/limit_range_calculator.go +++ b/vertical-pod-autoscaler/pkg/utils/limitrange/limit_range_calculator.go @@ -30,6 +30,8 @@ import ( type LimitRangeCalculator interface { // GetContainerLimitRangeItem returns LimitRangeItem that describes limitation on container limits in the given namespace. 
GetContainerLimitRangeItem(namespace string) (*core.LimitRangeItem, error) + // GetPodLimitRangeItem returns LimitRangeItem that describes limitation on pod limits in the given namespace. + GetPodLimitRangeItem(namespace string) (*core.LimitRangeItem, error) } type noopLimitsRangeCalculator struct{} @@ -38,6 +40,10 @@ func (lc *noopLimitsRangeCalculator) GetContainerLimitRangeItem(namespace string return nil, nil } +func (lc *noopLimitsRangeCalculator) GetPodLimitRangeItem(namespace string) (*core.LimitRangeItem, error) { + return nil, nil +} + type limitsChecker struct { limitRangeLister listers.LimitRangeLister } @@ -66,6 +72,14 @@ func NewNoopLimitsCalculator() *noopLimitsRangeCalculator { } func (lc *limitsChecker) GetContainerLimitRangeItem(namespace string) (*core.LimitRangeItem, error) { + return lc.getLimitRangeItem(namespace, core.LimitTypeContainer) +} + +func (lc *limitsChecker) GetPodLimitRangeItem(namespace string) (*core.LimitRangeItem, error) { + return lc.getLimitRangeItem(namespace, core.LimitTypePod) +} + +func (lc *limitsChecker) getLimitRangeItem(namespace string, limitType core.LimitType) (*core.LimitRangeItem, error) { limitRanges, err := lc.limitRangeLister.LimitRanges(namespace).List(labels.Everything()) if err != nil { return nil, fmt.Errorf("error loading limit ranges: %s", err) @@ -102,10 +116,10 @@ func (lc *limitsChecker) GetContainerLimitRangeItem(namespace string) (*core.Lim return q2 } - result := &core.LimitRangeItem{Type: core.LimitTypeContainer} + result := &core.LimitRangeItem{Type: limitType} for _, lr := range limitRanges { for _, lri := range lr.Spec.Limits { - if lri.Type == core.LimitTypeContainer && (lri.Max != nil || lri.Default != nil || lri.Min != nil) { + if lri.Type == limitType && (lri.Max != nil || lri.Default != nil || lri.Min != nil) { if lri.Default != nil { result.Default = lri.Default } diff --git a/vertical-pod-autoscaler/pkg/utils/vpa/capping_test.go b/vertical-pod-autoscaler/pkg/utils/vpa/capping_test.go index efbf4b06a44c..decb40e47ed8 100644 --- a/vertical-pod-autoscaler/pkg/utils/vpa/capping_test.go +++ b/vertical-pod-autoscaler/pkg/utils/vpa/capping_test.go @@ -217,11 +217,16 @@ func TestApplyVpa(t *testing.T) { } type fakeLimitRangeCalculator struct { - limitRange apiv1.LimitRangeItem + containerLimitRange apiv1.LimitRangeItem + podLimitRange apiv1.LimitRangeItem } func (nlrc *fakeLimitRangeCalculator) GetContainerLimitRangeItem(namespace string) (*apiv1.LimitRangeItem, error) { - return &nlrc.limitRange, nil + return &nlrc.containerLimitRange, nil +} + +func (nlrc *fakeLimitRangeCalculator) GetPodLimitRangeItem(namespace string) (*apiv1.LimitRangeItem, error) { + return &nlrc.podLimitRange, nil } func TestApplyCapsToLimitRange(t *testing.T) { @@ -276,7 +281,7 @@ func TestApplyCapsToLimitRange(t *testing.T) { }, } - calculator := fakeLimitRangeCalculator{limitRange} + calculator := fakeLimitRangeCalculator{containerLimitRange: limitRange} processor := NewCappingRecommendationProcessor(&calculator) processedRecommendation, annotations, err := processor.Apply(&recommendation, nil, nil, &pod) assert.NoError(t, err) From 53fc2cbdbeddaf9227e1a74b15365d7a56efebeb Mon Sep 17 00:00:00 2001 From: Joachim Bartosik Date: Thu, 30 May 2019 16:04:18 +0200 Subject: [PATCH 18/25] Capping to pod limit range --- .../pkg/utils/vpa/capping.go | 57 ++++- .../pkg/utils/vpa/capping_test.go | 207 ++++++++++++++++++ 2 files changed, 263 insertions(+), 1 deletion(-) diff --git a/vertical-pod-autoscaler/pkg/utils/vpa/capping.go 
b/vertical-pod-autoscaler/pkg/utils/vpa/capping.go index b10fbacc19a4..38b33056a19a 100644 --- a/vertical-pod-autoscaler/pkg/utils/vpa/capping.go +++ b/vertical-pod-autoscaler/pkg/utils/vpa/capping.go @@ -67,7 +67,11 @@ func (c *cappingRecommendationProcessor) Apply( } updatedRecommendations := []vpa_types.RecommendedContainerResources{} containerToAnnotationsMap := ContainerToAnnotationsMap{} - for _, containerRecommendation := range podRecommendation.ContainerRecommendations { + limitAdjustedRecommendation, err := c.capProportionallyToPodLimitRange(podRecommendation.ContainerRecommendations, pod) + if err != nil { + return nil, nil, err + } + for _, containerRecommendation := range limitAdjustedRecommendation { container := getContainer(containerRecommendation.ContainerName, pod) if container == nil { @@ -300,3 +304,54 @@ func getBoundaryRecommendation(recommendation apiv1.ResourceList, container apiv apiv1.ResourceMemory: *memMaxRequest, } } + +func applyPodLimitRange(resources []vpa_types.RecommendedContainerResources, + pod *apiv1.Pod, limitRange apiv1.LimitRangeItem, resourceName apiv1.ResourceName) []vpa_types.RecommendedContainerResources { + minLimit := limitRange.Min[resourceName] + maxLimit := limitRange.Max[resourceName] + defaultLimit := limitRange.Default[resourceName] + + var sumLimit resource.Quantity + for i, container := range pod.Spec.Containers { + if i >= len(resources) { + continue + } + limit := container.Resources.Limits[resourceName] + request := container.Resources.Requests[resourceName] + recommendation := resources[i].Target[resourceName] + containerLimit, _ := getProportionalResourceLimit(resourceName, &limit, &request, &recommendation, &defaultLimit) + if containerLimit != nil { + sumLimit.Add(*containerLimit) + } + } + if minLimit.Cmp(sumLimit) <= 0 && (maxLimit.IsZero() || maxLimit.Cmp(sumLimit) >= 0) { + return resources + } + var targetTotalLimit resource.Quantity + if minLimit.Cmp(sumLimit) > 0 { + targetTotalLimit = minLimit + } + if !maxLimit.IsZero() && maxLimit.Cmp(sumLimit) < 0 { + targetTotalLimit = maxLimit + } + for i := range pod.Spec.Containers { + limit := resources[i].Target[resourceName] + cappedContainerRequest, _ := scaleQuantityProportionally(&limit, &sumLimit, &targetTotalLimit) + resources[i].Target[resourceName] = *cappedContainerRequest + } + return resources +} + +func (c *cappingRecommendationProcessor) capProportionallyToPodLimitRange( + containerRecommendations []vpa_types.RecommendedContainerResources, pod *apiv1.Pod) ([]vpa_types.RecommendedContainerResources, error) { + podLimitRange, err := c.limitsRangeCalculator.GetPodLimitRangeItem(pod.Namespace) + if err != nil { + return nil, fmt.Errorf("error obtaining limit range: %s", err) + } + if podLimitRange == nil { + return containerRecommendations, nil + } + containerRecommendations = applyPodLimitRange(containerRecommendations, pod, *podLimitRange, apiv1.ResourceCPU) + containerRecommendations = applyPodLimitRange(containerRecommendations, pod, *podLimitRange, apiv1.ResourceMemory) + return containerRecommendations, nil +} diff --git a/vertical-pod-autoscaler/pkg/utils/vpa/capping_test.go b/vertical-pod-autoscaler/pkg/utils/vpa/capping_test.go index decb40e47ed8..69f7c25d9c1f 100644 --- a/vertical-pod-autoscaler/pkg/utils/vpa/capping_test.go +++ b/vertical-pod-autoscaler/pkg/utils/vpa/capping_test.go @@ -289,3 +289,210 @@ func TestApplyCapsToLimitRange(t *testing.T) { assert.ElementsMatch(t, []string{"cpu capped to fit Max in container LimitRange", "memory capped to fit Min in 
container LimitRange"}, annotations["container"]) assert.Equal(t, expectedRecommendation, *processedRecommendation) } + +func TestApplyPodLimitRange(t *testing.T) { + tests := []struct { + name string + resources []vpa_types.RecommendedContainerResources + pod apiv1.Pod + limitRange apiv1.LimitRangeItem + resourceName apiv1.ResourceName + expect []vpa_types.RecommendedContainerResources + }{ + { + name: "cap target cpu to max", + resources: []vpa_types.RecommendedContainerResources{ + { + ContainerName: "container1", + Target: apiv1.ResourceList{ + apiv1.ResourceCPU: resource.MustParse("1"), + }, + }, + { + ContainerName: "container2", + Target: apiv1.ResourceList{ + apiv1.ResourceCPU: resource.MustParse("1"), + }, + }, + }, + pod: apiv1.Pod{ + Spec: apiv1.PodSpec{ + Containers: []apiv1.Container{ + { + Resources: apiv1.ResourceRequirements{ + Requests: apiv1.ResourceList{ + apiv1.ResourceCPU: resource.MustParse("1"), + }, + Limits: apiv1.ResourceList{ + apiv1.ResourceCPU: resource.MustParse("1"), + }, + }, + }, + { + Resources: apiv1.ResourceRequirements{ + Requests: apiv1.ResourceList{ + apiv1.ResourceCPU: resource.MustParse("1"), + }, + Limits: apiv1.ResourceList{ + apiv1.ResourceCPU: resource.MustParse("1"), + }, + }, + }, + }, + }, + }, + limitRange: apiv1.LimitRangeItem{ + Max: apiv1.ResourceList{ + apiv1.ResourceCPU: resource.MustParse("1"), + }, + }, + resourceName: apiv1.ResourceCPU, + expect: []vpa_types.RecommendedContainerResources{ + { + ContainerName: "container1", + Target: apiv1.ResourceList{ + apiv1.ResourceCPU: *resource.NewMilliQuantity(500, resource.DecimalSI), + }, + }, + { + ContainerName: "container2", + Target: apiv1.ResourceList{ + apiv1.ResourceCPU: *resource.NewMilliQuantity(500, resource.DecimalSI), + }, + }, + }, + }, + { + name: "cap cpu to max", + resources: []vpa_types.RecommendedContainerResources{ + { + ContainerName: "container1", + Target: apiv1.ResourceList{ + apiv1.ResourceCPU: resource.MustParse("1"), + }, + }, + { + ContainerName: "container2", + Target: apiv1.ResourceList{ + apiv1.ResourceCPU: resource.MustParse("1"), + }, + }, + }, + pod: apiv1.Pod{ + Spec: apiv1.PodSpec{ + Containers: []apiv1.Container{ + { + Resources: apiv1.ResourceRequirements{ + Requests: apiv1.ResourceList{ + apiv1.ResourceCPU: resource.MustParse("1"), + }, + Limits: apiv1.ResourceList{ + apiv1.ResourceCPU: resource.MustParse("1"), + }, + }, + }, + { + Resources: apiv1.ResourceRequirements{ + Requests: apiv1.ResourceList{ + apiv1.ResourceCPU: resource.MustParse("1"), + }, + Limits: apiv1.ResourceList{ + apiv1.ResourceCPU: resource.MustParse("1"), + }, + }, + }, + }, + }, + }, + limitRange: apiv1.LimitRangeItem{ + Max: apiv1.ResourceList{ + apiv1.ResourceCPU: resource.MustParse("1"), + }, + }, + resourceName: apiv1.ResourceCPU, + expect: []vpa_types.RecommendedContainerResources{ + { + ContainerName: "container1", + Target: apiv1.ResourceList{ + apiv1.ResourceCPU: *resource.NewMilliQuantity(500, resource.DecimalSI), + }, + }, + { + ContainerName: "container2", + Target: apiv1.ResourceList{ + apiv1.ResourceCPU: *resource.NewMilliQuantity(500, resource.DecimalSI), + }, + }, + }, + }, + { + name: "cap mem to min", + resources: []vpa_types.RecommendedContainerResources{ + { + ContainerName: "container1", + Target: apiv1.ResourceList{ + apiv1.ResourceMemory: resource.MustParse("1G"), + }, + }, + { + ContainerName: "container2", + Target: apiv1.ResourceList{ + apiv1.ResourceMemory: resource.MustParse("1G"), + }, + }, + }, + pod: apiv1.Pod{ + Spec: apiv1.PodSpec{ + Containers: 
[]apiv1.Container{ + { + Resources: apiv1.ResourceRequirements{ + Requests: apiv1.ResourceList{ + apiv1.ResourceMemory: resource.MustParse("1"), + }, + Limits: apiv1.ResourceList{ + apiv1.ResourceMemory: resource.MustParse("1"), + }, + }, + }, + { + Resources: apiv1.ResourceRequirements{ + Requests: apiv1.ResourceList{ + apiv1.ResourceMemory: resource.MustParse("1"), + }, + Limits: apiv1.ResourceList{ + apiv1.ResourceMemory: resource.MustParse("1"), + }, + }, + }, + }, + }, + }, + limitRange: apiv1.LimitRangeItem{ + Min: apiv1.ResourceList{ + apiv1.ResourceMemory: resource.MustParse("4G"), + }, + }, + resourceName: apiv1.ResourceMemory, + expect: []vpa_types.RecommendedContainerResources{ + { + ContainerName: "container1", + Target: apiv1.ResourceList{ + apiv1.ResourceMemory: resource.MustParse("2000000000000m"), + }, + }, + { + ContainerName: "container2", + Target: apiv1.ResourceList{ + apiv1.ResourceMemory: resource.MustParse("2000000000000m"), + }, + }, + }, + }, + } + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + got := applyPodLimitRange(tc.resources, &tc.pod, tc.limitRange, tc.resourceName) + assert.Equal(t, tc.expect, got) + }) + } +} From fd02fbd99c9986deaaa0f082c479940a4a978667 Mon Sep 17 00:00:00 2001 From: Joachim Bartosik Date: Mon, 3 Jun 2019 14:49:50 +0200 Subject: [PATCH 19/25] Minimums of limit range apply also to requests --- .../pkg/admission-controller/main.go | 3 +- .../pkg/utils/vpa/capping.go | 31 ++++- .../pkg/utils/vpa/capping_test.go | 125 ++++++++++++++++++ 3 files changed, 155 insertions(+), 4 deletions(-) diff --git a/vertical-pod-autoscaler/pkg/admission-controller/main.go b/vertical-pod-autoscaler/pkg/admission-controller/main.go index 80e0ad7dd4a0..901ccdbaae8b 100644 --- a/vertical-pod-autoscaler/pkg/admission-controller/main.go +++ b/vertical-pod-autoscaler/pkg/admission-controller/main.go @@ -19,7 +19,7 @@ package main import ( "flag" "fmt" - "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/limitrange" + "net/http" "os" "time" @@ -29,6 +29,7 @@ import ( "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/admission-controller/logic" vpa_clientset "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/client/clientset/versioned" "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/target" + "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/limitrange" "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/metrics" metrics_admission "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/metrics/admission" vpa_api_util "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/vpa" diff --git a/vertical-pod-autoscaler/pkg/utils/vpa/capping.go b/vertical-pod-autoscaler/pkg/utils/vpa/capping.go index 38b33056a19a..cc4cd43d19b8 100644 --- a/vertical-pod-autoscaler/pkg/utils/vpa/capping.go +++ b/vertical-pod-autoscaler/pkg/utils/vpa/capping.go @@ -286,10 +286,24 @@ func getMaxAllowedRecommendation(recommendation apiv1.ResourceList, container ap func getMinAllowedRecommendation(recommendation apiv1.ResourceList, container apiv1.Container, podLimitRange *apiv1.LimitRangeItem) apiv1.ResourceList { + // Both limit and request must be higher than min set in the limit range: + // https://github.com/kubernetes/kubernetes/blob/016e9d5c06089774c6286fd825302cbae661a446/plugin/pkg/admission/limitranger/admission.go#L303 if podLimitRange == nil { return apiv1.ResourceList{} } - return getBoundaryRecommendation(recommendation, container, podLimitRange.Min, podLimitRange.Default) + minForLimit := getBoundaryRecommendation(recommendation, container, podLimitRange.Min, 
podLimitRange.Default) + minForRequest := podLimitRange.Min + if minForRequest == nil { + return minForLimit + } + result := minForLimit + if minForRequest.Cpu() != nil && minForRequest.Cpu().Cmp(*minForLimit.Cpu()) > 0 { + result[apiv1.ResourceCPU] = *minForRequest.Cpu() + } + if minForRequest.Memory() != nil && minForRequest.Memory().Cmp(*minForLimit.Memory()) > 0 { + result[apiv1.ResourceMemory] = *minForRequest.Memory() + } + return result } func getBoundaryRecommendation(recommendation apiv1.ResourceList, container apiv1.Container, @@ -311,7 +325,7 @@ func applyPodLimitRange(resources []vpa_types.RecommendedContainerResources, maxLimit := limitRange.Max[resourceName] defaultLimit := limitRange.Default[resourceName] - var sumLimit resource.Quantity + var sumLimit, sumRecommendation resource.Quantity for i, container := range pod.Spec.Containers { if i >= len(resources) { continue @@ -323,10 +337,21 @@ func applyPodLimitRange(resources []vpa_types.RecommendedContainerResources, if containerLimit != nil { sumLimit.Add(*containerLimit) } + sumRecommendation.Add(recommendation) + } + if minLimit.Cmp(sumLimit) <= 0 && minLimit.Cmp(sumRecommendation) <= 0 && (maxLimit.IsZero() || maxLimit.Cmp(sumLimit) >= 0) { + return resources } - if minLimit.Cmp(sumLimit) <= 0 && (maxLimit.IsZero() || maxLimit.Cmp(sumLimit) >= 0) { + + if minLimit.Cmp(sumRecommendation) > 0 { + for i := range pod.Spec.Containers { + limit := resources[i].Target[resourceName] + cappedContainerRequest, _ := scaleQuantityProportionally(&limit, &sumRecommendation, &minLimit) + resources[i].Target[resourceName] = *cappedContainerRequest + } return resources } + var targetTotalLimit resource.Quantity if minLimit.Cmp(sumLimit) > 0 { targetTotalLimit = minLimit diff --git a/vertical-pod-autoscaler/pkg/utils/vpa/capping_test.go b/vertical-pod-autoscaler/pkg/utils/vpa/capping_test.go index 69f7c25d9c1f..35c44e3e8ca3 100644 --- a/vertical-pod-autoscaler/pkg/utils/vpa/capping_test.go +++ b/vertical-pod-autoscaler/pkg/utils/vpa/capping_test.go @@ -488,6 +488,73 @@ func TestApplyPodLimitRange(t *testing.T) { }, }, }, + { + name: "cap mem request to min", + resources: []vpa_types.RecommendedContainerResources{ + { + ContainerName: "container1", + Target: apiv1.ResourceList{ + apiv1.ResourceMemory: resource.MustParse("1G"), + }, + }, + { + ContainerName: "container2", + Target: apiv1.ResourceList{ + apiv1.ResourceMemory: resource.MustParse("1G"), + }, + }, + }, + pod: apiv1.Pod{ + Spec: apiv1.PodSpec{ + Containers: []apiv1.Container{ + { + Resources: apiv1.ResourceRequirements{ + Requests: apiv1.ResourceList{ + apiv1.ResourceMemory: resource.MustParse("1"), + }, + Limits: apiv1.ResourceList{ + apiv1.ResourceMemory: resource.MustParse("2"), + }, + }, + }, + { + Resources: apiv1.ResourceRequirements{ + Requests: apiv1.ResourceList{ + apiv1.ResourceMemory: resource.MustParse("1"), + }, + Limits: apiv1.ResourceList{ + apiv1.ResourceMemory: resource.MustParse("2"), + }, + }, + }, + }, + }, + }, + limitRange: apiv1.LimitRangeItem{ + Type: apiv1.LimitTypePod, + Max: apiv1.ResourceList{ + apiv1.ResourceCPU: resource.MustParse("10G"), + }, + Min: apiv1.ResourceList{ + apiv1.ResourceMemory: resource.MustParse("4G"), + }, + }, + resourceName: apiv1.ResourceMemory, + expect: []vpa_types.RecommendedContainerResources{ + { + ContainerName: "container1", + Target: apiv1.ResourceList{ + apiv1.ResourceMemory: resource.MustParse("2000000000000m"), + }, + }, + { + ContainerName: "container2", + Target: apiv1.ResourceList{ + apiv1.ResourceMemory: 
resource.MustParse("2000000000000m"), + }, + }, + }, + }, } for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { @@ -496,3 +563,61 @@ func TestApplyPodLimitRange(t *testing.T) { }) } } + +func TestApplyLimitRangeMinToRequest(t *testing.T) { + limitRange := apiv1.LimitRangeItem{ + Type: apiv1.LimitTypeContainer, + Min: apiv1.ResourceList{ + apiv1.ResourceMemory: resource.MustParse("500M"), + }, + } + recommendation := vpa_types.RecommendedPodResources{ + ContainerRecommendations: []vpa_types.RecommendedContainerResources{ + { + ContainerName: "container", + Target: apiv1.ResourceList{ + apiv1.ResourceCPU: resource.MustParse("1"), + apiv1.ResourceMemory: resource.MustParse("200M"), + }, + }, + }, + } + pod := apiv1.Pod{ + Spec: apiv1.PodSpec{ + Containers: []apiv1.Container{ + { + Name: "container", + Resources: apiv1.ResourceRequirements{ + Requests: apiv1.ResourceList{ + apiv1.ResourceCPU: resource.MustParse("1"), + apiv1.ResourceMemory: resource.MustParse("50M"), + }, + Limits: apiv1.ResourceList{ + apiv1.ResourceCPU: resource.MustParse("1"), + apiv1.ResourceMemory: resource.MustParse("100M"), + }, + }, + }, + }, + }, + } + expectedRecommendation := vpa_types.RecommendedPodResources{ + ContainerRecommendations: []vpa_types.RecommendedContainerResources{ + { + ContainerName: "container", + Target: apiv1.ResourceList{ + apiv1.ResourceCPU: resource.MustParse("1"), + apiv1.ResourceMemory: resource.MustParse("500M"), + }, + }, + }, + } + + calculator := fakeLimitRangeCalculator{containerLimitRange: limitRange} + processor := NewCappingRecommendationProcessor(&calculator) + processedRecommendation, annotations, err := processor.Apply(&recommendation, nil, nil, &pod) + assert.NoError(t, err) + assert.Contains(t, annotations, "container") + assert.ElementsMatch(t, []string{"memory capped to fit Min in container LimitRange"}, annotations["container"]) + assert.Equal(t, expectedRecommendation, *processedRecommendation) +} From 487b5eb13c9398aa06fd9e6bda5fbf45c887da33 Mon Sep 17 00:00:00 2001 From: Joachim Bartosik Date: Tue, 4 Jun 2019 11:13:54 +0200 Subject: [PATCH 20/25] Fix e2e testing min from limit range Min from limit range applies to both limit and request, adjust e2e tests to work with that. --- vertical-pod-autoscaler/e2e/v1beta2/admission_controller.go | 5 +++-- vertical-pod-autoscaler/e2e/v1beta2/updater.go | 6 +++--- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/vertical-pod-autoscaler/e2e/v1beta2/admission_controller.go b/vertical-pod-autoscaler/e2e/v1beta2/admission_controller.go index 4ac3c25a9b87..d4da5385ae50 100644 --- a/vertical-pod-autoscaler/e2e/v1beta2/admission_controller.go +++ b/vertical-pod-autoscaler/e2e/v1beta2/admission_controller.go @@ -174,10 +174,11 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", func() { } InstallVPA(f, vpaCRD) - // Min CPU limit is 75m and ratio is 1.5, so min request is 50m + // Min CPU from limit range is 50m and ratio is 1.5. Min applies to both limit and request so min + // request is 50m and min limit is 75 // Min memory limit is 250Mi and ratio is 2., so min request is 125Mi, while // recommendation is 100Mi. 
- InstallLimitRangeWithMin(f, "75m", "250Mi") + InstallLimitRangeWithMin(f, "50m", "250Mi") ginkgo.By("Setting up a hamster deployment") podList := startDeploymentPods(f, d) diff --git a/vertical-pod-autoscaler/e2e/v1beta2/updater.go b/vertical-pod-autoscaler/e2e/v1beta2/updater.go index 89b8492a726b..7f438ac0f28b 100644 --- a/vertical-pod-autoscaler/e2e/v1beta2/updater.go +++ b/vertical-pod-autoscaler/e2e/v1beta2/updater.go @@ -149,10 +149,10 @@ var _ = UpdaterE2eDescribe("Updater", func() { ginkgo.By("Setting up a VPA CRD") SetupVPA(f, "50m", vpa_types.UpdateModeAuto, hamsterTargetRef) - // Min CPU limit is 300m and ratio is 3., so min request is 100m, while - // recommendation is 200m + // Min CPU from limit range is 100m and ratio is 3. Min applies both to limit and request so min + // request is 100m request and 300m limit // Min memory limit is 0 and ratio is 2., so min request is 0 - InstallLimitRangeWithMin(f, "300m", "0") + InstallLimitRangeWithMin(f, "100m", "0") ginkgo.By(fmt.Sprintf("Waiting for pods to be evicted, hoping it won't happen, sleep for %s", VpaEvictionTimeout.String())) CheckNoPodsEvicted(f, MakePodSet(podList)) From 18e3ad2c58f50ed80f1a8cbf6a081a8f4fa1d883 Mon Sep 17 00:00:00 2001 From: Joachim Bartosik Date: Tue, 4 Jun 2019 11:42:57 +0200 Subject: [PATCH 21/25] Ee2 test for pod limit range support --- .../e2e/v1beta2/admission_controller.go | 109 +++++++++++++++++- vertical-pod-autoscaler/e2e/v1beta2/common.go | 39 ++++++- .../e2e/v1beta2/updater.go | 50 +++++++- 3 files changed, 184 insertions(+), 14 deletions(-) diff --git a/vertical-pod-autoscaler/e2e/v1beta2/admission_controller.go b/vertical-pod-autoscaler/e2e/v1beta2/admission_controller.go index d4da5385ae50..54c3891b9c6c 100644 --- a/vertical-pod-autoscaler/e2e/v1beta2/admission_controller.go +++ b/vertical-pod-autoscaler/e2e/v1beta2/admission_controller.go @@ -116,7 +116,7 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", func() { } }) - ginkgo.It("caps request according to max limit set in LimitRange", func() { + ginkgo.It("caps request according to container max limit set in LimitRange", func() { d := NewHamsterDeploymentWithResourcesAndLimits(f, ParseQuantityOrDie("100m") /*cpu request*/, ParseQuantityOrDie("100Mi"), /*memory request*/ ParseQuantityOrDie("150m") /*cpu limit*/, ParseQuantityOrDie("200Mi") /*memory limit*/) @@ -137,7 +137,7 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", func() { // Max CPU limit is 300m and ratio is 1.5, so max request is 200m, while // recommendation is 250m // Max memory limit is 1Gi and ratio is 2., so max request is 0.5Gi - InstallLimitRangeWithMax(f, "300m", "1Gi") + InstallLimitRangeWithMax(f, "300m", "1Gi", apiv1.LimitTypeContainer) ginkgo.By("Setting up a hamster deployment") podList := startDeploymentPods(f, d) @@ -156,7 +156,7 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", func() { } }) - ginkgo.It("raises request according to min limit set in LimitRange", func() { + ginkgo.It("raises request according to container min limit set in LimitRange", func() { d := NewHamsterDeploymentWithResourcesAndLimits(f, ParseQuantityOrDie("100m") /*cpu request*/, ParseQuantityOrDie("200Mi"), /*memory request*/ ParseQuantityOrDie("150m") /*cpu limit*/, ParseQuantityOrDie("400Mi") /*memory limit*/) @@ -178,7 +178,108 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", func() { // request is 50m and min limit is 75 // Min memory limit is 250Mi and ratio is 2., so min request is 125Mi, while // recommendation is 
100Mi. - InstallLimitRangeWithMin(f, "50m", "250Mi") + InstallLimitRangeWithMin(f, "50m", "250Mi", apiv1.LimitTypeContainer) + + ginkgo.By("Setting up a hamster deployment") + podList := startDeploymentPods(f, d) + + // Originally Pods had 100m CPU, 200Mi of memory, but admission controller + // should change it to 250m CPU and 125Mi of memory, since this is the lowest + // request that limitrange allows. + // Limit to request ratio should stay unchanged. + for _, pod := range podList.Items { + gomega.Expect(*pod.Spec.Containers[0].Resources.Requests.Cpu()).To(gomega.Equal(ParseQuantityOrDie("250m"))) + gomega.Expect(*pod.Spec.Containers[0].Resources.Requests.Memory()).To(gomega.Equal(ParseQuantityOrDie("125Mi"))) + gomega.Expect(pod.Spec.Containers[0].Resources.Limits.Cpu().MilliValue()).To(gomega.BeNumerically(">=", 75)) + gomega.Expect(pod.Spec.Containers[0].Resources.Limits.Memory().Value()).To(gomega.BeNumerically(">=", 250*1024*1024)) + gomega.Expect(float64(pod.Spec.Containers[0].Resources.Limits.Cpu().MilliValue()) / float64(pod.Spec.Containers[0].Resources.Requests.Cpu().MilliValue())).To(gomega.BeNumerically("~", 1.5)) + gomega.Expect(float64(pod.Spec.Containers[0].Resources.Limits.Memory().Value()) / float64(pod.Spec.Containers[0].Resources.Requests.Memory().Value())).To(gomega.BeNumerically("~", 2.)) + } + }) + + ginkgo.It("caps request according to pod max limit set in pod LimitRange", func() { + d := NewHamsterDeploymentWithResourcesAndLimits(f, + ParseQuantityOrDie("100m") /*cpu request*/, ParseQuantityOrDie("100Mi"), /*memory request*/ + ParseQuantityOrDie("150m") /*cpu limit*/, ParseQuantityOrDie("200Mi") /*memory limit*/) + d.Spec.Template.Spec.Containers = append(d.Spec.Template.Spec.Containers, d.Spec.Template.Spec.Containers[0]) + d.Spec.Template.Spec.Containers[1].Name = "hamster2" + ginkgo.By("Setting up a VPA CRD") + vpaCRD := NewVPA(f, "hamster-vpa", hamsterTargetRef) + vpaCRD.Status.Recommendation = &vpa_types.RecommendedPodResources{ + ContainerRecommendations: []vpa_types.RecommendedContainerResources{ + { + ContainerName: "hamster", + Target: apiv1.ResourceList{ + apiv1.ResourceCPU: ParseQuantityOrDie("250m"), + apiv1.ResourceMemory: ParseQuantityOrDie("200Mi"), + }, + }, + { + ContainerName: "hamster2", + Target: apiv1.ResourceList{ + apiv1.ResourceCPU: ParseQuantityOrDie("250m"), + apiv1.ResourceMemory: ParseQuantityOrDie("200Mi"), + }, + }, + }, + } + InstallVPA(f, vpaCRD) + + // Max CPU limit is 600m for pod, 300 per container and ratio is 1.5, so max request is 200m, + // while recommendation is 250m + // Max memory limit is 1Gi and ratio is 2., so max request is 0.5Gi + InstallLimitRangeWithMax(f, "600m", "1Gi", apiv1.LimitTypePod) + + ginkgo.By("Setting up a hamster deployment") + podList := startDeploymentPods(f, d) + + // Originally Pods had 100m CPU, 100Mi of memory, but admission controller + // should change it to 200m CPU (as this is the recommendation + // capped according to max limit in LimitRange) and 200Mi of memory, + // which is uncapped. Limit to request ratio should stay unchanged. 
+ for _, pod := range podList.Items { + gomega.Expect(*pod.Spec.Containers[0].Resources.Requests.Cpu()).To(gomega.Equal(ParseQuantityOrDie("200m"))) + gomega.Expect(*pod.Spec.Containers[0].Resources.Requests.Memory()).To(gomega.Equal(ParseQuantityOrDie("200Mi"))) + gomega.Expect(pod.Spec.Containers[0].Resources.Limits.Cpu().MilliValue()).To(gomega.BeNumerically("<=", 300)) + gomega.Expect(pod.Spec.Containers[0].Resources.Limits.Memory().Value()).To(gomega.BeNumerically("<=", 1024*1024*1024)) + gomega.Expect(float64(pod.Spec.Containers[0].Resources.Limits.Cpu().MilliValue()) / float64(pod.Spec.Containers[0].Resources.Requests.Cpu().MilliValue())).To(gomega.BeNumerically("~", 1.5)) + gomega.Expect(float64(pod.Spec.Containers[0].Resources.Limits.Memory().Value()) / float64(pod.Spec.Containers[0].Resources.Requests.Memory().Value())).To(gomega.BeNumerically("~", 2.)) + } + }) + + ginkgo.It("raises request according to pod min limit set in pod LimitRange", func() { + d := NewHamsterDeploymentWithResourcesAndLimits(f, + ParseQuantityOrDie("100m") /*cpu request*/, ParseQuantityOrDie("200Mi"), /*memory request*/ + ParseQuantityOrDie("150m") /*cpu limit*/, ParseQuantityOrDie("400Mi") /*memory limit*/) + d.Spec.Template.Spec.Containers = append(d.Spec.Template.Spec.Containers, d.Spec.Template.Spec.Containers[0]) + d.Spec.Template.Spec.Containers[1].Name = "hamster2" + ginkgo.By("Setting up a VPA CRD") + vpaCRD := NewVPA(f, "hamster-vpa", hamsterTargetRef) + vpaCRD.Status.Recommendation = &vpa_types.RecommendedPodResources{ + ContainerRecommendations: []vpa_types.RecommendedContainerResources{ + { + ContainerName: "hamster", + Target: apiv1.ResourceList{ + apiv1.ResourceCPU: ParseQuantityOrDie("250m"), + apiv1.ResourceMemory: ParseQuantityOrDie("100Mi"), // memory is downscaled + }, + }, + { + ContainerName: "hamster2", + Target: apiv1.ResourceList{ + apiv1.ResourceCPU: ParseQuantityOrDie("250m"), + apiv1.ResourceMemory: ParseQuantityOrDie("100Mi"), // memory is downscaled + }, + }, + }, + } + InstallVPA(f, vpaCRD) + + // Min CPU from limit range is 100m, 50m per pod and ratio is 1.5. Min applies to both limit and + // request so min request is 50m and min limit is 75 + // Min memory limit is 500Mi per pod, 250 per container and ratio is 2., so min request is 125Mi, while + // recommendation is 100Mi. + InstallLimitRangeWithMin(f, "100m", "500Mi", apiv1.LimitTypePod) ginkgo.By("Setting up a hamster deployment") podList := startDeploymentPods(f, d) diff --git a/vertical-pod-autoscaler/e2e/v1beta2/common.go b/vertical-pod-autoscaler/e2e/v1beta2/common.go index c5015eaba183..211d8f0b8489 100644 --- a/vertical-pod-autoscaler/e2e/v1beta2/common.go +++ b/vertical-pod-autoscaler/e2e/v1beta2/common.go @@ -182,6 +182,33 @@ func SetupVPA(f *framework.Framework, cpu string, mode vpa_types.UpdateMode, tar InstallVPA(f, vpaCRD) } +// SetupVPAForTwoHamsters creates and installs a simple pod with two hamster containers for e2e test purposes. 
+func SetupVPAForTwoHamsters(f *framework.Framework, cpu string, mode vpa_types.UpdateMode, targetRef *autoscaling.CrossVersionObjectReference) { + vpaCRD := NewVPA(f, "hamster-vpa", targetRef) + vpaCRD.Spec.UpdatePolicy.UpdateMode = &mode + + cpuQuantity := ParseQuantityOrDie(cpu) + resourceList := apiv1.ResourceList{apiv1.ResourceCPU: cpuQuantity} + + vpaCRD.Status.Recommendation = &vpa_types.RecommendedPodResources{ + ContainerRecommendations: []vpa_types.RecommendedContainerResources{ + { + ContainerName: "hamster", + Target: resourceList, + LowerBound: resourceList, + UpperBound: resourceList, + }, + { + ContainerName: "hamster2", + Target: resourceList, + LowerBound: resourceList, + UpperBound: resourceList, + }, + }, + } + InstallVPA(f, vpaCRD) +} + // NewVPA creates a VPA object for e2e test purposes. func NewVPA(f *framework.Framework, name string, targetRef *autoscaling.CrossVersionObjectReference) *vpa_types.VerticalPodAutoscaler { updateMode := vpa_types.UpdateModeAuto @@ -347,7 +374,7 @@ func WaitForRecommendationPresent(c *vpa_clientset.Clientset, vpa *vpa_types.Ver }) } -func installLimitRange(f *framework.Framework, minCpuLimit, minMemoryLimit, maxCpuLimit, maxMemoryLimit *resource.Quantity) { +func installLimitRange(f *framework.Framework, minCpuLimit, minMemoryLimit, maxCpuLimit, maxMemoryLimit *resource.Quantity, lrType apiv1.LimitType) { lr := &apiv1.LimitRange{ ObjectMeta: metav1.ObjectMeta{ Namespace: f.Namespace.Name, @@ -360,7 +387,7 @@ func installLimitRange(f *framework.Framework, minCpuLimit, minMemoryLimit, maxC if maxMemoryLimit != nil || maxCpuLimit != nil { lrItem := apiv1.LimitRangeItem{ - Type: apiv1.LimitTypeContainer, + Type: lrType, Max: apiv1.ResourceList{}, } if maxCpuLimit != nil { @@ -390,17 +417,17 @@ func installLimitRange(f *framework.Framework, minCpuLimit, minMemoryLimit, maxC } // InstallLimitRangeWithMax installs a LimitRange with a maximum limit for CPU and memory. -func InstallLimitRangeWithMax(f *framework.Framework, maxCpuLimit, maxMemoryLimit string) { +func InstallLimitRangeWithMax(f *framework.Framework, maxCpuLimit, maxMemoryLimit string, lrType apiv1.LimitType) { ginkgo.By(fmt.Sprintf("Setting up LimitRange with max limits - CPU: %v, memory: %v", maxCpuLimit, maxMemoryLimit)) maxCpuLimitQuantity := ParseQuantityOrDie(maxCpuLimit) maxMemoryLimitQuantity := ParseQuantityOrDie(maxMemoryLimit) - installLimitRange(f, nil, nil, &maxCpuLimitQuantity, &maxMemoryLimitQuantity) + installLimitRange(f, nil, nil, &maxCpuLimitQuantity, &maxMemoryLimitQuantity, lrType) } // InstallLimitRangeWithMin installs a LimitRange with a minimum limit for CPU and memory. 
-func InstallLimitRangeWithMin(f *framework.Framework, minCpuLimit, minMemoryLimit string) { +func InstallLimitRangeWithMin(f *framework.Framework, minCpuLimit, minMemoryLimit string, lrType apiv1.LimitType) { ginkgo.By(fmt.Sprintf("Setting up LimitRange with min limits - CPU: %v, memory: %v", minCpuLimit, minMemoryLimit)) minCpuLimitQuantity := ParseQuantityOrDie(minCpuLimit) minMemoryLimitQuantity := ParseQuantityOrDie(minMemoryLimit) - installLimitRange(f, &minCpuLimitQuantity, &minMemoryLimitQuantity, nil, nil) + installLimitRange(f, &minCpuLimitQuantity, &minMemoryLimitQuantity, nil, nil, lrType) } diff --git a/vertical-pod-autoscaler/e2e/v1beta2/updater.go b/vertical-pod-autoscaler/e2e/v1beta2/updater.go index 7f438ac0f28b..012adee959bf 100644 --- a/vertical-pod-autoscaler/e2e/v1beta2/updater.go +++ b/vertical-pod-autoscaler/e2e/v1beta2/updater.go @@ -120,7 +120,7 @@ var _ = UpdaterE2eDescribe("Updater", func() { gomega.Expect(evictedCount >= permissiveMaxUnavailable).To(gomega.BeTrue()) }) - ginkgo.It("observes max in LimitRange", func() { + ginkgo.It("observes container max in LimitRange", func() { ginkgo.By("Setting up a hamster deployment") d := NewHamsterDeploymentWithResourcesAndLimits(f, ParseQuantityOrDie("100m") /*cpu request*/, ParseQuantityOrDie("200Mi"), /*memory request*/ @@ -133,13 +133,13 @@ var _ = UpdaterE2eDescribe("Updater", func() { // Max CPU limit is 300m and ratio is 3., so max request is 100m, while // recommendation is 200m // Max memory limit is 1T and ratio is 2., so max request is 0.5T - InstallLimitRangeWithMax(f, "300m", "1T") + InstallLimitRangeWithMax(f, "300m", "1T", apiv1.LimitTypeContainer) ginkgo.By(fmt.Sprintf("Waiting for pods to be evicted, hoping it won't happen, sleep for %s", VpaEvictionTimeout.String())) CheckNoPodsEvicted(f, MakePodSet(podList)) }) - ginkgo.It("observes min in LimitRange", func() { + ginkgo.It("observes container min in LimitRange", func() { ginkgo.By("Setting up a hamster deployment") d := NewHamsterDeploymentWithResourcesAndLimits(f, ParseQuantityOrDie("100m") /*cpu request*/, ParseQuantityOrDie("200Mi"), /*memory request*/ @@ -152,7 +152,49 @@ var _ = UpdaterE2eDescribe("Updater", func() { // Min CPU from limit range is 100m and ratio is 3. 
Min applies both to limit and request so min // request is 100m request and 300m limit // Min memory limit is 0 and ratio is 2., so min request is 0 - InstallLimitRangeWithMin(f, "100m", "0") + InstallLimitRangeWithMin(f, "100m", "0", apiv1.LimitTypeContainer) + + ginkgo.By(fmt.Sprintf("Waiting for pods to be evicted, hoping it won't happen, sleep for %s", VpaEvictionTimeout.String())) + CheckNoPodsEvicted(f, MakePodSet(podList)) + }) + + ginkgo.It("observes pod max in LimitRange", func() { + ginkgo.By("Setting up a hamster deployment") + d := NewHamsterDeploymentWithResourcesAndLimits(f, + ParseQuantityOrDie("100m") /*cpu request*/, ParseQuantityOrDie("200Mi"), /*memory request*/ + ParseQuantityOrDie("300m") /*cpu limit*/, ParseQuantityOrDie("400Mi") /*memory limit*/) + d.Spec.Template.Spec.Containers = append(d.Spec.Template.Spec.Containers, d.Spec.Template.Spec.Containers[0]) + d.Spec.Template.Spec.Containers[1].Name = "hamster2" + podList := startDeploymentPods(f, d) + + ginkgo.By("Setting up a VPA CRD") + SetupVPAForTwoHamsters(f, "200m", vpa_types.UpdateModeAuto, hamsterTargetRef) + + // Max CPU limit is 600m per pod, 300m per container and ratio is 3., so max request is 100m, + // while recommendation is 200m + // Max memory limit is 2T per pod, 1T per container and ratio is 2., so max request is 0.5T + InstallLimitRangeWithMax(f, "600m", "2T", apiv1.LimitTypePod) + + ginkgo.By(fmt.Sprintf("Waiting for pods to be evicted, hoping it won't happen, sleep for %s", VpaEvictionTimeout.String())) + CheckNoPodsEvicted(f, MakePodSet(podList)) + }) + + ginkgo.It("observes pod min in LimitRange", func() { + ginkgo.By("Setting up a hamster deployment") + d := NewHamsterDeploymentWithResourcesAndLimits(f, + ParseQuantityOrDie("100m") /*cpu request*/, ParseQuantityOrDie("200Mi"), /*memory request*/ + ParseQuantityOrDie("300m") /*cpu limit*/, ParseQuantityOrDie("400Mi") /*memory limit*/) + d.Spec.Template.Spec.Containers = append(d.Spec.Template.Spec.Containers, d.Spec.Template.Spec.Containers[0]) + d.Spec.Template.Spec.Containers[1].Name = "hamster2" + podList := startDeploymentPods(f, d) + + ginkgo.By("Setting up a VPA CRD") + SetupVPAForTwoHamsters(f, "50m", vpa_types.UpdateModeAuto, hamsterTargetRef) + + // Min CPU from limit range is 200m per pod, 100m per container and ratio is 3. 
Min applies both + // to limit and request so min request is 100m request and 300m limit + // Min memory limit is 0 and ratio is 2., so min request is 0 + InstallLimitRangeWithMin(f, "200m", "0", apiv1.LimitTypePod) ginkgo.By(fmt.Sprintf("Waiting for pods to be evicted, hoping it won't happen, sleep for %s", VpaEvictionTimeout.String())) CheckNoPodsEvicted(f, MakePodSet(podList)) From 670aeb5c1b62894819226bb4129876c6c6c03f3e Mon Sep 17 00:00:00 2001 From: Joachim Bartosik Date: Wed, 5 Jun 2019 15:12:55 +0200 Subject: [PATCH 22/25] Clean up e2e vpa tests --- .../e2e/v1beta2/admission_controller.go | 4 ++-- vertical-pod-autoscaler/e2e/v1beta2/common.go | 18 +++++++++--------- .../pkg/utils/vpa/capping.go | 4 ++-- .../pkg/utils/vpa/capping_test.go | 4 ++-- 4 files changed, 15 insertions(+), 15 deletions(-) diff --git a/vertical-pod-autoscaler/e2e/v1beta2/admission_controller.go b/vertical-pod-autoscaler/e2e/v1beta2/admission_controller.go index 54c3891b9c6c..a50322ff6d7d 100644 --- a/vertical-pod-autoscaler/e2e/v1beta2/admission_controller.go +++ b/vertical-pod-autoscaler/e2e/v1beta2/admission_controller.go @@ -197,7 +197,7 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", func() { } }) - ginkgo.It("caps request according to pod max limit set in pod LimitRange", func() { + ginkgo.It("caps request according to pod max limit set in LimitRange", func() { d := NewHamsterDeploymentWithResourcesAndLimits(f, ParseQuantityOrDie("100m") /*cpu request*/, ParseQuantityOrDie("100Mi"), /*memory request*/ ParseQuantityOrDie("150m") /*cpu limit*/, ParseQuantityOrDie("200Mi") /*memory limit*/) @@ -247,7 +247,7 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", func() { } }) - ginkgo.It("raises request according to pod min limit set in pod LimitRange", func() { + ginkgo.It("raises request according to pod min limit set in LimitRange", func() { d := NewHamsterDeploymentWithResourcesAndLimits(f, ParseQuantityOrDie("100m") /*cpu request*/, ParseQuantityOrDie("200Mi"), /*memory request*/ ParseQuantityOrDie("150m") /*cpu limit*/, ParseQuantityOrDie("400Mi") /*memory limit*/) diff --git a/vertical-pod-autoscaler/e2e/v1beta2/common.go b/vertical-pod-autoscaler/e2e/v1beta2/common.go index 211d8f0b8489..ddbdf9ec46cc 100644 --- a/vertical-pod-autoscaler/e2e/v1beta2/common.go +++ b/vertical-pod-autoscaler/e2e/v1beta2/common.go @@ -103,9 +103,9 @@ func SetupHamsterDeployment(f *framework.Framework, cpu, memory string, replicas d := NewHamsterDeploymentWithResources(f, cpuQuantity, memoryQuantity) d.Spec.Replicas = &replicas d, err := f.ClientSet.AppsV1().Deployments(f.Namespace.Name).Create(d) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "unexpected error when starting deployment creation") err = framework.WaitForDeploymentComplete(f.ClientSet, d) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "unexpected error waiting for deployment creation to finish") return d } @@ -234,18 +234,18 @@ func NewVPA(f *framework.Framework, name string, targetRef *autoscaling.CrossVer func InstallVPA(f *framework.Framework, vpa *vpa_types.VerticalPodAutoscaler) { ns := f.Namespace.Name config, err := framework.LoadConfig() - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "unexpected error loading framework") vpaClientSet := vpa_clientset.NewForConfigOrDie(config) vpaClient := vpaClientSet.AutoscalingV1beta2() _, err = 
vpaClient.VerticalPodAutoscalers(ns).Create(vpa) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "unexpected error creating VPA") } // ParseQuantityOrDie parses quantity from string and dies with an error if // unparsable. func ParseQuantityOrDie(text string) resource.Quantity { quantity, err := resource.ParseQuantity(text) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "unexpected error parsing quantity: %s", text) return quantity } @@ -337,9 +337,9 @@ func GetEvictedPodsCount(currentPodSet PodSet, initialPodSet PodSet) int { func CheckNoPodsEvicted(f *framework.Framework, initialPodSet PodSet) { time.Sleep(VpaEvictionTimeout) currentPodList, err := GetHamsterPods(f) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "unexpected error when listing hamster pods to check number of pod evictions") restarted := GetEvictedPodsCount(MakePodSet(currentPodList), initialPodSet) - gomega.Expect(restarted).To(gomega.Equal(0)) + gomega.Expect(restarted).To(gomega.Equal(0), "there should be no pod evictions") } // WaitForVPAMatch pools VPA object until match function returns true. Returns @@ -401,7 +401,7 @@ func installLimitRange(f *framework.Framework, minCpuLimit, minMemoryLimit, maxC if minMemoryLimit != nil || minCpuLimit != nil { lrItem := apiv1.LimitRangeItem{ - Type: apiv1.LimitTypeContainer, + Type: lrType, Min: apiv1.ResourceList{}, } if minCpuLimit != nil { @@ -413,7 +413,7 @@ func installLimitRange(f *framework.Framework, minCpuLimit, minMemoryLimit, maxC lr.Spec.Limits = append(lr.Spec.Limits, lrItem) } _, err := f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).Create(lr) - gomega.Expect(err).NotTo(gomega.HaveOccurred()) + gomega.Expect(err).NotTo(gomega.HaveOccurred(), "unexpected error when creating limit range") } // InstallLimitRangeWithMax installs a LimitRange with a maximum limit for CPU and memory. 
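With the LimitRange type threaded through to installLimitRange, the same two helpers now cover both scopes. A minimal usage sketch, assuming the e2e fixture `f` and the quantities already used in this suite (illustrative only, not part of the committed diff):

	// Per-container bounds: each container's limit must fit under 300m CPU / 1Gi memory.
	InstallLimitRangeWithMax(f, "300m", "1Gi", apiv1.LimitTypeContainer)

	// Pod-level bounds: the sum of the containers' limits must fit under 600m CPU / 2T memory.
	InstallLimitRangeWithMax(f, "600m", "2T", apiv1.LimitTypePod)

	// Minimums work the same way; with LimitTypeContainer every container's request and limit
	// must end up at or above 50m CPU / 250Mi memory.
	InstallLimitRangeWithMin(f, "50m", "250Mi", apiv1.LimitTypeContainer)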
diff --git a/vertical-pod-autoscaler/pkg/utils/vpa/capping.go b/vertical-pod-autoscaler/pkg/utils/vpa/capping.go index cc4cd43d19b8..78772c1755bd 100644 --- a/vertical-pod-autoscaler/pkg/utils/vpa/capping.go +++ b/vertical-pod-autoscaler/pkg/utils/vpa/capping.go @@ -345,8 +345,8 @@ func applyPodLimitRange(resources []vpa_types.RecommendedContainerResources, if minLimit.Cmp(sumRecommendation) > 0 { for i := range pod.Spec.Containers { - limit := resources[i].Target[resourceName] - cappedContainerRequest, _ := scaleQuantityProportionally(&limit, &sumRecommendation, &minLimit) + request := resources[i].Target[resourceName] + cappedContainerRequest, _ := scaleQuantityProportionally(&request, &sumRecommendation, &minLimit) resources[i].Target[resourceName] = *cappedContainerRequest } return resources diff --git a/vertical-pod-autoscaler/pkg/utils/vpa/capping_test.go b/vertical-pod-autoscaler/pkg/utils/vpa/capping_test.go index 35c44e3e8ca3..6ccc3d32d15e 100644 --- a/vertical-pod-autoscaler/pkg/utils/vpa/capping_test.go +++ b/vertical-pod-autoscaler/pkg/utils/vpa/capping_test.go @@ -489,7 +489,7 @@ func TestApplyPodLimitRange(t *testing.T) { }, }, { - name: "cap mem request to min", + name: "cap mem request to pod min", resources: []vpa_types.RecommendedContainerResources{ { ContainerName: "container1", @@ -533,7 +533,7 @@ func TestApplyPodLimitRange(t *testing.T) { limitRange: apiv1.LimitRangeItem{ Type: apiv1.LimitTypePod, Max: apiv1.ResourceList{ - apiv1.ResourceCPU: resource.MustParse("10G"), + apiv1.ResourceMemory: resource.MustParse("10G"), }, Min: apiv1.ResourceList{ apiv1.ResourceMemory: resource.MustParse("4G"), From 439c40719757d28a4b2c05b6f55c64095fe94bdd Mon Sep 17 00:00:00 2001 From: Joachim Bartosik Date: Thu, 6 Jun 2019 11:04:02 +0200 Subject: [PATCH 23/25] Fix pod limit range e2e tests It looks like the target not being within [lower bound, upper bound] is what was breaking the e2e tests.
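A minimal, self-contained illustration of the idea behind this change (the helper name, values and rounding below are assumptions made for this sketch, not the VPA implementation): if the target, lower bound and upper bound are all scaled by the same proportional factor when capping to a pod LimitRange, the target stays inside [lower bound, upper bound]; capping only the target can push it outside that interval.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

// scaleMilli scales q by num/den in milli-units; a stand-in for proportional scaling,
// used here only to demonstrate that the ordering of the three fields is preserved.
func scaleMilli(q resource.Quantity, num, den int64) resource.Quantity {
	return *resource.NewMilliQuantity(q.MilliValue()*num/den, q.Format)
}

func main() {
	lower := resource.MustParse("100m")
	target := resource.MustParse("200m")
	upper := resource.MustParse("400m")

	// Cap everything to half, e.g. because the summed pod recommendation exceeds a pod max limit.
	lower, target, upper = scaleMilli(lower, 1, 2), scaleMilli(target, 1, 2), scaleMilli(upper, 1, 2)

	// Because all three fields were scaled by the same factor, the invariant still holds.
	fmt.Println(lower.Cmp(target) <= 0 && target.Cmp(upper) <= 0) // true
}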
--- .../pkg/utils/vpa/capping.go | 23 ++++++++++++++----- .../pkg/utils/vpa/capping_test.go | 3 ++- 2 files changed, 19 insertions(+), 7 deletions(-) diff --git a/vertical-pod-autoscaler/pkg/utils/vpa/capping.go b/vertical-pod-autoscaler/pkg/utils/vpa/capping.go index 78772c1755bd..19a17a367d9b 100644 --- a/vertical-pod-autoscaler/pkg/utils/vpa/capping.go +++ b/vertical-pod-autoscaler/pkg/utils/vpa/capping.go @@ -320,7 +320,8 @@ func getBoundaryRecommendation(recommendation apiv1.ResourceList, container apiv } func applyPodLimitRange(resources []vpa_types.RecommendedContainerResources, - pod *apiv1.Pod, limitRange apiv1.LimitRangeItem, resourceName apiv1.ResourceName) []vpa_types.RecommendedContainerResources { + pod *apiv1.Pod, limitRange apiv1.LimitRangeItem, resourceName apiv1.ResourceName, + fieldGetter func(vpa_types.RecommendedContainerResources) *apiv1.ResourceList) []vpa_types.RecommendedContainerResources { minLimit := limitRange.Min[resourceName] maxLimit := limitRange.Max[resourceName] defaultLimit := limitRange.Default[resourceName] @@ -332,7 +333,7 @@ func applyPodLimitRange(resources []vpa_types.RecommendedContainerResources, } limit := container.Resources.Limits[resourceName] request := container.Resources.Requests[resourceName] - recommendation := resources[i].Target[resourceName] + recommendation := (*fieldGetter(resources[i]))[resourceName] containerLimit, _ := getProportionalResourceLimit(resourceName, &limit, &request, &recommendation, &defaultLimit) if containerLimit != nil { sumLimit.Add(*containerLimit) @@ -345,9 +346,9 @@ func applyPodLimitRange(resources []vpa_types.RecommendedContainerResources, if minLimit.Cmp(sumRecommendation) > 0 { for i := range pod.Spec.Containers { - request := resources[i].Target[resourceName] + request := (*fieldGetter(resources[i]))[resourceName] cappedContainerRequest, _ := scaleQuantityProportionally(&request, &sumRecommendation, &minLimit) - resources[i].Target[resourceName] = *cappedContainerRequest + (*fieldGetter(resources[i]))[resourceName] = *cappedContainerRequest } return resources } @@ -376,7 +377,17 @@ func (c *cappingRecommendationProcessor) capProportionallyToPodLimitRange( if podLimitRange == nil { return containerRecommendations, nil } - containerRecommendations = applyPodLimitRange(containerRecommendations, pod, *podLimitRange, apiv1.ResourceCPU) - containerRecommendations = applyPodLimitRange(containerRecommendations, pod, *podLimitRange, apiv1.ResourceMemory) + getTarget := func(rl vpa_types.RecommendedContainerResources) *apiv1.ResourceList { return &rl.Target } + getUpper := func(rl vpa_types.RecommendedContainerResources) *apiv1.ResourceList { return &rl.UpperBound } + getLower := func(rl vpa_types.RecommendedContainerResources) *apiv1.ResourceList { return &rl.LowerBound } + + containerRecommendations = applyPodLimitRange(containerRecommendations, pod, *podLimitRange, apiv1.ResourceCPU, getUpper) + containerRecommendations = applyPodLimitRange(containerRecommendations, pod, *podLimitRange, apiv1.ResourceMemory, getUpper) + + containerRecommendations = applyPodLimitRange(containerRecommendations, pod, *podLimitRange, apiv1.ResourceCPU, getTarget) + containerRecommendations = applyPodLimitRange(containerRecommendations, pod, *podLimitRange, apiv1.ResourceMemory, getTarget) + + containerRecommendations = applyPodLimitRange(containerRecommendations, pod, *podLimitRange, apiv1.ResourceCPU, getLower) + containerRecommendations = applyPodLimitRange(containerRecommendations, pod, *podLimitRange, apiv1.ResourceMemory, 
getLower) return containerRecommendations, nil } diff --git a/vertical-pod-autoscaler/pkg/utils/vpa/capping_test.go b/vertical-pod-autoscaler/pkg/utils/vpa/capping_test.go index 6ccc3d32d15e..3ae3258f876a 100644 --- a/vertical-pod-autoscaler/pkg/utils/vpa/capping_test.go +++ b/vertical-pod-autoscaler/pkg/utils/vpa/capping_test.go @@ -556,9 +556,10 @@ func TestApplyPodLimitRange(t *testing.T) { }, }, } + getTarget := func(rl vpa_types.RecommendedContainerResources) *apiv1.ResourceList { return &rl.Target } for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { - got := applyPodLimitRange(tc.resources, &tc.pod, tc.limitRange, tc.resourceName) + got := applyPodLimitRange(tc.resources, &tc.pod, tc.limitRange, tc.resourceName, getTarget) assert.Equal(t, tc.expect, got) }) } From 8530bb449e993ea545bcfdd4d452f93a4cd1dbc4 Mon Sep 17 00:00:00 2001 From: Joachim Bartosik Date: Thu, 6 Jun 2019 13:49:43 +0200 Subject: [PATCH 24/25] Fix limit support --- .../e2e/v1beta2/admission_controller.go | 12 ++++++------ vertical-pod-autoscaler/pkg/utils/vpa/capping.go | 4 ++-- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/vertical-pod-autoscaler/e2e/v1beta2/admission_controller.go b/vertical-pod-autoscaler/e2e/v1beta2/admission_controller.go index a50322ff6d7d..49a5d12fbc58 100644 --- a/vertical-pod-autoscaler/e2e/v1beta2/admission_controller.go +++ b/vertical-pod-autoscaler/e2e/v1beta2/admission_controller.go @@ -176,8 +176,8 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", func() { // Min CPU from limit range is 50m and ratio is 1.5. Min applies to both limit and request so min // request is 50m and min limit is 75 - // Min memory limit is 250Mi and ratio is 2., so min request is 125Mi, while - // recommendation is 100Mi. + // Min memory limit is 250Mi and it applies to both limit and request. Recommendation is 100Mi. + // It should be scaled up to 250Mi. InstallLimitRangeWithMin(f, "50m", "250Mi", apiv1.LimitTypeContainer) ginkgo.By("Setting up a hamster deployment") @@ -189,7 +189,7 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", func() { // Limit to request ratio should stay unchanged. for _, pod := range podList.Items { gomega.Expect(*pod.Spec.Containers[0].Resources.Requests.Cpu()).To(gomega.Equal(ParseQuantityOrDie("250m"))) - gomega.Expect(*pod.Spec.Containers[0].Resources.Requests.Memory()).To(gomega.Equal(ParseQuantityOrDie("125Mi"))) + gomega.Expect(*pod.Spec.Containers[0].Resources.Requests.Memory()).To(gomega.Equal(ParseQuantityOrDie("250Mi"))) gomega.Expect(pod.Spec.Containers[0].Resources.Limits.Cpu().MilliValue()).To(gomega.BeNumerically(">=", 75)) gomega.Expect(pod.Spec.Containers[0].Resources.Limits.Memory().Value()).To(gomega.BeNumerically(">=", 250*1024*1024)) gomega.Expect(float64(pod.Spec.Containers[0].Resources.Limits.Cpu().MilliValue()) / float64(pod.Spec.Containers[0].Resources.Requests.Cpu().MilliValue())).To(gomega.BeNumerically("~", 1.5)) @@ -277,8 +277,8 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", func() { // Min CPU from limit range is 100m, 50m per pod and ratio is 1.5. Min applies to both limit and // request so min request is 50m and min limit is 75 - // Min memory limit is 500Mi per pod, 250 per container and ratio is 2., so min request is 125Mi, while - // recommendation is 100Mi. + // Min memory limit is 500Mi per pod, 250Mi per container and it applies to both limit and request. + // Recommendation is 100Mi; it should be scaled up to 250Mi.
InstallLimitRangeWithMin(f, "100m", "500Mi", apiv1.LimitTypePod) ginkgo.By("Setting up a hamster deployment") @@ -290,7 +290,7 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", func() { // Limit to request ratio should stay unchanged. for _, pod := range podList.Items { gomega.Expect(*pod.Spec.Containers[0].Resources.Requests.Cpu()).To(gomega.Equal(ParseQuantityOrDie("250m"))) - gomega.Expect(*pod.Spec.Containers[0].Resources.Requests.Memory()).To(gomega.Equal(ParseQuantityOrDie("125Mi"))) + gomega.Expect(*pod.Spec.Containers[0].Resources.Requests.Memory()).To(gomega.Equal(ParseQuantityOrDie("250Mi"))) gomega.Expect(pod.Spec.Containers[0].Resources.Limits.Cpu().MilliValue()).To(gomega.BeNumerically(">=", 75)) gomega.Expect(pod.Spec.Containers[0].Resources.Limits.Memory().Value()).To(gomega.BeNumerically(">=", 250*1024*1024)) gomega.Expect(float64(pod.Spec.Containers[0].Resources.Limits.Cpu().MilliValue()) / float64(pod.Spec.Containers[0].Resources.Requests.Cpu().MilliValue())).To(gomega.BeNumerically("~", 1.5)) diff --git a/vertical-pod-autoscaler/pkg/utils/vpa/capping.go b/vertical-pod-autoscaler/pkg/utils/vpa/capping.go index 19a17a367d9b..15d5c06d49e7 100644 --- a/vertical-pod-autoscaler/pkg/utils/vpa/capping.go +++ b/vertical-pod-autoscaler/pkg/utils/vpa/capping.go @@ -361,9 +361,9 @@ func applyPodLimitRange(resources []vpa_types.RecommendedContainerResources, targetTotalLimit = maxLimit } for i := range pod.Spec.Containers { - limit := resources[i].Target[resourceName] + limit := (*fieldGetter(resources[i]))[resourceName] cappedContainerRequest, _ := scaleQuantityProportionally(&limit, &sumLimit, &targetTotalLimit) - resources[i].Target[resourceName] = *cappedContainerRequest + (*fieldGetter(resources[i]))[resourceName] = *cappedContainerRequest } return resources } From 5697620a08789486a078cd20047e9818f3e72d4f Mon Sep 17 00:00:00 2001 From: Joachim Bartosik Date: Thu, 6 Jun 2019 16:51:12 +0200 Subject: [PATCH 25/25] Fix raises request according to pod min limit set in LimitRange test Admission controller was crashing because it was trying to divide by 0, so it didn't make any changes to the pod. After I fixed that only 1 pod would fit in the cluster, so I lowered the recommendation. --- vertical-pod-autoscaler/e2e/v1beta2/admission_controller.go | 6 +++--- vertical-pod-autoscaler/pkg/utils/vpa/capping.go | 6 +++++- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/vertical-pod-autoscaler/e2e/v1beta2/admission_controller.go b/vertical-pod-autoscaler/e2e/v1beta2/admission_controller.go index 49a5d12fbc58..dd9711a863c8 100644 --- a/vertical-pod-autoscaler/e2e/v1beta2/admission_controller.go +++ b/vertical-pod-autoscaler/e2e/v1beta2/admission_controller.go @@ -260,14 +260,14 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", func() { { ContainerName: "hamster", Target: apiv1.ResourceList{ - apiv1.ResourceCPU: ParseQuantityOrDie("250m"), + apiv1.ResourceCPU: ParseQuantityOrDie("120m"), apiv1.ResourceMemory: ParseQuantityOrDie("100Mi"), // memory is downscaled }, }, { ContainerName: "hamster2", Target: apiv1.ResourceList{ - apiv1.ResourceCPU: ParseQuantityOrDie("250m"), + apiv1.ResourceCPU: ParseQuantityOrDie("120m"), apiv1.ResourceMemory: ParseQuantityOrDie("100Mi"), // memory is downscaled }, }, @@ -289,7 +289,7 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", func() { // request that limitrange allows. // Limit to request ratio should stay unchanged.
for _, pod := range podList.Items { - gomega.Expect(*pod.Spec.Containers[0].Resources.Requests.Cpu()).To(gomega.Equal(ParseQuantityOrDie("250m"))) + gomega.Expect(*pod.Spec.Containers[0].Resources.Requests.Cpu()).To(gomega.Equal(ParseQuantityOrDie("120m"))) gomega.Expect(*pod.Spec.Containers[0].Resources.Requests.Memory()).To(gomega.Equal(ParseQuantityOrDie("250Mi"))) gomega.Expect(pod.Spec.Containers[0].Resources.Limits.Cpu().MilliValue()).To(gomega.BeNumerically(">=", 75)) gomega.Expect(pod.Spec.Containers[0].Resources.Limits.Memory().Value()).To(gomega.BeNumerically(">=", 250*1024*1024)) diff --git a/vertical-pod-autoscaler/pkg/utils/vpa/capping.go b/vertical-pod-autoscaler/pkg/utils/vpa/capping.go index 15d5c06d49e7..2744156a2edd 100644 --- a/vertical-pod-autoscaler/pkg/utils/vpa/capping.go +++ b/vertical-pod-autoscaler/pkg/utils/vpa/capping.go @@ -344,7 +344,7 @@ func applyPodLimitRange(resources []vpa_types.RecommendedContainerResources, return resources } - if minLimit.Cmp(sumRecommendation) > 0 { + if minLimit.Cmp(sumRecommendation) > 0 && !sumLimit.IsZero() { for i := range pod.Spec.Containers { request := (*fieldGetter(resources[i]))[resourceName] cappedContainerRequest, _ := scaleQuantityProportionally(&request, &sumRecommendation, &minLimit) @@ -353,6 +353,10 @@ func applyPodLimitRange(resources []vpa_types.RecommendedContainerResources, return resources } + if sumLimit.IsZero() { + return resources + } + var targetTotalLimit resource.Quantity if minLimit.Cmp(sumLimit) > 0 { targetTotalLimit = minLimit