diff --git a/vertical-pod-autoscaler/pkg/admission-controller/logic/limitrange_checker.go b/vertical-pod-autoscaler/pkg/admission-controller/logic/limitrange_checker.go
new file mode 100644
index 000000000000..8518896021ea
--- /dev/null
+++ b/vertical-pod-autoscaler/pkg/admission-controller/logic/limitrange_checker.go
@@ -0,0 +1,245 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package logic
+
+import (
+	"k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/resource"
+	"k8s.io/apimachinery/pkg/labels"
+	"k8s.io/client-go/informers"
+
+	v1_listers "k8s.io/client-go/listers/core/v1"
+)
+
+// LimitsHints provides hinted limits that respect the LimitRange max
+// limit/request ratio.
+type LimitsHints interface {
+	IsNil() bool
+	RequestsExceedsRatio(indexOfContainer int, resourceName v1.ResourceName) bool
+	HintedLimit(indexOfContainer int, resourceName v1.ResourceName) resource.Quantity
+}
+
+// LimitRangeHints implements the LimitsHints interface.
+type LimitRangeHints struct {
+	requestsExceedsRatio  []map[v1.ResourceName]bool
+	limitsRespectingRatio []v1.ResourceList
+}
+
+var _ LimitsHints = &LimitRangeHints{}
+
+// LimitsChecker checks for LimitRanges and whether a container needs limits
+// to be set.
+type LimitsChecker interface {
+	NeedsLimits(*v1.Pod, []ContainerResources) LimitsHints
+}
+
+// IsNil returns true if there are no hints to set limits.
+func (lrh *LimitRangeHints) IsNil() bool {
+	return lrh == (*LimitRangeHints)(nil)
+}
+
+// RequestsExceedsRatio returns true if limits have to be set to respect the
+// limit range max ratio.
+func (lrh *LimitRangeHints) RequestsExceedsRatio(indexOfContainer int, resourceName v1.ResourceName) bool {
+	if !lrh.IsNil() {
+		yes, ok := lrh.requestsExceedsRatio[indexOfContainer][resourceName]
+		return ok && yes
+	}
+	return false
+}
+
+// HintedLimit returns the limit Quantity that respects the limit range max
+// ratio, or a zero Quantity when there is no hint.
+func (lrh *LimitRangeHints) HintedLimit(indexOfContainer int, resourceName v1.ResourceName) resource.Quantity {
+	if !lrh.IsNil() {
+		limit, ok := lrh.limitsRespectingRatio[indexOfContainer][resourceName]
+		if ok {
+			return limit
+		}
+	}
+	return resource.Quantity{}
+}
+
+type neverNeedsLimitsChecker struct{}
+
+var _ LimitsChecker = &neverNeedsLimitsChecker{}
+
+func (lc *neverNeedsLimitsChecker) NeedsLimits(pod *v1.Pod, containersResources []ContainerResources) LimitsHints {
+	return LimitsHints((*LimitRangeHints)(nil))
+}
+
+type limitsChecker struct {
+	limitrangeLister v1_listers.LimitRangeLister
+}
+
+var _ LimitsChecker = &limitsChecker{}
+
+// NewLimitsChecker creates a LimitsChecker backed by the given informer
+// factory, or a no-op checker that never requires limits when the factory is
+// nil or its caches cannot be synced.
+func NewLimitsChecker(f informers.SharedInformerFactory) LimitsChecker {
+	if f != nil {
+		limitrangeLister := f.Core().V1().LimitRanges().Lister()
+		stopCh := make(chan struct{})
+		f.Start(stopCh)
+		for _, ok := range f.WaitForCacheSync(stopCh) {
+			if !ok {
+				// Some cache failed to sync; fall back to the no-op checker
+				// unless the LimitRange informer itself has synced.
+				if ok := f.Core().V1().LimitRanges().Informer().HasSynced(); !ok {
+					return &neverNeedsLimitsChecker{}
+				}
+			}
+		}
+		return &limitsChecker{limitrangeLister}
+	}
+	return &neverNeedsLimitsChecker{}
+}
+
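+// A minimal usage sketch (illustrative only; the admission server below is
+// the real caller, and kubeClient is a placeholder for an existing client):
+//
+//	factory := informers.NewSharedInformerFactory(kubeClient, 0)
+//	checker := NewLimitsChecker(factory) // pass nil to disable limit adjustment
+//	hints := checker.NeedsLimits(pod, containersResources)
+//	if !hints.IsNil() && hints.RequestsExceedsRatio(0, v1.ResourceCPU) {
+//		limit := hints.HintedLimit(0, v1.ResourceCPU)
+//		_ = limit // apply as the container's CPU limit
+//	}
+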
+type interestingData struct {
+	MaxLimitRequestRatio v1.ResourceList
+	Default              v1.ResourceList
+}
+
+// parse collects the strictest MaxLimitRequestRatio and the first Default
+// seen for CPU and memory across LimitRangeItems.
+func (id *interestingData) parse(lri *v1.LimitRangeItem) {
+	if value, hasCPU := lri.MaxLimitRequestRatio[v1.ResourceCPU]; hasCPU {
+		if id.MaxLimitRequestRatio == nil {
+			id.MaxLimitRequestRatio = make(v1.ResourceList)
+		}
+		if maxRatio, exists := id.MaxLimitRequestRatio[v1.ResourceCPU]; !exists || maxRatio.Cmp(value) > 0 {
+			id.MaxLimitRequestRatio[v1.ResourceCPU] = *value.Copy()
+		}
+	}
+	if value, hasMemory := lri.MaxLimitRequestRatio[v1.ResourceMemory]; hasMemory {
+		if id.MaxLimitRequestRatio == nil {
+			id.MaxLimitRequestRatio = make(v1.ResourceList)
+		}
+		if maxRatio, exists := id.MaxLimitRequestRatio[v1.ResourceMemory]; !exists || maxRatio.Cmp(value) > 0 {
+			id.MaxLimitRequestRatio[v1.ResourceMemory] = *value.Copy()
+		}
+	}
+	if value, hasCPU := lri.Default[v1.ResourceCPU]; hasCPU {
+		if id.Default == nil {
+			id.Default = make(v1.ResourceList)
+		}
+		if _, exists := id.Default[v1.ResourceCPU]; !exists {
+			id.Default[v1.ResourceCPU] = *value.Copy()
+		}
+	}
+	if value, hasMemory := lri.Default[v1.ResourceMemory]; hasMemory {
+		if id.Default == nil {
+			id.Default = make(v1.ResourceList)
+		}
+		if _, exists := id.Default[v1.ResourceMemory]; !exists {
+			id.Default[v1.ResourceMemory] = *value.Copy()
+		}
+	}
+}
+
+// getLimitRangeItem merges the relevant pieces of all LimitRanges in the
+// pod's namespace into a single synthetic LimitRangeItem, or returns nil when
+// no LimitRange carries a MaxLimitRequestRatio or Default.
+func (lc *limitsChecker) getLimitRangeItem(pod *v1.Pod) (ret *v1.LimitRangeItem) {
+	ret = nil
+	limitranges, err := lc.limitrangeLister.
+		LimitRanges(pod.GetNamespace()).
+		List(labels.Everything())
+	if err == nil {
+		id := &interestingData{}
+		foundInterestingData := false
+		for _, lr := range limitranges {
+			for _, lri := range lr.Spec.Limits {
+				if lri.Type != v1.LimitTypeContainer && lri.Type != v1.LimitTypePod {
+					continue
+				}
+				if lri.MaxLimitRequestRatio == nil &&
+					lri.Default == nil {
+					continue
+				}
+				foundInterestingData = true
+				id.parse(&lri)
+			}
+		}
+		if foundInterestingData {
+			ret = &v1.LimitRangeItem{
+				MaxLimitRequestRatio: id.MaxLimitRequestRatio,
+				Default:              id.Default,
+			}
+		}
+	}
+	return ret
+}
+
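+// Worked example (numbers match the test fixtures below): with a recommended
+// CPU request of 200m, an existing or default limit of 2000m, and a
+// LimitRange maxLimitRequestRatio of 5, the future limit/request ratio would
+// be 2000m/200m = 10 > 5, so a hinted limit of 200m*5 = 1000m is recorded for
+// that container and resource.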
+func (lc *limitsChecker) NeedsLimits(pod *v1.Pod, containersResources []ContainerResources) LimitsHints {
+	lri := lc.getLimitRangeItem(pod)
+
+	if lri == (*v1.LimitRangeItem)(nil) {
+		return LimitsHints((*LimitRangeHints)(nil))
+	}
+
+	lrh := &LimitRangeHints{
+		requestsExceedsRatio:  make([]map[v1.ResourceName]bool, len(containersResources)),
+		limitsRespectingRatio: make([]v1.ResourceList, len(containersResources)),
+	}
+	needsLimits := false
+
+	for i, cr := range containersResources {
+		lrh.requestsExceedsRatio[i] = make(map[v1.ResourceName]bool)
+		lrh.limitsRespectingRatio[i] = make(v1.ResourceList)
+		for name, value := range cr.Requests {
+			var ctrLimit *resource.Quantity
+			if pod.Spec.Containers[i].Resources.Limits != nil {
+				if q, hasLimit := pod.Spec.Containers[i].Resources.Limits[name]; hasLimit {
+					ctrLimit = &q
+				}
+			}
+			if q, hasDefault := lri.Default[name]; hasDefault && ctrLimit == nil {
+				ctrLimit = &q
+			}
+			if ctrLimit == nil {
+				// No limit is set for this container and no default would be
+				// applied, so there is no ratio to enforce.
+				continue
+			}
+
+			if ratio, hasRatio := lri.MaxLimitRequestRatio[name]; hasRatio {
+				dl := *ctrLimit
+				dlv := dl.Value()
+				vv := value.Value()
+				useMilli := false
+				if dlv <= resource.MaxMilliValue &&
+					vv <= resource.MaxMilliValue &&
+					name == v1.ResourceCPU {
+					dlv = dl.MilliValue()
+					vv = value.MilliValue()
+					useMilli = true
+				}
+
+				futureRatio := float64(dlv) / float64(vv)
+				maxRatio := float64(ratio.Value())
+
+				if futureRatio > maxRatio {
+					needsLimits = true
+					lrh.requestsExceedsRatio[i][name] = true
+					l := int64(float64(vv) * maxRatio)
+					if useMilli {
+						if l > resource.MaxMilliValue {
+							l = resource.MaxMilliValue
+						}
+						lrh.limitsRespectingRatio[i][name] = *resource.NewMilliQuantity(l, value.Format)
+					} else {
+						lrh.limitsRespectingRatio[i][name] = *resource.NewQuantity(l, value.Format)
+					}
+				}
+			}
+		}
+	}
+
+	if !needsLimits {
+		lrh = nil
+	}
+	return LimitsHints(lrh)
+}
diff --git a/vertical-pod-autoscaler/pkg/admission-controller/logic/limitrange_checker_test.go b/vertical-pod-autoscaler/pkg/admission-controller/logic/limitrange_checker_test.go
new file mode 100644
index 000000000000..95298778c1a2
--- /dev/null
+++ b/vertical-pod-autoscaler/pkg/admission-controller/logic/limitrange_checker_test.go
@@ -0,0 +1,236 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package logic
+
+import (
+	"fmt"
+	"testing"
+
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/client-go/informers"
+	"k8s.io/client-go/kubernetes/fake"
+
+	"github.com/stretchr/testify/assert"
+	apiv1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/resource"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/test"
+)
+
+func TestUpdateResourceLimits(t *testing.T) {
+	type testCase struct {
+		pod                         *apiv1.Pod
+		containerResources          []ContainerResources
+		limitRanges                 []runtime.Object
+		requestsExceedsRatioCPU     bool
+		requestsExceedsRatioMemory  bool
+		limitsRespectingRatioCPU    resource.Quantity
+		limitsRespectingRatioMemory resource.Quantity
+	}
+	containerName := "container1"
+	vpaName := "vpa1"
+	labels := map[string]string{"app": "testingApp"}
+
+	minRatio := test.Resources("5", "5")
+
+	limitranges := []runtime.Object{
+		&apiv1.LimitRange{
+			ObjectMeta: metav1.ObjectMeta{
+				Namespace: "default",
+				Name:      "limitRange-with-default-and-ratio",
+			},
+			Spec: apiv1.LimitRangeSpec{
+				Limits: []apiv1.LimitRangeItem{
+					{
+						Type:    apiv1.LimitTypeContainer,
+						Default: test.Resources("2000m", "2Gi"),
+					},
+					{
+						Type:                 apiv1.LimitTypePod,
+						MaxLimitRequestRatio: test.Resources("10", "10"),
+					},
+				},
+			},
+		},
+		&apiv1.LimitRange{
+			ObjectMeta: metav1.ObjectMeta{
+				Namespace: "default",
+				Name:      "limitRange-with-only-ratio",
+			},
+			Spec: apiv1.LimitRangeSpec{
+				Limits: []apiv1.LimitRangeItem{
+					{
+						Type:                 apiv1.LimitTypePod,
+						MaxLimitRequestRatio: minRatio,
+					},
+				},
+			},
+		},
+	}
+
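+	// Pod fixtures below cover: no requests set, requests set without limits,
+	// limits that violate the strictest ratio, and a memory limit far above it.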
+	uninitialized := test.Pod().WithName("test_uninitialized").AddContainer(test.BuildTestContainer(containerName, "", "")).Get()
+	uninitialized.ObjectMeta.Labels = labels
+
+	initialized := test.Pod().WithName("test_initialized").AddContainer(test.BuildTestContainer(containerName, "1", "100Mi")).Get()
+	initialized.ObjectMeta.Labels = labels
+
+	withLimits := test.Pod().WithName("test_initialized").AddContainer(test.BuildTestContainer(containerName, "1", "100Mi")).Get()
+	withLimits.ObjectMeta.Labels = labels
+	withLimits.Spec.Containers[0].Resources.Limits = test.Resources("1500m", "800Mi")
+
+	withHugeMemLimits := test.Pod().WithName("test_initialized").AddContainer(test.BuildTestContainer(containerName, "1", "10Gi")).Get()
+	withHugeMemLimits.ObjectMeta.Labels = labels
+	withHugeMemLimits.Spec.Containers[0].Resources.Limits = test.Resources("1500m", "80Gi")
+
+	vpaBuilder := test.VerticalPodAutoscaler().
+		WithName(vpaName).
+		WithContainer(containerName).
+		WithTarget("20m", "200Mi")
+	vpa := vpaBuilder.Get()
+
+	vpaWithHighMemory := vpaBuilder.WithTarget("2", "3Gi").Get()
+
+	// Short-circuit the recommendation provider.
+	vpaContainersResources := []ContainerResources{{
+		Requests: vpa.Status.Recommendation.ContainerRecommendations[0].Target,
+	}}
+	vpaHighMemContainersResources := []ContainerResources{{
+		Requests: vpaWithHighMemory.Status.Recommendation.ContainerRecommendations[0].Target,
+	}}
+
+	// These helpers mirror the checker's hint math: hinted limit = request * maxRatio.
+	expectedMemory := func(crs []ContainerResources, ratio apiv1.ResourceList) resource.Quantity {
+		return *resource.NewQuantity(
+			int64(float64(
+				crs[0].Requests.Memory().Value())*float64(ratio.Memory().Value())),
+			crs[0].Requests.Memory().Format)
+	}
+	expectedCPU := func(crs []ContainerResources, ratio apiv1.ResourceList) resource.Quantity {
+		return *resource.NewMilliQuantity(
+			int64(float64(
+				crs[0].Requests.Cpu().MilliValue())*float64(ratio.Cpu().Value())),
+			crs[0].Requests.Cpu().Format)
+	}
+
+	testCases := []testCase{{
+		pod:                         uninitialized,
+		containerResources:          vpaContainersResources,
+		limitRanges:                 limitranges,
+		requestsExceedsRatioCPU:     true,
+		requestsExceedsRatioMemory:  true,
+		limitsRespectingRatioCPU:    expectedCPU(vpaContainersResources, minRatio),
+		limitsRespectingRatioMemory: expectedMemory(vpaContainersResources, minRatio),
+	}, {
+		pod:                         initialized,
+		containerResources:          vpaContainersResources,
+		limitRanges:                 limitranges,
+		requestsExceedsRatioCPU:     true,
+		requestsExceedsRatioMemory:  true,
+		limitsRespectingRatioCPU:    expectedCPU(vpaContainersResources, minRatio),
+		limitsRespectingRatioMemory: expectedMemory(vpaContainersResources, minRatio),
+	}, {
+		pod:                         withLimits,
+		containerResources:          vpaContainersResources,
+		limitRanges:                 limitranges,
+		requestsExceedsRatioCPU:     true,
+		requestsExceedsRatioMemory:  false,
+		limitsRespectingRatioCPU:    expectedCPU(vpaContainersResources, minRatio),
+		limitsRespectingRatioMemory: resource.Quantity{},
+	}, {
+		pod:                         withHugeMemLimits,
+		containerResources:          vpaHighMemContainersResources,
+		limitRanges:                 limitranges,
+		requestsExceedsRatioCPU:     false,
+		requestsExceedsRatioMemory:  true,
+		limitsRespectingRatioCPU:    resource.Quantity{},
+		limitsRespectingRatioMemory: expectedMemory(vpaHighMemContainersResources, minRatio),
+	}}
+
+	// If the admission controller is not allowed to adjust limits, the limits
+	// checker always has to return:
+	// - no needed limits,
+	// - false from RequestsExceedsRatio.
+	t.Run("test case for neverNeedsLimitsChecker", func(t *testing.T) {
+		nlc := NewLimitsChecker(nil)
+		hints := nlc.NeedsLimits(uninitialized, vpaContainersResources)
+		hintsPtr, _ := hints.(*LimitRangeHints)
+		if hintsPtr != nil {
+			t.Errorf("%v NeedsLimits did not return nil: %v", nlc, hints)
+		}
+		if !hints.IsNil() {
+			t.Errorf("%v NeedsLimits returned a non-nil LimitsHints: %v", nlc, hints)
+		}
+		if hints.RequestsExceedsRatio(0, apiv1.ResourceMemory) {
+			t.Errorf("%v RequestsExceedsRatio did not return false", hints)
+		}
+		hinted := hints.HintedLimit(0, apiv1.ResourceMemory)
+		if !(&hinted).IsZero() {
+			t.Errorf("%v HintedLimit did not return a zero quantity", hints)
+		}
+	})
+
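+	// An informer-backed checker with no LimitRange objects in the namespace
+	// is expected to produce the same no-op hints as the nil checker above.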
Limit Range", func(t *testing.T) { + cs := fake.NewSimpleClientset() + factory := informers.NewSharedInformerFactory(cs, 0) + lc := NewLimitsChecker(factory) + hints := lc.NeedsLimits(uninitialized, vpaContainersResources) + hintsPtr, _ := hints.(*LimitRangeHints) + if hintsPtr != nil { + t.Errorf("%v NeedsLimits didn't not return nil: %v", lc, hints) + } + if !hints.IsNil() { + t.Errorf("%v NeedsLimits returned a LimitsHints not nil: %v", lc, hints) + } + if hints.RequestsExceedsRatio(0, apiv1.ResourceMemory) != false { + t.Errorf("%v RequestsExceedsRatio didn't not return false", hints) + } + hinted := hints.HintedLimit(0, apiv1.ResourceMemory) + if !(&hinted).IsZero() { + t.Errorf("%v RequestsExceedsRatio didn't not return zero quantity", hints) + } + }) + + for i, tc := range testCases { + + t.Run(fmt.Sprintf("test case number: %d", i), func(t *testing.T) { + cs := fake.NewSimpleClientset(tc.limitRanges...) + factory := informers.NewSharedInformerFactory(cs, 0) + lc := NewLimitsChecker(factory) + resources := tc.containerResources + + hints := lc.NeedsLimits(tc.pod, resources) + assert.NotNil(t, hints, fmt.Sprintf("hints is: %+v", hints)) + + if tc.requestsExceedsRatioCPU { + assert.True(t, hints.RequestsExceedsRatio(0, apiv1.ResourceCPU)) + } else { + assert.False(t, hints.RequestsExceedsRatio(0, apiv1.ResourceCPU)) + } + + if tc.requestsExceedsRatioMemory { + assert.True(t, hints.RequestsExceedsRatio(0, apiv1.ResourceMemory)) + } else { + assert.False(t, hints.RequestsExceedsRatio(0, apiv1.ResourceMemory)) + } + + hintedCPULimits := hints.HintedLimit(0, apiv1.ResourceCPU) + hintedMemoryLimits := hints.HintedLimit(0, apiv1.ResourceMemory) + assert.EqualValues(t, tc.limitsRespectingRatioCPU.Value(), hintedCPULimits.Value(), fmt.Sprintf("cpu limits doesn't match: %v != %v\n", tc.limitsRespectingRatioCPU.Value(), hintedCPULimits.Value())) + assert.EqualValues(t, tc.limitsRespectingRatioMemory.Value(), hintedMemoryLimits.Value(), fmt.Sprintf("memory limits doesn't match: %v != %v\n", tc.limitsRespectingRatioMemory.Value(), hintedMemoryLimits.Value())) + }) + + } +} diff --git a/vertical-pod-autoscaler/pkg/admission-controller/logic/server.go b/vertical-pod-autoscaler/pkg/admission-controller/logic/server.go index 34e584cd9a80..f5e732eb6ee5 100644 --- a/vertical-pod-autoscaler/pkg/admission-controller/logic/server.go +++ b/vertical-pod-autoscaler/pkg/admission-controller/logic/server.go @@ -39,11 +39,12 @@ type AdmissionServer struct { recommendationProvider RecommendationProvider podPreProcessor PodPreProcessor vpaPreProcessor VpaPreProcessor + limitsChecker LimitsChecker } // NewAdmissionServer constructs new AdmissionServer -func NewAdmissionServer(recommendationProvider RecommendationProvider, podPreProcessor PodPreProcessor, vpaPreProcessor VpaPreProcessor) *AdmissionServer { - return &AdmissionServer{recommendationProvider, podPreProcessor, vpaPreProcessor} +func NewAdmissionServer(recommendationProvider RecommendationProvider, podPreProcessor PodPreProcessor, vpaPreProcessor VpaPreProcessor, limitsChecker LimitsChecker) *AdmissionServer { + return &AdmissionServer{recommendationProvider, podPreProcessor, vpaPreProcessor, limitsChecker} } type patchRecord struct { @@ -73,10 +74,13 @@ func (s *AdmissionServer) getPatchesForPodResourceRequest(raw []byte, namespace if annotationsPerContainer == nil { annotationsPerContainer = vpa_api_util.ContainerToAnnotationsMap{} } + + limitsHints := s.limitsChecker.NeedsLimits(&pod, containersResources) + patches := []patchRecord{} updatesAnnotation 
+	if !limitsHints.IsNil() {
+		var resources v1.ResourceList
+		resourceNames := []v1.ResourceName{v1.ResourceCPU, v1.ResourceMemory}
+		for _, resourceName := range resourceNames {
+			if limitsHints.RequestsExceedsRatio(i, resourceName) {
+				// We only need to take care of the max limit/request ratio
+				// here, by setting the limit to request*maxRatio; this is
+				// needed when requests are lowered significantly.
+				limit := limitsHints.HintedLimit(i, resourceName)
+				if resources == nil {
+					resources = make(v1.ResourceList)
+				}
+				resources[resourceName] = limit
+				annotations = append(annotations, fmt.Sprintf("%s limit decreased to respect ratio", resourceName))
+			}
+		}
+		if len(resources) > 0 {
+			containerResources.Limits = resources
+		}
+	}
 	patches, annotations = appendPatchesAndAnnotations(patches, annotations, pod.Spec.Containers[i].Resources.Requests, i, containerResources.Requests, "requests", "request")
 	patches, annotations = appendPatchesAndAnnotations(patches, annotations, pod.Spec.Containers[i].Resources.Limits, i, containerResources.Limits, "limits", "limit")
diff --git a/vertical-pod-autoscaler/pkg/admission-controller/logic/server_test.go b/vertical-pod-autoscaler/pkg/admission-controller/logic/server_test.go
index 049c6ba7cf0f..83ad62dce2eb 100644
--- a/vertical-pod-autoscaler/pkg/admission-controller/logic/server_test.go
+++ b/vertical-pod-autoscaler/pkg/admission-controller/logic/server_test.go
@@ -320,7 +320,8 @@ func TestGetPatchesForResourceRequest(t *testing.T) {
 			fppp := fakePodPreProcessor{e: tc.podPreProcessorError}
 			fvpp := fakeVpaPreProcessor{}
 			frp := fakeRecommendationProvider{tc.recommendResources, tc.recommendAnnotations, tc.recommendName, tc.recommendError}
-			s := NewAdmissionServer(&frp, &fppp, &fvpp)
+			lc := NewLimitsChecker(nil)
+			s := NewAdmissionServer(&frp, &fppp, &fvpp, lc)
 			patches, err := s.getPatchesForPodResourceRequest(tc.podJson, tc.namespace)
 			if tc.expectError == nil {
 				assert.NoError(t, err)
@@ -367,7 +368,8 @@ func TestGetPatchesForResourceRequest_TwoReplacementResources(t *testing.T) {
 	}`)
 	recommendAnnotations := vpa_api_util.ContainerToAnnotationsMap{}
 	frp := fakeRecommendationProvider{recommendResources, recommendAnnotations, "name", nil}
-	s := NewAdmissionServer(&frp, &fppp, &fvpp)
+	lc := NewLimitsChecker(nil)
+	s := NewAdmissionServer(&frp, &fppp, &fvpp, lc)
 	patches, err := s.getPatchesForPodResourceRequest(podJson, "default")
 	assert.NoError(t, err)
 	// Order of updates for cpu and unobtanium depends on order of iterating a map, both possible results are valid.
diff --git a/vertical-pod-autoscaler/pkg/admission-controller/main.go b/vertical-pod-autoscaler/pkg/admission-controller/main.go
index 74a2a22d7bcd..3d6313b05754 100644
--- a/vertical-pod-autoscaler/pkg/admission-controller/main.go
+++ b/vertical-pod-autoscaler/pkg/admission-controller/main.go
@@ -48,12 +48,13 @@ var (
 		tlsPrivateKey: flag.String("tls-private-key", "/etc/tls-certs/serverKey.pem", "Path to server certificate key PEM file."),
 	}
 
-	port           = flag.Int("port", 8000, "The port to listen on.")
-	address        = flag.String("address", ":8944", "The address to expose Prometheus metrics.")
-	namespace      = os.Getenv("NAMESPACE")
-	webhookAddress = flag.String("webhook-address", "", "Address under which webhook is registered. Used when registerByURL is set to true.")
-	webhookPort    = flag.String("webhook-port", "", "Server Port for Webhook")
-	registerByURL  = flag.Bool("register-by-url", false, "If set to true, admission webhook will be registered by URL (webhookAddress:webhookPort) instead of by service name")
+	port                = flag.Int("port", 8000, "The port to listen on.")
+	address             = flag.String("address", ":8944", "The address to expose Prometheus metrics.")
+	namespace           = os.Getenv("NAMESPACE")
+	webhookAddress      = flag.String("webhook-address", "", "Address under which webhook is registered. Used when registerByURL is set to true.")
+	webhookPort         = flag.String("webhook-port", "", "Server Port for Webhook")
+	registerByURL       = flag.Bool("register-by-url", false, "If set to true, admission webhook will be registered by URL (webhookAddress:webhookPort) instead of by service name")
+	allowToAdjustLimits = flag.Bool("allow-to-adjust-limits", false, "If set to true, admission webhook will also set container limits when needed")
 )
 
 func main() {
@@ -83,7 +86,11 @@ func main() {
 	recommendationProvider := logic.NewRecommendationProvider(vpaLister, vpa_api_util.NewCappingRecommendationProcessor(), targetSelectorFetcher)
 	podPreprocessor := logic.NewDefaultPodPreProcessor()
 	vpaPreprocessor := logic.NewDefaultVpaPreProcessor()
-	as := logic.NewAdmissionServer(recommendationProvider, podPreprocessor, vpaPreprocessor)
+	limitsChecker := logic.NewLimitsChecker(nil)
+	if *allowToAdjustLimits {
+		limitsChecker = logic.NewLimitsChecker(factory)
+	}
+	as := logic.NewAdmissionServer(recommendationProvider, podPreprocessor, vpaPreprocessor, limitsChecker)
 	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
 		as.Serve(w, r)
 		healthCheck.UpdateLastActivity()