From 0006a07422ae068d96f9348dcd22c3385c262f50 Mon Sep 17 00:00:00 2001 From: Joachim Bartosik Date: Wed, 10 Apr 2019 16:44:30 +0200 Subject: [PATCH 01/12] Basic unit tests for VPA server --- .../admission-controller/logic/server_test.go | 288 ++++++++++++++++++ 1 file changed, 288 insertions(+) create mode 100644 vertical-pod-autoscaler/pkg/admission-controller/logic/server_test.go diff --git a/vertical-pod-autoscaler/pkg/admission-controller/logic/server_test.go b/vertical-pod-autoscaler/pkg/admission-controller/logic/server_test.go new file mode 100644 index 000000000000..108b7a36534f --- /dev/null +++ b/vertical-pod-autoscaler/pkg/admission-controller/logic/server_test.go @@ -0,0 +1,288 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package logic + +import ( + "encoding/json" + "fmt" + "github.com/stretchr/testify/assert" + "strings" + "testing" + + apiv1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + vpa_api_util "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/vpa" +) + +const ( + cpu = "cpu" + unobtanium = "unobtanium" +) + +type fakePodPreProcessor struct { + e error +} + +func (fpp *fakePodPreProcessor) Process(pod apiv1.Pod) (apiv1.Pod, error) { + return pod, fpp.e +} + +type fakeRecommendationProvider struct { + resources []ContainerResources + containerToAnnotations vpa_api_util.ContainerToAnnotationsMap + name string + e error +} + +func (frp *fakeRecommendationProvider) GetContainersResourcesForPod(pod *apiv1.Pod) ([]ContainerResources, vpa_api_util.ContainerToAnnotationsMap, string, error) { + return frp.resources, frp.containerToAnnotations, frp.name, frp.e +} + +func addResourcesPatch(idx int) patchRecord { + return patchRecord{ + "add", + fmt.Sprintf("/spec/containers/%d/resources", idx), + apiv1.ResourceRequirements{}, + } +} + +func addRequestsPatch(idx int) patchRecord { + return patchRecord{ + "add", + fmt.Sprintf("/spec/containers/%d/resources/requests", idx), + apiv1.ResourceList{}, + } +} + +func addResourceRequestPatch(index int, res, amount string) patchRecord { + return patchRecord{ + "add", + fmt.Sprintf("/spec/containers/%d/resources/requests/%s", index, res), + resource.MustParse(amount), + } +} + +func addAnnotationRequest(updateResources [][]string) patchRecord { + requests := make([]string, 0) + for idx, podResources := range updateResources { + podRequests := make([]string, 0) + for _, resource := range podResources { + podRequests = append(podRequests, resource+" request") + } + requests = append(requests, fmt.Sprintf("container %d: %s", idx, strings.Join(podRequests, ", "))) + } + + vpaUpdates := fmt.Sprintf("Pod resources updated by name: %s", strings.Join(requests, "; ")) + return patchRecord{ + "add", + "/metadata/annotations", + map[string]string{ + "vpaUpdates": vpaUpdates, + }, + } +} + +func eqPatch(a, b patchRecord) bool { + aJson, aErr := json.Marshal(a) + bJson, bErr := json.Marshal(b) + return string(aJson) == string(bJson) && aErr == bErr +} + +func TestGetPatchesForResourceRequest(t 
*testing.T) { + tests := []struct { + name string + podJson []byte + namespace string + preProcessorError error + recommendResources []ContainerResources + recommendAnnotations vpa_api_util.ContainerToAnnotationsMap + recommendName string + recommendError error + expectPatches []patchRecord + expectError error + }{ + { + name: "invalid JSON", + podJson: []byte("{"), + namespace: "default", + preProcessorError: nil, + recommendResources: []ContainerResources{}, + recommendAnnotations: vpa_api_util.ContainerToAnnotationsMap{}, + recommendName: "name", + expectError: fmt.Errorf("unexpected end of JSON input"), + }, + { + name: "invalid pod", + podJson: []byte("{}"), + namespace: "default", + preProcessorError: fmt.Errorf("bad pod"), + recommendResources: []ContainerResources{}, + recommendAnnotations: vpa_api_util.ContainerToAnnotationsMap{}, + recommendName: "name", + expectError: fmt.Errorf("bad pod"), + }, + { + name: "new cpu recommendation", + podJson: []byte( + `{ + "spec": { + "containers": [{}] + } + }`), + namespace: "default", + recommendResources: []ContainerResources{ + { + apiv1.ResourceList{ + cpu: resource.MustParse("1"), + }, + }, + }, + recommendAnnotations: vpa_api_util.ContainerToAnnotationsMap{}, + recommendName: "name", + expectPatches: []patchRecord{ + addResourcesPatch(0), + addRequestsPatch(0), + addResourceRequestPatch(0, cpu, "1"), + addAnnotationRequest([][]string{{cpu}}), + }, + }, + { + name: "replacement cpu recommendation", + podJson: []byte( + `{ + "spec": { + "containers": [ + { + "resources": { + "requests": { + "cpu": "0" + } + } + } + ] + } + }`), + namespace: "default", + recommendResources: []ContainerResources{ + { + apiv1.ResourceList{ + cpu: resource.MustParse("1"), + }, + }, + }, + recommendAnnotations: vpa_api_util.ContainerToAnnotationsMap{}, + recommendName: "name", + expectPatches: []patchRecord{ + addResourceRequestPatch(0, cpu, "1"), + addAnnotationRequest([][]string{{cpu}}), + }, + }, + { + name: "two replacement resources", + podJson: []byte( + `{ + "spec": { + "containers": [ + { + "resources": { + "requests": { + "cpu": "0" + } + } + } + ] + } + }`), + namespace: "default", + recommendResources: []ContainerResources{ + { + apiv1.ResourceList{ + cpu: resource.MustParse("1"), + unobtanium: resource.MustParse("2"), + }, + }, + }, + recommendAnnotations: vpa_api_util.ContainerToAnnotationsMap{}, + recommendName: "name", + expectPatches: []patchRecord{ + addResourceRequestPatch(0, cpu, "1"), + addResourceRequestPatch(0, unobtanium, "2"), + addAnnotationRequest([][]string{{cpu, unobtanium}}), + }, + }, + { + name: "two containers", + podJson: []byte( + `{ + "spec": { + "containers": [ + { + "resources": { + "requests": { + "cpu": "0" + } + } + }, + {} + ] + } + }`), + namespace: "default", + recommendResources: []ContainerResources{ + { + apiv1.ResourceList{ + cpu: resource.MustParse("1"), + }, + }, + { + apiv1.ResourceList{ + cpu: resource.MustParse("2"), + }, + }, + }, + recommendAnnotations: vpa_api_util.ContainerToAnnotationsMap{}, + recommendName: "name", + expectPatches: []patchRecord{ + addResourceRequestPatch(0, cpu, "1"), + addResourcesPatch(1), + addRequestsPatch(1), + addResourceRequestPatch(1, cpu, "2"), + addAnnotationRequest([][]string{{cpu}, {cpu}}), + }, + }, + } + for _, tc := range tests { + t.Run(fmt.Sprintf("test case: %s", tc.name), func(t *testing.T) { + fppp := fakePodPreProcessor{e: tc.preProcessorError} + frp := fakeRecommendationProvider{tc.recommendResources, tc.recommendAnnotations, tc.recommendName, tc.recommendError} 
+ s := NewAdmissionServer(&frp, &fppp) + patches, err := s.getPatchesForPodResourceRequest(tc.podJson, tc.namespace) + if tc.expectError == nil { + assert.Nil(t, err) + } else { + assert.Errorf(t, err, tc.expectError.Error()) + } + if assert.Equal(t, len(tc.expectPatches), len(patches)) { + for i, gotPatch := range patches { + if !eqPatch(gotPatch, tc.expectPatches[i]) { + t.Errorf("Expected patch at position %d to be %+v, got %+v", i, tc.expectPatches[i], gotPatch) + } + } + } + }) + } +} From 64436ae4c722831b7c672c29473eb098cf5f1bba Mon Sep 17 00:00:00 2001 From: Joachim Bartosik Date: Thu, 11 Apr 2019 15:16:57 +0200 Subject: [PATCH 02/12] Fix comparing errors, make flaky test case a separate test The test relied on order of iterating through map. --- .../admission-controller/logic/server_test.go | 80 +++++++++++-------- 1 file changed, 45 insertions(+), 35 deletions(-) diff --git a/vertical-pod-autoscaler/pkg/admission-controller/logic/server_test.go b/vertical-pod-autoscaler/pkg/admission-controller/logic/server_test.go index 108b7a36534f..04b1ddf4ed68 100644 --- a/vertical-pod-autoscaler/pkg/admission-controller/logic/server_test.go +++ b/vertical-pod-autoscaler/pkg/admission-controller/logic/server_test.go @@ -191,39 +191,6 @@ func TestGetPatchesForResourceRequest(t *testing.T) { addAnnotationRequest([][]string{{cpu}}), }, }, - { - name: "two replacement resources", - podJson: []byte( - `{ - "spec": { - "containers": [ - { - "resources": { - "requests": { - "cpu": "0" - } - } - } - ] - } - }`), - namespace: "default", - recommendResources: []ContainerResources{ - { - apiv1.ResourceList{ - cpu: resource.MustParse("1"), - unobtanium: resource.MustParse("2"), - }, - }, - }, - recommendAnnotations: vpa_api_util.ContainerToAnnotationsMap{}, - recommendName: "name", - expectPatches: []patchRecord{ - addResourceRequestPatch(0, cpu, "1"), - addResourceRequestPatch(0, unobtanium, "2"), - addAnnotationRequest([][]string{{cpu, unobtanium}}), - }, - }, { name: "two containers", podJson: []byte( @@ -272,9 +239,11 @@ func TestGetPatchesForResourceRequest(t *testing.T) { s := NewAdmissionServer(&frp, &fppp) patches, err := s.getPatchesForPodResourceRequest(tc.podJson, tc.namespace) if tc.expectError == nil { - assert.Nil(t, err) + assert.NoError(t, err) } else { - assert.Errorf(t, err, tc.expectError.Error()) + if assert.Error(t, err) { + assert.Equal(t, tc.expectError.Error(), err.Error()) + } } if assert.Equal(t, len(tc.expectPatches), len(patches)) { for i, gotPatch := range patches { @@ -286,3 +255,44 @@ func TestGetPatchesForResourceRequest(t *testing.T) { }) } } + +func TestGetPatchesForResourceRequest_TwoReplacementResources(t *testing.T) { + + fppp := fakePodPreProcessor{} + recommendResources := []ContainerResources{ + { + apiv1.ResourceList{ + cpu: resource.MustParse("1"), + unobtanium: resource.MustParse("2"), + }, + }, + } + podJson := []byte( + `{ + "spec": { + "containers": [ + { + "resources": { + "requests": { + "cpu": "0" + } + } + } + ] + } + }`) + recommendAnnotations := vpa_api_util.ContainerToAnnotationsMap{} + frp := fakeRecommendationProvider{recommendResources, recommendAnnotations, "name", nil} + s := NewAdmissionServer(&frp, &fppp) + patches, err := s.getPatchesForPodResourceRequest(podJson, "default") + assert.NoError(t, err) + // Order of updates for cpu and unobtanium depends on order of iterating a map, both possible results are valid. 
+ if assert.Equal(t, len(patches), 3) { + cpuUpdate := addResourceRequestPatch(0, cpu, "1") + unobtaniumUpdate := addResourceRequestPatch(0, unobtanium, "2") + assert.True(t, eqPatch(patches[0], cpuUpdate) || eqPatch(patches[0], unobtaniumUpdate)) + assert.True(t, eqPatch(patches[1], cpuUpdate) || eqPatch(patches[1], unobtaniumUpdate)) + assert.False(t, eqPatch(patches[0], patches[1])) + assert.True(t, eqPatch(patches[2], addAnnotationRequest([][]string{{cpu, unobtanium}})) || eqPatch(patches[2], addAnnotationRequest([][]string{{unobtanium, cpu}}))) + } +} From 7b78116d1674a4124cac931bbe9083a1daa4ac24 Mon Sep 17 00:00:00 2001 From: Joachim Bartosik Date: Thu, 11 Apr 2019 14:29:22 +0200 Subject: [PATCH 03/12] Add test for VPA validation in Admission Controller --- .../admission-controller/logic/server_test.go | 129 ++++++++++++++++++ 1 file changed, 129 insertions(+) diff --git a/vertical-pod-autoscaler/pkg/admission-controller/logic/server_test.go b/vertical-pod-autoscaler/pkg/admission-controller/logic/server_test.go index 04b1ddf4ed68..19499c38b4ba 100644 --- a/vertical-pod-autoscaler/pkg/admission-controller/logic/server_test.go +++ b/vertical-pod-autoscaler/pkg/admission-controller/logic/server_test.go @@ -25,6 +25,7 @@ import ( apiv1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" + vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta2" vpa_api_util "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/vpa" ) @@ -296,3 +297,131 @@ func TestGetPatchesForResourceRequest_TwoReplacementResources(t *testing.T) { assert.True(t, eqPatch(patches[2], addAnnotationRequest([][]string{{cpu, unobtanium}})) || eqPatch(patches[2], addAnnotationRequest([][]string{{unobtanium, cpu}}))) } } + +func TestValidateVPA(t *testing.T) { + badUpdateMode := vpa_types.UpdateMode("bad") + validUpdateMode := vpa_types.UpdateModeOff + badScalingMode := vpa_types.ContainerScalingMode("bad") + validScalingMode := vpa_types.ContainerScalingModeAuto + tests := []struct { + name string + vpa vpa_types.VerticalPodAutoscaler + isCreate bool + expectError error + }{ + { + name: "empty update", + vpa: vpa_types.VerticalPodAutoscaler{}, + }, + { + name: "empty create", + vpa: vpa_types.VerticalPodAutoscaler{}, + isCreate: true, + expectError: fmt.Errorf("TargetRef is required. 
If you're using v1beta1 version of the API, please migrate to v1beta2."), + }, + { + name: "no update mode", + vpa: vpa_types.VerticalPodAutoscaler{ + Spec: vpa_types.VerticalPodAutoscalerSpec{ + UpdatePolicy: &vpa_types.PodUpdatePolicy{}, + }, + }, + expectError: fmt.Errorf("UpdateMode is required if UpdatePolicy is used"), + }, + { + name: "bad update mode", + vpa: vpa_types.VerticalPodAutoscaler{ + Spec: vpa_types.VerticalPodAutoscalerSpec{ + UpdatePolicy: &vpa_types.PodUpdatePolicy{ + UpdateMode: &badUpdateMode, + }, + }, + }, + expectError: fmt.Errorf("unexpected UpdateMode value bad"), + }, + { + name: "no policy name", + vpa: vpa_types.VerticalPodAutoscaler{ + Spec: vpa_types.VerticalPodAutoscalerSpec{ + ResourcePolicy: &vpa_types.PodResourcePolicy{ + ContainerPolicies: []vpa_types.ContainerResourcePolicy{{}}, + }, + }, + }, + expectError: fmt.Errorf("ContainerPolicies.ContainerName is required"), + }, + { + name: "invalid scaling mode", + vpa: vpa_types.VerticalPodAutoscaler{ + Spec: vpa_types.VerticalPodAutoscalerSpec{ + ResourcePolicy: &vpa_types.PodResourcePolicy{ + ContainerPolicies: []vpa_types.ContainerResourcePolicy{ + { + ContainerName: "loot box", + Mode: &badScalingMode, + }, + }, + }, + }, + }, + expectError: fmt.Errorf("unexpected Mode value bad"), + }, + { + name: "bad limits", + vpa: vpa_types.VerticalPodAutoscaler{ + Spec: vpa_types.VerticalPodAutoscalerSpec{ + ResourcePolicy: &vpa_types.PodResourcePolicy{ + ContainerPolicies: []vpa_types.ContainerResourcePolicy{ + { + ContainerName: "loot box", + MinAllowed: apiv1.ResourceList{ + cpu: resource.MustParse("100"), + }, + MaxAllowed: apiv1.ResourceList{ + cpu: resource.MustParse("10"), + }, + }, + }, + }, + }, + }, + expectError: fmt.Errorf("max resource for cpu is lower than min"), + }, + { + name: "all valid", + vpa: vpa_types.VerticalPodAutoscaler{ + Spec: vpa_types.VerticalPodAutoscalerSpec{ + ResourcePolicy: &vpa_types.PodResourcePolicy{ + ContainerPolicies: []vpa_types.ContainerResourcePolicy{ + { + ContainerName: "loot box", + Mode: &validScalingMode, + MinAllowed: apiv1.ResourceList{ + cpu: resource.MustParse("10"), + }, + MaxAllowed: apiv1.ResourceList{ + cpu: resource.MustParse("100"), + }, + }, + }, + }, + UpdatePolicy: &vpa_types.PodUpdatePolicy{ + UpdateMode: &validUpdateMode, + }, + }, + }, + }, + } + for _, tc := range tests { + t.Run(fmt.Sprintf("test case: %s", tc.name), func(t *testing.T) { + err := validateVPA(&tc.vpa, tc.isCreate) + if tc.expectError == nil { + assert.NoError(t, err) + } else { + if assert.Error(t, err) { + assert.Equal(t, tc.expectError.Error(), err.Error()) + } + } + }) + } +} From e6b848cf3b572531b09657dc375a4c24abbd765d Mon Sep 17 00:00:00 2001 From: Joachim Bartosik Date: Mon, 15 Apr 2019 15:07:59 +0200 Subject: [PATCH 04/12] Break getPatchesForPodResourceRequest into smaller functions Prepare for setting limits --- .../pkg/admission-controller/logic/server.go | 86 +++++++++++-------- 1 file changed, 52 insertions(+), 34 deletions(-) diff --git a/vertical-pod-autoscaler/pkg/admission-controller/logic/server.go b/vertical-pod-autoscaler/pkg/admission-controller/logic/server.go index 0031bae00afb..9c1b7148f95e 100644 --- a/vertical-pod-autoscaler/pkg/admission-controller/logic/server.go +++ b/vertical-pod-autoscaler/pkg/admission-controller/logic/server.go @@ -26,6 +26,7 @@ import ( "k8s.io/api/admission/v1beta1" "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" vpa_types 
"k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta2" metrics_admission "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/metrics/admission" @@ -74,42 +75,12 @@ func (s *AdmissionServer) getPatchesForPodResourceRequest(raw []byte, namespace patches := []patchRecord{} updatesAnnotation := []string{} for i, containerResources := range containersResources { - - // Add resources empty object if missing - if pod.Spec.Containers[i].Resources.Limits == nil && - pod.Spec.Containers[i].Resources.Requests == nil { - patches = append(patches, patchRecord{ - Op: "add", - Path: fmt.Sprintf("/spec/containers/%d/resources", i), - Value: v1.ResourceRequirements{}, - }) - } - - // Add request empty map if missing - if pod.Spec.Containers[i].Resources.Requests == nil { - patches = append(patches, patchRecord{ - Op: "add", - Path: fmt.Sprintf("/spec/containers/%d/resources/requests", i), - Value: v1.ResourceList{}}) - } - - annotations, found := annotationsPerContainer[pod.Spec.Containers[i].Name] - if !found { - annotations = make([]string, 0) - } - for resource, request := range containerResources.Requests { - // Set request - patches = append(patches, patchRecord{ - Op: "add", - Path: fmt.Sprintf("/spec/containers/%d/resources/requests/%s", i, resource), - Value: request.String()}) - annotations = append(annotations, fmt.Sprintf("%s request", resource)) - } - - updatesAnnotation = append(updatesAnnotation, fmt.Sprintf("container %d: ", i)+strings.Join(annotations, ", ")) + newPatches, newUpdatesAnnotation := s.getContainerPatch(pod, i, "requests", annotationsPerContainer, containerResources) + patches = append(patches, newPatches...) + updatesAnnotation = append(updatesAnnotation, newUpdatesAnnotation) } if len(updatesAnnotation) > 0 { - vpaAnnotationValue := fmt.Sprintf("Pod resources updated by %s: ", vpaName) + strings.Join(updatesAnnotation, "; ") + vpaAnnotationValue := fmt.Sprintf("Pod resources updated by %s: %s", vpaName, strings.Join(updatesAnnotation, "; ")) if pod.Annotations == nil { patches = append(patches, patchRecord{ Op: "add", @@ -125,6 +96,53 @@ func (s *AdmissionServer) getPatchesForPodResourceRequest(raw []byte, namespace return patches, nil } +func getPatchInitializingEmptyResources(i int) patchRecord { + return patchRecord{ + Op: "add", + Path: fmt.Sprintf("/spec/containers/%d/resources", i), + Value: v1.ResourceRequirements{}, + } +} + +func getPatchInitializingEmptyResourcesSubfield(i int, kind string) patchRecord { + return patchRecord{ + Op: "add", + Path: fmt.Sprintf("/spec/containers/%d/resources/%s", i, kind), + Value: v1.ResourceList{}, + } +} + +func getAddResourceRequirementValuePatch(i int, kind string, resource v1.ResourceName, quantity resource.Quantity) patchRecord { + return patchRecord{ + Op: "add", + Path: fmt.Sprintf("/spec/containers/%d/resources/%s/%s", i, kind, resource), + Value: quantity.String()} +} + +func (s *AdmissionServer) getContainerPatch(pod v1.Pod, i int, patchKind string, annotationsPerContainer vpa_api_util.ContainerToAnnotationsMap, containerResources ContainerResources) ([]patchRecord, string) { + var patches []patchRecord + // Add empty resources object if missing + if pod.Spec.Containers[i].Resources.Limits == nil && + pod.Spec.Containers[i].Resources.Requests == nil { + patches = append(patches, getPatchInitializingEmptyResources(i)) + } + // Add empty resources requests object if missing + if pod.Spec.Containers[i].Resources.Requests == nil { + patches = append(patches, 
getPatchInitializingEmptyResourcesSubfield(i, "requests")) + } + annotations, found := annotationsPerContainer[pod.Spec.Containers[i].Name] + if !found { + annotations = make([]string, 0) + } + for resource, request := range containerResources.Requests { + // Set request + patches = append(patches, getAddResourceRequirementValuePatch(i, "requests", resource, request)) + annotations = append(annotations, fmt.Sprintf("%s request", resource)) + } + updatesAnnotation := fmt.Sprintf("container %d: ", i) + strings.Join(annotations, ", ") + return patches, updatesAnnotation +} + func parseVPA(raw []byte) (*vpa_types.VerticalPodAutoscaler, error) { vpa := vpa_types.VerticalPodAutoscaler{} if err := json.Unmarshal(raw, &vpa); err != nil { From 3c40d617e4ce9e01d2f1dd93786b7d01e8f5b2fd Mon Sep 17 00:00:00 2001 From: Joachim Bartosik Date: Mon, 6 May 2019 17:11:12 +0200 Subject: [PATCH 05/12] Clean up VPA recommendation provider unit tests - Name test cases, - Drop duplicated test case, - Simpler error handling of invalid expected resource amounts, - Format code more in line with other tests. --- .../logic/recommendation_provider_test.go | 192 +++++++++--------- 1 file changed, 101 insertions(+), 91 deletions(-) diff --git a/vertical-pod-autoscaler/pkg/admission-controller/logic/recommendation_provider_test.go b/vertical-pod-autoscaler/pkg/admission-controller/logic/recommendation_provider_test.go index 00d1b46e750f..a5c4df2eca36 100644 --- a/vertical-pod-autoscaler/pkg/admission-controller/logic/recommendation_provider_test.go +++ b/vertical-pod-autoscaler/pkg/admission-controller/logic/recommendation_provider_test.go @@ -40,15 +40,6 @@ func parseLabelSelector(selector string) labels.Selector { } func TestUpdateResourceRequests(t *testing.T) { - type testCase struct { - pod *apiv1.Pod - vpas []*vpa_types.VerticalPodAutoscaler - expectedAction bool - expectedMem string - expectedCPU string - annotations vpa_api_util.ContainerToAnnotationsMap - labelSelector string - } containerName := "container1" vpaName := "vpa1" labels := map[string]string{"app": "testingApp"} @@ -78,89 +69,110 @@ func TestUpdateResourceRequests(t *testing.T) { vpaWithNilRecommendation := vpaBuilder.Get() vpaWithNilRecommendation.Status.Recommendation = nil - testCases := []testCase{{ - pod: uninitialized, - vpas: []*vpa_types.VerticalPodAutoscaler{vpa}, - expectedAction: true, - expectedMem: "200Mi", - expectedCPU: "2", - labelSelector: "app = testingApp", - }, { + testCases := []struct { + name string + pod *apiv1.Pod + vpas []*vpa_types.VerticalPodAutoscaler + expectedAction bool + expectedMem resource.Quantity + expectedCPU resource.Quantity + annotations vpa_api_util.ContainerToAnnotationsMap + labelSelector string + }{{ + name: "uninitialized pod", pod: uninitialized, vpas: []*vpa_types.VerticalPodAutoscaler{vpa}, expectedAction: true, - expectedMem: "200Mi", - expectedCPU: "2", + expectedMem: resource.MustParse("200Mi"), + expectedCPU: resource.MustParse("2"), labelSelector: "app = testingApp", - }, { - pod: uninitialized, - vpas: []*vpa_types.VerticalPodAutoscaler{targetBelowMinVPA}, - expectedAction: true, - expectedMem: "300Mi", // MinMemory is expected to be used - expectedCPU: "4", // MinCpu is expected to be used - annotations: vpa_api_util.ContainerToAnnotationsMap{ - containerName: []string{"cpu capped to minAllowed", "memory capped to minAllowed"}, + }, + { + name: "target below min", + pod: uninitialized, + vpas: []*vpa_types.VerticalPodAutoscaler{targetBelowMinVPA}, + expectedAction: true, + expectedMem: 
resource.MustParse("300Mi"), // MinMemory is expected to be used + expectedCPU: resource.MustParse("4"), // MinCpu is expected to be used + annotations: vpa_api_util.ContainerToAnnotationsMap{ + containerName: []string{"cpu capped to minAllowed", "memory capped to minAllowed"}, + }, + labelSelector: "app = testingApp", }, - labelSelector: "app = testingApp", - }, { - pod: uninitialized, - vpas: []*vpa_types.VerticalPodAutoscaler{targetAboveMaxVPA}, - expectedAction: true, - expectedMem: "1Gi", // MaxMemory is expected to be used - expectedCPU: "5", // MaxCpu is expected to be used - annotations: vpa_api_util.ContainerToAnnotationsMap{ - containerName: []string{"cpu capped to maxAllowed", "memory capped to maxAllowed"}, + { + name: "target above max", + pod: uninitialized, + vpas: []*vpa_types.VerticalPodAutoscaler{targetAboveMaxVPA}, + expectedAction: true, + expectedMem: resource.MustParse("1Gi"), // MaxMemory is expected to be used + expectedCPU: resource.MustParse("5"), // MaxCpu is expected to be used + annotations: vpa_api_util.ContainerToAnnotationsMap{ + containerName: []string{"cpu capped to maxAllowed", "memory capped to maxAllowed"}, + }, + labelSelector: "app = testingApp", }, - labelSelector: "app = testingApp", - }, { - pod: initialized, - vpas: []*vpa_types.VerticalPodAutoscaler{vpa}, - expectedAction: true, - expectedMem: "200Mi", - expectedCPU: "2", - labelSelector: "app = testingApp", - }, { - pod: initialized, - vpas: []*vpa_types.VerticalPodAutoscaler{vpaWithHighMemory}, - expectedAction: true, - expectedMem: "1000Mi", - expectedCPU: "2", - labelSelector: "app = testingApp", - }, { - pod: uninitialized, - vpas: []*vpa_types.VerticalPodAutoscaler{vpa}, - expectedAction: false, - labelSelector: "app = differentApp", - }, { - pod: uninitialized, - vpas: []*vpa_types.VerticalPodAutoscaler{offVPA}, - expectedAction: false, - labelSelector: "app = testingApp", - }, { - pod: uninitialized, - vpas: []*vpa_types.VerticalPodAutoscaler{offVPA, vpa}, - expectedAction: true, - expectedMem: "200Mi", - expectedCPU: "2", - labelSelector: "app = testingApp", - }, { - pod: initialized, - vpas: []*vpa_types.VerticalPodAutoscaler{vpaWithEmptyRecommendation}, - expectedAction: true, - expectedMem: "0", - expectedCPU: "0", - labelSelector: "app = testingApp", - }, { - pod: initialized, - vpas: []*vpa_types.VerticalPodAutoscaler{vpaWithNilRecommendation}, - expectedAction: true, - expectedMem: "0", - expectedCPU: "0", - labelSelector: "app = testingApp", - }} - for i, tc := range testCases { + { + name: "initialized pod", + pod: initialized, + vpas: []*vpa_types.VerticalPodAutoscaler{vpa}, + expectedAction: true, + expectedMem: resource.MustParse("200Mi"), + expectedCPU: resource.MustParse("2"), + labelSelector: "app = testingApp", + }, + { + name: "high memory", + pod: initialized, + vpas: []*vpa_types.VerticalPodAutoscaler{vpaWithHighMemory}, + expectedAction: true, + expectedMem: resource.MustParse("1000Mi"), + expectedCPU: resource.MustParse("2"), + labelSelector: "app = testingApp", + }, + { + name: "not matching selecetor", + pod: uninitialized, + vpas: []*vpa_types.VerticalPodAutoscaler{vpa}, + expectedAction: false, + labelSelector: "app = differentApp", + }, + { + name: "off mode", + pod: uninitialized, + vpas: []*vpa_types.VerticalPodAutoscaler{offVPA}, + expectedAction: false, + labelSelector: "app = testingApp", + }, + { + name: "two vpas one in off mode", + pod: uninitialized, + vpas: []*vpa_types.VerticalPodAutoscaler{offVPA, vpa}, + expectedAction: true, + expectedMem: 
resource.MustParse("200Mi"), + expectedCPU: resource.MustParse("2"), + labelSelector: "app = testingApp", + }, + { + name: "empty recommendation", + pod: initialized, + vpas: []*vpa_types.VerticalPodAutoscaler{vpaWithEmptyRecommendation}, + expectedAction: true, + expectedMem: resource.MustParse("0"), + expectedCPU: resource.MustParse("0"), + labelSelector: "app = testingApp", + }, + { + pod: initialized, + vpas: []*vpa_types.VerticalPodAutoscaler{vpaWithNilRecommendation}, + expectedAction: true, + expectedMem: resource.MustParse("0"), + expectedCPU: resource.MustParse("0"), + labelSelector: "app = testingApp", + }, + } + for _, tc := range testCases { - t.Run(fmt.Sprintf("test case number: %d", i), func(t *testing.T) { + t.Run(fmt.Sprintf(tc.name), func(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() @@ -186,14 +198,12 @@ func TestUpdateResourceRequests(t *testing.T) { assert.Equal(t, vpaName, name) assert.Nil(t, err) assert.Equal(t, len(resources), 1) - expectedCPU, err := resource.ParseQuantity(tc.expectedCPU) - assert.NoError(t, err) + cpuRequest := resources[0].Requests[apiv1.ResourceCPU] - assert.Equal(t, expectedCPU.Value(), cpuRequest.Value(), "cpu request doesn't match") - expectedMemory, err := resource.ParseQuantity(tc.expectedMem) - assert.NoError(t, err) + assert.Equal(t, tc.expectedCPU.Value(), cpuRequest.Value(), "cpu request doesn't match") + memoryRequest := resources[0].Requests[apiv1.ResourceMemory] - assert.Equal(t, expectedMemory.Value(), memoryRequest.Value(), "memory request doesn't match") + assert.Equal(t, tc.expectedMem.Value(), memoryRequest.Value(), "memory request doesn't match") assert.Len(t, annotations, len(tc.annotations)) if len(tc.annotations) > 0 { for annotationKey, annotationValues := range tc.annotations { From f317621759a41ae5f7be8172e8b5fdb1847bbb71 Mon Sep 17 00:00:00 2001 From: Joachim Bartosik Date: Mon, 6 May 2019 12:56:04 +0200 Subject: [PATCH 06/12] If user set limit to the same value as request keep them in sync --- .../logic/recommendation_provider.go | 19 +++- .../logic/recommendation_provider_test.go | 93 +++++++++++++----- .../pkg/admission-controller/logic/server.go | 26 +++-- .../admission-controller/logic/server_test.go | 98 ++++++++++++++++--- .../pkg/utils/test/test_container.go | 88 +++++++++++++++++ .../pkg/utils/test/test_pod.go | 18 ++-- .../pkg/utils/test/test_utils.go | 17 ++-- 7 files changed, 297 insertions(+), 62 deletions(-) create mode 100644 vertical-pod-autoscaler/pkg/utils/test/test_container.go diff --git a/vertical-pod-autoscaler/pkg/admission-controller/logic/recommendation_provider.go b/vertical-pod-autoscaler/pkg/admission-controller/logic/recommendation_provider.go index 68447ac206c8..5dc412d8f89a 100644 --- a/vertical-pod-autoscaler/pkg/admission-controller/logic/recommendation_provider.go +++ b/vertical-pod-autoscaler/pkg/admission-controller/logic/recommendation_provider.go @@ -17,7 +17,7 @@ limitations under the License. 
package logic import ( - v1 "k8s.io/api/core/v1" + "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/labels" vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta2" @@ -29,11 +29,15 @@ import ( // ContainerResources holds resources request for container type ContainerResources struct { + Limits v1.ResourceList Requests v1.ResourceList } func newContainerResources() ContainerResources { - return ContainerResources{Requests: v1.ResourceList{}} + return ContainerResources{ + Requests: v1.ResourceList{}, + Limits: v1.ResourceList{}, + } } // RecommendationProvider gets current recommendation, annotations and vpaName for the given pod. @@ -68,6 +72,17 @@ func getContainersResources(pod *v1.Pod, podRecommendation vpa_types.Recommended continue } resources[i].Requests = recommendation.Target + + // If user set containers limit and request for a resource to the same value then keep the limit in sync. + if container.Resources.Limits.Cpu() != nil && container.Resources.Limits.Cpu().Value() != 0 && + container.Resources.Requests.Cpu() != nil && *container.Resources.Limits.Cpu() == *container.Resources.Requests.Cpu() { + resources[i].Limits[v1.ResourceCPU] = *resources[i].Requests.Cpu() + } + + if container.Resources.Limits.Memory() != nil && container.Resources.Limits.Memory().Value() != 0 && + container.Resources.Requests.Memory() != nil && *container.Resources.Limits.Memory() == *container.Resources.Requests.Memory() { + resources[i].Limits[v1.ResourceMemory] = *resources[i].Requests.Memory() + } } return resources } diff --git a/vertical-pod-autoscaler/pkg/admission-controller/logic/recommendation_provider_test.go b/vertical-pod-autoscaler/pkg/admission-controller/logic/recommendation_provider_test.go index a5c4df2eca36..945173c5b36a 100644 --- a/vertical-pod-autoscaler/pkg/admission-controller/logic/recommendation_provider_test.go +++ b/vertical-pod-autoscaler/pkg/admission-controller/logic/recommendation_provider_test.go @@ -39,6 +39,11 @@ func parseLabelSelector(selector string) labels.Selector { return parsedSelector } +func mustParseResourcePointer(val string) *resource.Quantity { + q := resource.MustParse(val) + return &q +} + func TestUpdateResourceRequests(t *testing.T) { containerName := "container1" vpaName := "vpa1" @@ -51,11 +56,20 @@ func TestUpdateResourceRequests(t *testing.T) { WithMaxAllowed("3", "1Gi") vpa := vpaBuilder.Get() - uninitialized := test.Pod().WithName("test_uninitialized").AddContainer(test.BuildTestContainer(containerName, "", "")).Get() - uninitialized.ObjectMeta.Labels = labels + uninitialized := test.Pod().WithName("test_uninitialized"). + AddContainer(test.Container().WithName(containerName).Get()). + WithLabels(labels).Get() + + initializedContainer := test.Container().WithName(containerName). + WithCPURequest(resource.MustParse("1")).WithMemRequest(resource.MustParse("100Mi")).Get() + initialized := test.Pod().WithName("test_initialized"). + AddContainer(initializedContainer).WithLabels(labels).Get() - initialized := test.Pod().WithName("test_initialized").AddContainer(test.BuildTestContainer(containerName, "1", "100Mi")).Get() - initialized.ObjectMeta.Labels = labels + guaranteedCPUContainer := test.Container().WithName(containerName). + WithCPURequest(resource.MustParse("2")).WithCPULimit(resource.MustParse("2")). + WithMemLimit(resource.MustParse("200Mi")).WithMemRequest(resource.MustParse("200Mi")).Get() + guaranteedCPUPod := test.Pod().WithName("test_initialized"). 
+ AddContainer(guaranteedCPUContainer).WithLabels(labels).Get() offVPA := vpaBuilder.WithUpdateMode(vpa_types.UpdateModeOff).Get() @@ -70,23 +84,26 @@ func TestUpdateResourceRequests(t *testing.T) { vpaWithNilRecommendation.Status.Recommendation = nil testCases := []struct { - name string - pod *apiv1.Pod - vpas []*vpa_types.VerticalPodAutoscaler - expectedAction bool - expectedMem resource.Quantity - expectedCPU resource.Quantity - annotations vpa_api_util.ContainerToAnnotationsMap - labelSelector string - }{{ - name: "uninitialized pod", - pod: uninitialized, - vpas: []*vpa_types.VerticalPodAutoscaler{vpa}, - expectedAction: true, - expectedMem: resource.MustParse("200Mi"), - expectedCPU: resource.MustParse("2"), - labelSelector: "app = testingApp", - }, + name string + pod *apiv1.Pod + vpas []*vpa_types.VerticalPodAutoscaler + expectedAction bool + expectedMem resource.Quantity + expectedCPU resource.Quantity + expectedCPULimit *resource.Quantity + expectedMemLimit *resource.Quantity + annotations vpa_api_util.ContainerToAnnotationsMap + labelSelector string + }{ + { + name: "uninitialized pod", + pod: uninitialized, + vpas: []*vpa_types.VerticalPodAutoscaler{vpa}, + expectedAction: true, + expectedMem: resource.MustParse("200Mi"), + expectedCPU: resource.MustParse("2"), + labelSelector: "app = testingApp", + }, { name: "target below min", pod: uninitialized, @@ -169,6 +186,17 @@ func TestUpdateResourceRequests(t *testing.T) { expectedCPU: resource.MustParse("0"), labelSelector: "app = testingApp", }, + { + name: "guaranteed resources", + pod: guaranteedCPUPod, + vpas: []*vpa_types.VerticalPodAutoscaler{vpa}, + expectedAction: true, + expectedMem: resource.MustParse("200Mi"), + expectedCPU: resource.MustParse("2"), + expectedCPULimit: mustParseResourcePointer("2"), + expectedMemLimit: mustParseResourcePointer("200Mi"), + labelSelector: "app = testingApp", + }, } for _, tc := range testCases { @@ -197,13 +225,34 @@ func TestUpdateResourceRequests(t *testing.T) { if tc.expectedAction { assert.Equal(t, vpaName, name) assert.Nil(t, err) - assert.Equal(t, len(resources), 1) + if !assert.Equal(t, len(resources), 1) { + return + } cpuRequest := resources[0].Requests[apiv1.ResourceCPU] assert.Equal(t, tc.expectedCPU.Value(), cpuRequest.Value(), "cpu request doesn't match") memoryRequest := resources[0].Requests[apiv1.ResourceMemory] assert.Equal(t, tc.expectedMem.Value(), memoryRequest.Value(), "memory request doesn't match") + + cpuLimit, cpuLimitPresent := resources[0].Limits[apiv1.ResourceCPU] + if tc.expectedCPULimit == nil { + assert.False(t, cpuLimitPresent, "expected no cpu limit, got %s", cpuLimit.String()) + } else { + if assert.True(t, cpuLimitPresent, "expected cpu limit, but it's missing") { + assert.Equal(t, *tc.expectedCPULimit, cpuLimit, "cpu limit doesn't match") + } + } + + memLimit, memLimitPresent := resources[0].Limits[apiv1.ResourceMemory] + if tc.expectedMemLimit == nil { + assert.False(t, memLimitPresent, "expected no memory limit, got %s", memLimit.String()) + } else { + if assert.True(t, memLimitPresent, "expected cpu limit, but it's missing") { + assert.Equal(t, *tc.expectedMemLimit, memLimit, "memory limit doesn't match") + } + } + assert.Len(t, annotations, len(tc.annotations)) if len(tc.annotations) > 0 { for annotationKey, annotationValues := range tc.annotations { diff --git a/vertical-pod-autoscaler/pkg/admission-controller/logic/server.go b/vertical-pod-autoscaler/pkg/admission-controller/logic/server.go index 9c1b7148f95e..885dae569147 100644 --- 
a/vertical-pod-autoscaler/pkg/admission-controller/logic/server.go +++ b/vertical-pod-autoscaler/pkg/admission-controller/logic/server.go @@ -126,23 +126,31 @@ func (s *AdmissionServer) getContainerPatch(pod v1.Pod, i int, patchKind string, pod.Spec.Containers[i].Resources.Requests == nil { patches = append(patches, getPatchInitializingEmptyResources(i)) } - // Add empty resources requests object if missing - if pod.Spec.Containers[i].Resources.Requests == nil { - patches = append(patches, getPatchInitializingEmptyResourcesSubfield(i, "requests")) - } + annotations, found := annotationsPerContainer[pod.Spec.Containers[i].Name] if !found { annotations = make([]string, 0) } - for resource, request := range containerResources.Requests { - // Set request - patches = append(patches, getAddResourceRequirementValuePatch(i, "requests", resource, request)) - annotations = append(annotations, fmt.Sprintf("%s request", resource)) - } + + patches, annotations = appendPatchesAndAnnotations(patches, annotations, pod.Spec.Containers[i].Resources.Requests, i, containerResources.Requests, "requests", "request") + patches, annotations = appendPatchesAndAnnotations(patches, annotations, pod.Spec.Containers[i].Resources.Limits, i, containerResources.Limits, "limits", "limit") + updatesAnnotation := fmt.Sprintf("container %d: ", i) + strings.Join(annotations, ", ") return patches, updatesAnnotation } +func appendPatchesAndAnnotations(patches []patchRecord, annotations []string, current v1.ResourceList, containerIndex int, resources v1.ResourceList, fieldName, resourceName string) ([]patchRecord, []string) { + // Add empty object if it's missing and we're about to fill it. + if current == nil && len(resources) > 0 { + patches = append(patches, getPatchInitializingEmptyResourcesSubfield(containerIndex, fieldName)) + } + for resource, request := range resources { + patches = append(patches, getAddResourceRequirementValuePatch(containerIndex, fieldName, resource, request)) + annotations = append(annotations, fmt.Sprintf("%s %s", resource, resourceName)) + } + return patches, annotations +} + func parseVPA(raw []byte) (*vpa_types.VerticalPodAutoscaler, error) { vpa := vpa_types.VerticalPodAutoscaler{} if err := json.Unmarshal(raw, &vpa); err != nil { diff --git a/vertical-pod-autoscaler/pkg/admission-controller/logic/server_test.go b/vertical-pod-autoscaler/pkg/admission-controller/logic/server_test.go index 19499c38b4ba..35ed1e7ff9ce 100644 --- a/vertical-pod-autoscaler/pkg/admission-controller/logic/server_test.go +++ b/vertical-pod-autoscaler/pkg/admission-controller/logic/server_test.go @@ -32,6 +32,8 @@ import ( const ( cpu = "cpu" unobtanium = "unobtanium" + limit = "limit" + request = "request" ) type fakePodPreProcessor struct { @@ -69,6 +71,14 @@ func addRequestsPatch(idx int) patchRecord { } } +func addLimitsPatch(idx int) patchRecord { + return patchRecord{ + "add", + fmt.Sprintf("/spec/containers/%d/resources/limits", idx), + apiv1.ResourceList{}, + } +} + func addResourceRequestPatch(index int, res, amount string) patchRecord { return patchRecord{ "add", @@ -77,12 +87,20 @@ func addResourceRequestPatch(index int, res, amount string) patchRecord { } } -func addAnnotationRequest(updateResources [][]string) patchRecord { +func addResourceLimitPatch(index int, res, amount string) patchRecord { + return patchRecord{ + "add", + fmt.Sprintf("/spec/containers/%d/resources/limits/%s", index, res), + resource.MustParse(amount), + } +} + +func addAnnotationRequest(updateResources [][]string, kind string) 
patchRecord { requests := make([]string, 0) for idx, podResources := range updateResources { podRequests := make([]string, 0) for _, resource := range podResources { - podRequests = append(podRequests, resource+" request") + podRequests = append(podRequests, resource+" "+kind) } requests = append(requests, fmt.Sprintf("container %d: %s", idx, strings.Join(podRequests, ", "))) } @@ -147,7 +165,7 @@ func TestGetPatchesForResourceRequest(t *testing.T) { namespace: "default", recommendResources: []ContainerResources{ { - apiv1.ResourceList{ + Requests: apiv1.ResourceList{ cpu: resource.MustParse("1"), }, }, @@ -158,7 +176,7 @@ func TestGetPatchesForResourceRequest(t *testing.T) { addResourcesPatch(0), addRequestsPatch(0), addResourceRequestPatch(0, cpu, "1"), - addAnnotationRequest([][]string{{cpu}}), + addAnnotationRequest([][]string{{cpu}}, request), }, }, { @@ -180,7 +198,7 @@ func TestGetPatchesForResourceRequest(t *testing.T) { namespace: "default", recommendResources: []ContainerResources{ { - apiv1.ResourceList{ + Requests: apiv1.ResourceList{ cpu: resource.MustParse("1"), }, }, @@ -189,7 +207,7 @@ func TestGetPatchesForResourceRequest(t *testing.T) { recommendName: "name", expectPatches: []patchRecord{ addResourceRequestPatch(0, cpu, "1"), - addAnnotationRequest([][]string{{cpu}}), + addAnnotationRequest([][]string{{cpu}}, request), }, }, { @@ -212,12 +230,12 @@ func TestGetPatchesForResourceRequest(t *testing.T) { namespace: "default", recommendResources: []ContainerResources{ { - apiv1.ResourceList{ + Requests: apiv1.ResourceList{ cpu: resource.MustParse("1"), }, }, { - apiv1.ResourceList{ + Requests: apiv1.ResourceList{ cpu: resource.MustParse("2"), }, }, @@ -229,7 +247,63 @@ func TestGetPatchesForResourceRequest(t *testing.T) { addResourcesPatch(1), addRequestsPatch(1), addResourceRequestPatch(1, cpu, "2"), - addAnnotationRequest([][]string{{cpu}, {cpu}}), + addAnnotationRequest([][]string{{cpu}, {cpu}}, request), + }, + }, + { + name: "new cpu limit", + podJson: []byte( + `{ + "spec": { + "containers": [{}] + } + }`), + namespace: "default", + recommendResources: []ContainerResources{ + { + Limits: apiv1.ResourceList{ + cpu: resource.MustParse("1"), + }, + }, + }, + recommendAnnotations: vpa_api_util.ContainerToAnnotationsMap{}, + recommendName: "name", + expectPatches: []patchRecord{ + addResourcesPatch(0), + addLimitsPatch(0), + addResourceLimitPatch(0, cpu, "1"), + addAnnotationRequest([][]string{{cpu}}, limit), + }, + }, + { + name: "replacement cpu limit", + podJson: []byte( + `{ + "spec": { + "containers": [ + { + "resources": { + "limits": { + "cpu": "0" + } + } + } + ] + } + }`), + namespace: "default", + recommendResources: []ContainerResources{ + { + Limits: apiv1.ResourceList{ + cpu: resource.MustParse("1"), + }, + }, + }, + recommendAnnotations: vpa_api_util.ContainerToAnnotationsMap{}, + recommendName: "name", + expectPatches: []patchRecord{ + addResourceLimitPatch(0, cpu, "1"), + addAnnotationRequest([][]string{{cpu}}, limit), }, }, } @@ -246,7 +320,7 @@ func TestGetPatchesForResourceRequest(t *testing.T) { assert.Equal(t, tc.expectError.Error(), err.Error()) } } - if assert.Equal(t, len(tc.expectPatches), len(patches)) { + if assert.Equal(t, len(tc.expectPatches), len(patches), fmt.Sprintf("got %+v, want %+v", patches, tc.expectPatches)) { for i, gotPatch := range patches { if !eqPatch(gotPatch, tc.expectPatches[i]) { t.Errorf("Expected patch at position %d to be %+v, got %+v", i, tc.expectPatches[i], gotPatch) @@ -262,7 +336,7 @@ func 
TestGetPatchesForResourceRequest_TwoReplacementResources(t *testing.T) { fppp := fakePodPreProcessor{} recommendResources := []ContainerResources{ { - apiv1.ResourceList{ + Requests: apiv1.ResourceList{ cpu: resource.MustParse("1"), unobtanium: resource.MustParse("2"), }, @@ -294,7 +368,7 @@ func TestGetPatchesForResourceRequest_TwoReplacementResources(t *testing.T) { assert.True(t, eqPatch(patches[0], cpuUpdate) || eqPatch(patches[0], unobtaniumUpdate)) assert.True(t, eqPatch(patches[1], cpuUpdate) || eqPatch(patches[1], unobtaniumUpdate)) assert.False(t, eqPatch(patches[0], patches[1])) - assert.True(t, eqPatch(patches[2], addAnnotationRequest([][]string{{cpu, unobtanium}})) || eqPatch(patches[2], addAnnotationRequest([][]string{{unobtanium, cpu}}))) + assert.True(t, eqPatch(patches[2], addAnnotationRequest([][]string{{cpu, unobtanium}}, request)) || eqPatch(patches[2], addAnnotationRequest([][]string{{unobtanium, cpu}}, request))) } } diff --git a/vertical-pod-autoscaler/pkg/utils/test/test_container.go b/vertical-pod-autoscaler/pkg/utils/test/test_container.go new file mode 100644 index 000000000000..a2336fdcb818 --- /dev/null +++ b/vertical-pod-autoscaler/pkg/utils/test/test_container.go @@ -0,0 +1,88 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package test + +import ( + apiv1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" +) + +type containerBuilder struct { + name string + cpuRequest *resource.Quantity + memRequest *resource.Quantity + cpuLimit *resource.Quantity + memLimit *resource.Quantity +} + +// Container returns object that helps build containers for tests. 
+func Container() *containerBuilder { + return &containerBuilder{} +} + +func (cb *containerBuilder) WithName(name string) *containerBuilder { + r := *cb + r.name = name + return &r +} + +func (cb *containerBuilder) WithCPURequest(cpuRequest resource.Quantity) *containerBuilder { + r := *cb + r.cpuRequest = &cpuRequest + return &r +} + +func (cb *containerBuilder) WithMemRequest(memRequest resource.Quantity) *containerBuilder { + r := *cb + r.memRequest = &memRequest + return &r +} + +func (cb *containerBuilder) WithCPULimit(cpuLimit resource.Quantity) *containerBuilder { + r := *cb + r.cpuLimit = &cpuLimit + return &r +} + +func (cb *containerBuilder) WithMemLimit(memLimit resource.Quantity) *containerBuilder { + r := *cb + r.memLimit = &memLimit + return &r +} + +func (cb *containerBuilder) Get() apiv1.Container { + container := apiv1.Container{ + Name: cb.name, + Resources: apiv1.ResourceRequirements{ + Requests: apiv1.ResourceList{}, + Limits: apiv1.ResourceList{}, + }, + } + if cb.cpuRequest != nil { + container.Resources.Requests[apiv1.ResourceCPU] = *cb.cpuRequest + } + if cb.memRequest != nil { + container.Resources.Requests[apiv1.ResourceMemory] = *cb.memRequest + } + if cb.cpuLimit != nil { + container.Resources.Limits[apiv1.ResourceCPU] = *cb.cpuLimit + } + if cb.memLimit != nil { + container.Resources.Limits[apiv1.ResourceMemory] = *cb.memLimit + } + return container +} diff --git a/vertical-pod-autoscaler/pkg/utils/test/test_pod.go b/vertical-pod-autoscaler/pkg/utils/test/test_pod.go index 5468835766ba..a2ffe4254f75 100644 --- a/vertical-pod-autoscaler/pkg/utils/test/test_pod.go +++ b/vertical-pod-autoscaler/pkg/utils/test/test_pod.go @@ -26,6 +26,7 @@ type PodBuilder interface { WithName(name string) PodBuilder AddContainer(container apiv1.Container) PodBuilder WithCreator(creatorObjectMeta *metav1.ObjectMeta, creatorTypeMeta *metav1.TypeMeta) PodBuilder + WithLabels(labels map[string]string) PodBuilder WithPhase(phase apiv1.PodPhase) PodBuilder Get() *apiv1.Pod } @@ -37,20 +38,21 @@ func Pod() PodBuilder { } } -type container struct { - name string - cpu string - mem string -} - type podBuilderImpl struct { name string containers []apiv1.Container creatorObjectMeta *metav1.ObjectMeta creatorTypeMeta *metav1.TypeMeta + labels map[string]string phase apiv1.PodPhase } +func (pb *podBuilderImpl) WithLabels(labels map[string]string) PodBuilder { + r := *pb + r.labels = labels + return &r +} + func (pb *podBuilderImpl) WithName(name string) PodBuilder { r := *pb r.name = name @@ -91,6 +93,10 @@ func (pb *podBuilderImpl) Get() *apiv1.Pod { }, } + if pb.labels != nil { + pod.ObjectMeta.Labels = pb.labels + } + if pb.creatorObjectMeta != nil && pb.creatorTypeMeta != nil { isController := true pod.ObjectMeta.OwnerReferences = []metav1.OwnerReference{ diff --git a/vertical-pod-autoscaler/pkg/utils/test/test_utils.go b/vertical-pod-autoscaler/pkg/utils/test/test_utils.go index e415c9ee1a8a..b587d131b3fa 100644 --- a/vertical-pod-autoscaler/pkg/utils/test/test_utils.go +++ b/vertical-pod-autoscaler/pkg/utils/test/test_utils.go @@ -30,7 +30,7 @@ import ( vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta2" vpa_lister_v1beta1 "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/client/listers/autoscaling.k8s.io/v1beta1" vpa_lister "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/client/listers/autoscaling.k8s.io/v1beta2" - v1 "k8s.io/client-go/listers/core/v1" + "k8s.io/client-go/listers/core/v1" "k8s.io/client-go/tools/record" ) @@ -41,23 +41,18 @@ var ( // 
BuildTestContainer creates container with specified resources func BuildTestContainer(containerName, cpu, mem string) apiv1.Container { - container := apiv1.Container{ - Name: containerName, - Resources: apiv1.ResourceRequirements{ - Requests: apiv1.ResourceList{}, - }, - } + // TODO: Use builder directly, remove this function. + builder := Container().WithName(containerName) if len(cpu) > 0 { cpuVal, _ := resource.ParseQuantity(cpu) - container.Resources.Requests[apiv1.ResourceCPU] = cpuVal + builder = builder.WithCPURequest(cpuVal) } if len(mem) > 0 { memVal, _ := resource.ParseQuantity(mem) - container.Resources.Requests[apiv1.ResourceMemory] = memVal + builder = builder.WithMemRequest(memVal) } - - return container + return builder.Get() } // BuildTestPolicy creates ResourcesPolicy with specified constraints From de616f531be84de37513964f3ca3721ae3c9a85a Mon Sep 17 00:00:00 2001 From: Joachim Bartosik Date: Tue, 7 May 2019 16:01:30 +0200 Subject: [PATCH 07/12] When limit is set but request is not treat them as matching --- .../logic/recommendation_provider.go | 21 ++++++++++++---- .../logic/recommendation_provider_test.go | 24 +++++++++++++++---- 2 files changed, 36 insertions(+), 9 deletions(-) diff --git a/vertical-pod-autoscaler/pkg/admission-controller/logic/recommendation_provider.go b/vertical-pod-autoscaler/pkg/admission-controller/logic/recommendation_provider.go index 5dc412d8f89a..3742fb0814ef 100644 --- a/vertical-pod-autoscaler/pkg/admission-controller/logic/recommendation_provider.go +++ b/vertical-pod-autoscaler/pkg/admission-controller/logic/recommendation_provider.go @@ -18,6 +18,7 @@ package logic import ( "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/labels" vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta2" @@ -60,6 +61,19 @@ func NewRecommendationProvider(vpaLister vpa_lister.VerticalPodAutoscalerLister, } } +func limitMatchesRequest(limit, request *resource.Quantity) bool { + // Limit not set, it cannot match the request. + if limit == nil || limit.Value() == 0 { + return false + } + // Limit set but request not set - K8s will treat the pod as if they were equal. + if request == nil || request.Value() == 0 { + return true + } + // Limit and request are set. Check if they are equal. + return request.Value() == limit.Value() +} + // getContainersResources returns the recommended resources for each container in the given pod in the same order they are specified in the pod.Spec. func getContainersResources(pod *v1.Pod, podRecommendation vpa_types.RecommendedPodResources) []ContainerResources { resources := make([]ContainerResources, len(pod.Spec.Containers)) @@ -73,14 +87,11 @@ func getContainersResources(pod *v1.Pod, podRecommendation vpa_types.Recommended } resources[i].Requests = recommendation.Target - // If user set containers limit and request for a resource to the same value then keep the limit in sync. 
- if container.Resources.Limits.Cpu() != nil && container.Resources.Limits.Cpu().Value() != 0 && - container.Resources.Requests.Cpu() != nil && *container.Resources.Limits.Cpu() == *container.Resources.Requests.Cpu() { + if limitMatchesRequest(container.Resources.Limits.Cpu(), container.Resources.Requests.Cpu()) { resources[i].Limits[v1.ResourceCPU] = *resources[i].Requests.Cpu() } - if container.Resources.Limits.Memory() != nil && container.Resources.Limits.Memory().Value() != 0 && - container.Resources.Requests.Memory() != nil && *container.Resources.Limits.Memory() == *container.Resources.Requests.Memory() { + if limitMatchesRequest(container.Resources.Limits.Memory(), container.Resources.Requests.Memory()) { resources[i].Limits[v1.ResourceMemory] = *resources[i].Requests.Memory() } } diff --git a/vertical-pod-autoscaler/pkg/admission-controller/logic/recommendation_provider_test.go b/vertical-pod-autoscaler/pkg/admission-controller/logic/recommendation_provider_test.go index 945173c5b36a..5a71bab3a764 100644 --- a/vertical-pod-autoscaler/pkg/admission-controller/logic/recommendation_provider_test.go +++ b/vertical-pod-autoscaler/pkg/admission-controller/logic/recommendation_provider_test.go @@ -65,11 +65,16 @@ func TestUpdateResourceRequests(t *testing.T) { initialized := test.Pod().WithName("test_initialized"). AddContainer(initializedContainer).WithLabels(labels).Get() - guaranteedCPUContainer := test.Container().WithName(containerName). + limitsMatchRequestsContainer := test.Container().WithName(containerName). WithCPURequest(resource.MustParse("2")).WithCPULimit(resource.MustParse("2")). WithMemLimit(resource.MustParse("200Mi")).WithMemRequest(resource.MustParse("200Mi")).Get() - guaranteedCPUPod := test.Pod().WithName("test_initialized"). - AddContainer(guaranteedCPUContainer).WithLabels(labels).Get() + limitsMatchRequestsPod := test.Pod().WithName("test_initialized"). + AddContainer(limitsMatchRequestsContainer).WithLabels(labels).Get() + + limitsNoRequestsContainer := test.Container().WithName(containerName). + WithCPULimit(resource.MustParse("2")).WithMemLimit(resource.MustParse("200Mi")).Get() + limitsNoRequestsPod := test.Pod().WithName("test_initialized"). 
+ AddContainer(limitsNoRequestsContainer).WithLabels(labels).Get() offVPA := vpaBuilder.WithUpdateMode(vpa_types.UpdateModeOff).Get() @@ -188,7 +193,18 @@ func TestUpdateResourceRequests(t *testing.T) { }, { name: "guaranteed resources", - pod: guaranteedCPUPod, + pod: limitsMatchRequestsPod, + vpas: []*vpa_types.VerticalPodAutoscaler{vpa}, + expectedAction: true, + expectedMem: resource.MustParse("200Mi"), + expectedCPU: resource.MustParse("2"), + expectedCPULimit: mustParseResourcePointer("2"), + expectedMemLimit: mustParseResourcePointer("200Mi"), + labelSelector: "app = testingApp", + }, + { + name: "guaranteed resources - no request", + pod: limitsNoRequestsPod, vpas: []*vpa_types.VerticalPodAutoscaler{vpa}, expectedAction: true, expectedMem: resource.MustParse("200Mi"), From 62fd7873695f1070bf398ba995f89064560b62ff Mon Sep 17 00:00:00 2001 From: Joachim Bartosik Date: Wed, 8 May 2019 15:41:09 +0200 Subject: [PATCH 08/12] Add e2e test for keeping limit equal to request --- .../e2e/v1beta2/admission_controller.go | 34 +++++++++++++++++++ vertical-pod-autoscaler/e2e/v1beta2/common.go | 13 +++++++ 2 files changed, 47 insertions(+) diff --git a/vertical-pod-autoscaler/e2e/v1beta2/admission_controller.go b/vertical-pod-autoscaler/e2e/v1beta2/admission_controller.go index 9d964efb36ea..f3bce9c7b393 100644 --- a/vertical-pod-autoscaler/e2e/v1beta2/admission_controller.go +++ b/vertical-pod-autoscaler/e2e/v1beta2/admission_controller.go @@ -195,6 +195,40 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", func() { } }) + ginkgo.It("keeps limits equal to request", func() { + d := NewHamsterDeploymentWithGuaranteedResources(f, ParseQuantityOrDie("100m") /*cpu*/, ParseQuantityOrDie("100Mi") /*memory*/) + + ginkgo.By("Setting up a VPA CRD") + vpaCRD := NewVPA(f, "hamster-vpa", hamsterTargetRef) + vpaCRD.Status.Recommendation = &vpa_types.RecommendedPodResources{ + ContainerRecommendations: []vpa_types.RecommendedContainerResources{{ + ContainerName: "hamster", + Target: apiv1.ResourceList{ + apiv1.ResourceCPU: ParseQuantityOrDie("250m"), + apiv1.ResourceMemory: ParseQuantityOrDie("200Mi"), + }, + }}, + } + vpaCRD.Spec.ResourcePolicy = &vpa_types.PodResourcePolicy{ + ContainerPolicies: []vpa_types.ContainerResourcePolicy{{ + ContainerName: "hamster", + }}, + } + InstallVPA(f, vpaCRD) + + ginkgo.By("Setting up a hamster deployment") + podList := startDeploymentPods(f, d) + + // Originally Pods had 100m CPU, 100Mi of memory, but admission controller + // should change it to 250m CPU and 200Mi of memory. 
Limits and requests should stay equal + for _, pod := range podList.Items { + gomega.Expect(pod.Spec.Containers[0].Resources.Requests[apiv1.ResourceCPU]).To(gomega.Equal(ParseQuantityOrDie("250m"))) + gomega.Expect(pod.Spec.Containers[0].Resources.Requests[apiv1.ResourceMemory]).To(gomega.Equal(ParseQuantityOrDie("200Mi"))) + gomega.Expect(pod.Spec.Containers[0].Resources.Limits[apiv1.ResourceCPU]).To(gomega.Equal(ParseQuantityOrDie("250m"))) + gomega.Expect(pod.Spec.Containers[0].Resources.Limits[apiv1.ResourceMemory]).To(gomega.Equal(ParseQuantityOrDie("200Mi"))) + } + }) + }) func startDeploymentPods(f *framework.Framework, deployment *appsv1.Deployment) *apiv1.PodList { diff --git a/vertical-pod-autoscaler/e2e/v1beta2/common.go b/vertical-pod-autoscaler/e2e/v1beta2/common.go index 7af01f820e75..93d33906e311 100644 --- a/vertical-pod-autoscaler/e2e/v1beta2/common.go +++ b/vertical-pod-autoscaler/e2e/v1beta2/common.go @@ -130,6 +130,19 @@ func NewHamsterDeploymentWithResources(f *framework.Framework, cpuQuantity, memo return d } +// NewHamsterDeploymentWithGuaranteedResources creates a simple hamster deployment with specific +// resource requests for e2e test purposes. Since the container in the pod specifies resource limits +// but not resource requests K8s will set requests equal to limits and the pod will have guaranteed +// QoS class. +func NewHamsterDeploymentWithGuaranteedResources(f *framework.Framework, cpuQuantity, memoryQuantity resource.Quantity) *appsv1.Deployment { + d := NewHamsterDeployment(f) + d.Spec.Template.Spec.Containers[0].Resources.Limits = apiv1.ResourceList{ + apiv1.ResourceCPU: cpuQuantity, + apiv1.ResourceMemory: memoryQuantity, + } + return d +} + // GetHamsterPods returns running hamster pods (matched by hamsterLabels) func GetHamsterPods(f *framework.Framework) (*apiv1.PodList, error) { label := labels.SelectorFromSet(labels.Set(hamsterLabels)) From 77ea843812db1cdd560b44a4aed9ad96a23173ea Mon Sep 17 00:00:00 2001 From: Joachim Bartosik Date: Thu, 9 May 2019 16:48:36 +0200 Subject: [PATCH 09/12] Stop capping recommendations to limit Users should use ContainerResourcePolicy if they want to control VPA recommendation range. --- .../pkg/utils/vpa/capping.go | 21 ------------------- .../pkg/utils/vpa/capping_test.go | 12 +++++------ 2 files changed, 6 insertions(+), 27 deletions(-) diff --git a/vertical-pod-autoscaler/pkg/utils/vpa/capping.go b/vertical-pod-autoscaler/pkg/utils/vpa/capping.go index 38cd39352eb6..0703e90123d3 100644 --- a/vertical-pod-autoscaler/pkg/utils/vpa/capping.go +++ b/vertical-pod-autoscaler/pkg/utils/vpa/capping.go @@ -37,7 +37,6 @@ type cappingAction string var ( cappedToMinAllowed cappingAction = "capped to minAllowed" cappedToMaxAllowed cappingAction = "capped to maxAllowed" - cappedToLimit cappingAction = "capped to container limit" ) func toCappingAnnotation(resourceName apiv1.ResourceName, action cappingAction) string { @@ -105,11 +104,6 @@ func getCappedRecommendationForContainer( if genAnnotations { cappingAnnotations = append(cappingAnnotations, annotations...) } - // TODO: If limits and policy are conflicting, set some condition on the VPA. - annotations = capRecommendationToContainerLimit(recommendation, container) - if genAnnotations { - cappingAnnotations = append(cappingAnnotations, annotations...) 
-		}
 	}
 
 	process(cappedRecommendations.Target, true)
@@ -119,21 +113,6 @@ func getCappedRecommendationForContainer(
 	return cappedRecommendations, cappingAnnotations, nil
 }
 
-// capRecommendationToContainerLimit makes sure recommendation is not above current limit for the container.
-// If this function makes adjustments appropriate annotations are returned.
-func capRecommendationToContainerLimit(recommendation apiv1.ResourceList, container apiv1.Container) []string {
-	annotations := make([]string, 0)
-	// Iterate over limits set in the container. Unset means Infinite limit.
-	for resourceName, limit := range container.Resources.Limits {
-		recommendedValue, found := recommendation[resourceName]
-		if found && recommendedValue.MilliValue() > limit.MilliValue() {
-			recommendation[resourceName] = limit
-			annotations = append(annotations, toCappingAnnotation(resourceName, cappedToLimit))
-		}
-	}
-	return annotations
-}
-
 // applyVPAPolicy updates recommendation if recommended resources are outside of limits defined in VPA resources policy
 func applyVPAPolicy(recommendation apiv1.ResourceList, policy *vpa_types.ContainerResourcePolicy) []string {
 	if policy == nil {
diff --git a/vertical-pod-autoscaler/pkg/utils/vpa/capping_test.go b/vertical-pod-autoscaler/pkg/utils/vpa/capping_test.go
index 847332a0b992..e0ad3d342a83 100644
--- a/vertical-pod-autoscaler/pkg/utils/vpa/capping_test.go
+++ b/vertical-pod-autoscaler/pkg/utils/vpa/capping_test.go
@@ -47,7 +47,7 @@ func TestRecommendationNotAvailable(t *testing.T) {
 	assert.Empty(t, res.ContainerRecommendations)
 }
 
-func TestRecommendationCappedToLimit(t *testing.T) {
+func TestRecommendationNotCappedToLimit(t *testing.T) {
 	pod := test.Pod().WithName("pod1").AddContainer(test.BuildTestContainer("ctr-name", "", "")).Get()
 	pod.Spec.Containers[0].Resources.Limits =
 		apiv1.ResourceList{
@@ -75,16 +75,16 @@ func TestRecommendationCappedToLimit(t *testing.T) {
 	res, annotations, err := NewCappingRecommendationProcessor().Apply(&podRecommendation, &policy, nil, pod)
 	assert.Nil(t, err)
 	assert.Equal(t, apiv1.ResourceList{
-		apiv1.ResourceCPU:    *resource.NewScaledQuantity(3, 1),
-		apiv1.ResourceMemory: *resource.NewScaledQuantity(7000, 1),
+		apiv1.ResourceCPU:    *resource.NewScaledQuantity(10, 1),
+		apiv1.ResourceMemory: *resource.NewScaledQuantity(8000, 1),
 	}, res.ContainerRecommendations[0].Target)
 
-	assert.Contains(t, annotations, "ctr-name")
-	assert.Contains(t, annotations["ctr-name"], "memory capped to container limit")
+	assert.NotContains(t, annotations, "ctr-name")
+	assert.NotContains(t, annotations["ctr-name"], "memory capped to container limit")
 
 	assert.Equal(t, apiv1.ResourceList{
 		apiv1.ResourceCPU:    *resource.NewScaledQuantity(2, 1),
-		apiv1.ResourceMemory: *resource.NewScaledQuantity(7000, 1),
+		apiv1.ResourceMemory: *resource.NewScaledQuantity(9000, 1),
 	}, res.ContainerRecommendations[0].UpperBound)
 }
 
From b73499ac92c0213b2365504a6f87fce4f83b2332 Mon Sep 17 00:00:00 2001
From: Joachim Bartosik
Date: Mon, 13 May 2019 17:32:54 +0200
Subject: [PATCH 10/12] Preserve proportion between limit and request

Except when the proportion is huge and the recommendation is huge, so that
the milli value of the limit would exceed the int64 maximum. In that case
cap the limit's milli value to the int64 maximum.
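To illustrate the proportion-preserving math, here is a standalone sketch with made-up names (it is not the code this patch adds; the real helper introduced below is getProportionalLimit):

    package main

    import (
    	"fmt"
    	"math"
    	"math/big"

    	"k8s.io/apimachinery/pkg/api/resource"
    )

    // proportionalLimit scales originalLimit by recommendedRequest/originalRequest,
    // working on milli values. The intermediate product is held in a big.Int so it
    // cannot overflow; only the final result is capped to MaxInt64 milli units.
    func proportionalLimit(originalLimit, originalRequest, recommendedRequest *resource.Quantity) *resource.Quantity {
    	scaled := new(big.Int).Mul(
    		big.NewInt(recommendedRequest.MilliValue()),
    		big.NewInt(originalLimit.MilliValue()))
    	scaled.Div(scaled, big.NewInt(originalRequest.MilliValue()))
    	if !scaled.IsInt64() {
    		// Proportion times recommended request does not fit in int64: cap it.
    		return resource.NewMilliQuantity(math.MaxInt64, recommendedRequest.Format)
    	}
    	return resource.NewMilliQuantity(scaled.Int64(), recommendedRequest.Format)
    }

    func main() {
    	limit := resource.MustParse("2")       // original limit
    	request := resource.MustParse("1")     // original request, so the limit-to-request proportion is 2
    	recommended := resource.MustParse("3") // recommended request
    	// Prints "6": the recommended request scaled by the original 2:1 proportion.
    	fmt.Println(proportionalLimit(&limit, &request, &recommended).String())
    }

Doing the multiplication in math/big keeps the intermediate product exact even when both factors are close to the int64 maximum, so only the final conversion back to a Quantity needs to be capped.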
--- .../logic/recommendation_provider.go | 69 ++++++++++++++----- .../logic/recommendation_provider_test.go | 50 ++++++++++++-- 2 files changed, 97 insertions(+), 22 deletions(-) diff --git a/vertical-pod-autoscaler/pkg/admission-controller/logic/recommendation_provider.go b/vertical-pod-autoscaler/pkg/admission-controller/logic/recommendation_provider.go index 3742fb0814ef..ec1d490a96d8 100644 --- a/vertical-pod-autoscaler/pkg/admission-controller/logic/recommendation_provider.go +++ b/vertical-pod-autoscaler/pkg/admission-controller/logic/recommendation_provider.go @@ -17,9 +17,12 @@ limitations under the License. package logic import ( + "fmt" "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/labels" + "math" + "math/big" vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta2" vpa_lister "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/client/listers/autoscaling.k8s.io/v1beta2" @@ -61,21 +64,38 @@ func NewRecommendationProvider(vpaLister vpa_lister.VerticalPodAutoscalerLister, } } -func limitMatchesRequest(limit, request *resource.Quantity) bool { - // Limit not set, it cannot match the request. - if limit == nil || limit.Value() == 0 { - return false +func getProportionalLimit(originalLimit, originalRequest, recommendedRequest *resource.Quantity) (limit *resource.Quantity, capped bool) { + // originalLimit not set, don't set limit. + if originalLimit == nil || originalLimit.Value() == 0 { + return nil, false } - // Limit set but request not set - K8s will treat the pod as if they were equal. - if request == nil || request.Value() == 0 { - return true + // originalLimit set but originalRequest not set - K8s will treat the pod as if they were equal, + // recommend limit equal to request + if originalRequest == nil || originalRequest.Value() == 0 { + result := *recommendedRequest + return &result, false } - // Limit and request are set. Check if they are equal. - return request.Value() == limit.Value() + // originalLimit and originalRequest are set. If they are equal recommend limit equal to request. + if originalRequest.MilliValue() == originalLimit.MilliValue() { + result := *recommendedRequest + return &result, false + } + + // Input and output milli values should fit in int64 but intermediate values might be bigger. + originalMilliRequest := big.NewInt(originalRequest.MilliValue()) + originalMilliLimit := big.NewInt(originalLimit.MilliValue()) + recommendedMilliRequest := big.NewInt(recommendedRequest.MilliValue()) + var recommendedMilliLimit big.Int + recommendedMilliLimit.Mul(recommendedMilliRequest, originalMilliLimit) + recommendedMilliLimit.Div(&recommendedMilliLimit, originalMilliRequest) + if recommendedMilliLimit.IsInt64() { + return resource.NewMilliQuantity(recommendedMilliLimit.Int64(), recommendedRequest.Format), false + } + return resource.NewMilliQuantity(math.MaxInt64, recommendedRequest.Format), true } -// getContainersResources returns the recommended resources for each container in the given pod in the same order they are specified in the pod.Spec. -func getContainersResources(pod *v1.Pod, podRecommendation vpa_types.RecommendedPodResources) []ContainerResources { +// GetContainersResources returns the recommended resources for each container in the given pod in the same order they are specified in the pod.Spec. 
+func GetContainersResources(pod *v1.Pod, podRecommendation vpa_types.RecommendedPodResources, annotations vpa_api_util.ContainerToAnnotationsMap) []ContainerResources { resources := make([]ContainerResources, len(pod.Spec.Containers)) for i, container := range pod.Spec.Containers { resources[i] = newContainerResources() @@ -87,12 +107,27 @@ func getContainersResources(pod *v1.Pod, podRecommendation vpa_types.Recommended } resources[i].Requests = recommendation.Target - if limitMatchesRequest(container.Resources.Limits.Cpu(), container.Resources.Requests.Cpu()) { - resources[i].Limits[v1.ResourceCPU] = *resources[i].Requests.Cpu() + cpuLimit, capped := getProportionalLimit(container.Resources.Limits.Cpu(), container.Resources.Requests.Cpu(), resources[i].Requests.Cpu()) + if cpuLimit != nil { + resources[i].Limits[v1.ResourceCPU] = *cpuLimit } - - if limitMatchesRequest(container.Resources.Limits.Memory(), container.Resources.Requests.Memory()) { - resources[i].Limits[v1.ResourceMemory] = *resources[i].Requests.Memory() + if capped { + annotations[container.Name] = append( + annotations[container.Name], + fmt.Sprintf( + "Failed to keep CPU limit to request proportion of %d to %d with recommended request of %d milliCPU; doesn't fit in int64. Capping limit to MaxInt64", + container.Resources.Limits.Cpu().MilliValue(), container.Resources.Requests.Cpu().MilliValue(), resources[i].Requests.Cpu().MilliValue())) + } + memLimit, capped := getProportionalLimit(container.Resources.Limits.Memory(), container.Resources.Requests.Memory(), resources[i].Requests.Memory()) + if memLimit != nil { + resources[i].Limits[v1.ResourceMemory] = *memLimit + } + if capped { + annotations[container.Name] = append( + annotations[container.Name], + fmt.Sprintf( + "Failed to keep memory limit to request proportion of %d to %d with recommended request of %d milliBytes; doesn't fit in int64. Capping limit to MaxInt64", + container.Resources.Limits.Memory().MilliValue(), container.Resources.Requests.Memory().MilliValue(), resources[i].Requests.Memory().MilliValue())) } } return resources @@ -148,6 +183,6 @@ func (p *recommendationProvider) GetContainersResourcesForPod(pod *v1.Pod) ([]Co return nil, annotations, vpaConfig.Name, err } } - containerResources := getContainersResources(pod, *recommendedPodResources) + containerResources := GetContainersResources(pod, *recommendedPodResources, annotations) return containerResources, annotations, vpaConfig.Name, nil } diff --git a/vertical-pod-autoscaler/pkg/admission-controller/logic/recommendation_provider_test.go b/vertical-pod-autoscaler/pkg/admission-controller/logic/recommendation_provider_test.go index 5a71bab3a764..fa0555d867a8 100644 --- a/vertical-pod-autoscaler/pkg/admission-controller/logic/recommendation_provider_test.go +++ b/vertical-pod-autoscaler/pkg/admission-controller/logic/recommendation_provider_test.go @@ -18,6 +18,7 @@ package logic import ( "fmt" + "math" "testing" "github.com/golang/mock/gomock" @@ -67,10 +68,22 @@ func TestUpdateResourceRequests(t *testing.T) { limitsMatchRequestsContainer := test.Container().WithName(containerName). WithCPURequest(resource.MustParse("2")).WithCPULimit(resource.MustParse("2")). - WithMemLimit(resource.MustParse("200Mi")).WithMemRequest(resource.MustParse("200Mi")).Get() + WithMemRequest(resource.MustParse("200Mi")).WithMemLimit(resource.MustParse("200Mi")).Get() limitsMatchRequestsPod := test.Pod().WithName("test_initialized"). 
AddContainer(limitsMatchRequestsContainer).WithLabels(labels).Get() + containerWithDoubleLimit := test.Container().WithName(containerName). + WithCPURequest(resource.MustParse("1")).WithCPULimit(resource.MustParse("2")). + WithMemRequest(resource.MustParse("100Mi")).WithMemLimit(resource.MustParse("200Mi")).Get() + podWithDoubleLimit := test.Pod().WithName("test_initialized"). + AddContainer(containerWithDoubleLimit).WithLabels(labels).Get() + + containerWithTenfoldLimit := test.Container().WithName(containerName). + WithCPURequest(resource.MustParse("1")).WithCPULimit(resource.MustParse("10")). + WithMemRequest(resource.MustParse("100Mi")).WithMemLimit(resource.MustParse("1000Mi")).Get() + podWithTenfoldLimit := test.Pod().WithName("test_initialized"). + AddContainer(containerWithTenfoldLimit).WithLabels(labels).Get() + limitsNoRequestsContainer := test.Container().WithName(containerName). WithCPULimit(resource.MustParse("2")).WithMemLimit(resource.MustParse("200Mi")).Get() limitsNoRequestsPod := test.Pod().WithName("test_initialized"). @@ -80,8 +93,8 @@ func TestUpdateResourceRequests(t *testing.T) { targetBelowMinVPA := vpaBuilder.WithTarget("3", "150Mi").WithMinAllowed("4", "300Mi").WithMaxAllowed("5", "1Gi").Get() targetAboveMaxVPA := vpaBuilder.WithTarget("7", "2Gi").WithMinAllowed("4", "300Mi").WithMaxAllowed("5", "1Gi").Get() - vpaWithHighMemory := vpaBuilder.WithTarget("2", "1000Mi").WithMaxAllowed("3", "3Gi").Get() + vpaWithExabyteRecommendation := vpaBuilder.WithTarget("1Ei", "1Ei").WithMaxAllowed("1Ei", "1Ei").Get() vpaWithEmptyRecommendation := vpaBuilder.Get() vpaWithEmptyRecommendation.Status.Recommendation = &vpa_types.RecommendedPodResources{} @@ -213,9 +226,36 @@ func TestUpdateResourceRequests(t *testing.T) { expectedMemLimit: mustParseResourcePointer("200Mi"), labelSelector: "app = testingApp", }, + { + name: "proportional limit", + pod: podWithDoubleLimit, + vpas: []*vpa_types.VerticalPodAutoscaler{vpa}, + expectedAction: true, + expectedCPU: resource.MustParse("2"), + expectedMem: resource.MustParse("200Mi"), + expectedCPULimit: mustParseResourcePointer("4"), + expectedMemLimit: mustParseResourcePointer("400Mi"), + labelSelector: "app = testingApp", + }, + { + name: "limit over int64", + pod: podWithTenfoldLimit, + vpas: []*vpa_types.VerticalPodAutoscaler{vpaWithExabyteRecommendation}, + expectedAction: true, + expectedCPU: resource.MustParse("1Ei"), + expectedMem: resource.MustParse("1Ei"), + expectedCPULimit: resource.NewMilliQuantity(math.MaxInt64, resource.DecimalExponent), + expectedMemLimit: resource.NewMilliQuantity(math.MaxInt64, resource.DecimalExponent), + labelSelector: "app = testingApp", + annotations: vpa_api_util.ContainerToAnnotationsMap{ + containerName: []string{ + "Failed to keep CPU limit to request proportion of 10000 to 1000 with recommended request of -9223372036854775808 milliCPU; doesn't fit in int64. Capping limit to MaxInt64", + "Failed to keep memory limit to request proportion of 1048576000000 to 104857600000 with recommended request of -9223372036854775808 milliBytes; doesn't fit in int64. 
Capping limit to MaxInt64",
+				},
+			},
+		},
 	}
 	for _, tc := range testCases {
-		t.Run(fmt.Sprintf(tc.name), func(t *testing.T) {
 			ctrl := gomock.NewController(t)
 			defer ctrl.Finish()
 
@@ -256,7 +296,7 @@ func TestUpdateResourceRequests(t *testing.T) {
 				assert.False(t, cpuLimitPresent, "expected no cpu limit, got %s", cpuLimit.String())
 			} else {
 				if assert.True(t, cpuLimitPresent, "expected cpu limit, but it's missing") {
-					assert.Equal(t, *tc.expectedCPULimit, cpuLimit, "cpu limit doesn't match")
+					assert.Equal(t, tc.expectedCPULimit.MilliValue(), cpuLimit.MilliValue(), "cpu limit doesn't match")
 				}
 			}
 
@@ -265,7 +305,7 @@ func TestUpdateResourceRequests(t *testing.T) {
 				assert.False(t, memLimitPresent, "expected no memory limit, got %s", memLimit.String())
 			} else {
 				if assert.True(t, memLimitPresent, "expected cpu limit, but it's missing") {
-					assert.Equal(t, *tc.expectedMemLimit, memLimit, "memory limit doesn't match")
+					assert.Equal(t, tc.expectedMemLimit.MilliValue(), memLimit.MilliValue(), "memory limit doesn't match")
 				}
 			}
 
From 877002bdc52712aca304ee31955b80fdd82c2252 Mon Sep 17 00:00:00 2001
From: Joachim Bartosik
Date: Thu, 9 May 2019 16:30:42 +0200
Subject: [PATCH 11/12] Replace test for capping recommendation

---
 .../e2e/v1beta1/admission_controller.go       | 21 +++----
 vertical-pod-autoscaler/e2e/v1beta1/common.go | 13 +++++
 .../e2e/v1beta2/admission_controller.go       | 51 +++----------------
 3 files changed, 28 insertions(+), 57 deletions(-)

diff --git a/vertical-pod-autoscaler/e2e/v1beta1/admission_controller.go b/vertical-pod-autoscaler/e2e/v1beta1/admission_controller.go
index dbbb4230d335..a250f3fdd5bb 100644
--- a/vertical-pod-autoscaler/e2e/v1beta1/admission_controller.go
+++ b/vertical-pod-autoscaler/e2e/v1beta1/admission_controller.go
@@ -59,17 +59,11 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", func() {
 		}
 	})
 
-	ginkgo.It("caps request to limit set by the user", func() {
-		d := NewHamsterDeploymentWithResources(f, ParseQuantityOrDie("100m") /*cpu*/, ParseQuantityOrDie("100Mi") /*memory*/)
-		d.Spec.Template.Spec.Containers[0].Resources.Limits = apiv1.ResourceList{
-			apiv1.ResourceCPU:    ParseQuantityOrDie("222m"),
-			apiv1.ResourceMemory: ParseQuantityOrDie("123Mi"),
-		}
+	ginkgo.It("keeps limits equal to request", func() {
+		d := NewHamsterDeploymentWithGuaranteedResources(f, ParseQuantityOrDie("100m") /*cpu*/, ParseQuantityOrDie("100Mi") /*memory*/)
 
 		ginkgo.By("Setting up a VPA CRD")
-		vpaCRD := NewVPA(f, "hamster-vpa", &metav1.LabelSelector{
-			MatchLabels: d.Spec.Template.Labels,
-		})
+		vpaCRD := NewVPA(f, "hamster-vpa", hamsterTargetRef)
 		vpaCRD.Status.Recommendation = &vpa_types.RecommendedPodResources{
 			ContainerRecommendations: []vpa_types.RecommendedContainerResources{{
 				ContainerName: "hamster",
@@ -85,11 +79,12 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", func() {
 		podList := startDeploymentPods(f, d)
 
 		// Originally Pods had 100m CPU, 100Mi of memory, but admission controller
-		// should change it to 222m CPU and 123Mi of memory (as this is the recommendation
-		// capped to the limit set by the user)
+		// should change it to 250m CPU and 200Mi of memory. Limits and requests should stay equal.
for _, pod := range podList.Items { - gomega.Expect(pod.Spec.Containers[0].Resources.Requests[apiv1.ResourceCPU]).To(gomega.Equal(ParseQuantityOrDie("222m"))) - gomega.Expect(pod.Spec.Containers[0].Resources.Requests[apiv1.ResourceMemory]).To(gomega.Equal(ParseQuantityOrDie("123Mi"))) + gomega.Expect(pod.Spec.Containers[0].Resources.Requests[apiv1.ResourceCPU]).To(gomega.Equal(ParseQuantityOrDie("250m"))) + gomega.Expect(pod.Spec.Containers[0].Resources.Requests[apiv1.ResourceMemory]).To(gomega.Equal(ParseQuantityOrDie("200Mi"))) + gomega.Expect(pod.Spec.Containers[0].Resources.Limits[apiv1.ResourceCPU]).To(gomega.Equal(ParseQuantityOrDie("250m"))) + gomega.Expect(pod.Spec.Containers[0].Resources.Limits[apiv1.ResourceMemory]).To(gomega.Equal(ParseQuantityOrDie("200Mi"))) } }) diff --git a/vertical-pod-autoscaler/e2e/v1beta1/common.go b/vertical-pod-autoscaler/e2e/v1beta1/common.go index 902a3d6ae40b..98db1b4d0e08 100644 --- a/vertical-pod-autoscaler/e2e/v1beta1/common.go +++ b/vertical-pod-autoscaler/e2e/v1beta1/common.go @@ -123,6 +123,19 @@ func NewHamsterDeploymentWithResources(f *framework.Framework, cpuQuantity, memo return d } +// NewHamsterDeploymentWithGuaranteedResources creates a simple hamster deployment with specific +// resource requests for e2e test purposes. Since the container in the pod specifies resource limits +// but not resource requests K8s will set requests equal to limits and the pod will have guaranteed +// QoS class. +func NewHamsterDeploymentWithGuaranteedResources(f *framework.Framework, cpuQuantity, memoryQuantity resource.Quantity) *appsv1.Deployment { + d := NewHamsterDeployment(f) + d.Spec.Template.Spec.Containers[0].Resources.Limits = apiv1.ResourceList{ + apiv1.ResourceCPU: cpuQuantity, + apiv1.ResourceMemory: memoryQuantity, + } + return d +} + // GetHamsterPods returns running hamster pods (matched by hamsterLabels) func GetHamsterPods(f *framework.Framework) (*apiv1.PodList, error) { label := labels.SelectorFromSet(labels.Set(hamsterLabels)) diff --git a/vertical-pod-autoscaler/e2e/v1beta2/admission_controller.go b/vertical-pod-autoscaler/e2e/v1beta2/admission_controller.go index f3bce9c7b393..cb7f4d92074f 100644 --- a/vertical-pod-autoscaler/e2e/v1beta2/admission_controller.go +++ b/vertical-pod-autoscaler/e2e/v1beta2/admission_controller.go @@ -56,12 +56,8 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", func() { } }) - ginkgo.It("caps request to limit set by the user", func() { - d := NewHamsterDeploymentWithResources(f, ParseQuantityOrDie("100m") /*cpu*/, ParseQuantityOrDie("100Mi") /*memory*/) - d.Spec.Template.Spec.Containers[0].Resources.Limits = apiv1.ResourceList{ - apiv1.ResourceCPU: ParseQuantityOrDie("222m"), - apiv1.ResourceMemory: ParseQuantityOrDie("123Mi"), - } + ginkgo.It("keeps limits equal to request", func() { + d := NewHamsterDeploymentWithGuaranteedResources(f, ParseQuantityOrDie("100m") /*cpu*/, ParseQuantityOrDie("100Mi") /*memory*/) ginkgo.By("Setting up a VPA CRD") vpaCRD := NewVPA(f, "hamster-vpa", hamsterTargetRef) @@ -80,11 +76,12 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", func() { podList := startDeploymentPods(f, d) // Originally Pods had 100m CPU, 100Mi of memory, but admission controller - // should change it to 222m CPU and 123Mi of memory (as this is the recommendation - // capped to the limit set by the user) + // should change it to 250m CPU and 200Mi of memory. Limits and requests should stay equal. 
 		for _, pod := range podList.Items {
-			gomega.Expect(pod.Spec.Containers[0].Resources.Requests[apiv1.ResourceCPU]).To(gomega.Equal(ParseQuantityOrDie("222m")))
-			gomega.Expect(pod.Spec.Containers[0].Resources.Requests[apiv1.ResourceMemory]).To(gomega.Equal(ParseQuantityOrDie("123Mi")))
+			gomega.Expect(pod.Spec.Containers[0].Resources.Requests[apiv1.ResourceCPU]).To(gomega.Equal(ParseQuantityOrDie("250m")))
+			gomega.Expect(pod.Spec.Containers[0].Resources.Requests[apiv1.ResourceMemory]).To(gomega.Equal(ParseQuantityOrDie("200Mi")))
+			gomega.Expect(pod.Spec.Containers[0].Resources.Limits[apiv1.ResourceCPU]).To(gomega.Equal(ParseQuantityOrDie("250m")))
+			gomega.Expect(pod.Spec.Containers[0].Resources.Limits[apiv1.ResourceMemory]).To(gomega.Equal(ParseQuantityOrDie("200Mi")))
 		}
 	})
 
@@ -195,40 +192,6 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", func() {
 		}
 	})
 
-	ginkgo.It("keeps limits equal to request", func() {
-		d := NewHamsterDeploymentWithGuaranteedResources(f, ParseQuantityOrDie("100m") /*cpu*/, ParseQuantityOrDie("100Mi") /*memory*/)
-
-		ginkgo.By("Setting up a VPA CRD")
-		vpaCRD := NewVPA(f, "hamster-vpa", hamsterTargetRef)
-		vpaCRD.Status.Recommendation = &vpa_types.RecommendedPodResources{
-			ContainerRecommendations: []vpa_types.RecommendedContainerResources{{
-				ContainerName: "hamster",
-				Target: apiv1.ResourceList{
-					apiv1.ResourceCPU:    ParseQuantityOrDie("250m"),
-					apiv1.ResourceMemory: ParseQuantityOrDie("200Mi"),
-				},
-			}},
-		}
-		vpaCRD.Spec.ResourcePolicy = &vpa_types.PodResourcePolicy{
-			ContainerPolicies: []vpa_types.ContainerResourcePolicy{{
-				ContainerName: "hamster",
-			}},
-		}
-		InstallVPA(f, vpaCRD)
-
-		ginkgo.By("Setting up a hamster deployment")
-		podList := startDeploymentPods(f, d)
-
-		// Originally Pods had 100m CPU, 100Mi of memory, but admission controller
-		// should change it to 250m CPU and 200Mi of memory. Limits and requests should stay equal
-		for _, pod := range podList.Items {
-			gomega.Expect(pod.Spec.Containers[0].Resources.Requests[apiv1.ResourceCPU]).To(gomega.Equal(ParseQuantityOrDie("250m")))
-			gomega.Expect(pod.Spec.Containers[0].Resources.Requests[apiv1.ResourceMemory]).To(gomega.Equal(ParseQuantityOrDie("200Mi")))
-			gomega.Expect(pod.Spec.Containers[0].Resources.Limits[apiv1.ResourceCPU]).To(gomega.Equal(ParseQuantityOrDie("250m")))
-			gomega.Expect(pod.Spec.Containers[0].Resources.Limits[apiv1.ResourceMemory]).To(gomega.Equal(ParseQuantityOrDie("200Mi")))
-		}
-	})
-
 })
 
 func startDeploymentPods(f *framework.Framework, deployment *appsv1.Deployment) *apiv1.PodList {

From 14a589b1223caf67be71b2a0afd5cd9197a4e3dd Mon Sep 17 00:00:00 2001
From: Joachim Bartosik
Date: Thu, 9 May 2019 16:53:07 +0200
Subject: [PATCH 12/12] Drop an uninteresting test

I think it's useful to reverse conditions in the tests when dropping
capping to limits, to make sure the relevant code is gone. I don't think
it's a useful test to keep around.
--- .../pkg/utils/vpa/capping_test.go | 41 ------------------- 1 file changed, 41 deletions(-) diff --git a/vertical-pod-autoscaler/pkg/utils/vpa/capping_test.go b/vertical-pod-autoscaler/pkg/utils/vpa/capping_test.go index e0ad3d342a83..ad80dff1f5dc 100644 --- a/vertical-pod-autoscaler/pkg/utils/vpa/capping_test.go +++ b/vertical-pod-autoscaler/pkg/utils/vpa/capping_test.go @@ -47,47 +47,6 @@ func TestRecommendationNotAvailable(t *testing.T) { assert.Empty(t, res.ContainerRecommendations) } -func TestRecommendationNotCappedToLimit(t *testing.T) { - pod := test.Pod().WithName("pod1").AddContainer(test.BuildTestContainer("ctr-name", "", "")).Get() - pod.Spec.Containers[0].Resources.Limits = - apiv1.ResourceList{ - apiv1.ResourceCPU: *resource.NewScaledQuantity(3, 1), - apiv1.ResourceMemory: *resource.NewScaledQuantity(7000, 1), - } - - podRecommendation := vpa_types.RecommendedPodResources{ - ContainerRecommendations: []vpa_types.RecommendedContainerResources{ - { - ContainerName: "ctr-name", - Target: apiv1.ResourceList{ - apiv1.ResourceCPU: *resource.NewScaledQuantity(10, 1), - apiv1.ResourceMemory: *resource.NewScaledQuantity(8000, 1), - }, - UpperBound: apiv1.ResourceList{ - apiv1.ResourceCPU: *resource.NewScaledQuantity(2, 1), - apiv1.ResourceMemory: *resource.NewScaledQuantity(9000, 1), - }, - }, - }, - } - policy := vpa_types.PodResourcePolicy{} - - res, annotations, err := NewCappingRecommendationProcessor().Apply(&podRecommendation, &policy, nil, pod) - assert.Nil(t, err) - assert.Equal(t, apiv1.ResourceList{ - apiv1.ResourceCPU: *resource.NewScaledQuantity(10, 1), - apiv1.ResourceMemory: *resource.NewScaledQuantity(8000, 1), - }, res.ContainerRecommendations[0].Target) - - assert.NotContains(t, annotations, "ctr-name") - assert.NotContains(t, annotations["ctr-name"], "memory capped to container limit") - - assert.Equal(t, apiv1.ResourceList{ - apiv1.ResourceCPU: *resource.NewScaledQuantity(2, 1), - apiv1.ResourceMemory: *resource.NewScaledQuantity(9000, 1), - }, res.ContainerRecommendations[0].UpperBound) -} - func TestRecommendationCappedToMinMaxPolicy(t *testing.T) { pod := test.Pod().WithName("pod1").AddContainer(test.BuildTestContainer("ctr-name", "", "")).Get() podRecommendation := vpa_types.RecommendedPodResources{