diff --git a/vertical-pod-autoscaler/e2e/v1beta1/admission_controller.go b/vertical-pod-autoscaler/e2e/v1beta1/admission_controller.go
index dbbb4230d335..a250f3fdd5bb 100644
--- a/vertical-pod-autoscaler/e2e/v1beta1/admission_controller.go
+++ b/vertical-pod-autoscaler/e2e/v1beta1/admission_controller.go
@@ -59,17 +59,11 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", func() {
 		}
 	})
 
-	ginkgo.It("caps request to limit set by the user", func() {
-		d := NewHamsterDeploymentWithResources(f, ParseQuantityOrDie("100m") /*cpu*/, ParseQuantityOrDie("100Mi") /*memory*/)
-		d.Spec.Template.Spec.Containers[0].Resources.Limits = apiv1.ResourceList{
-			apiv1.ResourceCPU:    ParseQuantityOrDie("222m"),
-			apiv1.ResourceMemory: ParseQuantityOrDie("123Mi"),
-		}
+	ginkgo.It("keeps limits equal to request", func() {
+		d := NewHamsterDeploymentWithGuaranteedResources(f, ParseQuantityOrDie("100m") /*cpu*/, ParseQuantityOrDie("100Mi") /*memory*/)
 
 		ginkgo.By("Setting up a VPA CRD")
-		vpaCRD := NewVPA(f, "hamster-vpa", &metav1.LabelSelector{
-			MatchLabels: d.Spec.Template.Labels,
-		})
+		vpaCRD := NewVPA(f, "hamster-vpa", hamsterTargetRef)
 		vpaCRD.Status.Recommendation = &vpa_types.RecommendedPodResources{
 			ContainerRecommendations: []vpa_types.RecommendedContainerResources{{
 				ContainerName: "hamster",
@@ -85,11 +79,12 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", func() {
 		podList := startDeploymentPods(f, d)
 
 		// Originally Pods had 100m CPU, 100Mi of memory, but admission controller
-		// should change it to 222m CPU and 123Mi of memory (as this is the recommendation
-		// capped to the limit set by the user)
+		// should change it to 250m CPU and 200Mi of memory. Limits and requests should stay equal.
 		for _, pod := range podList.Items {
-			gomega.Expect(pod.Spec.Containers[0].Resources.Requests[apiv1.ResourceCPU]).To(gomega.Equal(ParseQuantityOrDie("222m")))
-			gomega.Expect(pod.Spec.Containers[0].Resources.Requests[apiv1.ResourceMemory]).To(gomega.Equal(ParseQuantityOrDie("123Mi")))
+			gomega.Expect(pod.Spec.Containers[0].Resources.Requests[apiv1.ResourceCPU]).To(gomega.Equal(ParseQuantityOrDie("250m")))
+			gomega.Expect(pod.Spec.Containers[0].Resources.Requests[apiv1.ResourceMemory]).To(gomega.Equal(ParseQuantityOrDie("200Mi")))
+			gomega.Expect(pod.Spec.Containers[0].Resources.Limits[apiv1.ResourceCPU]).To(gomega.Equal(ParseQuantityOrDie("250m")))
+			gomega.Expect(pod.Spec.Containers[0].Resources.Limits[apiv1.ResourceMemory]).To(gomega.Equal(ParseQuantityOrDie("200Mi")))
 		}
 	})
 
diff --git a/vertical-pod-autoscaler/e2e/v1beta1/common.go b/vertical-pod-autoscaler/e2e/v1beta1/common.go
index 902a3d6ae40b..98db1b4d0e08 100644
--- a/vertical-pod-autoscaler/e2e/v1beta1/common.go
+++ b/vertical-pod-autoscaler/e2e/v1beta1/common.go
@@ -123,6 +123,19 @@ func NewHamsterDeploymentWithResources(f *framework.Framework, cpuQuantity, memo
 	return d
 }
 
+// NewHamsterDeploymentWithGuaranteedResources creates a simple hamster deployment with specific
+// resource limits for e2e test purposes. Since the container in the pod specifies resource limits
+// but not resource requests, K8s will set requests equal to limits and the pod will have the
+// Guaranteed QoS class.
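+//
+// For example (illustrative values, mirroring the e2e test above):
+//
+//	d := NewHamsterDeploymentWithGuaranteedResources(f, ParseQuantityOrDie("100m"), ParseQuantityOrDie("100Mi"))
+//
+// gives a container with limits of 100m CPU and 100Mi memory and no explicit requests, so the
+// API server defaults the requests to those same values.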
+func NewHamsterDeploymentWithGuaranteedResources(f *framework.Framework, cpuQuantity, memoryQuantity resource.Quantity) *appsv1.Deployment {
+	d := NewHamsterDeployment(f)
+	d.Spec.Template.Spec.Containers[0].Resources.Limits = apiv1.ResourceList{
+		apiv1.ResourceCPU:    cpuQuantity,
+		apiv1.ResourceMemory: memoryQuantity,
+	}
+	return d
+}
+
 // GetHamsterPods returns running hamster pods (matched by hamsterLabels)
 func GetHamsterPods(f *framework.Framework) (*apiv1.PodList, error) {
 	label := labels.SelectorFromSet(labels.Set(hamsterLabels))
diff --git a/vertical-pod-autoscaler/e2e/v1beta2/admission_controller.go b/vertical-pod-autoscaler/e2e/v1beta2/admission_controller.go
index 9d964efb36ea..cb7f4d92074f 100644
--- a/vertical-pod-autoscaler/e2e/v1beta2/admission_controller.go
+++ b/vertical-pod-autoscaler/e2e/v1beta2/admission_controller.go
@@ -56,12 +56,8 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", func() {
 		}
 	})
 
-	ginkgo.It("caps request to limit set by the user", func() {
-		d := NewHamsterDeploymentWithResources(f, ParseQuantityOrDie("100m") /*cpu*/, ParseQuantityOrDie("100Mi") /*memory*/)
-		d.Spec.Template.Spec.Containers[0].Resources.Limits = apiv1.ResourceList{
-			apiv1.ResourceCPU:    ParseQuantityOrDie("222m"),
-			apiv1.ResourceMemory: ParseQuantityOrDie("123Mi"),
-		}
+	ginkgo.It("keeps limits equal to request", func() {
+		d := NewHamsterDeploymentWithGuaranteedResources(f, ParseQuantityOrDie("100m") /*cpu*/, ParseQuantityOrDie("100Mi") /*memory*/)
 
 		ginkgo.By("Setting up a VPA CRD")
 		vpaCRD := NewVPA(f, "hamster-vpa", hamsterTargetRef)
@@ -80,11 +76,12 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", func() {
 		podList := startDeploymentPods(f, d)
 
 		// Originally Pods had 100m CPU, 100Mi of memory, but admission controller
-		// should change it to 222m CPU and 123Mi of memory (as this is the recommendation
-		// capped to the limit set by the user)
+		// should change it to 250m CPU and 200Mi of memory. Limits and requests should stay equal.
 		for _, pod := range podList.Items {
-			gomega.Expect(pod.Spec.Containers[0].Resources.Requests[apiv1.ResourceCPU]).To(gomega.Equal(ParseQuantityOrDie("222m")))
-			gomega.Expect(pod.Spec.Containers[0].Resources.Requests[apiv1.ResourceMemory]).To(gomega.Equal(ParseQuantityOrDie("123Mi")))
+			gomega.Expect(pod.Spec.Containers[0].Resources.Requests[apiv1.ResourceCPU]).To(gomega.Equal(ParseQuantityOrDie("250m")))
+			gomega.Expect(pod.Spec.Containers[0].Resources.Requests[apiv1.ResourceMemory]).To(gomega.Equal(ParseQuantityOrDie("200Mi")))
+			gomega.Expect(pod.Spec.Containers[0].Resources.Limits[apiv1.ResourceCPU]).To(gomega.Equal(ParseQuantityOrDie("250m")))
+			gomega.Expect(pod.Spec.Containers[0].Resources.Limits[apiv1.ResourceMemory]).To(gomega.Equal(ParseQuantityOrDie("200Mi")))
 		}
 	})
 
diff --git a/vertical-pod-autoscaler/e2e/v1beta2/common.go b/vertical-pod-autoscaler/e2e/v1beta2/common.go
index 7af01f820e75..93d33906e311 100644
--- a/vertical-pod-autoscaler/e2e/v1beta2/common.go
+++ b/vertical-pod-autoscaler/e2e/v1beta2/common.go
@@ -130,6 +130,19 @@ func NewHamsterDeploymentWithResources(f *framework.Framework, cpuQuantity, memo
 	return d
 }
 
+// NewHamsterDeploymentWithGuaranteedResources creates a simple hamster deployment with specific
+// resource limits for e2e test purposes. Since the container in the pod specifies resource limits
+// but not resource requests, K8s will set requests equal to limits and the pod will have the
+// Guaranteed QoS class.
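+//
+// For example (illustrative values, mirroring the e2e test above):
+//
+//	d := NewHamsterDeploymentWithGuaranteedResources(f, ParseQuantityOrDie("100m"), ParseQuantityOrDie("100Mi"))
+//
+// gives a container with limits of 100m CPU and 100Mi memory and no explicit requests, so the
+// API server defaults the requests to those same values.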
+func NewHamsterDeploymentWithGuaranteedResources(f *framework.Framework, cpuQuantity, memoryQuantity resource.Quantity) *appsv1.Deployment { + d := NewHamsterDeployment(f) + d.Spec.Template.Spec.Containers[0].Resources.Limits = apiv1.ResourceList{ + apiv1.ResourceCPU: cpuQuantity, + apiv1.ResourceMemory: memoryQuantity, + } + return d +} + // GetHamsterPods returns running hamster pods (matched by hamsterLabels) func GetHamsterPods(f *framework.Framework) (*apiv1.PodList, error) { label := labels.SelectorFromSet(labels.Set(hamsterLabels)) diff --git a/vertical-pod-autoscaler/pkg/admission-controller/logic/recommendation_provider.go b/vertical-pod-autoscaler/pkg/admission-controller/logic/recommendation_provider.go index 68447ac206c8..ec1d490a96d8 100644 --- a/vertical-pod-autoscaler/pkg/admission-controller/logic/recommendation_provider.go +++ b/vertical-pod-autoscaler/pkg/admission-controller/logic/recommendation_provider.go @@ -17,8 +17,12 @@ limitations under the License. package logic import ( - v1 "k8s.io/api/core/v1" + "fmt" + "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/labels" + "math" + "math/big" vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta2" vpa_lister "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/client/listers/autoscaling.k8s.io/v1beta2" @@ -29,11 +33,15 @@ import ( // ContainerResources holds resources request for container type ContainerResources struct { + Limits v1.ResourceList Requests v1.ResourceList } func newContainerResources() ContainerResources { - return ContainerResources{Requests: v1.ResourceList{}} + return ContainerResources{ + Requests: v1.ResourceList{}, + Limits: v1.ResourceList{}, + } } // RecommendationProvider gets current recommendation, annotations and vpaName for the given pod. @@ -56,8 +64,38 @@ func NewRecommendationProvider(vpaLister vpa_lister.VerticalPodAutoscalerLister, } } -// getContainersResources returns the recommended resources for each container in the given pod in the same order they are specified in the pod.Spec. -func getContainersResources(pod *v1.Pod, podRecommendation vpa_types.RecommendedPodResources) []ContainerResources { +func getProportionalLimit(originalLimit, originalRequest, recommendedRequest *resource.Quantity) (limit *resource.Quantity, capped bool) { + // originalLimit not set, don't set limit. + if originalLimit == nil || originalLimit.Value() == 0 { + return nil, false + } + // originalLimit set but originalRequest not set - K8s will treat the pod as if they were equal, + // recommend limit equal to request + if originalRequest == nil || originalRequest.Value() == 0 { + result := *recommendedRequest + return &result, false + } + // originalLimit and originalRequest are set. If they are equal recommend limit equal to request. + if originalRequest.MilliValue() == originalLimit.MilliValue() { + result := *recommendedRequest + return &result, false + } + + // Input and output milli values should fit in int64 but intermediate values might be bigger. 
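+	// For example (illustrative numbers): with an original request of 1 CPU, an original limit
+	// of 10 CPU and a recommended request near 2^60 milli-CPU, each factor fits in int64, but
+	// the intermediate product recommendedMilliRequest * originalMilliLimit does not. The ratio
+	// is therefore computed with big.Int and only the final result is range-checked below.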
+ originalMilliRequest := big.NewInt(originalRequest.MilliValue()) + originalMilliLimit := big.NewInt(originalLimit.MilliValue()) + recommendedMilliRequest := big.NewInt(recommendedRequest.MilliValue()) + var recommendedMilliLimit big.Int + recommendedMilliLimit.Mul(recommendedMilliRequest, originalMilliLimit) + recommendedMilliLimit.Div(&recommendedMilliLimit, originalMilliRequest) + if recommendedMilliLimit.IsInt64() { + return resource.NewMilliQuantity(recommendedMilliLimit.Int64(), recommendedRequest.Format), false + } + return resource.NewMilliQuantity(math.MaxInt64, recommendedRequest.Format), true +} + +// GetContainersResources returns the recommended resources for each container in the given pod in the same order they are specified in the pod.Spec. +func GetContainersResources(pod *v1.Pod, podRecommendation vpa_types.RecommendedPodResources, annotations vpa_api_util.ContainerToAnnotationsMap) []ContainerResources { resources := make([]ContainerResources, len(pod.Spec.Containers)) for i, container := range pod.Spec.Containers { resources[i] = newContainerResources() @@ -68,6 +106,29 @@ func getContainersResources(pod *v1.Pod, podRecommendation vpa_types.Recommended continue } resources[i].Requests = recommendation.Target + + cpuLimit, capped := getProportionalLimit(container.Resources.Limits.Cpu(), container.Resources.Requests.Cpu(), resources[i].Requests.Cpu()) + if cpuLimit != nil { + resources[i].Limits[v1.ResourceCPU] = *cpuLimit + } + if capped { + annotations[container.Name] = append( + annotations[container.Name], + fmt.Sprintf( + "Failed to keep CPU limit to request proportion of %d to %d with recommended request of %d milliCPU; doesn't fit in int64. Capping limit to MaxInt64", + container.Resources.Limits.Cpu().MilliValue(), container.Resources.Requests.Cpu().MilliValue(), resources[i].Requests.Cpu().MilliValue())) + } + memLimit, capped := getProportionalLimit(container.Resources.Limits.Memory(), container.Resources.Requests.Memory(), resources[i].Requests.Memory()) + if memLimit != nil { + resources[i].Limits[v1.ResourceMemory] = *memLimit + } + if capped { + annotations[container.Name] = append( + annotations[container.Name], + fmt.Sprintf( + "Failed to keep memory limit to request proportion of %d to %d with recommended request of %d milliBytes; doesn't fit in int64. 
Capping limit to MaxInt64", + container.Resources.Limits.Memory().MilliValue(), container.Resources.Requests.Memory().MilliValue(), resources[i].Requests.Memory().MilliValue())) + } } return resources } @@ -122,6 +183,6 @@ func (p *recommendationProvider) GetContainersResourcesForPod(pod *v1.Pod) ([]Co return nil, annotations, vpaConfig.Name, err } } - containerResources := getContainersResources(pod, *recommendedPodResources) + containerResources := GetContainersResources(pod, *recommendedPodResources, annotations) return containerResources, annotations, vpaConfig.Name, nil } diff --git a/vertical-pod-autoscaler/pkg/admission-controller/logic/recommendation_provider_test.go b/vertical-pod-autoscaler/pkg/admission-controller/logic/recommendation_provider_test.go index 00d1b46e750f..fa0555d867a8 100644 --- a/vertical-pod-autoscaler/pkg/admission-controller/logic/recommendation_provider_test.go +++ b/vertical-pod-autoscaler/pkg/admission-controller/logic/recommendation_provider_test.go @@ -18,6 +18,7 @@ package logic import ( "fmt" + "math" "testing" "github.com/golang/mock/gomock" @@ -39,16 +40,12 @@ func parseLabelSelector(selector string) labels.Selector { return parsedSelector } +func mustParseResourcePointer(val string) *resource.Quantity { + q := resource.MustParse(val) + return &q +} + func TestUpdateResourceRequests(t *testing.T) { - type testCase struct { - pod *apiv1.Pod - vpas []*vpa_types.VerticalPodAutoscaler - expectedAction bool - expectedMem string - expectedCPU string - annotations vpa_api_util.ContainerToAnnotationsMap - labelSelector string - } containerName := "container1" vpaName := "vpa1" labels := map[string]string{"app": "testingApp"} @@ -60,107 +57,206 @@ func TestUpdateResourceRequests(t *testing.T) { WithMaxAllowed("3", "1Gi") vpa := vpaBuilder.Get() - uninitialized := test.Pod().WithName("test_uninitialized").AddContainer(test.BuildTestContainer(containerName, "", "")).Get() - uninitialized.ObjectMeta.Labels = labels + uninitialized := test.Pod().WithName("test_uninitialized"). + AddContainer(test.Container().WithName(containerName).Get()). + WithLabels(labels).Get() + + initializedContainer := test.Container().WithName(containerName). + WithCPURequest(resource.MustParse("1")).WithMemRequest(resource.MustParse("100Mi")).Get() + initialized := test.Pod().WithName("test_initialized"). + AddContainer(initializedContainer).WithLabels(labels).Get() - initialized := test.Pod().WithName("test_initialized").AddContainer(test.BuildTestContainer(containerName, "1", "100Mi")).Get() - initialized.ObjectMeta.Labels = labels + limitsMatchRequestsContainer := test.Container().WithName(containerName). + WithCPURequest(resource.MustParse("2")).WithCPULimit(resource.MustParse("2")). + WithMemRequest(resource.MustParse("200Mi")).WithMemLimit(resource.MustParse("200Mi")).Get() + limitsMatchRequestsPod := test.Pod().WithName("test_initialized"). + AddContainer(limitsMatchRequestsContainer).WithLabels(labels).Get() + + containerWithDoubleLimit := test.Container().WithName(containerName). + WithCPURequest(resource.MustParse("1")).WithCPULimit(resource.MustParse("2")). + WithMemRequest(resource.MustParse("100Mi")).WithMemLimit(resource.MustParse("200Mi")).Get() + podWithDoubleLimit := test.Pod().WithName("test_initialized"). + AddContainer(containerWithDoubleLimit).WithLabels(labels).Get() + + containerWithTenfoldLimit := test.Container().WithName(containerName). + WithCPURequest(resource.MustParse("1")).WithCPULimit(resource.MustParse("10")). 
+ WithMemRequest(resource.MustParse("100Mi")).WithMemLimit(resource.MustParse("1000Mi")).Get() + podWithTenfoldLimit := test.Pod().WithName("test_initialized"). + AddContainer(containerWithTenfoldLimit).WithLabels(labels).Get() + + limitsNoRequestsContainer := test.Container().WithName(containerName). + WithCPULimit(resource.MustParse("2")).WithMemLimit(resource.MustParse("200Mi")).Get() + limitsNoRequestsPod := test.Pod().WithName("test_initialized"). + AddContainer(limitsNoRequestsContainer).WithLabels(labels).Get() offVPA := vpaBuilder.WithUpdateMode(vpa_types.UpdateModeOff).Get() targetBelowMinVPA := vpaBuilder.WithTarget("3", "150Mi").WithMinAllowed("4", "300Mi").WithMaxAllowed("5", "1Gi").Get() targetAboveMaxVPA := vpaBuilder.WithTarget("7", "2Gi").WithMinAllowed("4", "300Mi").WithMaxAllowed("5", "1Gi").Get() - vpaWithHighMemory := vpaBuilder.WithTarget("2", "1000Mi").WithMaxAllowed("3", "3Gi").Get() + vpaWithExabyteRecommendation := vpaBuilder.WithTarget("1Ei", "1Ei").WithMaxAllowed("1Ei", "1Ei").Get() vpaWithEmptyRecommendation := vpaBuilder.Get() vpaWithEmptyRecommendation.Status.Recommendation = &vpa_types.RecommendedPodResources{} vpaWithNilRecommendation := vpaBuilder.Get() vpaWithNilRecommendation.Status.Recommendation = nil - testCases := []testCase{{ - pod: uninitialized, - vpas: []*vpa_types.VerticalPodAutoscaler{vpa}, - expectedAction: true, - expectedMem: "200Mi", - expectedCPU: "2", - labelSelector: "app = testingApp", - }, { - pod: uninitialized, - vpas: []*vpa_types.VerticalPodAutoscaler{vpa}, - expectedAction: true, - expectedMem: "200Mi", - expectedCPU: "2", - labelSelector: "app = testingApp", - }, { - pod: uninitialized, - vpas: []*vpa_types.VerticalPodAutoscaler{targetBelowMinVPA}, - expectedAction: true, - expectedMem: "300Mi", // MinMemory is expected to be used - expectedCPU: "4", // MinCpu is expected to be used - annotations: vpa_api_util.ContainerToAnnotationsMap{ - containerName: []string{"cpu capped to minAllowed", "memory capped to minAllowed"}, + testCases := []struct { + name string + pod *apiv1.Pod + vpas []*vpa_types.VerticalPodAutoscaler + expectedAction bool + expectedMem resource.Quantity + expectedCPU resource.Quantity + expectedCPULimit *resource.Quantity + expectedMemLimit *resource.Quantity + annotations vpa_api_util.ContainerToAnnotationsMap + labelSelector string + }{ + { + name: "uninitialized pod", + pod: uninitialized, + vpas: []*vpa_types.VerticalPodAutoscaler{vpa}, + expectedAction: true, + expectedMem: resource.MustParse("200Mi"), + expectedCPU: resource.MustParse("2"), + labelSelector: "app = testingApp", + }, + { + name: "target below min", + pod: uninitialized, + vpas: []*vpa_types.VerticalPodAutoscaler{targetBelowMinVPA}, + expectedAction: true, + expectedMem: resource.MustParse("300Mi"), // MinMemory is expected to be used + expectedCPU: resource.MustParse("4"), // MinCpu is expected to be used + annotations: vpa_api_util.ContainerToAnnotationsMap{ + containerName: []string{"cpu capped to minAllowed", "memory capped to minAllowed"}, + }, + labelSelector: "app = testingApp", + }, + { + name: "target above max", + pod: uninitialized, + vpas: []*vpa_types.VerticalPodAutoscaler{targetAboveMaxVPA}, + expectedAction: true, + expectedMem: resource.MustParse("1Gi"), // MaxMemory is expected to be used + expectedCPU: resource.MustParse("5"), // MaxCpu is expected to be used + annotations: vpa_api_util.ContainerToAnnotationsMap{ + containerName: []string{"cpu capped to maxAllowed", "memory capped to maxAllowed"}, + }, + labelSelector: "app = 
testingApp", + }, + { + name: "initialized pod", + pod: initialized, + vpas: []*vpa_types.VerticalPodAutoscaler{vpa}, + expectedAction: true, + expectedMem: resource.MustParse("200Mi"), + expectedCPU: resource.MustParse("2"), + labelSelector: "app = testingApp", + }, + { + name: "high memory", + pod: initialized, + vpas: []*vpa_types.VerticalPodAutoscaler{vpaWithHighMemory}, + expectedAction: true, + expectedMem: resource.MustParse("1000Mi"), + expectedCPU: resource.MustParse("2"), + labelSelector: "app = testingApp", + }, + { + name: "not matching selecetor", + pod: uninitialized, + vpas: []*vpa_types.VerticalPodAutoscaler{vpa}, + expectedAction: false, + labelSelector: "app = differentApp", + }, + { + name: "off mode", + pod: uninitialized, + vpas: []*vpa_types.VerticalPodAutoscaler{offVPA}, + expectedAction: false, + labelSelector: "app = testingApp", + }, + { + name: "two vpas one in off mode", + pod: uninitialized, + vpas: []*vpa_types.VerticalPodAutoscaler{offVPA, vpa}, + expectedAction: true, + expectedMem: resource.MustParse("200Mi"), + expectedCPU: resource.MustParse("2"), + labelSelector: "app = testingApp", + }, + { + name: "empty recommendation", + pod: initialized, + vpas: []*vpa_types.VerticalPodAutoscaler{vpaWithEmptyRecommendation}, + expectedAction: true, + expectedMem: resource.MustParse("0"), + expectedCPU: resource.MustParse("0"), + labelSelector: "app = testingApp", }, - labelSelector: "app = testingApp", - }, { - pod: uninitialized, - vpas: []*vpa_types.VerticalPodAutoscaler{targetAboveMaxVPA}, - expectedAction: true, - expectedMem: "1Gi", // MaxMemory is expected to be used - expectedCPU: "5", // MaxCpu is expected to be used - annotations: vpa_api_util.ContainerToAnnotationsMap{ - containerName: []string{"cpu capped to maxAllowed", "memory capped to maxAllowed"}, + { + pod: initialized, + vpas: []*vpa_types.VerticalPodAutoscaler{vpaWithNilRecommendation}, + expectedAction: true, + expectedMem: resource.MustParse("0"), + expectedCPU: resource.MustParse("0"), + labelSelector: "app = testingApp", }, - labelSelector: "app = testingApp", - }, { - pod: initialized, - vpas: []*vpa_types.VerticalPodAutoscaler{vpa}, - expectedAction: true, - expectedMem: "200Mi", - expectedCPU: "2", - labelSelector: "app = testingApp", - }, { - pod: initialized, - vpas: []*vpa_types.VerticalPodAutoscaler{vpaWithHighMemory}, - expectedAction: true, - expectedMem: "1000Mi", - expectedCPU: "2", - labelSelector: "app = testingApp", - }, { - pod: uninitialized, - vpas: []*vpa_types.VerticalPodAutoscaler{vpa}, - expectedAction: false, - labelSelector: "app = differentApp", - }, { - pod: uninitialized, - vpas: []*vpa_types.VerticalPodAutoscaler{offVPA}, - expectedAction: false, - labelSelector: "app = testingApp", - }, { - pod: uninitialized, - vpas: []*vpa_types.VerticalPodAutoscaler{offVPA, vpa}, - expectedAction: true, - expectedMem: "200Mi", - expectedCPU: "2", - labelSelector: "app = testingApp", - }, { - pod: initialized, - vpas: []*vpa_types.VerticalPodAutoscaler{vpaWithEmptyRecommendation}, - expectedAction: true, - expectedMem: "0", - expectedCPU: "0", - labelSelector: "app = testingApp", - }, { - pod: initialized, - vpas: []*vpa_types.VerticalPodAutoscaler{vpaWithNilRecommendation}, - expectedAction: true, - expectedMem: "0", - expectedCPU: "0", - labelSelector: "app = testingApp", - }} - for i, tc := range testCases { - - t.Run(fmt.Sprintf("test case number: %d", i), func(t *testing.T) { + { + name: "guaranteed resources", + pod: limitsMatchRequestsPod, + vpas: 
[]*vpa_types.VerticalPodAutoscaler{vpa}, + expectedAction: true, + expectedMem: resource.MustParse("200Mi"), + expectedCPU: resource.MustParse("2"), + expectedCPULimit: mustParseResourcePointer("2"), + expectedMemLimit: mustParseResourcePointer("200Mi"), + labelSelector: "app = testingApp", + }, + { + name: "guaranteed resources - no request", + pod: limitsNoRequestsPod, + vpas: []*vpa_types.VerticalPodAutoscaler{vpa}, + expectedAction: true, + expectedMem: resource.MustParse("200Mi"), + expectedCPU: resource.MustParse("2"), + expectedCPULimit: mustParseResourcePointer("2"), + expectedMemLimit: mustParseResourcePointer("200Mi"), + labelSelector: "app = testingApp", + }, + { + name: "proportional limit", + pod: podWithDoubleLimit, + vpas: []*vpa_types.VerticalPodAutoscaler{vpa}, + expectedAction: true, + expectedCPU: resource.MustParse("2"), + expectedMem: resource.MustParse("200Mi"), + expectedCPULimit: mustParseResourcePointer("4"), + expectedMemLimit: mustParseResourcePointer("400Mi"), + labelSelector: "app = testingApp", + }, + { + name: "limit over int64", + pod: podWithTenfoldLimit, + vpas: []*vpa_types.VerticalPodAutoscaler{vpaWithExabyteRecommendation}, + expectedAction: true, + expectedCPU: resource.MustParse("1Ei"), + expectedMem: resource.MustParse("1Ei"), + expectedCPULimit: resource.NewMilliQuantity(math.MaxInt64, resource.DecimalExponent), + expectedMemLimit: resource.NewMilliQuantity(math.MaxInt64, resource.DecimalExponent), + labelSelector: "app = testingApp", + annotations: vpa_api_util.ContainerToAnnotationsMap{ + containerName: []string{ + "Failed to keep CPU limit to request proportion of 10000 to 1000 with recommended request of -9223372036854775808 milliCPU; doesn't fit in int64. Capping limit to MaxInt64", + "Failed to keep memory limit to request proportion of 1048576000000 to 104857600000 with recommended request of -9223372036854775808 milliBytes; doesn't fit in int64. 
Capping limit to MaxInt64", + }, + }, + }, + } + for _, tc := range testCases { + t.Run(fmt.Sprintf(tc.name), func(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() @@ -185,15 +281,34 @@ func TestUpdateResourceRequests(t *testing.T) { if tc.expectedAction { assert.Equal(t, vpaName, name) assert.Nil(t, err) - assert.Equal(t, len(resources), 1) - expectedCPU, err := resource.ParseQuantity(tc.expectedCPU) - assert.NoError(t, err) + if !assert.Equal(t, len(resources), 1) { + return + } + cpuRequest := resources[0].Requests[apiv1.ResourceCPU] - assert.Equal(t, expectedCPU.Value(), cpuRequest.Value(), "cpu request doesn't match") - expectedMemory, err := resource.ParseQuantity(tc.expectedMem) - assert.NoError(t, err) + assert.Equal(t, tc.expectedCPU.Value(), cpuRequest.Value(), "cpu request doesn't match") + memoryRequest := resources[0].Requests[apiv1.ResourceMemory] - assert.Equal(t, expectedMemory.Value(), memoryRequest.Value(), "memory request doesn't match") + assert.Equal(t, tc.expectedMem.Value(), memoryRequest.Value(), "memory request doesn't match") + + cpuLimit, cpuLimitPresent := resources[0].Limits[apiv1.ResourceCPU] + if tc.expectedCPULimit == nil { + assert.False(t, cpuLimitPresent, "expected no cpu limit, got %s", cpuLimit.String()) + } else { + if assert.True(t, cpuLimitPresent, "expected cpu limit, but it's missing") { + assert.Equal(t, tc.expectedCPULimit.MilliValue(), cpuLimit.MilliValue(), "cpu limit doesn't match") + } + } + + memLimit, memLimitPresent := resources[0].Limits[apiv1.ResourceMemory] + if tc.expectedMemLimit == nil { + assert.False(t, memLimitPresent, "expected no memory limit, got %s", memLimit.String()) + } else { + if assert.True(t, memLimitPresent, "expected cpu limit, but it's missing") { + assert.Equal(t, tc.expectedMemLimit.MilliValue(), memLimit.MilliValue(), "memory limit doesn't match") + } + } + assert.Len(t, annotations, len(tc.annotations)) if len(tc.annotations) > 0 { for annotationKey, annotationValues := range tc.annotations { diff --git a/vertical-pod-autoscaler/pkg/admission-controller/logic/server.go b/vertical-pod-autoscaler/pkg/admission-controller/logic/server.go index 0031bae00afb..885dae569147 100644 --- a/vertical-pod-autoscaler/pkg/admission-controller/logic/server.go +++ b/vertical-pod-autoscaler/pkg/admission-controller/logic/server.go @@ -26,6 +26,7 @@ import ( "k8s.io/api/admission/v1beta1" "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta2" metrics_admission "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/metrics/admission" @@ -74,42 +75,12 @@ func (s *AdmissionServer) getPatchesForPodResourceRequest(raw []byte, namespace patches := []patchRecord{} updatesAnnotation := []string{} for i, containerResources := range containersResources { - - // Add resources empty object if missing - if pod.Spec.Containers[i].Resources.Limits == nil && - pod.Spec.Containers[i].Resources.Requests == nil { - patches = append(patches, patchRecord{ - Op: "add", - Path: fmt.Sprintf("/spec/containers/%d/resources", i), - Value: v1.ResourceRequirements{}, - }) - } - - // Add request empty map if missing - if pod.Spec.Containers[i].Resources.Requests == nil { - patches = append(patches, patchRecord{ - Op: "add", - Path: fmt.Sprintf("/spec/containers/%d/resources/requests", i), - Value: v1.ResourceList{}}) - } - - annotations, found := annotationsPerContainer[pod.Spec.Containers[i].Name] - if 
!found { - annotations = make([]string, 0) - } - for resource, request := range containerResources.Requests { - // Set request - patches = append(patches, patchRecord{ - Op: "add", - Path: fmt.Sprintf("/spec/containers/%d/resources/requests/%s", i, resource), - Value: request.String()}) - annotations = append(annotations, fmt.Sprintf("%s request", resource)) - } - - updatesAnnotation = append(updatesAnnotation, fmt.Sprintf("container %d: ", i)+strings.Join(annotations, ", ")) + newPatches, newUpdatesAnnotation := s.getContainerPatch(pod, i, "requests", annotationsPerContainer, containerResources) + patches = append(patches, newPatches...) + updatesAnnotation = append(updatesAnnotation, newUpdatesAnnotation) } if len(updatesAnnotation) > 0 { - vpaAnnotationValue := fmt.Sprintf("Pod resources updated by %s: ", vpaName) + strings.Join(updatesAnnotation, "; ") + vpaAnnotationValue := fmt.Sprintf("Pod resources updated by %s: %s", vpaName, strings.Join(updatesAnnotation, "; ")) if pod.Annotations == nil { patches = append(patches, patchRecord{ Op: "add", @@ -125,6 +96,61 @@ func (s *AdmissionServer) getPatchesForPodResourceRequest(raw []byte, namespace return patches, nil } +func getPatchInitializingEmptyResources(i int) patchRecord { + return patchRecord{ + Op: "add", + Path: fmt.Sprintf("/spec/containers/%d/resources", i), + Value: v1.ResourceRequirements{}, + } +} + +func getPatchInitializingEmptyResourcesSubfield(i int, kind string) patchRecord { + return patchRecord{ + Op: "add", + Path: fmt.Sprintf("/spec/containers/%d/resources/%s", i, kind), + Value: v1.ResourceList{}, + } +} + +func getAddResourceRequirementValuePatch(i int, kind string, resource v1.ResourceName, quantity resource.Quantity) patchRecord { + return patchRecord{ + Op: "add", + Path: fmt.Sprintf("/spec/containers/%d/resources/%s/%s", i, kind, resource), + Value: quantity.String()} +} + +func (s *AdmissionServer) getContainerPatch(pod v1.Pod, i int, patchKind string, annotationsPerContainer vpa_api_util.ContainerToAnnotationsMap, containerResources ContainerResources) ([]patchRecord, string) { + var patches []patchRecord + // Add empty resources object if missing + if pod.Spec.Containers[i].Resources.Limits == nil && + pod.Spec.Containers[i].Resources.Requests == nil { + patches = append(patches, getPatchInitializingEmptyResources(i)) + } + + annotations, found := annotationsPerContainer[pod.Spec.Containers[i].Name] + if !found { + annotations = make([]string, 0) + } + + patches, annotations = appendPatchesAndAnnotations(patches, annotations, pod.Spec.Containers[i].Resources.Requests, i, containerResources.Requests, "requests", "request") + patches, annotations = appendPatchesAndAnnotations(patches, annotations, pod.Spec.Containers[i].Resources.Limits, i, containerResources.Limits, "limits", "limit") + + updatesAnnotation := fmt.Sprintf("container %d: ", i) + strings.Join(annotations, ", ") + return patches, updatesAnnotation +} + +func appendPatchesAndAnnotations(patches []patchRecord, annotations []string, current v1.ResourceList, containerIndex int, resources v1.ResourceList, fieldName, resourceName string) ([]patchRecord, []string) { + // Add empty object if it's missing and we're about to fill it. 
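+	// JSON patch can only "add" a key to a map that already exists, so the empty
+	// requests/limits map has to be created by its own patch before any
+	// /spec/containers/%d/resources/<kind>/<resource> values are added into it.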
+ if current == nil && len(resources) > 0 { + patches = append(patches, getPatchInitializingEmptyResourcesSubfield(containerIndex, fieldName)) + } + for resource, request := range resources { + patches = append(patches, getAddResourceRequirementValuePatch(containerIndex, fieldName, resource, request)) + annotations = append(annotations, fmt.Sprintf("%s %s", resource, resourceName)) + } + return patches, annotations +} + func parseVPA(raw []byte) (*vpa_types.VerticalPodAutoscaler, error) { vpa := vpa_types.VerticalPodAutoscaler{} if err := json.Unmarshal(raw, &vpa); err != nil { diff --git a/vertical-pod-autoscaler/pkg/admission-controller/logic/server_test.go b/vertical-pod-autoscaler/pkg/admission-controller/logic/server_test.go new file mode 100644 index 000000000000..35ed1e7ff9ce --- /dev/null +++ b/vertical-pod-autoscaler/pkg/admission-controller/logic/server_test.go @@ -0,0 +1,501 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package logic + +import ( + "encoding/json" + "fmt" + "github.com/stretchr/testify/assert" + "strings" + "testing" + + apiv1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta2" + vpa_api_util "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/vpa" +) + +const ( + cpu = "cpu" + unobtanium = "unobtanium" + limit = "limit" + request = "request" +) + +type fakePodPreProcessor struct { + e error +} + +func (fpp *fakePodPreProcessor) Process(pod apiv1.Pod) (apiv1.Pod, error) { + return pod, fpp.e +} + +type fakeRecommendationProvider struct { + resources []ContainerResources + containerToAnnotations vpa_api_util.ContainerToAnnotationsMap + name string + e error +} + +func (frp *fakeRecommendationProvider) GetContainersResourcesForPod(pod *apiv1.Pod) ([]ContainerResources, vpa_api_util.ContainerToAnnotationsMap, string, error) { + return frp.resources, frp.containerToAnnotations, frp.name, frp.e +} + +func addResourcesPatch(idx int) patchRecord { + return patchRecord{ + "add", + fmt.Sprintf("/spec/containers/%d/resources", idx), + apiv1.ResourceRequirements{}, + } +} + +func addRequestsPatch(idx int) patchRecord { + return patchRecord{ + "add", + fmt.Sprintf("/spec/containers/%d/resources/requests", idx), + apiv1.ResourceList{}, + } +} + +func addLimitsPatch(idx int) patchRecord { + return patchRecord{ + "add", + fmt.Sprintf("/spec/containers/%d/resources/limits", idx), + apiv1.ResourceList{}, + } +} + +func addResourceRequestPatch(index int, res, amount string) patchRecord { + return patchRecord{ + "add", + fmt.Sprintf("/spec/containers/%d/resources/requests/%s", index, res), + resource.MustParse(amount), + } +} + +func addResourceLimitPatch(index int, res, amount string) patchRecord { + return patchRecord{ + "add", + fmt.Sprintf("/spec/containers/%d/resources/limits/%s", index, res), + resource.MustParse(amount), + } +} + +func addAnnotationRequest(updateResources [][]string, kind string) patchRecord { + requests := make([]string, 0) 
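+	// Builds the expected vpaUpdates annotation patch, e.g. for [][]string{{"cpu"}} and kind
+	// "request" the annotation reads: "Pod resources updated by name: container 0: cpu request".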
+ for idx, podResources := range updateResources { + podRequests := make([]string, 0) + for _, resource := range podResources { + podRequests = append(podRequests, resource+" "+kind) + } + requests = append(requests, fmt.Sprintf("container %d: %s", idx, strings.Join(podRequests, ", "))) + } + + vpaUpdates := fmt.Sprintf("Pod resources updated by name: %s", strings.Join(requests, "; ")) + return patchRecord{ + "add", + "/metadata/annotations", + map[string]string{ + "vpaUpdates": vpaUpdates, + }, + } +} + +func eqPatch(a, b patchRecord) bool { + aJson, aErr := json.Marshal(a) + bJson, bErr := json.Marshal(b) + return string(aJson) == string(bJson) && aErr == bErr +} + +func TestGetPatchesForResourceRequest(t *testing.T) { + tests := []struct { + name string + podJson []byte + namespace string + preProcessorError error + recommendResources []ContainerResources + recommendAnnotations vpa_api_util.ContainerToAnnotationsMap + recommendName string + recommendError error + expectPatches []patchRecord + expectError error + }{ + { + name: "invalid JSON", + podJson: []byte("{"), + namespace: "default", + preProcessorError: nil, + recommendResources: []ContainerResources{}, + recommendAnnotations: vpa_api_util.ContainerToAnnotationsMap{}, + recommendName: "name", + expectError: fmt.Errorf("unexpected end of JSON input"), + }, + { + name: "invalid pod", + podJson: []byte("{}"), + namespace: "default", + preProcessorError: fmt.Errorf("bad pod"), + recommendResources: []ContainerResources{}, + recommendAnnotations: vpa_api_util.ContainerToAnnotationsMap{}, + recommendName: "name", + expectError: fmt.Errorf("bad pod"), + }, + { + name: "new cpu recommendation", + podJson: []byte( + `{ + "spec": { + "containers": [{}] + } + }`), + namespace: "default", + recommendResources: []ContainerResources{ + { + Requests: apiv1.ResourceList{ + cpu: resource.MustParse("1"), + }, + }, + }, + recommendAnnotations: vpa_api_util.ContainerToAnnotationsMap{}, + recommendName: "name", + expectPatches: []patchRecord{ + addResourcesPatch(0), + addRequestsPatch(0), + addResourceRequestPatch(0, cpu, "1"), + addAnnotationRequest([][]string{{cpu}}, request), + }, + }, + { + name: "replacement cpu recommendation", + podJson: []byte( + `{ + "spec": { + "containers": [ + { + "resources": { + "requests": { + "cpu": "0" + } + } + } + ] + } + }`), + namespace: "default", + recommendResources: []ContainerResources{ + { + Requests: apiv1.ResourceList{ + cpu: resource.MustParse("1"), + }, + }, + }, + recommendAnnotations: vpa_api_util.ContainerToAnnotationsMap{}, + recommendName: "name", + expectPatches: []patchRecord{ + addResourceRequestPatch(0, cpu, "1"), + addAnnotationRequest([][]string{{cpu}}, request), + }, + }, + { + name: "two containers", + podJson: []byte( + `{ + "spec": { + "containers": [ + { + "resources": { + "requests": { + "cpu": "0" + } + } + }, + {} + ] + } + }`), + namespace: "default", + recommendResources: []ContainerResources{ + { + Requests: apiv1.ResourceList{ + cpu: resource.MustParse("1"), + }, + }, + { + Requests: apiv1.ResourceList{ + cpu: resource.MustParse("2"), + }, + }, + }, + recommendAnnotations: vpa_api_util.ContainerToAnnotationsMap{}, + recommendName: "name", + expectPatches: []patchRecord{ + addResourceRequestPatch(0, cpu, "1"), + addResourcesPatch(1), + addRequestsPatch(1), + addResourceRequestPatch(1, cpu, "2"), + addAnnotationRequest([][]string{{cpu}, {cpu}}, request), + }, + }, + { + name: "new cpu limit", + podJson: []byte( + `{ + "spec": { + "containers": [{}] + } + }`), + namespace: "default", 
+ recommendResources: []ContainerResources{ + { + Limits: apiv1.ResourceList{ + cpu: resource.MustParse("1"), + }, + }, + }, + recommendAnnotations: vpa_api_util.ContainerToAnnotationsMap{}, + recommendName: "name", + expectPatches: []patchRecord{ + addResourcesPatch(0), + addLimitsPatch(0), + addResourceLimitPatch(0, cpu, "1"), + addAnnotationRequest([][]string{{cpu}}, limit), + }, + }, + { + name: "replacement cpu limit", + podJson: []byte( + `{ + "spec": { + "containers": [ + { + "resources": { + "limits": { + "cpu": "0" + } + } + } + ] + } + }`), + namespace: "default", + recommendResources: []ContainerResources{ + { + Limits: apiv1.ResourceList{ + cpu: resource.MustParse("1"), + }, + }, + }, + recommendAnnotations: vpa_api_util.ContainerToAnnotationsMap{}, + recommendName: "name", + expectPatches: []patchRecord{ + addResourceLimitPatch(0, cpu, "1"), + addAnnotationRequest([][]string{{cpu}}, limit), + }, + }, + } + for _, tc := range tests { + t.Run(fmt.Sprintf("test case: %s", tc.name), func(t *testing.T) { + fppp := fakePodPreProcessor{e: tc.preProcessorError} + frp := fakeRecommendationProvider{tc.recommendResources, tc.recommendAnnotations, tc.recommendName, tc.recommendError} + s := NewAdmissionServer(&frp, &fppp) + patches, err := s.getPatchesForPodResourceRequest(tc.podJson, tc.namespace) + if tc.expectError == nil { + assert.NoError(t, err) + } else { + if assert.Error(t, err) { + assert.Equal(t, tc.expectError.Error(), err.Error()) + } + } + if assert.Equal(t, len(tc.expectPatches), len(patches), fmt.Sprintf("got %+v, want %+v", patches, tc.expectPatches)) { + for i, gotPatch := range patches { + if !eqPatch(gotPatch, tc.expectPatches[i]) { + t.Errorf("Expected patch at position %d to be %+v, got %+v", i, tc.expectPatches[i], gotPatch) + } + } + } + }) + } +} + +func TestGetPatchesForResourceRequest_TwoReplacementResources(t *testing.T) { + + fppp := fakePodPreProcessor{} + recommendResources := []ContainerResources{ + { + Requests: apiv1.ResourceList{ + cpu: resource.MustParse("1"), + unobtanium: resource.MustParse("2"), + }, + }, + } + podJson := []byte( + `{ + "spec": { + "containers": [ + { + "resources": { + "requests": { + "cpu": "0" + } + } + } + ] + } + }`) + recommendAnnotations := vpa_api_util.ContainerToAnnotationsMap{} + frp := fakeRecommendationProvider{recommendResources, recommendAnnotations, "name", nil} + s := NewAdmissionServer(&frp, &fppp) + patches, err := s.getPatchesForPodResourceRequest(podJson, "default") + assert.NoError(t, err) + // Order of updates for cpu and unobtanium depends on order of iterating a map, both possible results are valid. 
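+	// (Go randomizes map iteration order between runs, so the test accepts either ordering
+	// of the two resource patches and only pins the annotation patch to the last position.)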
+ if assert.Equal(t, len(patches), 3) { + cpuUpdate := addResourceRequestPatch(0, cpu, "1") + unobtaniumUpdate := addResourceRequestPatch(0, unobtanium, "2") + assert.True(t, eqPatch(patches[0], cpuUpdate) || eqPatch(patches[0], unobtaniumUpdate)) + assert.True(t, eqPatch(patches[1], cpuUpdate) || eqPatch(patches[1], unobtaniumUpdate)) + assert.False(t, eqPatch(patches[0], patches[1])) + assert.True(t, eqPatch(patches[2], addAnnotationRequest([][]string{{cpu, unobtanium}}, request)) || eqPatch(patches[2], addAnnotationRequest([][]string{{unobtanium, cpu}}, request))) + } +} + +func TestValidateVPA(t *testing.T) { + badUpdateMode := vpa_types.UpdateMode("bad") + validUpdateMode := vpa_types.UpdateModeOff + badScalingMode := vpa_types.ContainerScalingMode("bad") + validScalingMode := vpa_types.ContainerScalingModeAuto + tests := []struct { + name string + vpa vpa_types.VerticalPodAutoscaler + isCreate bool + expectError error + }{ + { + name: "empty update", + vpa: vpa_types.VerticalPodAutoscaler{}, + }, + { + name: "empty create", + vpa: vpa_types.VerticalPodAutoscaler{}, + isCreate: true, + expectError: fmt.Errorf("TargetRef is required. If you're using v1beta1 version of the API, please migrate to v1beta2."), + }, + { + name: "no update mode", + vpa: vpa_types.VerticalPodAutoscaler{ + Spec: vpa_types.VerticalPodAutoscalerSpec{ + UpdatePolicy: &vpa_types.PodUpdatePolicy{}, + }, + }, + expectError: fmt.Errorf("UpdateMode is required if UpdatePolicy is used"), + }, + { + name: "bad update mode", + vpa: vpa_types.VerticalPodAutoscaler{ + Spec: vpa_types.VerticalPodAutoscalerSpec{ + UpdatePolicy: &vpa_types.PodUpdatePolicy{ + UpdateMode: &badUpdateMode, + }, + }, + }, + expectError: fmt.Errorf("unexpected UpdateMode value bad"), + }, + { + name: "no policy name", + vpa: vpa_types.VerticalPodAutoscaler{ + Spec: vpa_types.VerticalPodAutoscalerSpec{ + ResourcePolicy: &vpa_types.PodResourcePolicy{ + ContainerPolicies: []vpa_types.ContainerResourcePolicy{{}}, + }, + }, + }, + expectError: fmt.Errorf("ContainerPolicies.ContainerName is required"), + }, + { + name: "invalid scaling mode", + vpa: vpa_types.VerticalPodAutoscaler{ + Spec: vpa_types.VerticalPodAutoscalerSpec{ + ResourcePolicy: &vpa_types.PodResourcePolicy{ + ContainerPolicies: []vpa_types.ContainerResourcePolicy{ + { + ContainerName: "loot box", + Mode: &badScalingMode, + }, + }, + }, + }, + }, + expectError: fmt.Errorf("unexpected Mode value bad"), + }, + { + name: "bad limits", + vpa: vpa_types.VerticalPodAutoscaler{ + Spec: vpa_types.VerticalPodAutoscalerSpec{ + ResourcePolicy: &vpa_types.PodResourcePolicy{ + ContainerPolicies: []vpa_types.ContainerResourcePolicy{ + { + ContainerName: "loot box", + MinAllowed: apiv1.ResourceList{ + cpu: resource.MustParse("100"), + }, + MaxAllowed: apiv1.ResourceList{ + cpu: resource.MustParse("10"), + }, + }, + }, + }, + }, + }, + expectError: fmt.Errorf("max resource for cpu is lower than min"), + }, + { + name: "all valid", + vpa: vpa_types.VerticalPodAutoscaler{ + Spec: vpa_types.VerticalPodAutoscalerSpec{ + ResourcePolicy: &vpa_types.PodResourcePolicy{ + ContainerPolicies: []vpa_types.ContainerResourcePolicy{ + { + ContainerName: "loot box", + Mode: &validScalingMode, + MinAllowed: apiv1.ResourceList{ + cpu: resource.MustParse("10"), + }, + MaxAllowed: apiv1.ResourceList{ + cpu: resource.MustParse("100"), + }, + }, + }, + }, + UpdatePolicy: &vpa_types.PodUpdatePolicy{ + UpdateMode: &validUpdateMode, + }, + }, + }, + }, + } + for _, tc := range tests { + t.Run(fmt.Sprintf("test case: %s", 
tc.name), func(t *testing.T) { + err := validateVPA(&tc.vpa, tc.isCreate) + if tc.expectError == nil { + assert.NoError(t, err) + } else { + if assert.Error(t, err) { + assert.Equal(t, tc.expectError.Error(), err.Error()) + } + } + }) + } +} diff --git a/vertical-pod-autoscaler/pkg/utils/test/test_container.go b/vertical-pod-autoscaler/pkg/utils/test/test_container.go new file mode 100644 index 000000000000..a2336fdcb818 --- /dev/null +++ b/vertical-pod-autoscaler/pkg/utils/test/test_container.go @@ -0,0 +1,88 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package test + +import ( + apiv1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" +) + +type containerBuilder struct { + name string + cpuRequest *resource.Quantity + memRequest *resource.Quantity + cpuLimit *resource.Quantity + memLimit *resource.Quantity +} + +// Container returns object that helps build containers for tests. +func Container() *containerBuilder { + return &containerBuilder{} +} + +func (cb *containerBuilder) WithName(name string) *containerBuilder { + r := *cb + r.name = name + return &r +} + +func (cb *containerBuilder) WithCPURequest(cpuRequest resource.Quantity) *containerBuilder { + r := *cb + r.cpuRequest = &cpuRequest + return &r +} + +func (cb *containerBuilder) WithMemRequest(memRequest resource.Quantity) *containerBuilder { + r := *cb + r.memRequest = &memRequest + return &r +} + +func (cb *containerBuilder) WithCPULimit(cpuLimit resource.Quantity) *containerBuilder { + r := *cb + r.cpuLimit = &cpuLimit + return &r +} + +func (cb *containerBuilder) WithMemLimit(memLimit resource.Quantity) *containerBuilder { + r := *cb + r.memLimit = &memLimit + return &r +} + +func (cb *containerBuilder) Get() apiv1.Container { + container := apiv1.Container{ + Name: cb.name, + Resources: apiv1.ResourceRequirements{ + Requests: apiv1.ResourceList{}, + Limits: apiv1.ResourceList{}, + }, + } + if cb.cpuRequest != nil { + container.Resources.Requests[apiv1.ResourceCPU] = *cb.cpuRequest + } + if cb.memRequest != nil { + container.Resources.Requests[apiv1.ResourceMemory] = *cb.memRequest + } + if cb.cpuLimit != nil { + container.Resources.Limits[apiv1.ResourceCPU] = *cb.cpuLimit + } + if cb.memLimit != nil { + container.Resources.Limits[apiv1.ResourceMemory] = *cb.memLimit + } + return container +} diff --git a/vertical-pod-autoscaler/pkg/utils/test/test_pod.go b/vertical-pod-autoscaler/pkg/utils/test/test_pod.go index 5468835766ba..a2ffe4254f75 100644 --- a/vertical-pod-autoscaler/pkg/utils/test/test_pod.go +++ b/vertical-pod-autoscaler/pkg/utils/test/test_pod.go @@ -26,6 +26,7 @@ type PodBuilder interface { WithName(name string) PodBuilder AddContainer(container apiv1.Container) PodBuilder WithCreator(creatorObjectMeta *metav1.ObjectMeta, creatorTypeMeta *metav1.TypeMeta) PodBuilder + WithLabels(labels map[string]string) PodBuilder WithPhase(phase apiv1.PodPhase) PodBuilder Get() *apiv1.Pod } @@ -37,20 +38,21 @@ func Pod() PodBuilder { } } -type container struct { - name string - cpu 
string - mem string -} - type podBuilderImpl struct { name string containers []apiv1.Container creatorObjectMeta *metav1.ObjectMeta creatorTypeMeta *metav1.TypeMeta + labels map[string]string phase apiv1.PodPhase } +func (pb *podBuilderImpl) WithLabels(labels map[string]string) PodBuilder { + r := *pb + r.labels = labels + return &r +} + func (pb *podBuilderImpl) WithName(name string) PodBuilder { r := *pb r.name = name @@ -91,6 +93,10 @@ func (pb *podBuilderImpl) Get() *apiv1.Pod { }, } + if pb.labels != nil { + pod.ObjectMeta.Labels = pb.labels + } + if pb.creatorObjectMeta != nil && pb.creatorTypeMeta != nil { isController := true pod.ObjectMeta.OwnerReferences = []metav1.OwnerReference{ diff --git a/vertical-pod-autoscaler/pkg/utils/test/test_utils.go b/vertical-pod-autoscaler/pkg/utils/test/test_utils.go index e415c9ee1a8a..b587d131b3fa 100644 --- a/vertical-pod-autoscaler/pkg/utils/test/test_utils.go +++ b/vertical-pod-autoscaler/pkg/utils/test/test_utils.go @@ -30,7 +30,7 @@ import ( vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta2" vpa_lister_v1beta1 "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/client/listers/autoscaling.k8s.io/v1beta1" vpa_lister "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/client/listers/autoscaling.k8s.io/v1beta2" - v1 "k8s.io/client-go/listers/core/v1" + "k8s.io/client-go/listers/core/v1" "k8s.io/client-go/tools/record" ) @@ -41,23 +41,18 @@ var ( // BuildTestContainer creates container with specified resources func BuildTestContainer(containerName, cpu, mem string) apiv1.Container { - container := apiv1.Container{ - Name: containerName, - Resources: apiv1.ResourceRequirements{ - Requests: apiv1.ResourceList{}, - }, - } + // TODO: Use builder directly, remove this function. + builder := Container().WithName(containerName) if len(cpu) > 0 { cpuVal, _ := resource.ParseQuantity(cpu) - container.Resources.Requests[apiv1.ResourceCPU] = cpuVal + builder = builder.WithCPURequest(cpuVal) } if len(mem) > 0 { memVal, _ := resource.ParseQuantity(mem) - container.Resources.Requests[apiv1.ResourceMemory] = memVal + builder = builder.WithMemRequest(memVal) } - - return container + return builder.Get() } // BuildTestPolicy creates ResourcesPolicy with specified constraints diff --git a/vertical-pod-autoscaler/pkg/utils/vpa/capping.go b/vertical-pod-autoscaler/pkg/utils/vpa/capping.go index 38cd39352eb6..0703e90123d3 100644 --- a/vertical-pod-autoscaler/pkg/utils/vpa/capping.go +++ b/vertical-pod-autoscaler/pkg/utils/vpa/capping.go @@ -37,7 +37,6 @@ type cappingAction string var ( cappedToMinAllowed cappingAction = "capped to minAllowed" cappedToMaxAllowed cappingAction = "capped to maxAllowed" - cappedToLimit cappingAction = "capped to container limit" ) func toCappingAnnotation(resourceName apiv1.ResourceName, action cappingAction) string { @@ -105,11 +104,6 @@ func getCappedRecommendationForContainer( if genAnnotations { cappingAnnotations = append(cappingAnnotations, annotations...) } - // TODO: If limits and policy are conflicting, set some condition on the VPA. - annotations = capRecommendationToContainerLimit(recommendation, container) - if genAnnotations { - cappingAnnotations = append(cappingAnnotations, annotations...) - } } process(cappedRecommendations.Target, true) @@ -119,21 +113,6 @@ func getCappedRecommendationForContainer( return cappedRecommendations, cappingAnnotations, nil } -// capRecommendationToContainerLimit makes sure recommendation is not above current limit for the container. 
-// If this function makes adjustments appropriate annotations are returned. -func capRecommendationToContainerLimit(recommendation apiv1.ResourceList, container apiv1.Container) []string { - annotations := make([]string, 0) - // Iterate over limits set in the container. Unset means Infinite limit. - for resourceName, limit := range container.Resources.Limits { - recommendedValue, found := recommendation[resourceName] - if found && recommendedValue.MilliValue() > limit.MilliValue() { - recommendation[resourceName] = limit - annotations = append(annotations, toCappingAnnotation(resourceName, cappedToLimit)) - } - } - return annotations -} - // applyVPAPolicy updates recommendation if recommended resources are outside of limits defined in VPA resources policy func applyVPAPolicy(recommendation apiv1.ResourceList, policy *vpa_types.ContainerResourcePolicy) []string { if policy == nil { diff --git a/vertical-pod-autoscaler/pkg/utils/vpa/capping_test.go b/vertical-pod-autoscaler/pkg/utils/vpa/capping_test.go index 847332a0b992..ad80dff1f5dc 100644 --- a/vertical-pod-autoscaler/pkg/utils/vpa/capping_test.go +++ b/vertical-pod-autoscaler/pkg/utils/vpa/capping_test.go @@ -47,47 +47,6 @@ func TestRecommendationNotAvailable(t *testing.T) { assert.Empty(t, res.ContainerRecommendations) } -func TestRecommendationCappedToLimit(t *testing.T) { - pod := test.Pod().WithName("pod1").AddContainer(test.BuildTestContainer("ctr-name", "", "")).Get() - pod.Spec.Containers[0].Resources.Limits = - apiv1.ResourceList{ - apiv1.ResourceCPU: *resource.NewScaledQuantity(3, 1), - apiv1.ResourceMemory: *resource.NewScaledQuantity(7000, 1), - } - - podRecommendation := vpa_types.RecommendedPodResources{ - ContainerRecommendations: []vpa_types.RecommendedContainerResources{ - { - ContainerName: "ctr-name", - Target: apiv1.ResourceList{ - apiv1.ResourceCPU: *resource.NewScaledQuantity(10, 1), - apiv1.ResourceMemory: *resource.NewScaledQuantity(8000, 1), - }, - UpperBound: apiv1.ResourceList{ - apiv1.ResourceCPU: *resource.NewScaledQuantity(2, 1), - apiv1.ResourceMemory: *resource.NewScaledQuantity(9000, 1), - }, - }, - }, - } - policy := vpa_types.PodResourcePolicy{} - - res, annotations, err := NewCappingRecommendationProcessor().Apply(&podRecommendation, &policy, nil, pod) - assert.Nil(t, err) - assert.Equal(t, apiv1.ResourceList{ - apiv1.ResourceCPU: *resource.NewScaledQuantity(3, 1), - apiv1.ResourceMemory: *resource.NewScaledQuantity(7000, 1), - }, res.ContainerRecommendations[0].Target) - - assert.Contains(t, annotations, "ctr-name") - assert.Contains(t, annotations["ctr-name"], "memory capped to container limit") - - assert.Equal(t, apiv1.ResourceList{ - apiv1.ResourceCPU: *resource.NewScaledQuantity(2, 1), - apiv1.ResourceMemory: *resource.NewScaledQuantity(7000, 1), - }, res.ContainerRecommendations[0].UpperBound) -} - func TestRecommendationCappedToMinMaxPolicy(t *testing.T) { pod := test.Pod().WithName("pod1").AddContainer(test.BuildTestContainer("ctr-name", "", "")).Get() podRecommendation := vpa_types.RecommendedPodResources{
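Note (illustrative, outside the patch): the proportional-limit arithmetic introduced in
recommendation_provider.go above can be sketched standalone roughly as follows. The helper
name proportionalLimit and the example values are hypothetical; only the k8s.io/apimachinery
dependency is assumed.

	package main

	import (
		"fmt"
		"math"
		"math/big"

		"k8s.io/apimachinery/pkg/api/resource"
	)

	// proportionalLimit scales originalLimit by recommendedRequest/originalRequest.
	// The multiplication is done in big.Int so the intermediate product cannot
	// overflow; the result is capped at MaxInt64 milli-units, as in the patch.
	func proportionalLimit(originalLimit, originalRequest, recommendedRequest *resource.Quantity) (*resource.Quantity, bool) {
		var result big.Int
		result.Mul(big.NewInt(recommendedRequest.MilliValue()), big.NewInt(originalLimit.MilliValue()))
		result.Div(&result, big.NewInt(originalRequest.MilliValue()))
		if result.IsInt64() {
			return resource.NewMilliQuantity(result.Int64(), recommendedRequest.Format), false
		}
		return resource.NewMilliQuantity(math.MaxInt64, recommendedRequest.Format), true
	}

	func main() {
		limit := resource.MustParse("2")       // original limit: 2 CPU
		request := resource.MustParse("1")     // original request: 1 CPU
		recommended := resource.MustParse("3") // recommended request: 3 CPU

		newLimit, capped := proportionalLimit(&limit, &request, &recommended)
		fmt.Println(newLimit.String(), capped) // keeps the 2:1 ratio: prints "6 false"
	}

Doing the division only after an exact big.Int multiplication avoids both premature truncation
and silent int64 overflow, which is why the patch caps (and annotates) only when the final
milli-value genuinely cannot be represented.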