Skip to content

Commit

Permalink
Support scaling request to maintain ratio with limit in updater
Browse files Browse the repository at this point in the history
To be squashed before submit
  • Loading branch information
jbartosik committed May 29, 2019
1 parent d8264ad commit 5094dc0
Show file tree
Hide file tree
Showing 13 changed files with 150 additions and 73 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,7 @@ import (
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/limitrange"

vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta2"
vpa_lister "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/client/listers/autoscaling.k8s.io/v1beta2"
Expand All @@ -35,14 +36,14 @@ type RecommendationProvider interface {
}

type recommendationProvider struct {
limitsRangeCalculator LimitRangeCalculator
limitsRangeCalculator limitrange.LimitRangeCalculator
recommendationProcessor vpa_api_util.RecommendationProcessor
selectorFetcher target.VpaTargetSelectorFetcher
vpaLister vpa_lister.VerticalPodAutoscalerLister
}

// NewRecommendationProvider constructs the recommendation provider that lists VPAs and can be used to determine recommendations for pods.
func NewRecommendationProvider(calculator LimitRangeCalculator, recommendationProcessor vpa_api_util.RecommendationProcessor,
func NewRecommendationProvider(calculator limitrange.LimitRangeCalculator, recommendationProcessor vpa_api_util.RecommendationProcessor,
selectorFetcher target.VpaTargetSelectorFetcher, vpaLister vpa_lister.VerticalPodAutoscalerLister) *recommendationProvider {
return &recommendationProvider{
limitsRangeCalculator: calculator,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,7 @@ package logic

import (
"fmt"
"k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/limitrange"
"math"
"testing"

Expand Down Expand Up @@ -329,7 +330,7 @@ func TestUpdateResourceRequests(t *testing.T) {

recommendationProvider := &recommendationProvider{
vpaLister: vpaLister,
recommendationProcessor: api.NewCappingRecommendationProcessor(),
recommendationProcessor: api.NewCappingRecommendationProcessor(limitrange.NewNoopLimitsCalculator()),
selectorFetcher: mockSelectorFetcher,
limitsRangeCalculator: &fakeLimitRangeCalculator{
tc.limitRange,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,7 @@ import (
"encoding/json"
"fmt"
"io/ioutil"
"k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/limitrange"
"net/http"

"strings"
Expand All @@ -39,11 +40,11 @@ type AdmissionServer struct {
recommendationProvider RecommendationProvider
podPreProcessor PodPreProcessor
vpaPreProcessor VpaPreProcessor
limitsChecker LimitRangeCalculator
limitsChecker limitrange.LimitRangeCalculator
}

// NewAdmissionServer constructs new AdmissionServer
func NewAdmissionServer(recommendationProvider RecommendationProvider, podPreProcessor PodPreProcessor, vpaPreProcessor VpaPreProcessor, limitsChecker LimitRangeCalculator) *AdmissionServer {
func NewAdmissionServer(recommendationProvider RecommendationProvider, podPreProcessor PodPreProcessor, vpaPreProcessor VpaPreProcessor, limitsChecker limitrange.LimitRangeCalculator) *AdmissionServer {
return &AdmissionServer{recommendationProvider, podPreProcessor, vpaPreProcessor, limitsChecker}
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,7 @@ import (
"encoding/json"
"fmt"
"github.com/stretchr/testify/assert"
"k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/limitrange"
"strings"
"testing"

Expand Down Expand Up @@ -320,7 +321,7 @@ func TestGetPatchesForResourceRequest(t *testing.T) {
fppp := fakePodPreProcessor{e: tc.podPreProcessorError}
fvpp := fakeVpaPreProcessor{}
frp := fakeRecommendationProvider{tc.recommendResources, tc.recommendAnnotations, tc.recommendName, tc.recommendError}
lc := NewNoopLimitsCalculator()
lc := limitrange.NewNoopLimitsCalculator()
s := NewAdmissionServer(&frp, &fppp, &fvpp, lc)
patches, err := s.getPatchesForPodResourceRequest(tc.podJson, tc.namespace)
if tc.expectError == nil {
Expand Down Expand Up @@ -368,7 +369,7 @@ func TestGetPatchesForResourceRequest_TwoReplacementResources(t *testing.T) {
}`)
recommendAnnotations := vpa_api_util.ContainerToAnnotationsMap{}
frp := fakeRecommendationProvider{recommendResources, recommendAnnotations, "name", nil}
lc := NewNoopLimitsCalculator()
lc := limitrange.NewNoopLimitsCalculator()
s := NewAdmissionServer(&frp, &fppp, &fvpp, lc)
patches, err := s.getPatchesForPodResourceRequest(podJson, "default")
assert.NoError(t, err)
Expand Down
13 changes: 7 additions & 6 deletions vertical-pod-autoscaler/pkg/admission-controller/main.go
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,7 @@ package main
import (
"flag"
"fmt"
"k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/limitrange"
"net/http"
"os"
"time"
Expand Down Expand Up @@ -82,15 +83,15 @@ func main() {
)
podPreprocessor := logic.NewDefaultPodPreProcessor()
vpaPreprocessor := logic.NewDefaultVpaPreProcessor()
var limitsChecker logic.LimitRangeCalculator
limitsChecker, err = logic.NewLimitsRangeCalculator(factory)
var limitRangeCalculator limitrange.LimitRangeCalculator
limitRangeCalculator, err = limitrange.NewLimitsRangeCalculator(factory)
if err != nil {
klog.Errorf("Failed to create limitsChecker, falling back to not checking limits. Error message: %s", err)
limitsChecker = logic.NewNoopLimitsCalculator()
klog.Errorf("Failed to create limitRangeCalculator, falling back to not checking limits. Error message: %s", err)
limitRangeCalculator = limitrange.NewNoopLimitsCalculator()
}
recommendationProvider := logic.NewRecommendationProvider(limitsChecker, vpa_api_util.NewCappingRecommendationProcessor(), targetSelectorFetcher, vpaLister)
recommendationProvider := logic.NewRecommendationProvider(limitRangeCalculator, vpa_api_util.NewCappingRecommendationProcessor(limitRangeCalculator), targetSelectorFetcher, vpaLister)

as := logic.NewAdmissionServer(recommendationProvider, podPreprocessor, vpaPreprocessor, limitsChecker)
as := logic.NewAdmissionServer(recommendationProvider, podPreprocessor, vpaPreprocessor, limitRangeCalculator)
http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
as.Serve(w, r)
healthCheck.UpdateLastActivity()
Expand Down
9 changes: 8 additions & 1 deletion vertical-pod-autoscaler/pkg/updater/main.go
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,7 @@ package main

import (
"flag"
"k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/limitrange"
"time"

"k8s.io/autoscaler/vertical-pod-autoscaler/common"
Expand Down Expand Up @@ -71,8 +72,14 @@ func main() {
target.NewVpaTargetSelectorFetcher(config, kubeClient, factory),
target.NewBeta1TargetSelectorFetcher(config),
)
var limitRangeCalculator limitrange.LimitRangeCalculator
limitRangeCalculator, err = limitrange.NewLimitsRangeCalculator(factory)
if err != nil {
klog.Errorf("Failed to create limitRangeCalculator, falling back to not checking limits. Error message: %s", err)
limitRangeCalculator = limitrange.NewNoopLimitsCalculator()
}
// TODO: use SharedInformerFactory in updater
updater, err := updater.NewUpdater(kubeClient, vpaClient, *minReplicas, *evictionToleranceFraction, vpa_api_util.NewCappingRecommendationProcessor(), nil, targetSelectorFetcher)
updater, err := updater.NewUpdater(kubeClient, vpaClient, *minReplicas, *evictionToleranceFraction, vpa_api_util.NewCappingRecommendationProcessor(limitRangeCalculator), nil, targetSelectorFetcher)
if err != nil {
klog.Fatalf("Failed to create updater: %v", err)
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/

package logic
package limitrange

import (
"fmt"
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/

package logic
package limitrange

import (
"testing"
Expand Down
54 changes: 49 additions & 5 deletions vertical-pod-autoscaler/pkg/utils/vpa/capping.go
Original file line number Diff line number Diff line change
Expand Up @@ -22,14 +22,14 @@ import (
apiv1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1beta2"

"k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/limitrange"
"k8s.io/klog"
)

// NewCappingRecommendationProcessor constructs new RecommendationsProcessor that adjusts recommendation
// for given pod to obey VPA resources policy and container limits
func NewCappingRecommendationProcessor() RecommendationProcessor {
return &cappingRecommendationProcessor{}
func NewCappingRecommendationProcessor(limitsRangeCalculator limitrange.LimitRangeCalculator) RecommendationProcessor {
	// The calculator supplies per-namespace LimitRange items used to cap
	// recommendations; callers may pass a noop implementation when no
	// LimitRange checking is wanted (see admission-controller main.go fallback).
	return &cappingRecommendationProcessor{limitsRangeCalculator: limitsRangeCalculator}
}

type cappingAction string
Expand All @@ -43,14 +43,17 @@ func toCappingAnnotation(resourceName apiv1.ResourceName, action cappingAction)
return fmt.Sprintf("%s %s", resourceName, action)
}

type cappingRecommendationProcessor struct{}
type cappingRecommendationProcessor struct {
	// limitsRangeCalculator provides the container LimitRange item for a
	// namespace; used by capProportionallyToMaxLimit to bound recommendations.
	limitsRangeCalculator limitrange.LimitRangeCalculator
}

// Apply returns a recommendation for the given pod, adjusted to obey policy and limits.
func (c *cappingRecommendationProcessor) Apply(
podRecommendation *vpa_types.RecommendedPodResources,
policy *vpa_types.PodResourcePolicy,
conditions []vpa_types.VerticalPodAutoscalerCondition,
pod *apiv1.Pod) (*vpa_types.RecommendedPodResources, ContainerToAnnotationsMap, error) {
// TODO: Annotate if request enforced by maintaining proportion with limit and allowed limit range is in conflict with policy.

if podRecommendation == nil && policy == nil {
// If there is no recommendation and no policies have been defined then no recommendation can be computed.
Expand All @@ -62,7 +65,11 @@ func (c *cappingRecommendationProcessor) Apply(
}
updatedRecommendations := []vpa_types.RecommendedContainerResources{}
containerToAnnotationsMap := ContainerToAnnotationsMap{}
for _, containerRecommendation := range podRecommendation.ContainerRecommendations {
limitAdjustedRecommendation, err := c.capProportionallyToMaxLimit(podRecommendation, pod, containerToAnnotationsMap)
if err != nil {
return nil, nil, err
}
for _, containerRecommendation := range limitAdjustedRecommendation {
container := getContainer(containerRecommendation.ContainerName, pod)

if container == nil {
Expand All @@ -84,6 +91,43 @@ func (c *cappingRecommendationProcessor) Apply(
return &vpa_types.RecommendedPodResources{ContainerRecommendations: updatedRecommendations}, containerToAnnotationsMap, nil
}

// capProportionallyToMaxLimit adjusts each container's recommended requests so
// that the limit implied by the recommendation (keeping the container's
// limit/request ratio, or the LimitRange default when no ratio is available)
// does not exceed the namespace LimitRange maximum. Containers with no
// matching recommendation are dropped from the result. Annotations describing
// any capping are appended to containerToAnnotationsMap.
func (c *cappingRecommendationProcessor) capProportionallyToMaxLimit(podRecommendation *vpa_types.RecommendedPodResources, pod *apiv1.Pod, containerToAnnotationsMap ContainerToAnnotationsMap) ([]vpa_types.RecommendedContainerResources, error) {
	limitRange, err := c.limitsRangeCalculator.GetContainerLimitRangeItem(pod.Namespace)
	if err != nil {
		return nil, fmt.Errorf("error obtaining limit range: %s", err)
	}
	if limitRange == nil {
		// No LimitRange in the namespace: nothing to cap against.
		return podRecommendation.ContainerRecommendations, nil
	}
	defaultCPULimit := limitRange.Default.Cpu()
	defaultMemLimit := limitRange.Default.Memory()
	maxCPULimit := limitRange.Max.Cpu()
	maxMemLimit := limitRange.Max.Memory()

	result := make([]vpa_types.RecommendedContainerResources, 0, len(pod.Spec.Containers))
	for i := range pod.Spec.Containers {
		container := &pod.Spec.Containers[i]
		recommendation := GetRecommendationForContainer(container.Name, podRecommendation)
		if recommendation == nil {
			klog.V(2).Infof("no matching recommendation found for container %s", container.Name)
			continue
		}
		// Limits implied by the recommended requests, preserving the
		// container's existing limit/request proportion.
		cpuLimit, _ := GetProportionalLimit(container.Resources.Limits.Cpu(), container.Resources.Requests.Cpu(), recommendation.Target.Cpu(), defaultCPULimit)
		memLimit, _ := GetProportionalLimit(container.Resources.Limits.Memory(), container.Resources.Requests.Memory(), recommendation.Target.Memory(), defaultMemLimit)
		capped := ProportionallyCapResourcesToMaxLimit(recommendation.Target, cpuLimit, memLimit, maxCPULimit, maxMemLimit)
		if capped.Requests.Cpu().MilliValue() != recommendation.Target.Cpu().MilliValue() {
			containerToAnnotationsMap[container.Name] = append(containerToAnnotationsMap[container.Name], "changed CPU limit to fit within limit range")
		}
		if capped.Requests.Memory().Value() != recommendation.Target.Memory().Value() {
			// NOTE(review): "memoery" is a typo, but the capping test asserts
			// this exact string — fix both sides together in one change.
			containerToAnnotationsMap[container.Name] = append(containerToAnnotationsMap[container.Name], "changed memoery limit to fit within limit range")
		}
		scaled := recommendation.DeepCopy()
		scaled.Target[apiv1.ResourceCPU] = *capped.Requests.Cpu()
		scaled.Target[apiv1.ResourceMemory] = *capped.Requests.Memory()
		result = append(result, *scaled)
	}
	return result, nil
}

// getCappedRecommendationForContainer returns a recommendation for the given container, adjusted to obey policy and limits.
func getCappedRecommendationForContainer(
container apiv1.Container,
Expand Down
72 changes: 69 additions & 3 deletions vertical-pod-autoscaler/pkg/utils/vpa/capping_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -41,7 +41,7 @@ func TestRecommendationNotAvailable(t *testing.T) {
}
policy := vpa_types.PodResourcePolicy{}

res, annotations, err := NewCappingRecommendationProcessor().Apply(&podRecommendation, &policy, nil, pod)
res, annotations, err := NewCappingRecommendationProcessor(&fakeLimitRangeCalculator{}).Apply(&podRecommendation, &policy, nil, pod)
assert.Nil(t, err)
assert.Empty(t, annotations)
assert.Empty(t, res.ContainerRecommendations)
Expand Down Expand Up @@ -84,7 +84,7 @@ func TestRecommendationCappedToMinMaxPolicy(t *testing.T) {
},
}

res, annotations, err := NewCappingRecommendationProcessor().Apply(&podRecommendation, &policy, nil, pod)
res, annotations, err := NewCappingRecommendationProcessor(&fakeLimitRangeCalculator{}).Apply(&podRecommendation, &policy, nil, pod)
assert.Nil(t, err)
assert.Equal(t, apiv1.ResourceList{
apiv1.ResourceCPU: *resource.NewScaledQuantity(40, 1),
Expand Down Expand Up @@ -146,7 +146,7 @@ func TestApply(t *testing.T) {
pod := test.Pod().WithName("pod1").AddContainer(test.BuildTestContainer("ctr-name", "", "")).Get()

for _, testCase := range applyTestCases {
res, _, err := NewCappingRecommendationProcessor().Apply(
res, _, err := NewCappingRecommendationProcessor(&fakeLimitRangeCalculator{}).Apply(
testCase.PodRecommendation, testCase.Policy, nil, pod)
assert.Equal(t, testCase.ExpectedPodRecommendation, res)
assert.Equal(t, testCase.ExpectedError, err)
Expand Down Expand Up @@ -215,3 +215,69 @@ func TestApplyVpa(t *testing.T) {
apiv1.ResourceMemory: *resource.NewScaledQuantity(4500, 1),
}, res.ContainerRecommendations[0].UpperBound)
}

// fakeLimitRangeCalculator is a test double that always returns the configured
// LimitRangeItem, regardless of the namespace asked for.
type fakeLimitRangeCalculator struct {
	limitRange apiv1.LimitRangeItem
}

// GetContainerLimitRangeItem returns the fixed limit range; it never fails.
// Receiver renamed from the stale `nlrc` (copied from the noop calculator)
// for consistency with the type's name.
func (f *fakeLimitRangeCalculator) GetContainerLimitRangeItem(namespace string) (*apiv1.LimitRangeItem, error) {
	return &f.limitRange, nil
}

// TestApplyCapsToLimitRange verifies that Apply scales a recommendation down
// when the limit implied by it would exceed the namespace LimitRange maximum:
// with a 1:1 limit/request ratio and a max of 1 CPU / 1G, the 2 CPU / 10G
// target is capped to 1 CPU / 1G, and capping annotations are emitted.
func TestApplyCapsToLimitRange(t *testing.T) {
	// Container-type LimitRange allowing at most 1 CPU and 1G of memory.
	limitRange := apiv1.LimitRangeItem{
		Type: apiv1.LimitTypeContainer,
		Max: apiv1.ResourceList{
			apiv1.ResourceCPU:    resource.MustParse("1"),
			apiv1.ResourceMemory: resource.MustParse("1G"),
		},
	}
	// Recommendation deliberately above the LimitRange maximum.
	recommendation := vpa_types.RecommendedPodResources{
		ContainerRecommendations: []vpa_types.RecommendedContainerResources{
			{
				ContainerName: "container",
				Target: apiv1.ResourceList{
					apiv1.ResourceCPU:    resource.MustParse("2"),
					apiv1.ResourceMemory: resource.MustParse("10G"),
				},
			},
		},
	}
	// Pod with equal requests and limits, i.e. a 1:1 limit/request ratio.
	pod := apiv1.Pod{
		Spec: apiv1.PodSpec{
			Containers: []apiv1.Container{
				{
					Name: "container",
					Resources: apiv1.ResourceRequirements{
						Requests: apiv1.ResourceList{
							apiv1.ResourceCPU:    resource.MustParse("1"),
							apiv1.ResourceMemory: resource.MustParse("1G"),
						},
						Limits: apiv1.ResourceList{
							apiv1.ResourceCPU:    resource.MustParse("1"),
							apiv1.ResourceMemory: resource.MustParse("1G"),
						},
					},
				},
			},
		},
	}
	// Capped target; the memory quantity comes out of the proportional
	// scaling arithmetic in milli-units (1000000000000m == 1G).
	expectedRecommendation := vpa_types.RecommendedPodResources{
		ContainerRecommendations: []vpa_types.RecommendedContainerResources{
			{
				ContainerName: "container",
				Target: apiv1.ResourceList{
					apiv1.ResourceCPU:    resource.MustParse("1000m"),
					apiv1.ResourceMemory: resource.MustParse("1000000000000m"),
				},
			},
		},
	}

	calculator := fakeLimitRangeCalculator{limitRange}
	processor := NewCappingRecommendationProcessor(&calculator)
	processedRecommendation, annotations, err := processor.Apply(&recommendation, nil, nil, &pod)
	assert.NoError(t, err)
	// NOTE(review): "memoery" matches a typo in the production annotation
	// string; if that string is corrected, update this assertion with it.
	assert.Equal(t, map[string][]string{"container": {"changed CPU limit to fit within limit range", "changed memoery limit to fit within limit range"}}, annotations)
	assert.Equal(t, expectedRecommendation, *processedRecommendation)
}
Original file line number Diff line number Diff line change
Expand Up @@ -58,7 +58,7 @@ func GetProportionalLimit(originalLimit, originalRequest, recommendedRequest, de
return &result, ""
}
result, capped := scaleQuantityProportionally( /*scaledQuantity=*/ originalLimit /*scaleBase=*/, originalRequest /*scaleResult=*/, recommendedRequest)
if capped {
if !capped {
return result, ""
}
return result, fmt.Sprintf(
Expand Down Expand Up @@ -92,7 +92,7 @@ func proportionallyCapLimitToMax(recommendedRequest, recommendedLimit, maxLimit
return scaledRequest, maxLimit
}

// ProportionallyCapResourcesToMaxLimit caps CPU and memory limit to maximu and scales requests to maintain limit/request ratio.
// ProportionallyCapResourcesToMaxLimit caps CPU and memory limit to maximum and scales requests to maintain limit/request ratio.
func ProportionallyCapResourcesToMaxLimit(recommendedRequests v1.ResourceList, cpuLimit, memLimit, maxCpuLimit, maxMemLimit *resource.Quantity) ContainerResources {
scaledCpuRequest, scaledCpuLimit := proportionallyCapLimitToMax(recommendedRequests.Cpu(), cpuLimit, maxCpuLimit)
scaledMemRequest, scaledMemLimit := proportionallyCapLimitToMax(recommendedRequests.Memory(), memLimit, maxMemLimit)
Expand Down
Loading

0 comments on commit 5094dc0

Please sign in to comment.