Configurable container limit scaling
johanneswuerbach committed May 16, 2020
1 parent 7b7d704 commit 3e664f1
Showing 15 changed files with 171 additions and 18 deletions.
1 change: 0 additions & 1 deletion vertical-pod-autoscaler/e2e/go.mod
@@ -18,7 +18,6 @@ require (
github.com/onsi/gomega v1.5.0
github.com/prometheus/procfs v0.0.6 // indirect
github.com/spf13/pflag v1.0.5 // indirect
golang.org/x/crypto v0.0.0-20200221231518-2aa609cf4a9d // indirect
golang.org/x/net v0.0.0-20191112182307-2180aed22343 // indirect
golang.org/x/sys v0.0.0-20191112214154-59a1497f0cea // indirect
google.golang.org/appengine v1.6.5 // indirect
2 changes: 0 additions & 2 deletions vertical-pod-autoscaler/e2e/go.sum
@@ -545,8 +545,6 @@ golang.org/x/crypto v0.0.0-20190320223903-b7391e95e576/go.mod h1:djNgcEr1/C05ACk
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8 h1:1wopBVtVdWnn03fZelqdXTqk7U7zPQCb+T4rbU9ZEoU=
golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191112222119-e1110fd1c708 h1:pXVtWnwHkrWD9ru3sDxY/qFK/bfc0egRovX91EjWjf4=
golang.org/x/crypto v0.0.0-20191112222119-e1110fd1c708/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200221231518-2aa609cf4a9d h1:1ZiEyfaQIg3Qh0EoqpwAakHVhecoE5wlSg5GjnafJGw=
golang.org/x/crypto v0.0.0-20200221231518-2aa609cf4a9d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
38 changes: 38 additions & 0 deletions vertical-pod-autoscaler/e2e/v1/admission_controller.go
@@ -166,6 +166,44 @@ var _ = AdmissionControllerE2eDescribe("Admission-controller", func() {
}
})

ginkgo.It("keeps limits unchanged when limit scaling is disabled", func() {
d := NewHamsterDeploymentWithResourcesAndLimits(f,
ParseQuantityOrDie("100m") /*cpu request*/, ParseQuantityOrDie("100Mi"), /*memory request*/
ParseQuantityOrDie("500m") /*cpu limit*/, ParseQuantityOrDie("500Mi") /*memory limit*/)

ginkgo.By("Setting up a VPA CRD")
vpaCRD := NewVPA(f, "hamster-vpa", hamsterTargetRef)
vpaCRD.Status.Recommendation = &vpa_types.RecommendedPodResources{
ContainerRecommendations: []vpa_types.RecommendedContainerResources{{
ContainerName: "hamster",
Target: apiv1.ResourceList{
apiv1.ResourceCPU: ParseQuantityOrDie("250m"),
apiv1.ResourceMemory: ParseQuantityOrDie("200Mi"),
},
}},
}
containerLimitScalingOff := vpa_types.ContainerLimitScalingModeOff
vpaCRD.Spec.ResourcePolicy = &vpa_types.PodResourcePolicy{
ContainerPolicies: []vpa_types.ContainerResourcePolicy{{
ContainerName: "hamster",
LimitMode: &containerLimitScalingOff,
}},
}
InstallVPA(f, vpaCRD)

ginkgo.By("Setting up a hamster deployment")
podList := startDeploymentPods(f, d)

// Originally the pods had 100m CPU and 100Mi of memory, but the admission controller
// should change the requests to 250m CPU and 200Mi of memory. Limits should stay unchanged.
for _, pod := range podList.Items {
gomega.Expect(*pod.Spec.Containers[0].Resources.Requests.Cpu()).To(gomega.Equal(ParseQuantityOrDie("250m")))
gomega.Expect(*pod.Spec.Containers[0].Resources.Requests.Memory()).To(gomega.Equal(ParseQuantityOrDie("200Mi")))
gomega.Expect(*pod.Spec.Containers[0].Resources.Limits.Cpu()).To(gomega.Equal(ParseQuantityOrDie("500m")))
gomega.Expect(*pod.Spec.Containers[0].Resources.Limits.Memory()).To(gomega.Equal(ParseQuantityOrDie("500Mi")))
}
})

ginkgo.It("caps request according to container max limit set in LimitRange", func() {
startCpuRequest := ParseQuantityOrDie("100m")
startCpuLimit := ParseQuantityOrDie("150m")

@@ -46,7 +46,7 @@ func NewProvider(calculator limitrange.LimitRangeCalculator,
}

// GetContainersResources returns the recommended resources for each container in the given pod in the same order they are specified in the pod.Spec.
func GetContainersResources(pod *core.Pod, podRecommendation vpa_types.RecommendedPodResources, limitRange *core.LimitRangeItem,
func GetContainersResources(pod *core.Pod, vpaResourcePolicy *vpa_types.PodResourcePolicy, podRecommendation vpa_types.RecommendedPodResources, limitRange *core.LimitRangeItem,
annotations vpa_api_util.ContainerToAnnotationsMap) []vpa_api_util.ContainerResources {
resources := make([]vpa_api_util.ContainerResources, len(pod.Spec.Containers))
for i, container := range pod.Spec.Containers {
@@ -60,17 +60,29 @@ func GetContainersResources(pod *core.Pod, podRecommendation vpa_types.Recommend
if limitRange != nil {
defaultLimit = limitRange.Default
}
proportionalLimits, limitAnnotations := vpa_api_util.GetProportionalLimit(container.Resources.Limits, container.Resources.Requests, recommendation.Target, defaultLimit)
if proportionalLimits != nil {
resources[i].Limits = proportionalLimits
if len(limitAnnotations) > 0 {
annotations[container.Name] = append(annotations[container.Name], limitAnnotations...)
containerLimitPolicy := GetContainerLimitPolicy(container.Name, vpaResourcePolicy)
if containerLimitPolicy == vpa_types.ContainerLimitScalingModeAuto {
proportionalLimits, limitAnnotations := vpa_api_util.GetProportionalLimit(container.Resources.Limits, container.Resources.Requests, recommendation.Target, defaultLimit)
if proportionalLimits != nil {
resources[i].Limits = proportionalLimits
if len(limitAnnotations) > 0 {
annotations[container.Name] = append(annotations[container.Name], limitAnnotations...)
}
}
}
}
return resources
}

// GetContainerLimitPolicy returns the limit scaling policy for the container with the given name.
func GetContainerLimitPolicy(name string, vpaResourcePolicy *vpa_types.PodResourcePolicy) vpa_types.ContainerLimitScalingMode {
containerPolicy := vpa_api_util.GetContainerResourcePolicy(name, vpaResourcePolicy)
if containerPolicy == nil || containerPolicy.LimitMode == nil {
return vpa_types.ContainerLimitScalingModeAuto
}
return *containerPolicy.LimitMode
}

// GetContainersResourcesForPod returns recommended request for a given pod and associated annotations.
// The returned slice corresponds 1-1 to containers in the Pod.
func (p *recommendationProvider) GetContainersResourcesForPod(pod *core.Pod, vpa *vpa_types.VerticalPodAutoscaler) ([]vpa_api_util.ContainerResources, vpa_api_util.ContainerToAnnotationsMap, error) {
@@ -95,6 +107,10 @@ func (p *recommendationProvider) GetContainersResourcesForPod(pod *core.Pod, vpa
if err != nil {
return nil, nil, fmt.Errorf("error getting containerLimitRange: %s", err)
}
containerResources := GetContainersResources(pod, *recommendedPodResources, containerLimitRange, annotations)
var resourcePolicy *vpa_types.PodResourcePolicy
if vpa.Spec.UpdatePolicy == nil || vpa.Spec.UpdatePolicy.UpdateMode == nil || *vpa.Spec.UpdatePolicy.UpdateMode != vpa_types.UpdateModeOff {
resourcePolicy = vpa.Spec.ResourcePolicy
}
containerResources := GetContainersResources(pod, resourcePolicy, *recommendedPodResources, containerLimitRange, annotations)
return containerResources, annotations, nil
}
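
For readers skimming the diff, the sketch below is not part of this commit; it shows in isolation how the effective limit scaling mode is resolved: an explicit LimitMode wins, and a missing pod policy or an unset LimitMode falls back to "Auto". The types and the lookup are simplified stand-ins for the vpa_types definitions and the vpa_api_util.GetContainerResourcePolicy helper used above, matching policies by exact container name only.

package main

import "fmt"

// Simplified stand-ins for the vpa_types definitions introduced in this commit.
type ContainerLimitScalingMode string

const (
	LimitScalingAuto ContainerLimitScalingMode = "Auto"
	LimitScalingOff  ContainerLimitScalingMode = "Off"
)

type ContainerResourcePolicy struct {
	ContainerName string
	LimitMode     *ContainerLimitScalingMode
}

type PodResourcePolicy struct {
	ContainerPolicies []ContainerResourcePolicy
}

// effectiveLimitMode mirrors GetContainerLimitPolicy: a nil pod policy or an
// unset LimitMode defaults to "Auto"; an explicit LimitMode is returned as-is.
func effectiveLimitMode(name string, policy *PodResourcePolicy) ContainerLimitScalingMode {
	if policy == nil {
		return LimitScalingAuto
	}
	for _, cp := range policy.ContainerPolicies {
		if cp.ContainerName == name && cp.LimitMode != nil {
			return *cp.LimitMode
		}
	}
	return LimitScalingAuto
}

func main() {
	off := LimitScalingOff
	policy := &PodResourcePolicy{
		ContainerPolicies: []ContainerResourcePolicy{{ContainerName: "hamster", LimitMode: &off}},
	}
	fmt.Println(effectiveLimitMode("hamster", policy)) // Off: limits are left as specified in the pod
	fmt.Println(effectiveLimitMode("hamster", nil))    // Auto: limits are scaled with the request
}
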
@@ -100,6 +100,8 @@ func TestUpdateResourceRequests(t *testing.T) {
vpaWithHighMemory := vpaBuilder.WithTarget("2", "1000Mi").WithMaxAllowed("3", "3Gi").Get()
vpaWithExabyteRecommendation := vpaBuilder.WithTarget("1Ei", "1Ei").WithMaxAllowed("1Ei", "1Ei").Get()

limitScalingOffVPA := vpaBuilder.WithLimitMode(vpa_types.ContainerLimitScalingModeOff).Get()

vpaWithEmptyRecommendation := vpaBuilder.Get()
vpaWithEmptyRecommendation.Status.Recommendation = &vpa_types.RecommendedPodResources{}
vpaWithNilRecommendation := vpaBuilder.Get()
@@ -210,6 +212,14 @@
expectedCPULimit: mustParseResourcePointer("4"),
expectedMemLimit: mustParseResourcePointer("400Mi"),
},
{
name: "disabled limit scaling",
pod: podWithDoubleLimit,
vpa: limitScalingOffVPA,
expectedAction: true,
expectedCPU: resource.MustParse("2"),
expectedMem: resource.MustParse("200Mi"),
},
{
name: "limit over int64",
pod: podWithTenfoldLimit,
@@ -134,6 +134,12 @@ func validateVPA(vpa *vpa_types.VerticalPodAutoscaler, isCreate bool) error {
return fmt.Errorf("max resource for %v is lower than min", resource)
}
}
limitMode := policy.LimitMode
if mode != nil && limitMode != nil {
if *mode == vpa_types.ContainerScalingModeOff && *limitMode == vpa_types.ContainerLimitScalingModeAuto {
return fmt.Errorf("Mode can not be Off when LimitScaling is Auto")
}
}
}
}

@@ -36,6 +36,8 @@ func TestValidateVPA(t *testing.T) {
validUpdateMode := vpa_types.UpdateModeOff
badScalingMode := vpa_types.ContainerScalingMode("bad")
validScalingMode := vpa_types.ContainerScalingModeAuto
scalingModeOff := vpa_types.ContainerScalingModeOff
autoLimitScalingMode := vpa_types.ContainerLimitScalingModeAuto
tests := []struct {
name string
vpa vpa_types.VerticalPodAutoscaler
@@ -120,6 +122,23 @@
},
expectError: fmt.Errorf("max resource for cpu is lower than min"),
},
{
name: "scaling off with limit scaling auto",
vpa: vpa_types.VerticalPodAutoscaler{
Spec: vpa_types.VerticalPodAutoscalerSpec{
ResourcePolicy: &vpa_types.PodResourcePolicy{
ContainerPolicies: []vpa_types.ContainerResourcePolicy{
{
ContainerName: "loot box",
Mode: &scalingModeOff,
LimitMode: &autoLimitScalingMode,
},
},
},
},
},
expectError: fmt.Errorf("Mode can not be Off when LimitScaling is Auto"),
},
{
name: "all valid",
vpa: vpa_types.VerticalPodAutoscaler{
17 changes: 17 additions & 0 deletions vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1/types.go
@@ -152,6 +152,11 @@ type ContainerResourcePolicy struct {
// (and possibly applied) by VPA.
// If not specified, the default of [ResourceCPU, ResourceMemory] will be used.
ControlledResources *[]v1.ResourceName `json:"controlledResources,omitempty" patchStrategy:"merge" protobuf:"bytes,5,rep,name=controlledResources"`

// Whether autoscaler limit scaling is enabled for the container. The default is "Auto".
// Enabling limit scaling requires scaling of the container itself to be enabled (Mode must not be "Off").
// +optional
LimitMode *ContainerLimitScalingMode `json:"limitMode,omitempty" protobuf:"bytes,6,opt,name=limitMode"`
}

const (
@@ -171,6 +176,18 @@
ContainerScalingModeOff ContainerScalingMode = "Off"
)

// ContainerLimitScalingMode controls whether autoscaler limit scaling
// is enabled for a specific container.
type ContainerLimitScalingMode string

const (
// ContainerLimitScalingModeAuto means limits are scaled automatically.
// Limit is scaled proportionally to the request.
ContainerLimitScalingModeAuto ContainerLimitScalingMode = "Auto"
// ContainerLimitScalingModeOff means limit scaling is disabled for a container.
ContainerLimitScalingModeOff ContainerLimitScalingMode = "Off"
)

// VerticalPodAutoscalerStatus describes the runtime state of the autoscaler.
type VerticalPodAutoscalerStatus struct {
// The most recently computed amount of resources recommended by the

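The constants above state that under "Auto" the limit is scaled proportionally to the request. The following is a rough, illustrative calculation only, using plain milli-unit integers rather than the resource.Quantity values the real vpa_api_util.GetProportionalLimit operates on, with the request/limit figures from the e2e test earlier in this commit:

package main

import "fmt"

// proportionalLimit illustrates the "Auto" behaviour: the new limit keeps the
// original limit-to-request ratio when the request is rescaled. Quantities are
// plain milli-units here instead of resource.Quantity values.
func proportionalLimit(originalLimitMilli, originalRequestMilli, recommendedRequestMilli int64) int64 {
	return originalLimitMilli * recommendedRequestMilli / originalRequestMilli
}

func main() {
	// Figures from the e2e test in this commit: 100m request, 500m limit,
	// and a 250m recommendation.
	fmt.Println(proportionalLimit(500, 100, 250)) // Auto: the limit would become 1250m
	// With limitMode "Off" the admission controller leaves the 500m limit untouched.
}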

9 changes: 9 additions & 0 deletions vertical-pod-autoscaler/pkg/utils/test/test_vpa.go
@@ -34,6 +34,7 @@ type VerticalPodAutoscalerBuilder interface {
WithCreationTimestamp(timestamp time.Time) VerticalPodAutoscalerBuilder
WithMinAllowed(cpu, memory string) VerticalPodAutoscalerBuilder
WithMaxAllowed(cpu, memory string) VerticalPodAutoscalerBuilder
WithLimitMode(mode vpa_types.ContainerLimitScalingMode) VerticalPodAutoscalerBuilder
WithTarget(cpu, memory string) VerticalPodAutoscalerBuilder
WithLowerBound(cpu, memory string) VerticalPodAutoscalerBuilder
WithTargetRef(targetRef *autoscaling.CrossVersionObjectReference) VerticalPodAutoscalerBuilder
@@ -63,6 +64,7 @@ type verticalPodAutoscalerBuilder struct {
creationTimestamp time.Time
minAllowed core.ResourceList
maxAllowed core.ResourceList
limitMode *vpa_types.ContainerLimitScalingMode
recommendation RecommendationBuilder
conditions []vpa_types.VerticalPodAutoscalerCondition
annotations map[string]string
@@ -115,6 +117,12 @@ func (b *verticalPodAutoscalerBuilder) WithMaxAllowed(cpu, memory string) Vertic
return &c
}

func (b *verticalPodAutoscalerBuilder) WithLimitMode(mode vpa_types.ContainerLimitScalingMode) VerticalPodAutoscalerBuilder {
c := *b
c.limitMode = &mode
return &c
}

func (b *verticalPodAutoscalerBuilder) WithTarget(cpu, memory string) VerticalPodAutoscalerBuilder {
c := *b
c.recommendation = c.recommendation.WithTarget(cpu, memory)
@@ -171,6 +179,7 @@ func (b *verticalPodAutoscalerBuilder) Get() *vpa_types.VerticalPodAutoscaler {
ContainerName: b.containerName,
MinAllowed: b.minAllowed,
MaxAllowed: b.maxAllowed,
LimitMode: b.limitMode,
}}}

recommendation := b.recommendation.WithContainer(b.containerName).Get()
