Get capi targetsize from cache
This ensured that access to replicas during scale-down operations was never stale, by reading from the API server: kubernetes#3104.
This preserved that behaviour while moving to the unstructured client: kubernetes#3312.
This regressed that behaviour while trying to reduce the API server load: kubernetes#4443.
This restored the never-stale replicas behaviour at the cost of putting load back on the API server: kubernetes#4634.

Currently, on e.g. a 48-minute cluster, it makes ~1.4k GET requests to the scale subresource.
This PR tries to satisfy both goals: non-stale replicas during scale down, and an API server that is not overloaded. To achieve that, it lets TargetSize, which is called on every autoscaling cluster state loop, come from the cache.
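As a rough sketch of the two read paths (not the provider's exact wiring; cachedReplicas, liveReplicas, cachedObj and scales below are illustrative stand-ins): the cached path reads spec.replicas from the informer-backed unstructured object the provider already holds, while the live path issues a GET against the scale subresource on every call.

package sketch

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/scale"
)

// cachedReplicas reads spec.replicas from an informer-cached copy of the
// MachineSet/MachineDeployment; no API server round trip is needed.
func cachedReplicas(cachedObj *unstructured.Unstructured) (int, error) {
	replicas, found, err := unstructured.NestedInt64(cachedObj.Object, "spec", "replicas")
	if err != nil {
		return 0, err
	}
	if !found {
		return 0, fmt.Errorf("unable to find replicas")
	}
	return int(replicas), nil
}

// liveReplicas hits the scale subresource on every call; calling this from
// every cluster state loop is what drives the GET request volume.
func liveReplicas(ctx context.Context, scales scale.ScalesGetter, namespace, name string, gr schema.GroupResource) (int, error) {
	s, err := scales.Scales(namespace).Get(ctx, gr, name, metav1.GetOptions{})
	if err != nil {
		return 0, err
	}
	return int(s.Spec.Replicas), nil
}

In the diff below, TargetSize switches to the cached path, while DecreaseTargetSize keeps calling scalableResource.Replicas(), which per the history above goes back to the API server, so scale-down decisions do not act on a stale replica count.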

Also note that the scale-down implementation has changed: https://github.com/kubernetes/autoscaler/commits/master/cluster-autoscaler/core/scaledown.
enxebre committed Jul 13, 2022
1 parent b972efd commit f8a54b9
Showing 3 changed files with 22 additions and 3 deletions.
@@ -18,6 +18,7 @@ package clusterapi
 
 import (
 	"fmt"
+	"github.com/pkg/errors"
 
 	corev1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
@@ -52,7 +53,14 @@ func (ng *nodegroup) MaxSize() int {
 // (new nodes finish startup and registration or removed nodes are
 // deleted completely). Implementation required.
 func (ng *nodegroup) TargetSize() (int, error) {
-	return ng.scalableResource.Replicas()
+	replicas, found, err := unstructured.NestedInt64(ng.scalableResource.unstructured.Object, "spec", "replicas")
+	if err != nil {
+		return 0, errors.Wrap(err, "error getting replica count")
+	}
+	if !found {
+		return 0, fmt.Errorf("unable to find replicas")
+	}
+	return int(replicas), nil
 }
 
 // IncreaseSize increases the size of the node group. To delete a node
@@ -163,7 +171,7 @@ func (ng *nodegroup) DecreaseTargetSize(delta int) error {
 		return fmt.Errorf("size decrease must be negative")
 	}
 
-	size, err := ng.TargetSize()
+	size, err := ng.scalableResource.Replicas()
 	if err != nil {
 		return err
 	}
@@ -1072,7 +1072,7 @@ func TestNodeGroupDeleteNodesSequential(t *testing.T) {
 	ng = nodegroups[0]
 
 	// Check the nodegroup is at the expected size
-	actualSize, err := ng.TargetSize()
+	actualSize, err := ng.scalableResource.Replicas()
 	if err != nil {
 		t.Fatalf("unexpected error: %v", err)
 	}
@@ -60,6 +60,17 @@ func TestSetSize(t *testing.T) {
 		if s.Spec.Replicas != int32(updatedReplicas) {
 			t.Errorf("expected %v, got: %v", updatedReplicas, s.Spec.Replicas)
 		}
+
+		replicas, found, err := unstructured.NestedInt64(sr.unstructured.Object, "spec", "replicas")
+		if err != nil {
+			t.Fatal(err)
+		}
+		if !found {
+			t.Fatal("replicas = 0")
+		}
+		if replicas != int64(updatedReplicas) {
+			t.Errorf("expected %v, got: %v", updatedReplicas, replicas)
+		}
 	}
 
 	t.Run("MachineSet", func(t *testing.T) {
