Skip to content

Commit

Permalink
e2e/autoscaler: consume MaxNodesTotalReached event
Browse files Browse the repository at this point in the history
The previous behaviour was to scale out by waiting for the expected
scale up events to be generated on the node groups. After that had
completed we then waited for a further 60s to ensure that there were
no further scale up attempts as we had explicitly capped the cluster
size.

This commit changes the behaviour from waiting for 60s to explicitly
waiting for the MaxNodesTotalReached event to be generated. We use
that event to assert that the cluster will not grow beyond the
specified maximum.

This PR also helps to validate that we carry two upstream fixes that
were made with respect to calculating when the maximum size of the
cluster is reached:
- kubernetes/autoscaler#1516
- kubernetes/autoscaler#1771
  • Loading branch information
frobware committed Jun 14, 2019
1 parent 0796360 commit 6324af3
Showing 1 changed file with 22 additions and 4 deletions.
26 changes: 22 additions & 4 deletions pkg/e2e/autoscaler/autoscaler.go
Original file line number Diff line number Diff line change
Expand Up @@ -183,6 +183,19 @@ func newScaleDownCounter(w *eventWatcher, v uint32) *eventCounter {
return c
}

// newMaxNodesTotalReachedCounter returns an enabled eventCounter that
// increments once for every MaxNodesTotalReached event emitted by the
// cluster-autoscaler, i.e. every time the autoscaler reports that the
// cluster has hit its configured maximum node count.
func newMaxNodesTotalReachedCounter(w *eventWatcher, v uint32) *eventCounter {
	matchesMaxNodesTotal := func(event *corev1.Event) bool {
		if event.Source.Component != clusterAutoscalerComponent {
			return false
		}
		if event.Reason != clusterAutoscalerMaxNodesTotalReached {
			return false
		}
		if event.InvolvedObject.Kind != clusterAutoscalerObjectKind {
			return false
		}
		return strings.HasPrefix(event.Message, "Max total nodes in cluster reached")
	}

	counter := newEventCounter(w, matchesMaxNodesTotal, v, increment)
	counter.enable()
	return counter
}

func remaining(t time.Time) time.Duration {
return t.Sub(time.Now()).Round(time.Second)
}
Expand Down Expand Up @@ -268,6 +281,7 @@ var _ = g.Describe("[Feature:Machines][Serial] Autoscaler should", func() {
scaledGroups[path.Join(machineSets[i].Namespace, machineSets[i].Name)] = false
}
scaleUpCounter := newScaleUpCounter(eventWatcher, 0, scaledGroups)
maxNodesTotalReachedCounter := newMaxNodesTotalReachedCounter(eventWatcher, 0)
workload := newWorkLoad()
o.Expect(client.Create(context.TODO(), workload)).Should(o.Succeed())
cleanupObjects = append(cleanupObjects, runtime.Object(workload))
Expand All @@ -286,10 +300,14 @@ var _ = g.Describe("[Feature:Machines][Serial] Autoscaler should", func() {
// clusterExpansionSize -1). We run for a period of
// time asserting that the cluster does not exceed the
// capped size.
//
// We do this by waiting for the autoscaler to emit a
// MaxNodesTotalReached event, which signals that it will
// not grow the cluster beyond the configured maximum.
testDuration = time.Now().Add(time.Duration(e2e.WaitShort))
o.Eventually(func() uint32 {
v := maxNodesTotalReachedCounter.get()
glog.Infof("[%s remaining] Waiting for %s to generate a %q event; observed %v",
remaining(testDuration), clusterAutoscalerComponent, clusterAutoscalerMaxNodesTotalReached, v)
return v
}, e2e.WaitShort, 3*time.Second).Should(o.BeNumerically(">=", 1))

testDuration = time.Now().Add(time.Duration(e2e.WaitShort))
o.Consistently(func() bool {
v := scaleUpCounter.get()
Expand Down

0 comments on commit 6324af3

Please sign in to comment.