diff --git a/pkg/gameservers/controller_test.go b/pkg/gameservers/controller_test.go
index f25c9d1642..d87192a741 100644
--- a/pkg/gameservers/controller_test.go
+++ b/pkg/gameservers/controller_test.go
@@ -22,6 +22,7 @@ import (
 	"net/http"
 	"strconv"
 	"testing"
+	"time"
 
 	"agones.dev/agones/pkg/apis/agones"
 	agonesv1 "agones.dev/agones/pkg/apis/agones/v1"
@@ -37,6 +38,7 @@ import (
 	k8serrors "k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/apimachinery/pkg/util/intstr"
@@ -89,7 +91,11 @@ func TestControllerSyncGameServer(t *testing.T) {
 		assert.Equal(t, fixture.ObjectMeta.Name, pod.ObjectMeta.Name)
 		watchPods.Add(pod)
 		// wait for the change to propagate
-		assert.True(t, cache.WaitForCacheSync(context.Background().Done(), mocks.KubeInformerFactory.Core().V1().Pods().Informer().HasSynced))
+		require.Eventually(t, func() bool {
+			list, err := c.podLister.List(labels.Everything())
+			assert.NoError(t, err)
+			return len(list) == 1
+		}, 5*time.Second, time.Second)
 		return true, pod, nil
 	})
 	mocks.AgonesClient.AddReactor("list", "gameservers", func(action k8stesting.Action) (bool, runtime.Object, error) {
@@ -1452,30 +1458,43 @@ func TestControllerGameServerPod(t *testing.T) {
 	}
 
 	t.Run("no pod exists", func(t *testing.T) {
-		c, gs, _, stop, cancel := setup()
+		c, gs, _, _, cancel := setup()
 		defer cancel()
-		cache.WaitForCacheSync(stop, c.gameServerSynced)
+		require.Never(t, func() bool {
+			list, err := c.podLister.List(labels.Everything())
+			assert.NoError(t, err)
+			return len(list) > 0
+		}, time.Second, 100*time.Millisecond)
 
 		_, err := c.gameServerPod(gs)
 		assert.Error(t, err)
 		assert.True(t, k8serrors.IsNotFound(err))
 	})
 
 	t.Run("a pod exists", func(t *testing.T) {
-		c, gs, fakeWatch, stop, cancel := setup()
+		c, gs, fakeWatch, _, cancel := setup()
 		defer cancel()
 		pod, err := gs.Pod()
 		require.NoError(t, err)
 		fakeWatch.Add(pod.DeepCopy())
-		cache.WaitForCacheSync(stop, c.gameServerSynced)
+		require.Eventually(t, func() bool {
+			list, err := c.podLister.List(labels.Everything())
+			assert.NoError(t, err)
+			return len(list) == 1
+		}, 5*time.Second, time.Second)
+
 		pod2, err := c.gameServerPod(gs)
 		require.NoError(t, err)
 		assert.Equal(t, pod, pod2)
 
 		fakeWatch.Delete(pod.DeepCopy())
-		cache.WaitForCacheSync(stop, c.gameServerSynced)
+		require.Eventually(t, func() bool {
+			list, err := c.podLister.List(labels.Everything())
+			assert.NoError(t, err)
+			return len(list) == 0
+		}, 5*time.Second, time.Second)
 		_, err = c.gameServerPod(gs)
 		assert.Error(t, err)
 		assert.True(t, k8serrors.IsNotFound(err))
diff --git a/pkg/gameservers/pernodecounter_test.go b/pkg/gameservers/pernodecounter_test.go
index a29a295a05..e8578961eb 100644
--- a/pkg/gameservers/pernodecounter_test.go
+++ b/pkg/gameservers/pernodecounter_test.go
@@ -16,6 +16,7 @@ package gameservers
 
 import (
 	"testing"
+	"time"
 
 	agonesv1 "agones.dev/agones/pkg/apis/agones/v1"
 	agtesting "agones.dev/agones/pkg/testing"
@@ -26,7 +27,6 @@ import (
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/watch"
 	k8stesting "k8s.io/client-go/testing"
-	"k8s.io/client-go/tools/cache"
 )
 
 const (
@@ -43,8 +43,7 @@ func TestPerNodeCounterGameServerEvents(t *testing.T) {
 	fakeWatch := watch.NewFake()
 	m.AgonesClient.AddWatchReactor("gameservers", k8stesting.DefaultWatchReactor(fakeWatch, nil))
 
-	hasSynced := m.AgonesInformerFactory.Agones().V1().GameServers().Informer().HasSynced
-	stop, cancel := agtesting.StartInformers(m)
+	_, cancel := agtesting.StartInformers(m)
 	defer cancel()
 
 	assert.Empty(t, pnc.Counts())
 
@@ -57,46 +56,47 @@ func TestPerNodeCounterGameServerEvents(t *testing.T) {
 	}
 
 	fakeWatch.Add(gs.DeepCopy())
-	cache.WaitForCacheSync(stop, hasSynced)
-
-	assert.Empty(t, pnc.Counts())
+	require.Eventuallyf(t, func() bool {
+		return len(pnc.Counts()) == 0
+	}, 5*time.Second, time.Second, "Should be empty, instead has %v elements", len(pnc.Counts()))
 
 	gs.Status.State = agonesv1.GameServerStateReady
 	fakeWatch.Add(gs.DeepCopy())
-	cache.WaitForCacheSync(stop, hasSynced)
 
-	counts := pnc.Counts()
-	require.Len(t, counts, 1)
+	var counts map[string]NodeCount
+	require.Eventuallyf(t, func() bool {
+		counts = pnc.Counts()
+		return len(counts) == 1
+	}, 5*time.Second, time.Second, "len should be 1, instead: %v", len(counts))
 	assert.Equal(t, int64(1), counts[name1].Ready)
 	assert.Equal(t, int64(0), counts[name1].Allocated)
 
 	gs.Status.State = agonesv1.GameServerStateAllocated
 	fakeWatch.Add(gs.DeepCopy())
-	cache.WaitForCacheSync(stop, hasSynced)
-	counts = pnc.Counts()
-	require.Len(t, counts, 1)
-	assert.Equal(t, int64(0), counts[name1].Ready)
+	require.Eventuallyf(t, func() bool {
+		counts = pnc.Counts()
+		return len(counts) == 1 && int64(0) == counts[name1].Ready
+	}, 5*time.Second, time.Second, "Ready should be 0, but is instead %v", counts[name1].Ready)
 	assert.Equal(t, int64(1), counts[name1].Allocated)
 
 	gs.Status.State = agonesv1.GameServerStateShutdown
 	fakeWatch.Add(gs.DeepCopy())
-	cache.WaitForCacheSync(stop, hasSynced)
-
-	counts = pnc.Counts()
-	require.Len(t, counts, 1)
+	require.Eventuallyf(t, func() bool {
+		counts = pnc.Counts()
+		return len(counts) == 1 && int64(0) == counts[name1].Allocated
+	}, 5*time.Second, time.Second, "Allocated should be 0, but is instead %v", counts[name1].Allocated)
 	assert.Equal(t, int64(0), counts[name1].Ready)
-	assert.Equal(t, int64(0), counts[name1].Allocated)
 
 	gs.ObjectMeta.Name = "gs2"
 	gs.Status.State = agonesv1.GameServerStateReady
 	gs.Status.NodeName = name2
 	fakeWatch.Add(gs.DeepCopy())
-	cache.WaitForCacheSync(stop, hasSynced)
-
-	counts = pnc.Counts()
-	require.Len(t, counts, 2)
+	require.Eventuallyf(t, func() bool {
+		counts = pnc.Counts()
+		return len(counts) == 2
+	}, 5*time.Second, time.Second, "len should be 2, instead: %v", len(counts))
 	assert.Equal(t, int64(0), counts[name1].Ready)
 	assert.Equal(t, int64(0), counts[name1].Allocated)
 	assert.Equal(t, int64(1), counts[name2].Ready)
@@ -108,14 +108,13 @@ func TestPerNodeCounterGameServerEvents(t *testing.T) {
 	gs.Status.NodeName = name2
 	fakeWatch.Add(gs.DeepCopy())
 
-	cache.WaitForCacheSync(stop, hasSynced)
-
-	counts = pnc.Counts()
-	require.Len(t, counts, 2)
+	require.Eventuallyf(t, func() bool {
+		counts = pnc.Counts()
+		return len(counts) == 2 && int64(1) == counts[name2].Allocated
+	}, 5*time.Second, time.Second, "Allocated should be 1, but is instead %v", counts[name2].Allocated)
 	assert.Equal(t, int64(0), counts[name1].Ready)
 	assert.Equal(t, int64(0), counts[name1].Allocated)
 	assert.Equal(t, int64(1), counts[name2].Ready)
-	assert.Equal(t, int64(1), counts[name2].Allocated)
 }
 
 func TestPerNodeCounterNodeEvents(t *testing.T) {
@@ -128,13 +127,10 @@ func TestPerNodeCounterNodeEvents(t *testing.T) {
 	m.AgonesClient.AddWatchReactor("gameservers", k8stesting.DefaultWatchReactor(gsWatch, nil))
 	m.KubeClient.AddWatchReactor("nodes", k8stesting.DefaultWatchReactor(nodeWatch, nil))
 
-	gsSynced := m.AgonesInformerFactory.Agones().V1().GameServers().Informer().HasSynced
-	nodeSynced := m.KubeInformerFactory.Core().V1().Nodes().Informer().HasSynced
-
-	stop, cancel := agtesting.StartInformers(m)
+	_, cancel := agtesting.StartInformers(m)
 	defer cancel()
 
-	assert.Empty(t, pnc.Counts())
+	require.Empty(t, pnc.Counts())
 
 	gs := &agonesv1.GameServer{
 		ObjectMeta: metav1.ObjectMeta{Name: "gs1", Namespace: defaultNs},
@@ -144,12 +140,14 @@ func TestPerNodeCounterNodeEvents(t *testing.T) {
 
 	gsWatch.Add(gs.DeepCopy())
 	nodeWatch.Add(node.DeepCopy())
-	cache.WaitForCacheSync(stop, gsSynced, nodeSynced)
-	assert.Len(t, pnc.Counts(), 1)
+	require.Eventuallyf(t, func() bool {
+		return len(pnc.Counts()) == 1
+	}, 5*time.Second, time.Second, "Should be 1 element, not %v", len(pnc.Counts()))
 
 	nodeWatch.Delete(node.DeepCopy())
-	cache.WaitForCacheSync(stop, nodeSynced)
-	assert.Empty(t, pnc.Counts())
+	require.Eventually(t, func() bool {
+		return len(pnc.Counts()) == 0
+	}, 5*time.Second, time.Second, "pnc.Counts() should be empty, but instead has %v elements", len(pnc.Counts()))
 }
 
 func TestPerNodeCounterRun(t *testing.T) {
diff --git a/pkg/gameservers/portallocator_test.go b/pkg/gameservers/portallocator_test.go
index 1ea3cec8ab..3b44ce18d7 100644
--- a/pkg/gameservers/portallocator_test.go
+++ b/pkg/gameservers/portallocator_test.go
@@ -19,6 +19,7 @@ import (
 	"strconv"
 	"sync"
 	"testing"
+	"time"
 
 	agonesv1 "agones.dev/agones/pkg/apis/agones/v1"
 	agtesting "agones.dev/agones/pkg/testing"
@@ -27,6 +28,7 @@ import (
 	"github.com/stretchr/testify/require"
 	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/watch"
@@ -445,14 +447,17 @@ func TestPortAllocatorSyncDeleteGameServer(t *testing.T) {
 		return true, nl, nil
 	})
 
-	stop, cancel := agtesting.StartInformers(m, pa.gameServerSynced, pa.nodeSynced)
+	_, cancel := agtesting.StartInformers(m, pa.gameServerSynced, pa.nodeSynced)
 	defer cancel()
 
 	gsWatch.Add(gs1.DeepCopy())
 	gsWatch.Add(gs2.DeepCopy())
 	gsWatch.Add(gs3.DeepCopy())
-
-	assert.True(t, cache.WaitForCacheSync(stop, pa.gameServerSynced))
+	require.Eventually(t, func() bool {
+		list, err := pa.gameServerLister.GameServers(gs1.ObjectMeta.Namespace).List(labels.Everything())
+		assert.NoError(t, err)
+		return len(list) == 3
+	}, 5*time.Second, time.Second)
 
 	err := pa.syncAll()
 	require.NoError(t, err)
@@ -465,7 +470,11 @@ func TestPortAllocatorSyncDeleteGameServer(t *testing.T) {
 
 	// delete allocated gs
 	gsWatch.Delete(gs3.DeepCopy())
-	assert.True(t, cache.WaitForCacheSync(stop, pa.gameServerSynced))
+	require.Eventually(t, func() bool {
+		list, err := pa.gameServerLister.GameServers(gs1.ObjectMeta.Namespace).List(labels.Everything())
+		assert.NoError(t, err)
+		return len(list) == 2
+	}, 5*time.Second, time.Second)
 
 	pa.mutex.RLock() // reading mutable state, so read lock
 	assert.Equal(t, 1, countAllocatedPorts(pa, 10))
@@ -475,7 +484,11 @@ func TestPortAllocatorSyncDeleteGameServer(t *testing.T) {
 	// delete the currently non allocated server, all should be the same
 	// simulated getting an old delete message
 	gsWatch.Delete(gs4.DeepCopy())
-	assert.True(t, cache.WaitForCacheSync(stop, pa.gameServerSynced))
+	require.Never(t, func() bool {
+		list, err := pa.gameServerLister.GameServers(gs1.ObjectMeta.Namespace).List(labels.Everything())
+		assert.NoError(t, err)
+		return len(list) != 2
+	}, time.Second, 100*time.Millisecond)
 	pa.mutex.RLock() // reading mutable state, so read lock
 	assert.Equal(t, 1, countAllocatedPorts(pa, 10))
 	assert.Equal(t, 1, countAllocatedPorts(pa, 11))
diff --git a/pkg/metrics/controller_test.go b/pkg/metrics/controller_test.go
index b1bee6c527..82577c9ddd 100644
--- a/pkg/metrics/controller_test.go
+++ b/pkg/metrics/controller_test.go
@@ -24,13 +24,17 @@ import (
 	agtesting "agones.dev/agones/pkg/testing"
 	"github.com/pkg/errors"
 	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
 	"go.opencensus.io/metric/metricdata"
 	"go.opencensus.io/metric/metricexport"
 	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/util/intstr"
 )
 
+const defaultNs = "default"
+
 type metricExporter struct {
 	metrics []*metricdata.Metric
 }
@@ -65,7 +69,7 @@ func assertMetricData(t *testing.T, exporter *metricExporter, metricName string,
 	assert.NotNil(t, wantedMetric, "No metric found with name: %s", metricName)
 	assert.Equal(t, len(expectedValuesAsMap), len(expected), "Multiple entries in 'expected' slice have the exact same labels")
-	assert.Equal(t, len(wantedMetric.TimeSeries), len(expectedValuesAsMap), "number of timeseries does not match")
+	assert.Equalf(t, len(expectedValuesAsMap), len(wantedMetric.TimeSeries), "number of timeseries does not match under metric: %v", metricName)
 	for _, tsd := range wantedMetric.TimeSeries {
 		actualLabelValues := make([]string, len(tsd.LabelValues))
 		for i, k := range tsd.LabelValues {
@@ -73,9 +77,9 @@ func assertMetricData(t *testing.T, exporter *metricExporter, metricName string,
 		}
 		e, ok := expectedValuesAsMap[serialize(actualLabelValues)]
 		assert.True(t, ok, "no TimeSeries found with labels: %v", actualLabelValues)
-		assert.Equal(t, actualLabelValues, e.labels, "label values don't match")
-		assert.Equal(t, len(tsd.Points), 1, "assertMetricDataValues can only handle a single Point in a TimeSeries")
-		assert.Equal(t, tsd.Points[0].Value, e.val, "metric: %s, tags: %v, values don't match; got: %v, want: %v", metricName, tsd.LabelValues, tsd.Points[0].Value, e.val)
+		assert.Equal(t, e.labels, actualLabelValues, "label values don't match")
+		assert.Equal(t, 1, len(tsd.Points), "assertMetricDataValues can only handle a single Point in a TimeSeries")
+		assert.Equal(t, e.val, tsd.Points[0].Value, "metric: %s, tags: %v, values don't match; got: %v, want: %v", metricName, tsd.LabelValues, tsd.Points[0].Value, e.val)
 	}
 }
 
@@ -99,7 +103,12 @@ func TestControllerGameServerCount(t *testing.T) {
 	c.gsWatch.Modify(gs1)
 
 	c.run(t)
-	c.sync()
+	require.True(t, c.sync())
+	require.Eventually(t, func() bool {
+		gs, err := c.gameServerLister.GameServers(gs1.ObjectMeta.Namespace).Get(gs1.ObjectMeta.Name)
+		assert.NoError(t, err)
+		return gs.Status.State == agonesv1.GameServerStateReady
+	}, 5*time.Second, time.Second)
 	c.collect()
 
 	gs1 = gs1.DeepCopy()
@@ -109,13 +118,31 @@ func TestControllerGameServerCount(t *testing.T) {
 	c.gsWatch.Add(gameServerWithFleetAndState("", agonesv1.GameServerStatePortAllocation))
 
 	c.run(t)
-	c.sync()
+	require.True(t, c.sync())
+	// Port allocation is last, so wait for that to come to the state we expect
+	require.Eventually(t, func() bool {
+		ex := &metricExporter{}
+		reader.ReadAndExport(ex)
+
+		for _, m := range ex.metrics {
+			if m.Descriptor.Name == "gameservers_count" {
+				for _, d := range m.TimeSeries {
+					if d.LabelValues[0].Value == "none" && d.LabelValues[1].Value == defaultNs && d.LabelValues[2].Value == "PortAllocation" {
+						return d.Points[0].Value == int64(2)
+					}
+				}
+			}
+		}
+
+		return false
+	}, 5*time.Second, time.Second)
 	c.collect()
+
 	reader.ReadAndExport(exporter)
 	assertMetricData(t, exporter, "gameservers_count", []expectedMetricData{
exporter, "gameservers_count", []expectedMetricData{ - {labels: []string{"test-fleet", "default", "Ready"}, val: int64(0)}, - {labels: []string{"test-fleet", "default", "Shutdown"}, val: int64(1)}, - {labels: []string{"none", "default", "PortAllocation"}, val: int64(2)}, + {labels: []string{"test-fleet", defaultNs, "Ready"}, val: int64(0)}, + {labels: []string{"test-fleet", defaultNs, "Shutdown"}, val: int64(1)}, + {labels: []string{"none", defaultNs, "PortAllocation"}, val: int64(2)}, }) } @@ -141,22 +168,31 @@ func TestControllerGameServersTotal(t *testing.T) { generateGsEvents(16, agonesv1.GameServerStateStarting, "", c.gsWatch) generateGsEvents(1, agonesv1.GameServerStateUnhealthy, "", c.gsWatch) - c.sync() + expected := 96 + assert.Eventually(t, func() bool { + list, err := c.gameServerLister.GameServers(gs.ObjectMeta.Namespace).List(labels.Everything()) + require.NoError(t, err) + return len(list) == expected + }, 5*time.Second, time.Second) + // write a good message if something goes wrong + list, err := c.gameServerLister.GameServers(gs.ObjectMeta.Namespace).List(labels.Everything()) + require.NoError(t, err) + require.Len(t, list, expected) + reader.ReadAndExport(exporter) assertMetricData(t, exporter, "gameservers_total", []expectedMetricData{ - {labels: []string{"test", "default", "Creating"}, val: int64(16)}, - {labels: []string{"test", "default", "Scheduled"}, val: int64(15)}, - {labels: []string{"test", "default", "Starting"}, val: int64(10)}, - {labels: []string{"test", "default", "Unhealthy"}, val: int64(1)}, - {labels: []string{"none", "default", "Creating"}, val: int64(19)}, - {labels: []string{"none", "default", "Scheduled"}, val: int64(18)}, - {labels: []string{"none", "default", "Starting"}, val: int64(16)}, - {labels: []string{"none", "default", "Unhealthy"}, val: int64(1)}, + {labels: []string{"test", defaultNs, "Creating"}, val: int64(16)}, + {labels: []string{"test", defaultNs, "Scheduled"}, val: int64(15)}, + {labels: []string{"test", defaultNs, "Starting"}, val: int64(10)}, + {labels: []string{"test", defaultNs, "Unhealthy"}, val: int64(1)}, + {labels: []string{"none", defaultNs, "Creating"}, val: int64(19)}, + {labels: []string{"none", defaultNs, "Scheduled"}, val: int64(18)}, + {labels: []string{"none", defaultNs, "Starting"}, val: int64(16)}, + {labels: []string{"none", defaultNs, "Unhealthy"}, val: int64(1)}, }) } func TestControllerFleetReplicasCount(t *testing.T) { - resetMetrics() exporter := &metricExporter{} reader := metricexport.NewReader() @@ -174,18 +210,35 @@ func TestControllerFleetReplicasCount(t *testing.T) { c.fleetWatch.Add(fd) c.fleetWatch.Delete(fd) - c.sync() + // wait until we have a fleet deleted and it's allocation count is 0 + // since that is our last operation + require.Eventually(t, func() bool { + ex := &metricExporter{} + reader.ReadAndExport(ex) + + for _, m := range ex.metrics { + if m.Descriptor.Name == "fleets_replicas_count" { + for _, d := range m.TimeSeries { + if d.LabelValues[0].Value == "fleet-deleted" && d.LabelValues[1].Value == defaultNs && d.LabelValues[2].Value == "total" { + return d.Points[0].Value == int64(0) + } + } + } + } + + return false + }, 5*time.Second, time.Second) reader.ReadAndExport(exporter) assertMetricData(t, exporter, "fleets_replicas_count", []expectedMetricData{ - {labels: []string{"fleet-deleted", "default", "allocated"}, val: int64(0)}, - {labels: []string{"fleet-deleted", "default", "desired"}, val: int64(0)}, - {labels: []string{"fleet-deleted", "default", "ready"}, val: int64(0)}, - {labels: 
[]string{"fleet-deleted", "default", "total"}, val: int64(0)}, - {labels: []string{"fleet-test", "default", "allocated"}, val: int64(2)}, - {labels: []string{"fleet-test", "default", "desired"}, val: int64(5)}, - {labels: []string{"fleet-test", "default", "ready"}, val: int64(1)}, - {labels: []string{"fleet-test", "default", "total"}, val: int64(8)}, + {labels: []string{"fleet-deleted", defaultNs, "allocated"}, val: int64(0)}, + {labels: []string{"fleet-deleted", defaultNs, "desired"}, val: int64(0)}, + {labels: []string{"fleet-deleted", defaultNs, "ready"}, val: int64(0)}, + {labels: []string{"fleet-deleted", defaultNs, "total"}, val: int64(0)}, + {labels: []string{"fleet-test", defaultNs, "allocated"}, val: int64(2)}, + {labels: []string{"fleet-test", defaultNs, "desired"}, val: int64(5)}, + {labels: []string{"fleet-test", defaultNs, "ready"}, val: int64(1)}, + {labels: []string{"fleet-test", defaultNs, "total"}, val: int64(8)}, }) } @@ -220,40 +273,58 @@ func TestControllerFleetAutoScalerState(t *testing.T) { c.fasWatch.Delete(fasDeleted) c.sync() + // wait until we have a fleet deleted and it's allocation count is 0 + // since that is our last operation + require.Eventually(t, func() bool { + ex := &metricExporter{} + reader.ReadAndExport(ex) + + for _, m := range ex.metrics { + if m.Descriptor.Name == "fleet_autoscalers_limited" { + for _, d := range m.TimeSeries { + if d.LabelValues[0].Value == "deleted-fleet" && d.LabelValues[1].Value == "deleted" && d.LabelValues[2].Value == defaultNs { + return d.Points[0].Value == int64(0) + } + } + } + } + + return false + }, 5*time.Second, time.Second) reader.ReadAndExport(exporter) assertMetricData(t, exporter, "fleet_autoscalers_able_to_scale", []expectedMetricData{ - {labels: []string{"first-fleet", "name-switch", "default"}, val: int64(0)}, - {labels: []string{"second-fleet", "name-switch", "default"}, val: int64(1)}, - {labels: []string{"deleted-fleet", "deleted", "default"}, val: int64(0)}, + {labels: []string{"first-fleet", "name-switch", defaultNs}, val: int64(0)}, + {labels: []string{"second-fleet", "name-switch", defaultNs}, val: int64(1)}, + {labels: []string{"deleted-fleet", "deleted", defaultNs}, val: int64(0)}, }) assertMetricData(t, exporter, "fleet_autoscalers_buffer_limits", []expectedMetricData{ - {labels: []string{"first-fleet", "name-switch", "default", "max"}, val: int64(50)}, - {labels: []string{"first-fleet", "name-switch", "default", "min"}, val: int64(10)}, - {labels: []string{"second-fleet", "name-switch", "default", "max"}, val: int64(50)}, - {labels: []string{"second-fleet", "name-switch", "default", "min"}, val: int64(10)}, - {labels: []string{"deleted-fleet", "deleted", "default", "max"}, val: int64(150)}, - {labels: []string{"deleted-fleet", "deleted", "default", "min"}, val: int64(15)}, + {labels: []string{"first-fleet", "name-switch", defaultNs, "max"}, val: int64(50)}, + {labels: []string{"first-fleet", "name-switch", defaultNs, "min"}, val: int64(10)}, + {labels: []string{"second-fleet", "name-switch", defaultNs, "max"}, val: int64(50)}, + {labels: []string{"second-fleet", "name-switch", defaultNs, "min"}, val: int64(10)}, + {labels: []string{"deleted-fleet", "deleted", defaultNs, "max"}, val: int64(150)}, + {labels: []string{"deleted-fleet", "deleted", defaultNs, "min"}, val: int64(15)}, }) assertMetricData(t, exporter, "fleet_autoscalers_buffer_size", []expectedMetricData{ - {labels: []string{"first-fleet", "name-switch", "default", "count"}, val: int64(10)}, - {labels: []string{"second-fleet", 
"name-switch", "default", "count"}, val: int64(10)}, - {labels: []string{"deleted-fleet", "deleted", "default", "percentage"}, val: int64(50)}, + {labels: []string{"first-fleet", "name-switch", defaultNs, "count"}, val: int64(10)}, + {labels: []string{"second-fleet", "name-switch", defaultNs, "count"}, val: int64(10)}, + {labels: []string{"deleted-fleet", "deleted", defaultNs, "percentage"}, val: int64(50)}, }) assertMetricData(t, exporter, "fleet_autoscalers_current_replicas_count", []expectedMetricData{ - {labels: []string{"first-fleet", "name-switch", "default"}, val: int64(0)}, - {labels: []string{"second-fleet", "name-switch", "default"}, val: int64(20)}, - {labels: []string{"deleted-fleet", "deleted", "default"}, val: int64(0)}, + {labels: []string{"first-fleet", "name-switch", defaultNs}, val: int64(0)}, + {labels: []string{"second-fleet", "name-switch", defaultNs}, val: int64(20)}, + {labels: []string{"deleted-fleet", "deleted", defaultNs}, val: int64(0)}, }) assertMetricData(t, exporter, "fleet_autoscalers_desired_replicas_count", []expectedMetricData{ - {labels: []string{"first-fleet", "name-switch", "default"}, val: int64(0)}, - {labels: []string{"second-fleet", "name-switch", "default"}, val: int64(10)}, - {labels: []string{"deleted-fleet", "deleted", "default"}, val: int64(0)}, + {labels: []string{"first-fleet", "name-switch", defaultNs}, val: int64(0)}, + {labels: []string{"second-fleet", "name-switch", defaultNs}, val: int64(10)}, + {labels: []string{"deleted-fleet", "deleted", defaultNs}, val: int64(0)}, }) assertMetricData(t, exporter, "fleet_autoscalers_limited", []expectedMetricData{ - {labels: []string{"first-fleet", "name-switch", "default"}, val: int64(0)}, - {labels: []string{"second-fleet", "name-switch", "default"}, val: int64(1)}, - {labels: []string{"deleted-fleet", "deleted", "default"}, val: int64(0)}, + {labels: []string{"first-fleet", "name-switch", defaultNs}, val: int64(0)}, + {labels: []string{"second-fleet", "name-switch", defaultNs}, val: int64(1)}, + {labels: []string{"deleted-fleet", "deleted", defaultNs}, val: int64(0)}, }) } @@ -269,10 +340,36 @@ func TestControllerGameServersNodeState(t *testing.T) { c.gsWatch.Add(gameServerWithNode("node2")) c.run(t) - c.sync() + reader := metricexport.NewReader() + + // wait until we have a some nodes and gameservers + assert.Eventually(t, func() bool { + ex := &metricExporter{} + reader.ReadAndExport(ex) + + check := 0 + + for _, m := range ex.metrics { + switch m.Descriptor.Name { + case "nodes_count": + for _, d := range m.TimeSeries { + if d.LabelValues[0].Value == "true" { + check++ + } + } + case "gameservers_node_count": + dist := m.TimeSeries[0].Points[0].Value.(*metricdata.Distribution) + if dist.Count == 3 && dist.Sum == 3 && dist.SumOfSquaredDeviation == 2 { + check++ + } + } + } + + return check == 2 + }, time.Minute, time.Second) // give a minute, since we're waiting for the run -> sync to fire. + // now confirm all the details. 
 	exporter := &metricExporter{}
-	reader := metricexport.NewReader()
 	reader.ReadAndExport(exporter)
 	assertMetricData(t, exporter, "gameservers_node_count", []expectedMetricData{
 		{labels: []string{}, val: &metricdata.Distribution{
diff --git a/pkg/metrics/util_test.go b/pkg/metrics/util_test.go
index eef6c7ba7c..d11cbe0f8f 100644
--- a/pkg/metrics/util_test.go
+++ b/pkg/metrics/util_test.go
@@ -76,8 +76,8 @@ func (c *fakeController) run(t *testing.T) {
 	c.sync()
 }
 
-func (c *fakeController) sync() {
-	cache.WaitForCacheSync(c.stop, c.gameServerSynced, c.fleetSynced, c.fasSynced, c.nodeSynced)
+func (c *fakeController) sync() bool {
+	return cache.WaitForCacheSync(c.stop, c.gameServerSynced, c.fleetSynced, c.fasSynced, c.nodeSynced)
 }
 
 type fakeController struct {