From 28b4508b0be7971af0398e39b54e90a7022d4409 Mon Sep 17 00:00:00 2001
From: Andrei Matei
Date: Wed, 28 Nov 2018 17:48:06 -0500
Subject: [PATCH] storage: muck with TestGossipHandlesReplacedNode

The test was using manual replication even though it had no good reason
to. What it does care about is that all the data is fully replicated
before it takes a node down, but that's also the case after cluster
startup with regular replication.

The test used to take 2.2s; now it takes 3s. Not sure why, but I'm
trying to speed up cluster creation separately, so hopefully the delta
will go away. Even with the delta, I think the more vanilla the test
is, the better.

I've also skipped the test under testshort.

Release note: None
---
 pkg/storage/gossip_test.go | 15 +++++----------
 1 file changed, 5 insertions(+), 10 deletions(-)

diff --git a/pkg/storage/gossip_test.go b/pkg/storage/gossip_test.go
index 19adda4ab934..b164b5f6349d 100644
--- a/pkg/storage/gossip_test.go
+++ b/pkg/storage/gossip_test.go
@@ -142,6 +142,10 @@ func TestGossipFirstRange(t *testing.T) {
 // restarted after losing its data) without the cluster breaking.
 func TestGossipHandlesReplacedNode(t *testing.T) {
 	defer leaktest.AfterTest(t)()
+	if testing.Short() {
+		// As of Nov 2018 it takes 3.6s.
+		t.Skip("short")
+	}
 	ctx := context.Background()
 
 	// Shorten the raft tick interval and election timeout to make range leases
@@ -161,19 +165,10 @@ func TestGossipHandlesReplacedNode(t *testing.T) {
 
 	tc := testcluster.StartTestCluster(t, 3,
 		base.TestClusterArgs{
-			// Use manual replication so that we can ensure the range is properly
-			// replicated to all three nodes before stopping one of them.
-			ReplicationMode: base.ReplicationManual,
-			ServerArgs:      serverArgs,
+			ServerArgs: serverArgs,
 		})
 	defer tc.Stopper().Stop(context.TODO())
 
-	// Ensure that the first range is fully replicated before moving on.
-	firstRangeKey := keys.MinKey
-	if _, err := tc.AddReplicas(firstRangeKey, tc.Target(1), tc.Target(2)); err != nil {
-		t.Fatal(err)
-	}
-
 	// Take down the first node and replace it with a new one.
 	oldNodeIdx := 0
 	newServerArgs := serverArgs
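
For reference, a minimal sketch of how the cluster setup in
TestGossipHandlesReplacedNode reads once this patch is applied. The import
paths and the base.TestServerArgs type for serverArgs are assumptions about
the surrounding (unchanged) test code, not part of this diff:

    import (
        "context"
        "testing"

        "github.com/cockroachdb/cockroach/pkg/base"
        "github.com/cockroachdb/cockroach/pkg/testutils/testcluster"
        "github.com/cockroachdb/cockroach/pkg/util/leaktest"
    )

    func TestGossipHandlesReplacedNode(t *testing.T) {
        defer leaktest.AfterTest(t)()
        if testing.Short() {
            // As of Nov 2018 it takes 3.6s.
            t.Skip("short")
        }

        // serverArgs is built as in the existing test; only the cluster args
        // change. Type assumed for the sketch:
        var serverArgs base.TestServerArgs

        // With the default (automatic) replication mode, the cluster comes up
        // with the data replicated to all three nodes, so the manual
        // AddReplicas step is no longer needed.
        tc := testcluster.StartTestCluster(t, 3, base.TestClusterArgs{
            ServerArgs: serverArgs,
        })
        defer tc.Stopper().Stop(context.TODO())

        // Take down the first node and replace it with a new one.
        // ... rest of the test is unchanged ...
    }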