diff --git a/block/manager.go b/block/manager.go
index 1c56553b295..ff336218792 100644
--- a/block/manager.go
+++ b/block/manager.go
@@ -250,6 +250,7 @@ func (m *Manager) SetFraudProofService(fraudProofServ *fraudserv.ProofService) {
 }
 
 func (m *Manager) ProcessFraudProof(ctx context.Context, cancel context.CancelFunc) {
+	defer cancel()
 	// subscribe to state fraud proof
 	sub, err := m.executor.FraudService.Subscribe(types.StateFraudProofType)
 	if err != nil {
@@ -281,7 +282,6 @@ func (m *Manager) ProcessFraudProof(ctx context.Context, cancel context.CancelFu
 
 	// halt chain
 	m.logger.Info("verified fraud proof, halting chain")
-	cancel()
 }
 
 // SyncLoop is responsible for syncing blocks.
diff --git a/block/manager_test.go b/block/manager_test.go
index b14a0d091c8..741c72c34f0 100644
--- a/block/manager_test.go
+++ b/block/manager_test.go
@@ -33,7 +33,8 @@ func TestInitialState(t *testing.T) {
 		NextValidators: types.GetRandomValidatorSet(),
 	}
 
-	ctx := context.Background()
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
 	es, _ := store.NewDefaultInMemoryKVStore()
 	emptyStore := store.New(ctx, es)
@@ -79,6 +80,9 @@ func TestInitialState(t *testing.T) {
 			assert := assert.New(t)
 			logger := log.TestingLogger()
 			dalc := getMockDALC(logger)
+			defer func() {
+				require.NoError(t, dalc.Stop())
+			}()
 			dumbChan := make(chan struct{})
 			agg, err := NewManager(key, conf, c.genesis, c.store, nil, nil, dalc, nil, logger, dumbChan)
 			assert.NoError(err)
diff --git a/da/test/da_test.go b/da/test/da_test.go
index 52a10cb7a68..c5ec3a61dc6 100644
--- a/da/test/da_test.go
+++ b/da/test/da_test.go
@@ -74,8 +74,9 @@ func doTestLifecycle(t *testing.T, dalc da.DataAvailabilityLayerClient) {
 	err = dalc.Start()
 	require.NoError(err)
 
-	err = dalc.Stop()
-	require.NoError(err)
+	defer func() {
+		require.NoError(dalc.Stop())
+	}()
 }
 
 func TestDALC(t *testing.T) {
@@ -89,7 +90,8 @@ func TestDALC(t *testing.T) {
 func doTestDALC(t *testing.T, dalc da.DataAvailabilityLayerClient) {
 	require := require.New(t)
 	assert := assert.New(t)
-	ctx := context.Background()
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
 
 	// mock DALC will advance block height every 100ms
 	conf := []byte{}
@@ -105,6 +107,9 @@ func doTestDALC(t *testing.T, dalc da.DataAvailabilityLayerClient) {
 
 	err = dalc.Start()
 	require.NoError(err)
+	defer func() {
+		require.NoError(dalc.Stop())
+	}()
 
 	// wait a bit more than mockDaBlockTime, so mock can "produce" some blocks
 	time.Sleep(mockDaBlockTime + 20*time.Millisecond)
@@ -151,7 +156,8 @@ func TestRetrieve(t *testing.T) {
 }
 
 func doTestRetrieve(t *testing.T, dalc da.DataAvailabilityLayerClient) {
-	ctx := context.Background()
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
 	require := require.New(t)
 	assert := assert.New(t)
@@ -169,6 +175,9 @@ func doTestRetrieve(t *testing.T, dalc da.DataAvailabilityLayerClient) {
 
 	err = dalc.Start()
 	require.NoError(err)
+	defer func() {
+		require.NoError(dalc.Stop())
+	}()
 
 	// wait a bit more than mockDaBlockTime, so mock can "produce" some blocks
 	time.Sleep(mockDaBlockTime + 20*time.Millisecond)
diff --git a/node/full_client_test.go b/node/full_client_test.go
index b7b06e30965..9282e67b515 100644
--- a/node/full_client_test.go
+++ b/node/full_client_test.go
@@ -114,7 +114,8 @@ func getRPC(t *testing.T) (*mocks.Application, *FullClient) {
 	app.On("InitChain", mock.Anything).Return(abci.ResponseInitChain{})
 	key, _, _ := crypto.GenerateEd25519Key(crand.Reader)
 	signingKey, _, _ := crypto.GenerateEd25519Key(crand.Reader)
-	node, err := newFullNode(context.Background(), config.NodeConfig{DALayer: "mock"}, key, signingKey, proxy.NewLocalClientCreator(app), &tmtypes.GenesisDoc{ChainID: "test"}, log.TestingLogger())
+	ctx := context.Background()
+	node, err := newFullNode(ctx, config.NodeConfig{DALayer: "mock"}, key, signingKey, proxy.NewLocalClientCreator(app), &tmtypes.GenesisDoc{ChainID: "test"}, log.TestingLogger())
 	require.NoError(err)
 	require.NotNil(node)
@@ -223,7 +224,9 @@ func TestGenesisChunked(t *testing.T) {
 
 	err = rpc.node.Start()
 	require.NoError(t, err)
-
+	defer func() {
+		assert.NoError(rpc.node.Stop())
+	}()
 	expectedID = 0
 	gc2, err := rpc.GenesisChunked(context.Background(), expectedID)
 	gotID := gc2.ChunkNumber
@@ -246,7 +249,9 @@ func TestBroadcastTxAsync(t *testing.T) {
 
 	err := rpc.node.Start()
 	require.NoError(t, err)
-
+	defer func() {
+		assert.NoError(rpc.node.Stop())
+	}()
 	res, err := rpc.BroadcastTxAsync(context.Background(), expectedTx)
 	assert.NoError(err)
 	assert.NotNil(res)
@@ -256,9 +261,6 @@ func TestBroadcastTxAsync(t *testing.T) {
 	assert.Empty(res.Codespace)
 	assert.NotEmpty(res.Hash)
 	mockApp.AssertExpectations(t)
-
-	err = rpc.node.Stop()
-	require.NoError(t, err)
 }
 
 func TestBroadcastTxSync(t *testing.T) {
@@ -280,7 +282,9 @@ func TestBroadcastTxSync(t *testing.T) {
 
 	err := rpc.node.Start()
 	require.NoError(t, err)
-
+	defer func() {
+		assert.NoError(rpc.node.Stop())
+	}()
 	mockApp.On("CheckTx", abci.RequestCheckTx{Tx: expectedTx}).Return(expectedResponse)
 
 	res, err := rpc.BroadcastTxSync(context.Background(), expectedTx)
@@ -292,9 +296,6 @@ func TestBroadcastTxSync(t *testing.T) {
 	assert.Equal(expectedResponse.Codespace, res.Codespace)
 	assert.NotEmpty(res.Hash)
 	mockApp.AssertExpectations(t)
-
-	err = rpc.node.Stop()
-	require.NoError(t, err)
 }
 
 func TestBroadcastTxCommit(t *testing.T) {
@@ -331,7 +332,9 @@ func TestBroadcastTxCommit(t *testing.T) {
 	// in order to broadcast, the node must be started
 	err := rpc.node.Start()
 	require.NoError(err)
-
+	defer func() {
+		require.NoError(rpc.node.Stop())
+	}()
 	go func() {
 		time.Sleep(mockTxProcessingTime)
 		err := rpc.node.EventBus().PublishEventTx(tmtypes.EventDataTx{TxResult: abci.TxResult{
@@ -349,9 +352,6 @@ func TestBroadcastTxCommit(t *testing.T) {
 	assert.Equal(expectedCheckResp, res.CheckTx)
 	assert.Equal(expectedDeliverResp, res.DeliverTx)
 	mockApp.AssertExpectations(t)
-
-	err = rpc.node.Stop()
-	require.NoError(err)
 }
 
 func TestGetBlock(t *testing.T) {
@@ -366,7 +366,9 @@ func TestGetBlock(t *testing.T) {
 
 	err := rpc.node.Start()
 	require.NoError(err)
-
+	defer func() {
+		require.NoError(rpc.node.Stop())
+	}()
 	block := getRandomBlock(1, 10)
 	err = rpc.node.Store.SaveBlock(block, &types.Commit{})
 	rpc.node.Store.SetHeight(uint64(block.SignedHeader.Header.Height()))
@@ -377,9 +379,6 @@ func TestGetBlock(t *testing.T) {
 
 	require.NotNil(blockResp)
 	assert.NotNil(blockResp.Block)
-
-	err = rpc.node.Stop()
-	require.NoError(err)
 }
 
 func TestGetCommit(t *testing.T) {
@@ -393,7 +392,9 @@ func TestGetCommit(t *testing.T) {
 
 	err := rpc.node.Start()
 	require.NoError(err)
-
+	defer func() {
+		require.NoError(rpc.node.Stop())
+	}()
 	for _, b := range blocks {
 		err = rpc.node.Store.SaveBlock(b, &types.Commit{})
 		rpc.node.Store.SetHeight(uint64(b.SignedHeader.Header.Height()))
@@ -415,9 +416,6 @@ func TestGetCommit(t *testing.T) {
 		require.NotNil(commit)
 		assert.Equal(blocks[3].SignedHeader.Header.Height(), commit.Height)
 	})
-
-	err = rpc.node.Stop()
-	require.NoError(err)
 }
 
 func TestBlockSearch(t *testing.T) {
@@ -489,7 +487,9 @@ func TestGetBlockByHash(t *testing.T) {
 
 	err := rpc.node.Start()
 	require.NoError(err)
-
+	defer func() {
+		require.NoError(rpc.node.Stop())
+	}()
 	block := getRandomBlock(1, 10)
 	err = rpc.node.Store.SaveBlock(block, &types.Commit{})
 	require.NoError(err)
@@ -509,9 +509,6 @@ func TestGetBlockByHash(t *testing.T) {
 
 	require.NotNil(blockResp)
 	assert.NotNil(blockResp.Block)
-
-	err = rpc.node.Stop()
-	require.NoError(err)
 }
 
 func TestTx(t *testing.T) {
@@ -546,7 +543,9 @@ func TestTx(t *testing.T) {
 
 	err = rpc.node.Start()
 	require.NoError(err)
-
+	defer func() {
+		require.NoError(rpc.node.Stop())
+	}()
 	tx1 := tmtypes.Tx("tx1")
 	res, err := rpc.BroadcastTxSync(context.Background(), tx1)
 	assert.NoError(err)
@@ -595,6 +594,9 @@ func TestUnconfirmedTxs(t *testing.T) {
 
 			err := rpc.node.Start()
 			require.NoError(err)
+			defer func() {
+				require.NoError(rpc.node.Stop())
+			}()
 
 			for _, tx := range c.txs {
 				res, err := rpc.BroadcastTxAsync(context.Background(), tx)
@@ -631,6 +633,9 @@ func TestUnconfirmedTxsLimit(t *testing.T) {
 
 	err := rpc.node.Start()
 	require.NoError(err)
+	defer func() {
+		require.NoError(rpc.node.Stop())
+	}()
 
 	tx1 := tmtypes.Tx("tx1")
 	tx2 := tmtypes.Tx("another tx")
@@ -742,7 +747,9 @@ func TestBlockchainInfo(t *testing.T) {
 	}
 }
 
-func createGenesisValidators(numNodes int, appCreator func(require *require.Assertions, vKeyToRemove tmcrypto.PrivKey, wg *sync.WaitGroup) *mocks.Application, require *require.Assertions, wg *sync.WaitGroup) *FullClient {
+func createGenesisValidators(t *testing.T, numNodes int, appCreator func(require *require.Assertions, vKeyToRemove tmcrypto.PrivKey, wg *sync.WaitGroup) *mocks.Application, wg *sync.WaitGroup) *FullClient {
+	t.Helper()
+	require := require.New(t)
 	vKeys := make([]tmcrypto.PrivKey, numNodes)
 	apps := make([]*mocks.Application, numNodes)
 	nodes := make([]*FullNode, numNodes)
@@ -762,6 +769,9 @@ func createGenesisValidators(numNodes int, appCreator func(require *require.Asse
 	require.Nil(err)
 	err = dalc.Start()
 	require.Nil(err)
+	t.Cleanup(func() {
+		require.NoError(dalc.Stop())
+	})
 
 	for i := 0; i < len(nodes); i++ {
 		nodeKey := &p2p.NodeKey{
@@ -797,8 +807,13 @@ func createGenesisValidators(numNodes int, appCreator func(require *require.Asse
 	require.NotNil(rpc)
 
 	for i := 0; i < len(nodes); i++ {
+		node := nodes[i]
 		err := nodes[i].Start()
 		require.NoError(err)
+
+		t.Cleanup(func() {
+			require.NoError(node.Stop())
+		})
 	}
 	return rpc
 }
@@ -848,13 +863,11 @@ func createApp(require *require.Assertions, vKeyToRemove tmcrypto.PrivKey, wg *s
 
 // Tests moving from two validators to one validator and then back to two validators
 func TestValidatorSetHandling(t *testing.T) {
 	assert := assert.New(t)
-	require := require.New(t)
 
 	var wg sync.WaitGroup
 	numNodes := 2
-	rpc := createGenesisValidators(numNodes, createApp, require, &wg)
-
+	rpc := createGenesisValidators(t, numNodes, createApp, &wg)
 	wg.Wait()
 
 	// test first blocks
@@ -879,11 +892,9 @@ func TestValidatorSetHandling(t *testing.T) {
 
 // Tests moving from a centralized validator to empty validator set
 func TestValidatorSetHandlingBased(t *testing.T) {
 	assert := assert.New(t)
-	require := require.New(t)
-
 	var wg sync.WaitGroup
 	numNodes := 1
-	rpc := createGenesisValidators(numNodes, createApp, require, &wg)
+	rpc := createGenesisValidators(t, numNodes, createApp, &wg)
 
 	wg.Wait()
@@ -925,8 +936,10 @@ func TestMempool2Nodes(t *testing.T) {
 	app.On("DeliverTx", mock.Anything).Return(abci.ResponseDeliverTx{})
 	app.On("GetAppHash", mock.Anything).Return(abci.ResponseGetAppHash{})
 
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
 	// make node1 an aggregator, so that node2 can start gracefully
-	node1, err := newFullNode(context.Background(), config.NodeConfig{
+	node1, err := newFullNode(ctx, config.NodeConfig{
 		Aggregator: true,
 		DALayer:    "mock",
 		P2P: config.P2PConfig{
@@ -939,7 +952,7 @@ func TestMempool2Nodes(t *testing.T) {
 	require.NoError(err)
 	require.NotNil(node1)
 
-	node2, err := newFullNode(context.Background(), config.NodeConfig{
+	node2, err := newFullNode(ctx, config.NodeConfig{
 		DALayer: "mock",
 		P2P: config.P2PConfig{
 			ListenAddress: "/ip4/127.0.0.1/tcp/9002",
@@ -951,31 +964,35 @@ func TestMempool2Nodes(t *testing.T) {
 
 	err = node1.Start()
 	require.NoError(err)
-	time.Sleep(1 * time.Second)
+	defer func() {
+		require.NoError(node1.Stop())
+	}()
 
 	err = node2.Start()
 	require.NoError(err)
+	defer func() {
+		require.NoError(node2.Stop())
+	}()
 
 	time.Sleep(1 * time.Second)
-
-	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
-	defer cancel()
+	timeoutCtx, timeoutCancel := context.WithTimeout(context.Background(), 3*time.Second)
+	defer timeoutCancel()
 
 	local := NewFullClient(node1)
 	require.NotNil(local)
 
 	// broadcast the bad Tx, this should not be propogated or added to the local mempool
-	resp, err := local.BroadcastTxSync(ctx, []byte("bad"))
+	resp, err := local.BroadcastTxSync(timeoutCtx, []byte("bad"))
 	assert.NoError(err)
 	assert.NotNil(resp)
 	// broadcast the good Tx, this should be propogated and added to the local mempool
-	resp, err = local.BroadcastTxSync(ctx, []byte("good"))
+	resp, err = local.BroadcastTxSync(timeoutCtx, []byte("good"))
 	assert.NoError(err)
 	assert.NotNil(resp)
 	// broadcast the good Tx again in the same block, this should not be propogated and
 	// added to the local mempool
-	resp, err = local.BroadcastTxSync(ctx, []byte("good"))
+	resp, err = local.BroadcastTxSync(timeoutCtx, []byte("good"))
 	assert.Error(err)
 	assert.Nil(resp)
@@ -1063,6 +1080,9 @@ func TestStatus(t *testing.T) {
 
 	err = node.Start()
 	require.NoError(err)
+	defer func() {
+		require.NoError(node.Stop())
+	}()
 
 	resp, err := rpc.Status(context.Background())
 	assert.NoError(err)
@@ -1127,7 +1147,9 @@ func TestFutureGenesisTime(t *testing.T) {
 	key, _, _ := crypto.GenerateEd25519Key(crand.Reader)
 	genesisValidators, signingKey := getGenesisValidatorSetWithSigner(1)
 	genesisTime := time.Now().Local().Add(time.Second * time.Duration(1))
-	node, err := newFullNode(context.Background(), config.NodeConfig{
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	node, err := newFullNode(ctx, config.NodeConfig{
 		DALayer:    "mock",
 		Aggregator: true,
 		BlockManagerConfig: config.BlockManagerConfig{
@@ -1148,6 +1170,9 @@ func TestFutureGenesisTime(t *testing.T) {
 
 	err = node.Start()
 	require.NoError(err)
+	defer func() {
+		require.NoError(node.Stop())
+	}()
 	wg.Wait()
 
 	assert.True(beginBlockTime.After(genesisTime))
diff --git a/node/full_node_integration_test.go b/node/full_node_integration_test.go
index 664eb2ce59f..d03c186a6c8 100644
--- a/node/full_node_integration_test.go
+++ b/node/full_node_integration_test.go
@@ -53,7 +53,9 @@ func TestAggregatorMode(t *testing.T) {
 		BlockTime:   1 * time.Second,
 		NamespaceID: types.NamespaceID{1, 2, 3, 4, 5, 6, 7, 8},
 	}
-	node, err := newFullNode(context.Background(), config.NodeConfig{DALayer: "mock", Aggregator: true, BlockManagerConfig: blockManagerConfig}, key, signingKey, proxy.NewLocalClientCreator(app), &tmtypes.GenesisDoc{ChainID: "test", Validators: genesisValidators}, log.TestingLogger())
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	node, err := newFullNode(ctx, config.NodeConfig{DALayer: "mock", Aggregator: true, BlockManagerConfig: blockManagerConfig}, key, signingKey, proxy.NewLocalClientCreator(app), &tmtypes.GenesisDoc{ChainID: "test", Validators: genesisValidators}, log.TestingLogger())
 	require.NoError(err)
 	require.NotNil(node)
@@ -62,14 +64,14 @@ func TestAggregatorMode(t *testing.T) {
 	err = node.Start()
 	assert.NoError(err)
 	defer func() {
-		err := node.Stop()
-		assert.NoError(err)
+		require.NoError(node.Stop())
 	}()
 
 	assert.True(node.IsRunning())
 
 	pid, err := peer.IDFromPrivateKey(anotherKey)
 	require.NoError(err)
-	ctx, cancel := context.WithCancel(context.TODO())
+	ctx, cancel = context.WithCancel(context.TODO())
+	defer cancel()
 	go func() {
 		for {
 			select {
@@ -81,8 +83,6 @@ func TestAggregatorMode(t *testing.T) {
 			}
 		}
 	}()
-	time.Sleep(3 * time.Second)
-	cancel()
 }
 
 // TestTxGossipingAndAggregation setups a network of nodes, with single aggregator and multiple producers.
@@ -161,8 +161,9 @@ func TestLazyAggregator(t *testing.T) {
 		BlockTime:   1 * time.Second,
 		NamespaceID: types.NamespaceID{1, 2, 3, 4, 5, 6, 7, 8},
 	}
-
-	node, err := NewNode(context.Background(), config.NodeConfig{
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	node, err := NewNode(ctx, config.NodeConfig{
 		DALayer:            "mock",
 		Aggregator:         true,
 		BlockManagerConfig: blockManagerConfig,
@@ -173,8 +174,7 @@ func TestLazyAggregator(t *testing.T) {
 	err = node.Start()
 	assert.NoError(err)
 	defer func() {
-		err := node.Stop()
-		assert.NoError(err)
+		require.NoError(node.Stop())
 	}()
 	assert.True(node.IsRunning())
@@ -228,7 +228,9 @@ func testSingleAggregatorSingleFullNode(t *testing.T, useBlockExchange bool) {
 	require := require.New(t)
 
 	aggCtx, aggCancel := context.WithCancel(context.Background())
+	defer aggCancel()
 	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
 	clientNodes := 1
 	nodes, _ := createNodes(aggCtx, ctx, clientNodes+1, false, t)
@@ -236,26 +238,28 @@ func testSingleAggregatorSingleFullNode(t *testing.T, useBlockExchange bool) {
 	node1 := nodes[0]
 	node2 := nodes[1]
 
 	require.NoError(node1.Start())
+	defer func() {
+		require.NoError(node1.Stop())
+	}()
 
 	require.NoError(waitForFirstBlock(node1, useBlockExchange))
 	require.NoError(node2.Start())
-	require.NoError(waitForAtLeastNBlocks(node2, 2, useBlockExchange))
-
-	aggCancel()
-	require.NoError(node1.Stop())
+	defer func() {
+		require.NoError(node2.Stop())
+	}()
 
+	require.NoError(waitForAtLeastNBlocks(node2, 2, useBlockExchange))
 	require.NoError(verifyNodesSynced(node1, node2, useBlockExchange))
-
-	cancel()
-	require.NoError(node2.Stop())
 }
 
 func testSingleAggregatorTwoFullNode(t *testing.T, useBlockExchange bool) {
 	require := require.New(t)
 
 	aggCtx, aggCancel := context.WithCancel(context.Background())
+	defer aggCancel()
 	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
 	clientNodes := 2
 	nodes, _ := createNodes(aggCtx, ctx, clientNodes+1, false, t)
@@ -264,26 +268,30 @@ func testSingleAggregatorTwoFullNode(t *testing.T, useBlockExchange bool) {
 	node3 := nodes[2]
 
 	require.NoError(node1.Start())
+	defer func() {
+		require.NoError(node1.Stop())
+	}()
 	require.NoError(waitForFirstBlock(node1, useBlockExchange))
 	require.NoError(node2.Start())
+	defer func() {
+		require.NoError(node2.Stop())
+	}()
 	require.NoError(node3.Start())
+	defer func() {
+		require.NoError(node3.Stop())
+	}()
 
 	require.NoError(waitForAtLeastNBlocks(node2, 2, useBlockExchange))
-
-	aggCancel()
-	require.NoError(node1.Stop())
-
 	require.NoError(verifyNodesSynced(node1, node2, useBlockExchange))
-	cancel()
-	require.NoError(node2.Stop())
-	require.NoError(node3.Stop())
 }
 
 func testSingleAggregatorSingleFullNodeTrustedHash(t *testing.T, useBlockExchange bool) {
 	require := require.New(t)
 
 	aggCtx, aggCancel := context.WithCancel(context.Background())
+	defer aggCancel()
 	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
 	clientNodes := 1
 	nodes, _ := createNodes(aggCtx, ctx, clientNodes+1, false, t)
@@ -291,6 +299,9 @@ func testSingleAggregatorSingleFullNodeTrustedHash(t *testing.T, useBlockExchang
 	node2 := nodes[1]
 
 	require.NoError(node1.Start())
+	defer func() {
+		require.NoError(node1.Stop())
+	}()
 
 	require.NoError(waitForFirstBlock(node1, useBlockExchange))
 
@@ -299,23 +310,21 @@ func testSingleAggregatorSingleFullNodeTrustedHash(t *testing.T, useBlockExchang
 	require.NoError(err)
 	node2.conf.TrustedHash = trustedHash.Hash().String()
 	require.NoError(node2.Start())
+	defer func() {
+		require.NoError(node2.Stop())
+	}()
 
 	require.NoError(waitForAtLeastNBlocks(node1, 2, useBlockExchange))
-
-	aggCancel()
-	require.NoError(node1.Stop())
-
 	require.NoError(verifyNodesSynced(node1, node2, useBlockExchange))
-	cancel()
-	require.NoError(node2.Stop())
 }
 
 func testSingleAggregatorSingleFullNodeSingleLightNode(t *testing.T) {
 	require := require.New(t)
 
 	aggCtx, aggCancel := context.WithCancel(context.Background())
+	defer aggCancel()
 	ctx, cancel := context.WithCancel(context.Background())
-
+	defer cancel()
 	num := 3
 	keys := make([]crypto.PrivKey, num)
 	for i := 0; i < num; i++ {
@@ -325,6 +334,9 @@ func testSingleAggregatorSingleFullNodeSingleLightNode(t *testing.T) {
 	ds, _ := store.NewDefaultInMemoryKVStore()
 	_ = dalc.Init([8]byte{}, nil, ds, log.TestingLogger())
 	_ = dalc.Start()
+	defer func() {
+		require.NoError(dalc.Stop())
+	}()
 	sequencer, _ := createNode(aggCtx, 0, false, true, false, keys, t)
 	fullNode, _ := createNode(ctx, 1, false, false, false, keys, t)
@@ -336,18 +348,20 @@ func testSingleAggregatorSingleFullNodeSingleLightNode(t *testing.T) {
 	lightNode, _ := createNode(ctx, 2, false, false, true, keys, t)
 
 	require.NoError(sequencer.Start())
+	defer func() {
+		require.NoError(sequencer.Stop())
+	}()
 	require.NoError(fullNode.Start())
+	defer func() {
+		require.NoError(fullNode.Stop())
+	}()
 	require.NoError(lightNode.Start())
+	defer func() {
+		require.NoError(lightNode.Stop())
+	}()
 
 	require.NoError(waitForAtLeastNBlocks(sequencer.(*FullNode), 2, false))
-
-	aggCancel()
-	require.NoError(sequencer.Stop())
-
 	require.NoError(verifyNodesSynced(fullNode, lightNode, false))
-	cancel()
-	require.NoError(fullNode.Stop())
-	require.NoError(lightNode.Stop())
 }
 
 func testSingleAggregatorSingleFullNodeFraudProofGossip(t *testing.T) {
@@ -356,7 +370,9 @@ func testSingleAggregatorSingleFullNodeFraudProofGossip(t *testing.T) {
 	assert := assert.New(t)
 	require := require.New(t)
 	var wg sync.WaitGroup
 	aggCtx, aggCancel := context.WithCancel(context.Background())
+	defer aggCancel()
 	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
 	clientNodes := 1
 	nodes, apps := createNodes(aggCtx, ctx, clientNodes+1, true, t)
@@ -371,8 +387,14 @@ func testSingleAggregatorSingleFullNodeFraudProofGossip(t *testing.T) {
 
 	wg.Add(clientNodes + 1)
 	require.NoError(aggNode.Start())
+	defer func() {
+		require.NoError(aggNode.Stop())
+	}()
 	require.NoError(waitForAtLeastNBlocks(aggNode, 2, false))
 	require.NoError(fullNode.Start())
+	defer func() {
+		require.NoError(fullNode.Stop())
+	}()
 
 	wg.Wait()
 	// aggregator should have 0 GenerateFraudProof calls and 1 VerifyFraudProof calls
@@ -384,13 +406,9 @@ func testSingleAggregatorSingleFullNodeFraudProofGossip(t *testing.T) {
 
 	n1Frauds, err := aggNode.fraudService.Get(aggCtx, types.StateFraudProofType)
 	require.NoError(err)
-	aggCancel()
-	require.NoError(aggNode.Stop())
 
 	n2Frauds, err := fullNode.fraudService.Get(aggCtx, types.StateFraudProofType)
 	require.NoError(err)
-	cancel()
-	require.NoError(fullNode.Stop())
 
 	assert.Equal(len(n1Frauds), 1, "number of fraud proofs received via gossip should be 1")
 	assert.Equal(len(n2Frauds), 1, "number of fraud proofs received via gossip should be 1")
@@ -403,7 +421,9 @@ func testSingleAggregatorTwoFullNodeFraudProofSync(t *testing.T) {
 
 	var wg sync.WaitGroup
 	aggCtx, aggCancel := context.WithCancel(context.Background())
+	defer aggCancel()
 	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
 	clientNodes := 2
 	nodes, apps := createNodes(aggCtx, ctx, clientNodes+1, true, t)
@@ -419,9 +439,13 @@ func testSingleAggregatorTwoFullNodeFraudProofSync(t *testing.T) {
 
 	wg.Add(clientNodes)
 	require.NoError(aggNode.Start())
-	time.Sleep(2 * time.Second)
+	defer func() {
+		require.NoError(aggNode.Stop())
+	}()
 	require.NoError(fullNode1.Start())
-
+	defer func() {
+		require.NoError(fullNode1.Stop())
+	}()
 	wg.Wait()
 	// aggregator should have 0 GenerateFraudProof calls and 1 VerifyFraudProof calls
 	apps[0].AssertNumberOfCalls(t, "GenerateFraudProof", 0)
@@ -430,31 +454,28 @@ func testSingleAggregatorTwoFullNodeFraudProofSync(t *testing.T) {
 	apps[1].AssertNumberOfCalls(t, "GenerateFraudProof", 1)
 	apps[1].AssertNumberOfCalls(t, "VerifyFraudProof", 1)
 
-	n1Frauds, err := aggNode.fraudService.Get(aggCtx, types.StateFraudProofType)
+	n1Frauds, err := aggNode.fraudService.Get(ctx, types.StateFraudProofType)
 	require.NoError(err)
 
-	n2Frauds, err := fullNode1.fraudService.Get(aggCtx, types.StateFraudProofType)
+	n2Frauds, err := fullNode1.fraudService.Get(ctx, types.StateFraudProofType)
 	require.NoError(err)
 	assert.Equal(n1Frauds, n2Frauds, "number of fraud proofs gossiped between nodes must match")
 
 	wg.Add(1)
 
 	// delay start node3 such that it can sync the fraud proof from peers, instead of listening to gossip
 	require.NoError(fullNode2.Start())
+	defer func() {
+		require.NoError(fullNode2.Stop())
+	}()
 	wg.Wait()
 
 	// fullnode2 should have 1 GenerateFraudProof calls and 1 VerifyFraudProof calls
 	apps[2].AssertNumberOfCalls(t, "GenerateFraudProof", 1)
 	apps[2].AssertNumberOfCalls(t, "VerifyFraudProof", 1)
 
-	n3Frauds, err := fullNode2.fraudService.Get(aggCtx, types.StateFraudProofType)
+	n3Frauds, err := fullNode2.fraudService.Get(ctx, types.StateFraudProofType)
 	require.NoError(err)
 	assert.Equal(n1Frauds, n3Frauds, "number of fraud proofs gossiped between nodes must match")
-
-	aggCancel()
-	require.NoError(aggNode.Stop())
-	cancel()
-	require.NoError(fullNode1.Stop())
-	require.NoError(fullNode2.Stop())
 }
 
 func TestFraudProofService(t *testing.T) {
@@ -465,16 +486,16 @@ func TestFraudProofService(t *testing.T) {
 
 // Creates a starts the given number of client nodes along with an aggregator node. Uses the given flag to decide whether to have the aggregator produce malicious blocks.
 func createAndStartNodes(clientNodes int, isMalicious bool, t *testing.T) ([]*FullNode, []*mocks.Application) {
 	aggCtx, aggCancel := context.WithCancel(context.Background())
+	defer aggCancel()
 	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
 	nodes, apps := createNodes(aggCtx, ctx, clientNodes+1, isMalicious, t)
 	startNodes(nodes, apps, t)
-	aggCancel()
-	time.Sleep(100 * time.Millisecond)
-	for _, n := range nodes {
-		require.NoError(t, n.Stop())
-	}
-	cancel()
-	time.Sleep(100 * time.Millisecond)
+	defer func() {
+		for _, n := range nodes {
+			assert.NoError(t, n.Stop())
+		}
+	}()
 	return nodes, apps
 }
diff --git a/node/full_node_test.go b/node/full_node_test.go
index 8ecb666d1c8..0d3f835eaff 100644
--- a/node/full_node_test.go
+++ b/node/full_node_test.go
@@ -34,7 +34,9 @@ func TestStartup(t *testing.T) {
 	app.On("InitChain", mock.Anything).Return(abci.ResponseInitChain{})
 	key, _, _ := crypto.GenerateEd25519Key(rand.Reader)
 	signingKey, _, _ := crypto.GenerateEd25519Key(rand.Reader)
-	node, err := newFullNode(context.Background(), config.NodeConfig{DALayer: "mock"}, key, signingKey, proxy.NewLocalClientCreator(app), &types.GenesisDoc{ChainID: "test"}, log.TestingLogger())
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	node, err := newFullNode(ctx, config.NodeConfig{DALayer: "mock"}, key, signingKey, proxy.NewLocalClientCreator(app), &types.GenesisDoc{ChainID: "test"}, log.TestingLogger())
 	require.NoError(err)
 	require.NotNil(node)
@@ -43,13 +45,13 @@ func TestStartup(t *testing.T) {
 	err = node.Start()
 	assert.NoError(err)
 	defer func() {
-		err := node.Stop()
-		assert.NoError(err)
+		assert.NoError(node.Stop())
 	}()
 
 	assert.True(node.IsRunning())
 }
 
 func TestMempoolDirectly(t *testing.T) {
+	assert := assert.New(t)
 	require := require.New(t)
 
 	app := &mocks.Application{}
@@ -59,12 +61,17 @@ func TestMempoolDirectly(t *testing.T) {
 	signingKey, _, _ := crypto.GenerateEd25519Key(rand.Reader)
 	anotherKey, _, _ := crypto.GenerateEd25519Key(rand.Reader)
 
-	node, err := newFullNode(context.Background(), config.NodeConfig{DALayer: "mock"}, key, signingKey, proxy.NewLocalClientCreator(app), &types.GenesisDoc{ChainID: "test"}, log.TestingLogger())
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	node, err := newFullNode(ctx, config.NodeConfig{DALayer: "mock"}, key, signingKey, proxy.NewLocalClientCreator(app), &types.GenesisDoc{ChainID: "test"}, log.TestingLogger())
 	require.NoError(err)
 	require.NotNil(node)
 
 	err = node.Start()
 	require.NoError(err)
+	defer func() {
+		assert.NoError(node.Stop())
+	}()
 
 	pid, err := peer.IDFromPrivateKey(anotherKey)
 	require.NoError(err)
diff --git a/node/helpers_test.go b/node/helpers_test.go
index 834098b505e..861fa6a25ff 100644
--- a/node/helpers_test.go
+++ b/node/helpers_test.go
@@ -26,11 +26,15 @@ func TestMockTester(t *testing.T) {
 
 func TestGetNodeHeight(t *testing.T) {
 	require := require.New(t)
-	ctx := context.Background()
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
 	dalc := &mockda.DataAvailabilityLayerClient{}
 	ds, _ := store.NewDefaultInMemoryKVStore()
 	_ = dalc.Init([8]byte{}, nil, ds, log.TestingLogger())
 	_ = dalc.Start()
+	defer func() {
+		require.NoError(dalc.Stop())
+	}()
 	num := 2
 	keys := make([]crypto.PrivKey, num)
 	for i := 0; i < num; i++ {
@@ -41,7 +45,15 @@ func TestGetNodeHeight(t *testing.T) {
 	fullNode.(*FullNode).dalc = dalc
 	fullNode.(*FullNode).blockManager.SetDALC(dalc)
 	require.NoError(fullNode.Start())
+	defer func() {
+		require.NoError(fullNode.Stop())
+	}()
+
 	require.NoError(lightNode.Start())
+	defer func() {
+		require.NoError(lightNode.Stop())
+	}()
+
 	require.NoError(testutils.Retry(1000, 100*time.Millisecond, func() error {
 		num, err := getNodeHeight(fullNode, false)
 		if err != nil {
diff --git a/state/txindex/kv/kv_test.go b/state/txindex/kv/kv_test.go
index 2028767732e..85af66fb666 100644
--- a/state/txindex/kv/kv_test.go
+++ b/state/txindex/kv/kv_test.go
@@ -164,7 +164,8 @@ func TestTxSearchWithCancelation(t *testing.T) {
 }
 
 func TestTxSearchDeprecatedIndexing(t *testing.T) {
-	ctx := context.Background()
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
 	kvStore, _ := store.NewDefaultInMemoryKVStore()
 	indexer := NewTxIndex(ctx, kvStore)
@@ -246,7 +247,10 @@ func TestTxSearchDeprecatedIndexing(t *testing.T) {
 
 func TestTxSearchOneTxWithMultipleSameTagsButDifferentValues(t *testing.T) {
 	kvStore, _ := store.NewDefaultInMemoryKVStore()
-	indexer := NewTxIndex(context.Background(), kvStore)
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	indexer := NewTxIndex(ctx, kvStore)
 
 	txResult := txResultWithEvents([]abci.Event{
 		{Type: "account", Attributes: []abci.EventAttribute{{Key: []byte("number"), Value: []byte("1"), Index: true}}},
@@ -256,8 +260,6 @@ func TestTxSearchOneTxWithMultipleSameTagsButDifferentValues(t *testing.T) {
 	err := indexer.Index(txResult)
 	require.NoError(t, err)
 
-	ctx := context.Background()
-
 	results, err := indexer.Search(ctx, query.MustParse("account.number >= 1"))
 	assert.NoError(t, err)
 
@@ -269,7 +271,9 @@ func TestTxSearchOneTxWithMultipleSameTagsButDifferentValues(t *testing.T) {
 
 func TestTxSearchMultipleTxs(t *testing.T) {
 	kvStore, _ := store.NewDefaultInMemoryKVStore()
-	indexer := NewTxIndex(context.Background(), kvStore)
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	indexer := NewTxIndex(ctx, kvStore)
 
 	// indexed first, but bigger height (to test the order of transactions)
 	txResult := txResultWithEvents([]abci.Event{
@@ -314,8 +318,6 @@ func TestTxSearchMultipleTxs(t *testing.T) {
 	err = indexer.Index(txResult4)
 	require.NoError(t, err)
 
-	ctx := context.Background()
-
 	results, err := indexer.Search(ctx, query.MustParse("account.number >= 1"))
 	assert.NoError(t, err)
 
@@ -344,7 +346,9 @@ func benchmarkTxIndex(txsCount int64, b *testing.B) {
 	store, err := store.NewDefaultKVStore(dir, "db", "tx_index")
 	require.NoError(b, err)
 
-	indexer := NewTxIndex(context.Background(), store)
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	indexer := NewTxIndex(ctx, store)
 	batch := txindex.NewBatch(txsCount)
 
 	txIndex := uint32(0)
diff --git a/store/prefix_test.go b/store/prefix_test.go
index 0578afb2bf6..42f354a03a1 100644
--- a/store/prefix_test.go
+++ b/store/prefix_test.go
@@ -16,7 +16,8 @@ func TestPrefixKV1(t *testing.T) {
 	assert := assert.New(t)
 	require := require.New(t)
 
-	ctx := context.Background()
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
 	base, _ := NewDefaultInMemoryKVStore()
 
 	p1 := ktds.Wrap(base, ktds.PrefixTransform{Prefix: ds.NewKey("1")})
@@ -82,8 +83,8 @@ func TestPrefixKVBatch(t *testing.T) {
 	assert := assert.New(t)
 	require := require.New(t)
 
-	ctx := context.Background()
-
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
 	basekv, _ := NewDefaultInMemoryKVStore()
 	prefixkv := ktds.Wrap(basekv, ktds.PrefixTransform{Prefix: ds.NewKey("prefix1")}).Children()[0]
diff --git a/store/store_test.go b/store/store_test.go
index dcceb9a77c6..a77cd98ff05 100644
--- a/store/store_test.go
+++ b/store/store_test.go
@@ -38,12 +38,13 @@ func TestStoreHeight(t *testing.T) {
 			getRandomBlock(10, 0),
 		}, 10},
 	}
-
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
 	for _, c := range cases {
 		t.Run(c.name, func(t *testing.T) {
 			assert := assert.New(t)
 			ds, _ := NewDefaultInMemoryKVStore()
-			bstore := New(context.Background(), ds)
+			bstore := New(ctx, ds)
 			assert.Equal(uint64(0), bstore.Height())
 
 			for _, block := range c.blocks {
@@ -89,13 +90,15 @@ func TestStoreLoad(t *testing.T) {
 
 	mKV, _ := NewDefaultInMemoryKVStore()
 	dKV, _ := NewDefaultKVStore(tmpDir, "db", "test")
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
 	for _, kv := range []ds.TxnDatastore{mKV, dKV} {
 		for _, c := range cases {
 			t.Run(c.name, func(t *testing.T) {
 				assert := assert.New(t)
 				require := require.New(t)
 
-				bstore := New(context.Background(), kv)
+				bstore := New(ctx, kv)
 
 				lastCommit := &types.Commit{}
 				for _, block := range c.blocks {
@@ -129,7 +132,8 @@ func TestRestart(t *testing.T) {
 
 	validatorSet := types.GetRandomValidatorSet()
 
-	ctx := context.Background()
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
 	kv, _ := NewDefaultInMemoryKVStore()
 	s1 := New(ctx, kv)
 	expectedHeight := uint64(10)
@@ -152,8 +156,10 @@ func TestBlockResponses(t *testing.T) {
 	t.Parallel()
 	assert := assert.New(t)
 
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
 	kv, _ := NewDefaultInMemoryKVStore()
-	s := New(context.Background(), kv)
+	s := New(ctx, kv)
 
 	expected := &tmstate.ABCIResponses{
 		BeginBlock: &abcitypes.ResponseBeginBlock{
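
The patch repeats one refactoring across the test suite: every Stop() and cancel() that used to sit at the end of a test body moves into a defer (or t.Cleanup) registered immediately after the resource is acquired, so teardown still runs when a require/assert aborts the test early. A minimal sketch of the two idioms, under illustrative names (Service, startService, and TestSketch are not part of this repository):

	package sketch

	import (
		"context"
		"testing"

		"github.com/stretchr/testify/require"
	)

	// Service stands in for anything with Start/Stop semantics
	// (a node, a DA client). Illustrative only.
	type Service interface {
		Start() error
		Stop() error
	}

	// startService starts s and registers teardown via t.Cleanup, which,
	// unlike defer, can be called from a helper and still runs at the end
	// of the test, even if an assertion fails first.
	func startService(t *testing.T, s Service) {
		t.Helper()
		require.NoError(t, s.Start())
		t.Cleanup(func() {
			require.NoError(t, s.Stop())
		})
	}

	func TestSketch(t *testing.T) {
		// The context is released no matter how the test exits.
		ctx, cancel := context.WithCancel(context.Background())
		defer cancel()
		_ = ctx
	}

When the cleanup is registered inside a loop, the loop variable has to be captured first — exactly what the `node := nodes[i]` line added in createGenesisValidators does — otherwise every Cleanup closure would observe the variable's final value and stop the same node repeatedly.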