From a2771baa255f4ca02c731c439d5140a67e03597d Mon Sep 17 00:00:00 2001
From: dhrubabasu <7675102+dhrubabasu@users.noreply.github.com>
Date: Fri, 12 May 2023 12:55:27 -0400
Subject: [PATCH 01/79] ban `require.Equal` when testing for `0`

---
 codec/test_codec.go                          | 46 ++++++++--------
 database/manager/manager_test.go             | 12 ++---
 database/test_database.go                    |  2 +-
 scripts/lint.sh                              | 11 +++-
 snow/engine/common/requests_test.go          |  8 +--
 snow/networking/router/chain_router_test.go  |  4 +-
 utils/bag/bag_test.go                        | 28 +++++-----
 utils/bag/unique_bag_test.go                 |  4 +-
 utils/beacon/set_test.go                     |  2 +-
 .../buffer/bounded_nonblocking_queue_test.go |  8 +--
 utils/buffer/unbounded_deque_test.go         | 52 +++++++++----------
 utils/linkedhashmap/linkedhashmap_test.go    | 12 ++---
 utils/math/continuous_averager_test.go       |  2 +-
 utils/math/safe_math_test.go                 | 26 +++++-----
 utils/sampler/weighted_test.go               |  2 +-
 utils/sorting_test.go                        |  2 +-
 utils/window/window_test.go                  | 10 ++--
 vms/avm/service_test.go                      |  6 +--
 vms/secp256k1fx/keychain_test.go             |  2 +-
 x/merkledb/cache_test.go                     | 12 ++---
 x/merkledb/path_test.go                      |  2 +-
 x/merkledb/trie_test.go                      |  2 +-
 x/sync/sync_test.go                          |  4 +-
 x/sync/syncworkheap_test.go                  | 28 +++++-----
 24 files changed, 148 insertions(+), 139 deletions(-)
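Illustration only, not part of the patch: `require.Zero` asserts that its argument equals the zero value of the argument's own type, so the explicitly typed expectations replaced below (`uint16(0)`, `uint64(0)`, `0.0`) can be dropped without weakening any assertion. A minimal sketch of the equivalence, using only testify's `require` package; the test name and variables are hypothetical:

package example

import (
	"testing"

	"github.com/stretchr/testify/require"
)

func TestZeroValueAssertions(t *testing.T) {
	var version uint16
	var balance uint64
	// Before: the expected zero had to be spelled with a matching type,
	// because require.Equal fails on a type mismatch.
	require.Equal(t, uint16(0), version)
	require.Equal(t, uint64(0), balance)
	// After: require.Zero derives the zero value from each argument's type.
	require.Zero(t, version)
	require.Zero(t, balance)
}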
diff --git a/codec/test_codec.go b/codec/test_codec.go
index e8903dd9747..e82593e1e3f 100644
--- a/codec/test_codec.go
+++ b/codec/test_codec.go
@@ -149,7 +149,7 @@ func TestStruct(codec GeneralCodec, t testing.TB) {
 
 	version, err := manager.Unmarshal(myStructBytes, myStructUnmarshaled)
 	require.NoError(err)
-	require.Equal(uint16(0), version)
+	require.Zero(version)
 	require.Equal(myStructInstance, *myStructUnmarshaled)
 }
@@ -180,7 +180,7 @@ func TestUInt32(codec GeneralCodec, t testing.TB) {
 	var numberUnmarshaled uint32
 	version, err := manager.Unmarshal(bytes, &numberUnmarshaled)
 	require.NoError(err)
-	require.Equal(uint16(0), version)
+	require.Zero(version)
 	require.Equal(number, numberUnmarshaled)
 }
@@ -215,7 +215,7 @@ func TestSlice(codec GeneralCodec, t testing.TB) {
 	var sliceUnmarshaled []bool
 	version, err := manager.Unmarshal(bytes, &sliceUnmarshaled)
 	require.NoError(err)
-	require.Equal(uint16(0), version)
+	require.Zero(version)
 	require.Equal(mySlice, sliceUnmarshaled)
 }
@@ -240,7 +240,7 @@ func TestMaxSizeSlice(codec GeneralCodec, t testing.TB) {
 	var sliceUnmarshaled []string
 	version, err := manager.Unmarshal(bytes, &sliceUnmarshaled)
 	require.NoError(err)
-	require.Equal(uint16(0), version)
+	require.Zero(version)
 	require.Equal(mySlice, sliceUnmarshaled)
 }
@@ -263,7 +263,7 @@ func TestBool(codec GeneralCodec, t testing.TB) {
 	var boolUnmarshaled bool
 	version, err := manager.Unmarshal(bytes, &boolUnmarshaled)
 	require.NoError(err)
-	require.Equal(uint16(0), version)
+	require.Zero(version)
 	require.Equal(myBool, boolUnmarshaled)
 }
@@ -286,7 +286,7 @@ func TestArray(codec GeneralCodec, t testing.TB) {
 	var myArrUnmarshaled [5]uint64
 	version, err := manager.Unmarshal(bytes, &myArrUnmarshaled)
 	require.NoError(err)
-	require.Equal(uint16(0), version)
+	require.Zero(version)
 	require.Equal(myArr, myArrUnmarshaled)
 }
@@ -309,7 +309,7 @@ func TestBigArray(codec GeneralCodec, t testing.TB) {
 	var myArrUnmarshaled [30000]uint64
 	version, err := manager.Unmarshal(bytes, &myArrUnmarshaled)
 	require.NoError(err)
-	require.Equal(uint16(0), version)
+	require.Zero(version)
 	require.Equal(myArr, myArrUnmarshaled)
 }
@@ -332,7 +332,7 @@ func TestPointerToStruct(codec GeneralCodec, t testing.TB) {
 	var myPtrUnmarshaled *MyInnerStruct
 	version, err := manager.Unmarshal(bytes, &myPtrUnmarshaled)
 	require.NoError(err)
-	require.Equal(uint16(0), version)
+	require.Zero(version)
 	require.Equal(myPtr, myPtrUnmarshaled)
 }
@@ -369,7 +369,7 @@ func TestSliceOfStruct(codec GeneralCodec, t testing.TB) {
 	var mySliceUnmarshaled []MyInnerStruct3
 	version, err := manager.Unmarshal(bytes, &mySliceUnmarshaled)
 	require.NoError(err)
-	require.Equal(uint16(0), version)
+	require.Zero(version)
 	require.Equal(mySlice, mySliceUnmarshaled)
 }
@@ -395,7 +395,7 @@ func TestInterface(codec GeneralCodec, t testing.TB) {
 	var unmarshaledFoo Foo
 	version, err := manager.Unmarshal(bytes, &unmarshaledFoo)
 	require.NoError(err)
-	require.Equal(uint16(0), version)
+	require.Zero(version)
 	require.Equal(f, unmarshaledFoo)
 }
@@ -428,7 +428,7 @@ func TestSliceOfInterface(codec GeneralCodec, t testing.TB) {
 	var mySliceUnmarshaled []Foo
 	version, err := manager.Unmarshal(bytes, &mySliceUnmarshaled)
 	require.NoError(err)
-	require.Equal(uint16(0), version)
+	require.Zero(version)
 	require.Equal(mySlice, mySliceUnmarshaled)
 }
@@ -461,7 +461,7 @@ func TestArrayOfInterface(codec GeneralCodec, t testing.TB) {
 	var myArrayUnmarshaled [2]Foo
 	version, err := manager.Unmarshal(bytes, &myArrayUnmarshaled)
 	require.NoError(err)
-	require.Equal(uint16(0), version)
+	require.Zero(version)
 	require.Equal(myArray, myArrayUnmarshaled)
 }
@@ -489,7 +489,7 @@ func TestPointerToInterface(codec GeneralCodec, t testing.TB) {
 	var myPtrUnmarshaled *Foo
 	version, err := manager.Unmarshal(bytes, &myPtrUnmarshaled)
 	require.NoError(err)
-	require.Equal(uint16(0), version)
+	require.Zero(version)
 	require.Equal(myPtr, myPtrUnmarshaled)
 }
@@ -512,7 +512,7 @@ func TestString(codec GeneralCodec, t testing.TB) {
 	var stringUnmarshaled string
 	version, err := manager.Unmarshal(bytes, &stringUnmarshaled)
 	require.NoError(err)
-	require.Equal(uint16(0), version)
+	require.Zero(version)
 	require.Equal(myString, stringUnmarshaled)
 }
@@ -539,8 +539,8 @@ func TestNilSlice(codec GeneralCodec, t testing.TB) {
 	var structUnmarshaled structWithSlice
 	version, err := manager.Unmarshal(bytes, &structUnmarshaled)
 	require.NoError(err)
-	require.Equal(uint16(0), version)
-	require.Equal(0, len(structUnmarshaled.Slice))
+	require.Zero(version)
+	require.Empty(structUnmarshaled.Slice)
 }
 
 // Ensure that trying to serialize a struct with an unexported member
@@ -596,7 +596,7 @@ func TestSerializeOfNoSerializeField(codec GeneralCodec, t testing.TB) {
 	unmarshalled := s{}
 	version, err := manager.Unmarshal(marshalled, &unmarshalled)
 	require.NoError(err)
-	require.Equal(uint16(0), version)
+	require.Zero(version)
 
 	expectedUnmarshalled := s{SerializedField: "Serialize me"}
 	require.Equal(expectedUnmarshalled, unmarshalled)
@@ -627,8 +627,8 @@ func TestNilSliceSerialization(codec GeneralCodec, t testing.TB) {
 	valUnmarshaled := &simpleSliceStruct{}
 	version, err := manager.Unmarshal(result, &valUnmarshaled)
 	require.NoError(err)
-	require.Equal(uint16(0), version)
-	require.Equal(0, len(valUnmarshaled.Arr))
+	require.Zero(version)
+	require.Empty(valUnmarshaled.Arr)
 }
 
 // Test marshaling a slice that has 0 elements (but isn't nil)
@@ -656,7 +656,7 @@ func TestEmptySliceSerialization(codec GeneralCodec, t testing.TB) {
 	valUnmarshaled := &simpleSliceStruct{}
 	version, err := manager.Unmarshal(result, &valUnmarshaled)
 	require.NoError(err)
-	require.Equal(uint16(0), version)
+	require.Zero(version)
 	require.Equal(val, valUnmarshaled)
 }
@@ -689,7 +689,7 @@ func TestSliceWithEmptySerialization(codec GeneralCodec, t testing.TB) {
 	unmarshaled := nestedSliceStruct{}
 	version, err := manager.Unmarshal(expected, &unmarshaled)
 	require.NoError(err)
-	require.Equal(uint16(0), version)
+	require.Zero(version)
 	require.Equal(1000, len(unmarshaled.Arr))
 }
@@ -756,7 +756,7 @@ func TestNegativeNumbers(codec GeneralCodec, t testing.TB) {
 	mySUnmarshaled := s{}
 	version, err := manager.Unmarshal(bytes, &mySUnmarshaled)
 	require.NoError(err)
-	require.Equal(uint16(0), version)
+	require.Zero(version)
 	require.Equal(myS, mySUnmarshaled)
 }
@@ -808,7 +808,7 @@ func TestUnmarshalInvalidInterface(codec GeneralCodec, t testing.TB) {
 		s := outer{}
 		version, err := manager.Unmarshal(bytes, &s)
 		require.NoError(err)
-		require.Equal(uint16(0), version)
+		require.Zero(version)
 	}
 	{
 		bytes := []byte{0, 0, 0, 0, 0, 1}
diff --git a/database/manager/manager_test.go b/database/manager/manager_test.go
index 0a24753f04c..7784835ed5e 100644
--- a/database/manager/manager_test.go
+++ b/database/manager/manager_test.go
@@ -39,7 +39,7 @@ func TestNewSingleLevelDB(t *testing.T) {
 
 	semDB := manager.Current()
 	cmp := semDB.Version.Compare(v1)
-	require.Equal(0, cmp, "incorrect version on current database")
+	require.Zero(cmp, "incorrect version on current database")
 
 	_, exists := manager.Previous()
 	require.False(exists, "there should be no previous database")
@@ -63,7 +63,7 @@ func TestNewCreatesSingleDB(t *testing.T) {
 
 	semDB := manager.Current()
 	cmp := semDB.Version.Compare(v1)
-	require.Equal(0, cmp, "incorrect version on current database")
+	require.Zero(cmp, "incorrect version on current database")
 
 	_, exists := manager.Previous()
 	require.False(exists, "there should be no previous database")
@@ -173,19 +173,19 @@ func TestNewSortsDatabases(t *testing.T) {
 
 	semDB := manager.Current()
 	cmp := semDB.Version.Compare(vers[0])
-	require.Equal(0, cmp, "incorrect version on current database")
+	require.Zero(cmp, "incorrect version on current database")
 
 	prev, exists := manager.Previous()
 	require.True(exists, "expected to find a previous database")
 	cmp = prev.Version.Compare(vers[1])
-	require.Equal(0, cmp, "incorrect version on previous database")
+	require.Zero(cmp, "incorrect version on previous database")
 
 	dbs := manager.GetDatabases()
 	require.Equal(len(vers), len(dbs))
 
 	for i, db := range dbs {
 		cmp = db.Version.Compare(vers[i])
-		require.Equal(0, cmp, "expected to find database version %s, but found %s", vers[i], db.Version.String())
+		require.Zero(cmp, "expected to find database version %s, but found %s", vers[i], db.Version.String())
 	}
 }
@@ -401,7 +401,7 @@ func TestNewManagerFromDBs(t *testing.T) {
 
 	dbs := m.GetDatabases()
 	require.Len(dbs, len(versions))
 	for i, db := range dbs {
-		require.Equal(0, db.Version.Compare(versions[i]))
+		require.Zero(db.Version.Compare(versions[i]))
 	}
 }
diff --git a/database/test_database.go b/database/test_database.go
index 51039171ec3..ef99302e1e2 100644
--- a/database/test_database.go
+++ b/database/test_database.go
@@ -949,7 +949,7 @@ func TestClear(t *testing.T, db Database) {
 
 	count, err = Count(db)
 	require.NoError(err)
-	require.Equal(0, count)
+	require.Zero(count)
 
 	require.NoError(db.Close())
 }
diff --git a/scripts/lint.sh b/scripts/lint.sh
index 6c5094017db..47865c37e46 100755
--- a/scripts/lint.sh
+++ b/scripts/lint.sh
@@ -21,7 +21,7 @@ fi
 # by default, "./scripts/lint.sh" runs all lint tests
 # to run only "license_header" test
 # TESTS='license_header' ./scripts/lint.sh
-TESTS=${TESTS:-"golangci_lint license_header require_error_is_no_funcs_as_params single_import interface_compliance_nil"}
+TESTS=${TESTS:-"golangci_lint license_header require_error_is_no_funcs_as_params single_import interface_compliance_nil require_equal_zero"}
 
 function test_golangci_lint {
   go install -v github.com/golangci/golangci-lint/cmd/golangci-lint@v1.51.2
@@ -57,6 +57,15 @@ function test_require_error_is_no_funcs_as_params {
   fi
 }
 
+function test_require_equal_zero {
+  if grep -R -o -P 'require\.Equal\((t, )?(u?int\d+\(0\)|0)' .; then
+    echo ""
+    echo "Use require.Zero instead of require.Equal when testing for 0."
+    echo ""
+    return 1
+  fi
+}
+
 # Ref: https://go.dev/doc/effective_go#blank_implements
 function test_interface_compliance_nil {
   if grep -R -o -P '_ .+? = &.+?\{\}' .; then
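Illustration only, not part of the patch: the pattern added above matches a literal zero in the expected (first) position of `require.Equal`, with or without a leading `t, `, whether the zero is bare (`0`) or typed (`u?int\d+\(0\)`, e.g. `uint16(0)` or `int64(0)`). A hypothetical test showing a line the check flags and the preferred spelling; the variable is made up:

package example

import (
	"testing"

	"github.com/stretchr/testify/require"
)

func TestEqualZeroLint(t *testing.T) {
	var n uint32
	// Flagged by test_require_equal_zero: the expected value is a literal zero.
	require.Equal(t, uint32(0), n)
	// Preferred spelling after this change:
	require.Zero(t, n)
	// Not flagged by the grep: the zero is not in the expected position.
	require.Equal(t, n, uint32(0))
}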
diff --git a/snow/engine/common/requests_test.go b/snow/engine/common/requests_test.go
index 4d779a64028..02213469c95 100644
--- a/snow/engine/common/requests_test.go
+++ b/snow/engine/common/requests_test.go
@@ -15,7 +15,7 @@ func TestRequests(t *testing.T) {
 	req := Requests{}
 
 	length := req.Len()
-	require.Equal(t, 0, length, "should have had no outstanding requests")
+	require.Zero(t, length, "should have had no outstanding requests")
 
 	_, removed := req.Remove(ids.EmptyNodeID, 0)
 	require.False(t, removed, "shouldn't have removed the request")
@@ -69,7 +69,7 @@ func TestRequests(t *testing.T) {
 	require.True(t, removed, "should have removed the request")
 
 	length = req.Len()
-	require.Equal(t, 0, length, "should have had no outstanding requests")
+	require.Zero(t, length, "should have had no outstanding requests")
 
 	req.Add(ids.EmptyNodeID, 0, ids.Empty)
 
@@ -80,11 +80,11 @@ func TestRequests(t *testing.T) {
 	require.True(t, removed, "should have removed the request")
 
 	length = req.Len()
-	require.Equal(t, 0, length, "should have had no outstanding requests")
+	require.Zero(t, length, "should have had no outstanding requests")
 
 	removed = req.RemoveAny(ids.Empty)
 	require.False(t, removed, "shouldn't have removed the request")
 
 	length = req.Len()
-	require.Equal(t, 0, length, "should have had no outstanding requests")
+	require.Zero(t, length, "should have had no outstanding requests")
 }
diff --git a/snow/networking/router/chain_router_test.go b/snow/networking/router/chain_router_test.go
index b843f17cd5b..17d6cec196a 100644
--- a/snow/networking/router/chain_router_test.go
+++ b/snow/networking/router/chain_router_test.go
@@ -801,7 +801,7 @@ func TestRouterHonorsRequestedEngine(t *testing.T) {
 		chainRouter.HandleInbound(context.Background(), msg)
 	}
 
-	require.Equal(0, chainRouter.timedRequests.Len())
+	require.Zero(chainRouter.timedRequests.Len())
 }
 
 func TestRouterClearTimeouts(t *testing.T) {
@@ -1087,7 +1087,7 @@ func TestRouterClearTimeouts(t *testing.T) {
 		chainRouter.HandleInbound(context.Background(), msg)
 	}
 
-	require.Equal(t, 0, chainRouter.timedRequests.Len())
+	require.Zero(t, chainRouter.timedRequests.Len())
 }
 
 func TestValidatorOnlyMessageDrops(t *testing.T) {
diff --git a/utils/bag/bag_test.go b/utils/bag/bag_test.go
index 6a9dece6e6d..3ce7f8b1b26 100644
--- a/utils/bag/bag_test.go
+++ b/utils/bag/bag_test.go
@@ -17,19 +17,19 @@ func TestBagAdd(t *testing.T) {
 
 	bag := Bag[int]{}
 
-	require.Equal(0, bag.Count(elt0))
-	require.Equal(0, bag.Count(elt1))
-	require.Equal(0, bag.Len())
+	require.Zero(bag.Count(elt0))
+	require.Zero(bag.Count(elt1))
+	require.Zero(bag.Len())
 	require.Len(bag.List(), 0)
 	mode, freq := bag.Mode()
 	require.Equal(elt0, mode)
-	require.Equal(0, freq)
+	require.Zero(freq)
 	require.Len(bag.Threshold(), 0)
 
 	bag.Add(elt0)
 
 	require.Equal(1, bag.Count(elt0))
-	require.Equal(0, bag.Count(elt1))
+	require.Zero(bag.Count(elt1))
 	require.Equal(1, bag.Len())
 	require.Len(bag.List(), 1)
 	mode, freq = bag.Mode()
@@ -40,7 +40,7 @@ func TestBagAdd(t *testing.T) {
 	bag.Add(elt0)
 
 	require.Equal(2, bag.Count(elt0))
-	require.Equal(0, bag.Count(elt1))
+	require.Zero(bag.Count(elt1))
 	require.Equal(2, bag.Len())
 	require.Len(bag.List(), 1)
 	mode, freq = bag.Mode()
@@ -113,7 +113,7 @@ func TestBagFilter(t *testing.T) {
 	even := bag.Filter(filterFunc)
 
 	require.Equal(1, even.Count(elt0))
-	require.Equal(0, even.Count(elt1))
+	require.Zero(even.Count(elt1))
 	require.Equal(5, even.Count(elt2))
 }
@@ -138,11 +138,11 @@ func TestBagSplit(t *testing.T) {
 	odds := bags[1]
 
 	require.Equal(1, evens.Count(elt0))
-	require.Equal(0, evens.Count(elt1))
+	require.Zero(evens.Count(elt1))
 	require.Equal(5, evens.Count(elt2))
-	require.Equal(0, odds.Count(elt0))
+	require.Zero(odds.Count(elt0))
 	require.Equal(3, odds.Count(elt1))
-	require.Equal(0, odds.Count(elt2))
+	require.Zero(odds.Count(elt2))
 }
 
 func TestBagString(t *testing.T) {
@@ -168,7 +168,7 @@ func TestBagRemove(t *testing.T) {
 	bag := Bag[int]{}
 
 	bag.Remove(elt0)
-	require.Equal(0, bag.Len())
+	require.Zero(bag.Len())
 
 	bag.AddCount(elt0, 3)
 	bag.AddCount(elt1, 2)
@@ -181,7 +181,7 @@ func TestBagRemove(t *testing.T) {
 
 	bag.Remove(elt0)
 
-	require.Equal(0, bag.Count(elt0))
+	require.Zero(bag.Count(elt0))
 	require.Equal(2, bag.Count(elt1))
 	require.Equal(1, bag.Count(elt2))
 	require.Equal(3, bag.Len())
@@ -191,8 +191,8 @@ func TestBagRemove(t *testing.T) {
 	require.Equal(2, freq)
 
 	bag.Remove(elt1)
-	require.Equal(0, bag.Count(elt0))
-	require.Equal(0, bag.Count(elt1))
+	require.Zero(bag.Count(elt0))
+	require.Zero(bag.Count(elt1))
 	require.Equal(1, bag.Count(elt2))
 	require.Equal(1, bag.Len())
 	require.Len(bag.counts, 1)
diff --git a/utils/bag/unique_bag_test.go b/utils/bag/unique_bag_test.go
index e1920a21621..d15ecbf3a5c 100644
--- a/utils/bag/unique_bag_test.go
+++ b/utils/bag/unique_bag_test.go
@@ -96,8 +96,8 @@ func TestUniqueBagClear(t *testing.T) {
 	require.Empty(b.List())
 
 	bs := b.GetSet(elt1)
-	require.Equal(0, bs.Len())
+	require.Zero(bs.Len())
 
 	bs = b.GetSet(elt2)
-	require.Equal(0, bs.Len())
+	require.Zero(bs.Len())
 }
diff --git a/utils/beacon/set_test.go b/utils/beacon/set_test.go
index 4e8ada45795..da396631a98 100644
--- a/utils/beacon/set_test.go
+++ b/utils/beacon/set_test.go
@@ -44,7 +44,7 @@ func TestSet(t *testing.T) {
 	ipsArg := s.IPsArg()
 	require.Equal("", ipsArg)
 	len := s.Len()
-	require.Equal(0, len)
+	require.Zero(len)
 
 	err := s.Add(b0)
 	require.NoError(err)
diff --git a/utils/buffer/bounded_nonblocking_queue_test.go b/utils/buffer/bounded_nonblocking_queue_test.go
index ab2f7e209a5..e6a6fdac3e4 100644
--- a/utils/buffer/bounded_nonblocking_queue_test.go
+++ b/utils/buffer/bounded_nonblocking_queue_test.go
@@ -36,7 +36,7 @@ func TestBoundedQueue(t *testing.T) {
 	b, err := NewBoundedQueue(maxSize, onEvict)
 	require.NoError(err)
 
-	require.Equal(0, b.Len())
+	require.Zero(b.Len())
 
 	// Fill the queue
 	for i := 0; i < maxSize; i++ {
@@ -44,7 +44,7 @@ func TestBoundedQueue(t *testing.T) {
 		require.Equal(i+1, b.Len())
 		got, ok := b.Peek()
 		require.True(ok)
-		require.Equal(0, got)
+		require.Zero(got)
 		got, ok = b.Index(i)
 		require.True(ok)
 		require.Equal(i, got)
@@ -71,7 +71,7 @@ func TestBoundedQueue(t *testing.T) {
 	require.False(ok)
 	_, ok = b.Index(0)
 	require.False(ok)
-	require.Equal(0, b.Len())
+	require.Zero(b.Len())
 	require.Empty(b.List())
 
 	// Fill the queue again
@@ -131,7 +131,7 @@ func TestBoundedQueue(t *testing.T) {
 
 	// Queue is empty
 	require.Empty(b.List())
-	require.Equal(0, b.Len())
+	require.Zero(b.Len())
 	require.Equal([]int{0, 1, 2}, evicted)
 	_, ok = b.Pop()
 	require.False(ok)
diff --git a/utils/buffer/unbounded_deque_test.go b/utils/buffer/unbounded_deque_test.go
index ea9ccd8782a..fa0d7761a39 100644
--- a/utils/buffer/unbounded_deque_test.go
+++ b/utils/buffer/unbounded_deque_test.go
@@ -16,7 +16,7 @@ func TestUnboundedDeque_InitialCapGreaterThanMin(t *testing.T) {
 	require.IsType(&unboundedSliceDeque[int]{}, bIntf)
 	b := bIntf.(*unboundedSliceDeque[int])
 	require.Empty(b.List())
-	require.Equal(0, b.Len())
+	require.Zero(b.Len())
 	_, ok := b.Index(0)
 	require.False(ok)
 
@@ -30,7 +30,7 @@ func TestUnboundedDeque_InitialCapGreaterThanMin(t *testing.T) {
 	require.False(ok)
 
 	got, ok = b.PopLeft()
-	require.Equal(0, b.Len())
+	require.Zero(b.Len())
 	require.True(ok)
 	require.Equal(1, got)
 	_, ok = b.Index(0)
@@ -44,7 +44,7 @@ func TestUnboundedDeque_InitialCapGreaterThanMin(t *testing.T) {
 	require.Equal(1, got)
 
 	got, ok = b.PopRight()
-	require.Equal(0, b.Len())
+	require.Zero(b.Len())
 	require.True(ok)
 	require.Equal(1, got)
 	require.Empty(b.List())
@@ -59,7 +59,7 @@ func TestUnboundedDeque_InitialCapGreaterThanMin(t *testing.T) {
 	require.Equal(1, got)
 
 	got, ok = b.PopRight()
-	require.Equal(0, b.Len())
+	require.Zero(b.Len())
 	require.True(ok)
 	require.Equal(1, got)
 	require.Empty(b.List())
@@ -74,7 +74,7 @@ func TestUnboundedDeque_InitialCapGreaterThanMin(t *testing.T) {
 	require.Equal(1, got)
 
 	got, ok = b.PopLeft()
-	require.Equal(0, b.Len())
+	require.Zero(b.Len())
 	require.True(ok)
 	require.Equal(1, got)
 	require.Empty(b.List())
@@ -102,7 +102,7 @@ func TestUnboundedDeque_InitialCapGreaterThanMin(t *testing.T) {
 	require.Equal(1, got)
 
 	got, ok = b.PopLeft()
-	require.Equal(0, b.Len())
+	require.Zero(b.Len())
 	require.True(ok)
 	require.Equal(1, got)
 	require.Empty(b.List())
@@ -130,7 +130,7 @@ func TestUnboundedDeque_InitialCapGreaterThanMin(t *testing.T) {
 	require.Equal(1, got)
 
 	got, ok = b.PopRight()
-	require.Equal(0, b.Len())
+	require.Zero(b.Len())
 	require.True(ok)
 	require.Equal(1, got)
 	require.Empty(b.List())
@@ -158,7 +158,7 @@ func TestUnboundedDeque_InitialCapGreaterThanMin(t *testing.T) {
 	require.Equal(2, got)
 
 	got, ok = b.PopLeft()
-	require.Equal(0, b.Len())
+	require.Zero(b.Len())
 	require.True(ok)
 	require.Equal(2, got)
 	require.Empty(b.List())
@@ -186,7 +186,7 @@ func TestUnboundedDeque_InitialCapGreaterThanMin(t *testing.T) {
 	require.Equal(2, got)
 
 	got, ok = b.PopLeft()
-	require.Equal(0, b.Len())
+	require.Zero(b.Len())
 	require.True(ok)
 	require.Equal(2, got)
 	require.Empty(b.List())
@@ -214,7 +214,7 @@ func TestUnboundedDeque_InitialCapGreaterThanMin(t *testing.T) {
 	require.Equal(2, got)
 
 	got, ok = b.PopRight()
-	require.Equal(0, b.Len())
+	require.Zero(b.Len())
 	require.True(ok)
 	require.Equal(2, got)
 	require.Empty(b.List())
@@ -235,9 +235,9 @@ func TestUnboundedSliceDequePushLeftPopLeft(t *testing.T) {
 	bIntf := NewUnboundedDeque[int](2)
 	require.IsType(&unboundedSliceDeque[int]{}, bIntf)
 	b := bIntf.(*unboundedSliceDeque[int])
-	require.Equal(0, bIntf.Len())
+	require.Zero(bIntf.Len())
 	require.Equal(2, len(b.data))
-	require.Equal(0, b.left)
+	require.Zero(b.left)
 	require.Equal(1, b.right)
 	require.Empty(b.List()) // slice is [EMPTY]
 
@@ -343,7 +343,7 @@ func TestUnboundedSliceDequePushLeftPopLeft(t *testing.T) {
 	require.Equal(2, got)
 	require.Equal(1, b.Len())
 	require.Equal(4, len(b.data))
-	require.Equal(0, b.left)
+	require.Zero(b.left)
 	require.Equal(2, b.right)
 	require.Equal([]int{1}, b.List())
 	got, ok = b.Index(0)
@@ -385,7 +385,7 @@ func TestUnboundedSliceDequePushLeftPopLeft(t *testing.T) {
 	require.Equal(2, got)
 	require.Equal(1, b.Len())
 	require.Equal(4, len(b.data))
-	require.Equal(0, b.left)
+	require.Zero(b.left)
 	require.Equal(2, b.right)
 	require.Equal([]int{1}, b.List())
 	got, ok = b.Index(0)
@@ -395,7 +395,7 @@ func TestUnboundedSliceDequePushLeftPopLeft(t *testing.T) {
 	got, ok = b.PopLeft() // slice is [EMPTY,EMPTY,EMPTY,EMPTY]
 	require.True(ok)
 	require.Equal(1, got)
-	require.Equal(0, b.Len())
+	require.Zero(b.Len())
 	require.Equal(4, len(b.data))
 	require.Equal(1, b.left)
 	require.Equal(2, b.right)
@@ -418,9 +418,9 @@ func TestUnboundedSliceDequePushRightPopRight(t *testing.T) {
 	bIntf := NewUnboundedDeque[int](2)
 	require.IsType(&unboundedSliceDeque[int]{}, bIntf)
 	b := bIntf.(*unboundedSliceDeque[int])
-	require.Equal(0, bIntf.Len())
+	require.Zero(bIntf.Len())
 	require.Equal(2, len(b.data))
-	require.Equal(0, b.left)
+	require.Zero(b.left)
 	require.Equal(1, b.right)
 	require.Empty(b.List()) // slice is [EMPTY]
 
@@ -435,8 +435,8 @@ func TestUnboundedSliceDequePushRightPopRight(t *testing.T) {
 	b.PushRight(1) // slice is [1,EMPTY]
 	require.Equal(1, b.Len())
 	require.Equal(2, len(b.data))
-	require.Equal(0, b.left)
-	require.Equal(0, b.right)
+	require.Zero(b.left)
+	require.Zero(b.right)
 	require.Equal([]int{1}, b.List())
 	got, ok := b.Index(0)
 	require.True(ok)
@@ -548,12 +548,12 @@ func TestUnboundedSliceDequePushRightPopRight(t *testing.T) {
 	got, ok = b.PopRight() // slice is [EMPTY,EMPTY,EMPTY,EMPTY]
 	require.True(ok)
 	require.Equal(1, got)
-	require.Equal(0, b.Len())
+	require.Zero(b.Len())
 	require.Equal(4, len(b.data))
 	require.Equal(3, b.left)
-	require.Equal(0, b.right)
+	require.Zero(b.right)
 	require.Empty(b.List())
-	require.Equal(0, b.Len())
+	require.Zero(b.Len())
 	_, ok = b.Index(0)
 	require.False(ok)
 
@@ -568,7 +568,7 @@ func TestUnboundedSliceDequePushRightPopRight(t *testing.T) {
 	require.Equal(1, b.Len())
 	require.Equal(4, len(b.data))
 	require.Equal(2, b.left)
-	require.Equal(0, b.right)
+	require.Zero(b.right)
 	require.Equal([]int{1}, b.List())
 	got, ok = b.Index(0)
 	require.True(ok)
@@ -586,12 +586,12 @@ func TestUnboundedSliceDequePushRightPopRight(t *testing.T) {
 	got, ok = b.PopRight() // slice is [EMPTY,EMPTY,EMPTY,EMPTY]
 	require.True(ok)
 	require.Equal(1, got)
-	require.Equal(0, b.Len())
+	require.Zero(b.Len())
 	require.Equal(4, len(b.data))
 	require.Equal(2, b.left)
 	require.Equal(3, b.right)
 	require.Empty(b.List())
-	require.Equal(0, b.Len())
+	require.Zero(b.Len())
 	_, ok = b.Index(0)
 	require.False(ok)
 
@@ -606,7 +606,7 @@ func TestUnboundedSliceDequePushRightPopRight(t *testing.T) {
 	require.Equal(1, b.Len())
 	require.Equal(4, len(b.data))
 	require.Equal(2, b.left)
-	require.Equal(0, b.right)
+	require.Zero(b.right)
 	require.Equal([]int{2}, b.List())
 	got, ok = b.Index(0)
 	require.True(ok)
diff --git a/utils/linkedhashmap/linkedhashmap_test.go b/utils/linkedhashmap/linkedhashmap_test.go
index c9144f7f977..8bd7239ed5d 100644
--- a/utils/linkedhashmap/linkedhashmap_test.go
+++ b/utils/linkedhashmap/linkedhashmap_test.go
@@ -15,7 +15,7 @@ func TestLinkedHashmap(t *testing.T) {
 	require := require.New(t)
 
 	lh := New[ids.ID, int]()
-	require.Equal(0, lh.Len(), "a new hashmap should be empty")
+	require.Zero(lh.Len(), "a new hashmap should be empty")
 
 	key0 := ids.GenerateTestID()
 	_, exists := lh.Get(key0)
@@ -32,17 +32,17 @@ func TestLinkedHashmap(t *testing.T) {
 
 	val0, exists := lh.Get(key0)
 	require.True(exists, "should have found the value")
-	require.Equal(0, val0, "wrong value")
+	require.Zero(val0, "wrong value")
 
 	rkey0, val0, exists := lh.Oldest()
 	require.True(exists, "should have found the value")
 	require.Equal(key0, rkey0, "wrong key")
-	require.Equal(0, val0, "wrong value")
+	require.Zero(val0, "wrong value")
 
 	rkey0, val0, exists = lh.Newest()
 	require.True(exists, "should have found the value")
 	require.Equal(key0, rkey0, "wrong key")
-	require.Equal(0, val0, "wrong value")
+	require.Zero(val0, "wrong value")
 
 	key1 := ids.GenerateTestID()
 	lh.Put(key1, 1)
@@ -55,7 +55,7 @@ func TestLinkedHashmap(t *testing.T) {
 	rkey0, val0, exists = lh.Oldest()
 	require.True(exists, "should have found the value")
 	require.Equal(key0, rkey0, "wrong key")
-	require.Equal(0, val0, "wrong value")
+	require.Zero(val0, "wrong value")
 
 	rkey1, val1, exists := lh.Newest()
 	require.True(exists, "should have found the value")
@@ -87,7 +87,7 @@ func TestLinkedHashmap(t *testing.T) {
 	rkey0, val0, exists = lh.Oldest()
 	require.True(exists, "should have found the value")
 	require.Equal(key0, rkey0, "wrong key")
-	require.Equal(0, val0, "wrong value")
+	require.Zero(val0, "wrong value")
 
 	rkey1, val1, exists = lh.Newest()
 	require.True(exists, "should have found the value")
diff --git a/utils/math/continuous_averager_test.go b/utils/math/continuous_averager_test.go
index e7595537191..7eb4f25825a 100644
--- a/utils/math/continuous_averager_test.go
+++ b/utils/math/continuous_averager_test.go
@@ -45,7 +45,7 @@ func TestUninitializedAverager(t *testing.T) {
 	firstObservation := float64(10)
 	a := NewUninitializedAverager(halfLife)
-	require.Equal(t, 0.0, a.Read())
+	require.Zero(t, a.Read())
 
 	a.Observe(firstObservation, currentTime)
 	require.Equal(t, firstObservation, a.Read())
diff --git a/utils/math/safe_math_test.go b/utils/math/safe_math_test.go
index 5d9bb702314..b4ef771eb45 100644
--- a/utils/math/safe_math_test.go
+++ b/utils/math/safe_math_test.go
@@ -19,18 +19,18 @@ func TestMax(t *testing.T) {
 	require.Equal(maxUint64, Max(maxUint64, 0))
 	require.Equal(1, Max(1, 0))
 	require.Equal(1, Max(0, 1))
-	require.Equal(0, Max(0, 0))
+	require.Zero(Max(0, 0))
 	require.Equal(2, Max(2, 2))
 }
 
 func TestMin(t *testing.T) {
 	require := require.New(t)
 
-	require.Equal(uint64(0), Min(uint64(0), maxUint64))
-	require.Equal(uint64(0), Min(maxUint64, uint64(0)))
-	require.Equal(0, Min(1, 0))
-	require.Equal(0, Min(0, 1))
-	require.Equal(0, Min(0, 0))
+	require.Zero(Min(uint64(0), maxUint64))
+	require.Zero(Min(maxUint64, uint64(0)))
+	require.Zero(Min(1, 0))
+	require.Zero(Min(0, 1))
+	require.Zero(Min(0, 0))
 	require.Equal(2, Min(2, 2))
 	require.Equal(1, Min(1, 2))
 }
@@ -69,11 +69,11 @@ func TestSub(t *testing.T) {
 
 	got, err = Sub(uint64(2), uint64(2))
 	require.NoError(err)
-	require.Equal(uint64(0), got)
+	require.Zero(got)
 
 	got, err = Sub(maxUint64, maxUint64)
 	require.NoError(err)
-	require.Equal(uint64(0), got)
+	require.Zero(got)
 
 	got, err = Sub(uint64(3), uint64(2))
 	require.NoError(err)
@@ -91,11 +91,11 @@ func TestMul64(t *testing.T) {
 
 	got, err := Mul64(0, maxUint64)
 	require.NoError(err)
-	require.Equal(uint64(0), got)
+	require.Zero(got)
 
 	got, err = Mul64(maxUint64, 0)
 	require.NoError(err)
-	require.Equal(uint64(0), got)
+	require.Zero(got)
 
 	got, err = Mul64(uint64(1), uint64(3))
 	require.NoError(err)
@@ -111,7 +111,7 @@ func TestMul64(t *testing.T) {
 
 	got, err = Mul64(maxUint64, 0)
 	require.NoError(err)
-	require.Equal(uint64(0), got)
+	require.Zero(got)
 
 	_, err = Mul64(maxUint64-1, 2)
 	require.ErrorIs(err, ErrOverflow)
@@ -124,6 +124,6 @@ func TestAbsDiff(t *testing.T) {
 	require.Equal(maxUint64, AbsDiff(maxUint64, 0))
 	require.Equal(uint64(2), AbsDiff(uint64(3), uint64(1)))
 	require.Equal(uint64(2), AbsDiff(uint64(1), uint64(3)))
-	require.Equal(uint64(0), AbsDiff(uint64(1), uint64(1)))
-	require.Equal(uint64(0), AbsDiff(uint64(0), uint64(0)))
+	require.Zero(AbsDiff(uint64(1), uint64(1)))
+	require.Zero(AbsDiff(uint64(0), uint64(0)))
 }
diff --git a/utils/sampler/weighted_test.go b/utils/sampler/weighted_test.go
index 6830a07e387..5bd29fff79f 100644
--- a/utils/sampler/weighted_test.go
+++ b/utils/sampler/weighted_test.go
@@ -107,7 +107,7 @@ func WeightedSingletonTest(t *testing.T, s Weighted) {
 
 	index, err := s.Sample(0)
 	require.NoError(t, err)
-	require.Equal(t, 0, index, "should have selected the first element")
+	require.Zero(t, index, "should have selected the first element")
 }
 
 func WeightedWithZeroTest(t *testing.T, s Weighted) {
diff --git a/utils/sorting_test.go b/utils/sorting_test.go
index 9de3dffd441..86d4fecf864 100644
--- a/utils/sorting_test.go
+++ b/utils/sorting_test.go
@@ -23,7 +23,7 @@ func TestSortSliceSortable(t *testing.T) {
 	var s []sortable
 	Sort(s)
 	require.True(IsSortedAndUniqueSortable(s))
-	require.Equal(0, len(s))
+	require.Empty(s)
 
 	s = []sortable{1}
 	Sort(s)
diff --git a/utils/window/window_test.go b/utils/window/window_test.go
index b98bcc04f8c..9e36658850b 100644
--- a/utils/window/window_test.go
+++ b/utils/window/window_test.go
@@ -105,10 +105,10 @@ func TestTTLAdd(t *testing.T) {
 
 	// Now the window should look like this:
 	// []
-	require.Equal(t, 0, window.Length())
+	require.Zero(t, window.Length())
 
 	oldest, ok = window.Oldest()
-	require.Equal(t, 0, oldest)
+	require.Zero(t, oldest)
 	require.False(t, ok)
 }
@@ -138,7 +138,7 @@ func TestTTLLength(t *testing.T) {
 	clock.Set(epochStart.Add(11 * time.Second))
 
 	// No more elements should be present in the window.
-	require.Equal(t, 0, window.Length())
+	require.Zero(t, window.Length())
 }
 
 // TestTTLReadOnly tests that stale elements are still evicted on calling Oldest
@@ -173,9 +173,9 @@ func TestTTLOldest(t *testing.T) {
 
 	// Now there shouldn't be any elements in the window
 	oldest, ok = window.Oldest()
-	require.Equal(t, 0, oldest)
+	require.Zero(t, oldest)
 	require.False(t, ok)
-	require.Equal(t, 0, window.elements.Len())
+	require.Zero(t, window.elements.Len())
 }
 
 // Tests that we bound the amount of elements in the window
diff --git a/vms/avm/service_test.go b/vms/avm/service_test.go
index fc39a8cb41f..e3087affefd 100644
--- a/vms/avm/service_test.go
+++ b/vms/avm/service_test.go
@@ -325,7 +325,7 @@ func TestServiceGetBalanceStrict(t *testing.T) {
 	err = s.GetBalance(nil, balanceArgs, balanceReply)
 	require.NoError(t, err)
 	// The balance should not include the UTXO since it is only partly owned by [addr]
-	require.Equal(t, uint64(0), uint64(balanceReply.Balance))
+	require.Zero(t, balanceReply.Balance)
 	require.Len(t, balanceReply.UTXOIDs, 0, "should have returned 0 utxoIDs")
 
 	// A UTXO with a 1 out of 2 multisig
@@ -370,7 +370,7 @@ func TestServiceGetBalanceStrict(t *testing.T) {
 	err = s.GetBalance(nil, balanceArgs, balanceReply)
 	require.NoError(t, err)
 	// The balance should not include the UTXO since it is only partly owned by [addr]
-	require.Equal(t, uint64(0), uint64(balanceReply.Balance))
+	require.Zero(t, balanceReply.Balance)
 	require.Len(t, balanceReply.UTXOIDs, 0, "should have returned 0 utxoIDs")
 
 	// A UTXO with a 1 out of 1 multisig
@@ -417,7 +417,7 @@ func TestServiceGetBalanceStrict(t *testing.T) {
 	err = s.GetBalance(nil, balanceArgs, balanceReply)
 	require.NoError(t, err)
 	// The balance should not include the UTXO since it is only partly owned by [addr]
-	require.Equal(t, uint64(0), uint64(balanceReply.Balance))
+	require.Zero(t, balanceReply.Balance)
 	require.Len(t, balanceReply.UTXOIDs, 0, "should have returned 0 utxoIDs")
 }
diff --git a/vms/secp256k1fx/keychain_test.go b/vms/secp256k1fx/keychain_test.go
index 19da3e4224a..44118b9ff0a 100644
--- a/vms/secp256k1fx/keychain_test.go
+++ b/vms/secp256k1fx/keychain_test.go
@@ -67,7 +67,7 @@ func TestKeychainNew(t *testing.T) {
 	require := require.New(t)
 
 	kc := NewKeychain()
-	require.Equal(0, kc.Addresses().Len())
+	require.Zero(kc.Addresses().Len())
 
 	sk, err := kc.New()
 	require.NoError(err)
diff --git a/x/merkledb/cache_test.go b/x/merkledb/cache_test.go
index fb35c27afcf..d0c9943c2aa 100644
--- a/x/merkledb/cache_test.go
+++ b/x/merkledb/cache_test.go
@@ -25,7 +25,7 @@ func TestNewOnEvictCache(t *testing.T) {
 	cache := newOnEvictCache[int](maxSize, onEviction)
 	require.Equal(maxSize, cache.maxSize)
 	require.NotNil(cache.fifo)
-	require.Equal(0, cache.fifo.Len())
+	require.Zero(cache.fifo.Len())
 	// Can't test function equality directly so do this
 	// to make sure it was assigned correctly
 	err := cache.onEviction(0)
@@ -60,7 +60,7 @@ func TestOnEvictCacheNoOnEvictionError(t *testing.T) {
 	// Get key
 	val, ok := cache.Get(0)
 	require.True(ok)
-	require.Equal(0, val)
+	require.Zero(val)
 
 	// Get non-existent key
 	_, ok = cache.Get(1)
@@ -81,7 +81,7 @@ func TestOnEvictCacheNoOnEvictionError(t *testing.T) {
 	require.NoError(err)
 	require.Equal(maxSize, cache.fifo.Len())
 	require.Len(evicted, 1)
-	require.Equal(0, evicted[0])
+	require.Zero(evicted[0])
 
 	// Cache has [1,2,3]
 	iter := cache.fifo.NewIterator()
@@ -148,10 +148,10 @@ func TestOnEvictCacheNoOnEvictionError(t *testing.T) {
 	require.NoError(err)
 
 	// Cache should be empty
-	require.Equal(0, cache.fifo.Len())
+	require.Zero(cache.fifo.Len())
 	require.Len(evicted, 5)
 	require.Equal([]int{0, 1, 2, 3, 4}, evicted)
-	require.Equal(0, cache.fifo.Len())
+	require.Zero(cache.fifo.Len())
 	require.Equal(maxSize, cache.maxSize) // Should be unchanged
 }
@@ -204,7 +204,7 @@ func TestOnEvictCacheOnEvictionError(t *testing.T) {
 	require.ErrorIs(err, errTest)
 
 	// Should still be empty.
-	require.Equal(0, cache.fifo.Len())
+	require.Zero(cache.fifo.Len())
 	require.Equal(evicted, []int{0, 1, 2})
 	_, ok = cache.Get(0)
 	require.False(ok)
diff --git a/x/merkledb/path_test.go b/x/merkledb/path_test.go
index 5b68eadff2a..851d6ca01f2 100644
--- a/x/merkledb/path_test.go
+++ b/x/merkledb/path_test.go
@@ -19,7 +19,7 @@ func Test_SerializedPath_NibbleVal(t *testing.T) {
 
 func Test_SerializedPath_AppendNibble(t *testing.T) {
 	path := SerializedPath{Value: []byte{}}
-	require.Equal(t, 0, path.NibbleLength)
+	require.Zero(t, path.NibbleLength)
 
 	path = path.AppendNibble(1)
 	require.Equal(t, 1, path.NibbleLength)
diff --git a/x/merkledb/trie_test.go b/x/merkledb/trie_test.go
index 88ca81f26a2..d39b6cc201e 100644
--- a/x/merkledb/trie_test.go
+++ b/x/merkledb/trie_test.go
@@ -703,7 +703,7 @@ func Test_Trie_ChainDeletion(t *testing.T) {
 	root, err = newTrie.getEditableNode(EmptyPath)
 	require.NoError(t, err)
 	// since all values have been deleted, the nodes should have been cleaned up
-	require.Equal(t, 0, len(root.children))
+	require.Empty(t, root.children)
 }
 
 func Test_Trie_Invalidate_Children_On_Edits(t *testing.T) {
diff --git a/x/sync/sync_test.go b/x/sync/sync_test.go
index 1ed6530d760..2d2fb33bac3 100644
--- a/x/sync/sync_test.go
+++ b/x/sync/sync_test.go
@@ -120,7 +120,7 @@ func Test_Completion(t *testing.T) {
 	err = syncer.Wait(context.Background())
 	require.NoError(t, err)
 	syncer.workLock.Lock()
-	require.Equal(t, 0, syncer.unprocessedWork.Len())
+	require.Zero(t, syncer.unprocessedWork.Len())
 	require.Equal(t, 1, syncer.processedWork.Len())
 	syncer.workLock.Unlock()
 }
@@ -1100,7 +1100,7 @@ func Test_Sync_UpdateSyncTarget(t *testing.T) {
 	<-gotSignalChan
 
 	require.Equal(newSyncRoot, m.config.TargetRoot)
-	require.Equal(0, m.processedWork.Len())
+	require.Zero(m.processedWork.Len())
 	require.Equal(1, m.unprocessedWork.Len())
 }
diff --git a/x/sync/syncworkheap_test.go b/x/sync/syncworkheap_test.go
index d20025e93f5..4725a838bba 100644
--- a/x/sync/syncworkheap_test.go
+++ b/x/sync/syncworkheap_test.go
@@ -16,7 +16,7 @@ func Test_SyncWorkHeap_Heap_Methods(t *testing.T) {
 	require := require.New(t)
 
 	h := newSyncWorkHeap()
-	require.Equal(0, h.Len())
+	require.Zero(h.Len())
 
 	item1 := &heapItem{
 		workItem: &syncWorkItem{
@@ -30,16 +30,16 @@ func Test_SyncWorkHeap_Heap_Methods(t *testing.T) {
 	require.Equal(1, h.Len())
 	require.Len(h.priorityHeap, 1)
 	require.Equal(item1, h.priorityHeap[0])
-	require.Equal(0, h.priorityHeap[0].heapIndex)
+	require.Zero(h.priorityHeap[0].heapIndex)
 	require.Equal(1, h.sortedItems.Len())
 	gotItem, ok := h.sortedItems.Get(item1)
 	require.True(ok)
 	require.Equal(item1, gotItem)
 
 	h.Pop()
-	require.Equal(0, h.Len())
+	require.Zero(h.Len())
 	require.Len(h.priorityHeap, 0)
-	require.Equal(0, h.sortedItems.Len())
+	require.Zero(h.sortedItems.Len())
 
 	item2 := &heapItem{
 		workItem: &syncWorkItem{
@@ -55,7 +55,7 @@ func Test_SyncWorkHeap_Heap_Methods(t *testing.T) {
 	require.Len(h.priorityHeap, 2)
 	require.Equal(item1, h.priorityHeap[0])
 	require.Equal(item2, h.priorityHeap[1])
-	require.Equal(0, item1.heapIndex)
+	require.Zero(item1.heapIndex)
 	require.Equal(1, item2.heapIndex)
 	require.Equal(2, h.sortedItems.Len())
 	gotItem, ok = h.sortedItems.Get(item1)
@@ -71,7 +71,7 @@ func Test_SyncWorkHeap_Heap_Methods(t *testing.T) {
 	require.Equal(item2, h.priorityHeap[0])
 	require.Equal(item1, h.priorityHeap[1])
 	require.Equal(1, item1.heapIndex)
-	require.Equal(0, item2.heapIndex)
+	require.Zero(item2.heapIndex)
 
 	require.False(h.Less(0, 1))
 
@@ -84,9 +84,9 @@
 	gotItem = h.Pop().(*heapItem)
 	require.Equal(item2, gotItem)
-	require.Equal(0, h.Len())
+	require.Zero(h.Len())
 	require.Len(h.priorityHeap, 0)
-	require.Equal(0, h.sortedItems.Len())
+	require.Zero(h.sortedItems.Len())
 }
 
 // Tests Insert and GetWork
@@ -137,7 +137,7 @@ func Test_SyncWorkHeap_Insert_GetWork(t *testing.T) {
 
 	gotItem = h.GetWork()
 	require.Nil(gotItem)
-	require.Equal(0, h.Len())
+	require.Zero(h.Len())
 }
 
 func Test_SyncWorkHeap_remove(t *testing.T) {
@@ -157,9 +157,9 @@ func Test_SyncWorkHeap_remove(t *testing.T) {
 
 	heapItem1 := h.priorityHeap[0]
 	h.remove(heapItem1)
-	require.Equal(0, h.Len())
+	require.Zero(h.Len())
 	require.Len(h.priorityHeap, 0)
-	require.Equal(0, h.sortedItems.Len())
+	require.Zero(h.sortedItems.Len())
 
 	item2 := &syncWorkItem{
 		start: []byte{2},
@@ -177,15 +177,15 @@ func Test_SyncWorkHeap_remove(t *testing.T) {
 	require.Equal(1, h.Len())
 	require.Len(h.priorityHeap, 1)
 	require.Equal(1, h.sortedItems.Len())
-	require.Equal(0, h.priorityHeap[0].heapIndex)
+	require.Zero(h.priorityHeap[0].heapIndex)
 	require.Equal(item1, h.priorityHeap[0].workItem)
 
 	heapItem1 = h.priorityHeap[0]
 	require.Equal(item1, heapItem1.workItem)
 	h.remove(heapItem1)
 
-	require.Equal(0, h.Len())
+	require.Zero(h.Len())
 	require.Len(h.priorityHeap, 0)
-	require.Equal(0, h.sortedItems.Len())
+	require.Zero(h.sortedItems.Len())
 }
 
 func Test_SyncWorkHeap_Merge_Insert(t *testing.T) {

From b065a98567216ce32f72d7371fc1868237cd3618 Mon Sep 17 00:00:00 2001
From: dhrubabasu <7675102+dhrubabasu@users.noreply.github.com>
Date: Fri, 12 May 2023 13:07:49 -0400
Subject: [PATCH 02/79] ban usage of `require.Len` when testing for length `0`

---
 indexer/indexer_test.go                       | 20 +++++------
 .../throttling/bandwidth_throttler_test.go    |  4 +--
 .../inbound_msg_buffer_throttler_test.go      |  2 +-
 .../inbound_msg_byte_throttler_test.go        | 20 +++++------
 .../throttling/outbound_msg_throttler_test.go | 14 ++++----
 scripts/lint.sh                               | 11 +++++-
 snow/consensus/snowman/poll/set_test.go       | 34 +++++++++----------
 snow/engine/snowman/block/batched_vm_test.go  |  2 +-
 snow/networking/benchlist/benchlist_test.go   | 10 +++---
 snow/networking/handler/message_queue_test.go | 10 +++---
 utils/bag/bag_test.go                         |  4 +--
 utils/set/set_test.go                         |  8 ++---
 utils/sorting_test.go                         |  2 +-
 vms/avm/service_test.go                       |  8 ++---
 vms/components/chain/state_test.go            |  4 +--
 x/merkledb/cache_test.go                      |  2 +-
 x/merkledb/history_test.go                    |  2 +-
 x/sync/syncworkheap_test.go                   |  8 ++---
 18 files changed, 87 insertions(+), 78 deletions(-)
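Illustration only, not part of the patch: where the previous commit replaced zero comparisons with `require.Zero`, this one replaces zero-length checks with `require.Empty`, which passes for anything with no elements (nil or zero-length slices, maps, channels, and strings). A minimal sketch of the equivalence; the variables are hypothetical:

package example

import (
	"testing"

	"github.com/stretchr/testify/require"
)

func TestEmptyAssertions(t *testing.T) {
	var evicted []int
	streaks := map[string]int{}
	// Before: length asserted explicitly against 0.
	require.Len(t, evicted, 0)
	require.Len(t, streaks, 0)
	// After: Empty states the intent directly and also accepts nil.
	require.Empty(t, evicted)
	require.Empty(t, streaks)
}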
diff --git a/indexer/indexer_test.go b/indexer/indexer_test.go
index 5a5bb912f01..9bd708863c9 100644
--- a/indexer/indexer_test.go
+++ b/indexer/indexer_test.go
@@ -77,11 +77,11 @@ func TestNewIndexer(t *testing.T) {
 	require.True(idxr.indexingEnabled)
 	require.True(idxr.allowIncompleteIndex)
 	require.NotNil(idxr.blockIndices)
-	require.Len(idxr.blockIndices, 0)
+	require.Empty(idxr.blockIndices)
 	require.NotNil(idxr.txIndices)
-	require.Len(idxr.txIndices, 0)
+	require.Empty(idxr.txIndices)
 	require.NotNil(idxr.vtxIndices)
-	require.Len(idxr.vtxIndices, 0)
+	require.Empty(idxr.vtxIndices)
 	require.NotNil(idxr.blockAcceptorGroup)
 	require.NotNil(idxr.txAcceptorGroup)
 	require.NotNil(idxr.vertexAcceptorGroup)
@@ -178,8 +178,8 @@ func TestIndexer(t *testing.T) {
 	require.Equal("index/chain1", server.bases[0])
 	require.Equal("/block", server.endpoints[0])
 	require.Len(idxr.blockIndices, 1)
-	require.Len(idxr.txIndices, 0)
-	require.Len(idxr.vtxIndices, 0)
+	require.Empty(idxr.txIndices)
+	require.Empty(idxr.vtxIndices)
 
 	// Accept a container
 	blkID, blkBytes := ids.GenerateTestID(), utils.RandomBytes(32)
@@ -236,9 +236,9 @@ func TestIndexer(t *testing.T) {
 	idxr = idxrIntf.(*indexer)
 	now = time.Now()
 	idxr.clock.Set(now)
-	require.Len(idxr.blockIndices, 0)
-	require.Len(idxr.txIndices, 0)
-	require.Len(idxr.vtxIndices, 0)
+	require.Empty(idxr.blockIndices)
+	require.Empty(idxr.txIndices)
+	require.Empty(idxr.vtxIndices)
 	require.True(idxr.hasRunBefore)
 	previouslyIndexed, err = idxr.previouslyIndexed(chain1Ctx.ChainID)
 	require.NoError(err)
@@ -445,7 +445,7 @@ func TestIncompleteIndex(t *testing.T) {
 	isIncomplete, err = idxr.isIncomplete(chain1Ctx.ChainID)
 	require.NoError(err)
 	require.True(isIncomplete)
-	require.Len(idxr.blockIndices, 0)
+	require.Empty(idxr.blockIndices)
 
 	// Close and re-open the indexer, this time with indexing enabled
 	require.NoError(config.DB.(*versiondb.Database).Commit())
@@ -523,5 +523,5 @@ func TestIgnoreNonDefaultChains(t *testing.T) {
 	// RegisterChain should return without adding an index for this chain
 	chainVM := mocks.NewMockChainVM(ctrl)
 	idxr.RegisterChain("chain1", chain1Ctx, chainVM)
-	require.Len(idxr.blockIndices, 0)
+	require.Empty(idxr.blockIndices)
 }
diff --git a/network/throttling/bandwidth_throttler_test.go b/network/throttling/bandwidth_throttler_test.go
index f2a5e094b72..5d51555baa9 100644
--- a/network/throttling/bandwidth_throttler_test.go
+++ b/network/throttling/bandwidth_throttler_test.go
@@ -31,7 +31,7 @@ func TestBandwidthThrottler(t *testing.T) {
 	require.NotNil(throttler.limiters)
 	require.Equal(config.RefillRate, throttler.RefillRate)
 	require.Equal(config.MaxBurstSize, throttler.MaxBurstSize)
-	require.Len(throttler.limiters, 0)
+	require.Empty(throttler.limiters)
 
 	// Add a node
 	nodeID1 := ids.GenerateTestNodeID()
@@ -40,7 +40,7 @@ func TestBandwidthThrottler(t *testing.T) {
 
 	// Remove the node
 	throttler.RemoveNode(nodeID1)
-	require.Len(throttler.limiters, 0)
+	require.Empty(throttler.limiters)
 
 	// Add the node back
 	throttler.AddNode(nodeID1)
diff --git a/network/throttling/inbound_msg_buffer_throttler_test.go b/network/throttling/inbound_msg_buffer_throttler_test.go
index e6c0c6c087c..f7cf6d790d9 100644
--- a/network/throttling/inbound_msg_buffer_throttler_test.go
+++ b/network/throttling/inbound_msg_buffer_throttler_test.go
@@ -64,7 +64,7 @@ func TestMsgBufferThrottler(t *testing.T) {
 	throttler.release(nodeID1)
 	throttler.release(nodeID1)
 	throttler.release(nodeID1)
-	require.Len(throttler.nodeToNumProcessingMsgs, 0)
+	require.Empty(throttler.nodeToNumProcessingMsgs)
 }
 
 // Test inboundMsgBufferThrottler when an acquire is cancelled
diff --git a/network/throttling/inbound_msg_byte_throttler_test.go b/network/throttling/inbound_msg_byte_throttler_test.go
index 37f04214137..60869dc4191 100644
--- a/network/throttling/inbound_msg_byte_throttler_test.go
+++ b/network/throttling/inbound_msg_byte_throttler_test.go
@@ -139,7 +139,7 @@ func TestInboundMsgByteThrottler(t *testing.T) {
 	throttler.Acquire(context.Background(), 1, vdr1ID)
 	require.Equal(config.AtLargeAllocSize-1, throttler.remainingAtLargeBytes)
 	require.Equal(config.VdrAllocSize, throttler.remainingVdrBytes)
-	require.Len(throttler.nodeToVdrBytesUsed, 0)
+	require.Empty(throttler.nodeToVdrBytesUsed)
 	require.Len(throttler.nodeToAtLargeBytesUsed, 1)
 	require.Equal(uint64(1), throttler.nodeToAtLargeBytesUsed[vdr1ID])
 
@@ -147,8 +147,8 @@ func TestInboundMsgByteThrottler(t *testing.T) {
 	throttler.release(&msgMetadata{msgSize: 1}, vdr1ID)
 	require.Equal(config.AtLargeAllocSize, throttler.remainingAtLargeBytes)
 	require.Equal(config.VdrAllocSize, throttler.remainingVdrBytes)
-	require.Len(throttler.nodeToVdrBytesUsed, 0)
-	require.Len(throttler.nodeToAtLargeBytesUsed, 0)
+	require.Empty(throttler.nodeToVdrBytesUsed)
+	require.Empty(throttler.nodeToAtLargeBytesUsed)
 
 	// Use all the at-large allocation bytes and 1 of the validator allocation bytes
 	// Should return immediately.
@@ -170,7 +170,7 @@ func TestInboundMsgByteThrottler(t *testing.T) {
 	require.Equal(config.VdrAllocSize/2, throttler.nodeToVdrBytesUsed[vdr2ID])
 	require.Len(throttler.nodeToVdrBytesUsed, 2)
 	require.Len(throttler.nodeToAtLargeBytesUsed, 1)
-	require.Len(throttler.nodeToWaitingMsgID, 0)
+	require.Empty(throttler.nodeToWaitingMsgID)
 	require.Zero(throttler.waitingToAcquire.Len())
 
 	// vdr1 should be able to acquire the rest of the validator allocation
@@ -256,14 +256,14 @@ func TestInboundMsgByteThrottler(t *testing.T) {
 	require.Len(throttler.nodeToVdrBytesUsed, 1)
 	require.Zero(throttler.nodeToVdrBytesUsed[vdr1ID])
 	require.Equal(config.AtLargeAllocSize/2-2, throttler.remainingAtLargeBytes)
-	require.Len(throttler.nodeToWaitingMsgID, 0)
+	require.Empty(throttler.nodeToWaitingMsgID)
 	require.Zero(throttler.waitingToAcquire.Len())
 
 	// Non-validator should be able to take the rest of the at-large bytes
 	throttler.Acquire(context.Background(), config.AtLargeAllocSize/2-2, nonVdrID)
 	require.Zero(throttler.remainingAtLargeBytes)
 	require.Equal(config.AtLargeAllocSize/2-1, throttler.nodeToAtLargeBytesUsed[nonVdrID])
-	require.Len(throttler.nodeToWaitingMsgID, 0)
+	require.Empty(throttler.nodeToWaitingMsgID)
 	require.Zero(throttler.waitingToAcquire.Len())
 
 	// But should block on subsequent Acquires
@@ -292,7 +292,7 @@ func TestInboundMsgByteThrottler(t *testing.T) {
 	require.Zero(throttler.nodeToAtLargeBytesUsed[vdr2ID])
 	require.Equal(config.VdrAllocSize, throttler.remainingVdrBytes)
-	require.Len(throttler.nodeToVdrBytesUsed, 0)
+	require.Empty(throttler.nodeToVdrBytesUsed)
 	require.Zero(throttler.remainingAtLargeBytes)
 	require.NotContains(throttler.nodeToWaitingMsgID, nonVdrID)
 	require.Zero(throttler.waitingToAcquire.Len())
@@ -300,7 +300,7 @@ func TestInboundMsgByteThrottler(t *testing.T) {
 	// Release all of vdr1's messages
 	throttler.release(&msgMetadata{msgSize: 1}, vdr1ID)
 	throttler.release(&msgMetadata{msgSize: config.AtLargeAllocSize/2 - 1}, vdr1ID)
-	require.Len(throttler.nodeToVdrBytesUsed, 0)
+	require.Empty(throttler.nodeToVdrBytesUsed)
 	require.Equal(config.VdrAllocSize, throttler.remainingVdrBytes)
 	require.Equal(config.AtLargeAllocSize/2, throttler.remainingAtLargeBytes)
 	require.Zero(throttler.nodeToAtLargeBytesUsed[vdr1ID])
@@ -311,10 +311,10 @@ func TestInboundMsgByteThrottler(t *testing.T) {
 	throttler.release(&msgMetadata{msgSize: 1}, nonVdrID)
 	throttler.release(&msgMetadata{msgSize: 1}, nonVdrID)
 	throttler.release(&msgMetadata{msgSize: config.AtLargeAllocSize/2 - 2}, nonVdrID)
-	require.Len(throttler.nodeToVdrBytesUsed, 0)
+	require.Empty(throttler.nodeToVdrBytesUsed)
 	require.Equal(config.VdrAllocSize, throttler.remainingVdrBytes)
 	require.Equal(config.AtLargeAllocSize, throttler.remainingAtLargeBytes)
-	require.Len(throttler.nodeToAtLargeBytesUsed, 0)
+	require.Empty(throttler.nodeToAtLargeBytesUsed)
 	require.Zero(throttler.nodeToAtLargeBytesUsed[nonVdrID])
 	require.NotContains(throttler.nodeToWaitingMsgID, nonVdrID)
 	require.Zero(throttler.waitingToAcquire.Len())
diff --git a/network/throttling/outbound_msg_throttler_test.go b/network/throttling/outbound_msg_throttler_test.go
index 207041cad1b..e629944204e 100644
--- a/network/throttling/outbound_msg_throttler_test.go
+++ b/network/throttling/outbound_msg_throttler_test.go
@@ -57,7 +57,7 @@ func TestSybilOutboundMsgThrottler(t *testing.T) {
 	require.True(acquired)
 	require.Equal(config.AtLargeAllocSize-1, throttler.remainingAtLargeBytes)
 	require.Equal(config.VdrAllocSize, throttler.remainingVdrBytes)
-	require.Len(throttler.nodeToVdrBytesUsed, 0)
+	require.Empty(throttler.nodeToVdrBytesUsed)
 	require.Len(throttler.nodeToAtLargeBytesUsed, 1)
 	require.Equal(uint64(1), throttler.nodeToAtLargeBytesUsed[vdr1ID])
 
@@ -65,8 +65,8 @@ func TestSybilOutboundMsgThrottler(t *testing.T) {
 	throttlerIntf.Release(msg, vdr1ID)
 	require.Equal(config.AtLargeAllocSize, throttler.remainingAtLargeBytes)
 	require.Equal(config.VdrAllocSize, throttler.remainingVdrBytes)
-	require.Len(throttler.nodeToVdrBytesUsed, 0)
-	require.Len(throttler.nodeToAtLargeBytesUsed, 0)
+	require.Empty(throttler.nodeToVdrBytesUsed)
+	require.Empty(throttler.nodeToAtLargeBytesUsed)
 
 	// Use all the at-large allocation bytes and 1 of the validator allocation bytes
 	msg = testMsgWithSize(ctrl, config.AtLargeAllocSize+1)
@@ -142,13 +142,13 @@ func TestSybilOutboundMsgThrottler(t *testing.T) {
 	throttlerIntf.Release(msg, vdr2ID)
 	require.Zero(throttler.nodeToAtLargeBytesUsed[vdr2ID])
 	require.Equal(config.VdrAllocSize, throttler.remainingVdrBytes)
-	require.Len(throttler.nodeToVdrBytesUsed, 0)
+	require.Empty(throttler.nodeToVdrBytesUsed)
 	require.Zero(throttler.remainingAtLargeBytes)
 
 	// Release all of vdr1's messages
 	msg = testMsgWithSize(ctrl, config.VdrAllocSize/2-1)
 	throttlerIntf.Release(msg, vdr1ID)
-	require.Len(throttler.nodeToVdrBytesUsed, 0)
+	require.Empty(throttler.nodeToVdrBytesUsed)
 	require.Equal(config.VdrAllocSize, throttler.remainingVdrBytes)
 	require.Equal(config.AtLargeAllocSize/2-1, throttler.remainingAtLargeBytes)
 	require.Zero(throttler.nodeToAtLargeBytesUsed[vdr1ID])
@@ -156,10 +156,10 @@ func TestSybilOutboundMsgThrottler(t *testing.T) {
 	// Release nonVdr's messages
 	msg = testMsgWithSize(ctrl, config.AtLargeAllocSize/2+1)
 	throttlerIntf.Release(msg, nonVdrID)
-	require.Len(throttler.nodeToVdrBytesUsed, 0)
+	require.Empty(throttler.nodeToVdrBytesUsed)
 	require.Equal(config.VdrAllocSize, throttler.remainingVdrBytes)
 	require.Equal(config.AtLargeAllocSize, throttler.remainingAtLargeBytes)
-	require.Len(throttler.nodeToAtLargeBytesUsed, 0)
+	require.Empty(throttler.nodeToAtLargeBytesUsed)
 	require.Zero(throttler.nodeToAtLargeBytesUsed[nonVdrID])
 }
diff --git a/scripts/lint.sh b/scripts/lint.sh
index 47865c37e46..a181c49f045 100755
--- a/scripts/lint.sh
+++ b/scripts/lint.sh
@@ -21,7 +21,7 @@ fi
 # by default, "./scripts/lint.sh" runs all lint tests
 # to run only "license_header" test
 # TESTS='license_header' ./scripts/lint.sh
-TESTS=${TESTS:-"golangci_lint license_header require_error_is_no_funcs_as_params single_import interface_compliance_nil require_equal_zero"}
+TESTS=${TESTS:-"golangci_lint license_header require_error_is_no_funcs_as_params single_import interface_compliance_nil require_equal_zero require_len_zero"}
 
 function test_golangci_lint {
   go install -v github.com/golangci/golangci-lint/cmd/golangci-lint@v1.51.2
@@ -66,6 +66,15 @@ function test_require_equal_zero {
   fi
 }
 
+function test_require_len_zero {
+  if grep -R -zo -P 'require\.Len\((t, )?.+?, 0\)' .; then
+    echo ""
+    echo "Use require.Empty instead of require.Len when testing for 0 length."
+    echo ""
+    return 1
+  fi
+}
+
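Illustration only, not part of the patch: unlike `test_require_equal_zero`, this grep uses `-z`, which in effect scans each file as a single record rather than line by line, and the lazy `.+?` stops at the first `, 0)` it can pair with, so subject expressions that contain their own zeros are still caught. A hypothetical example of a call the check flags, and the preferred form:

package example

import (
	"strings"
	"testing"

	"github.com/stretchr/testify/require"
)

func TestLenZeroLint(t *testing.T) {
	// Flagged by test_require_len_zero even though the subject expression
	// itself contains ", 0)".
	require.Len(t, strings.Repeat("x", 0), 0)
	// Preferred spelling after this change:
	require.Empty(t, strings.Repeat("x", 0))
}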
 # Ref: https://go.dev/doc/effective_go#blank_implements
 function test_interface_compliance_nil {
   if grep -R -o -P '_ .+? = &.+?\{\}' .; then
diff --git a/snow/consensus/snowman/poll/set_test.go b/snow/consensus/snowman/poll/set_test.go
index 277d6d3da79..75d82355eec 100644
--- a/snow/consensus/snowman/poll/set_test.go
+++ b/snow/consensus/snowman/poll/set_test.go
@@ -76,17 +76,17 @@ func TestCreateAndFinishPollOutOfOrder_NewerFinishesFirst(t *testing.T) {
 
 	// vote out of order
 	results = s.Vote(1, vdr1, vtx1)
-	require.Len(t, results, 0)
+	require.Empty(t, results)
 	results = s.Vote(2, vdr2, vtx2)
-	require.Len(t, results, 0)
+	require.Empty(t, results)
 	results = s.Vote(2, vdr3, vtx2)
-	require.Len(t, results, 0)
+	require.Empty(t, results)
 
 	results = s.Vote(2, vdr1, vtx2) // poll 2 finished
-	require.Len(t, results, 0) // expect 2 to not have finished because 1 is still pending
+	require.Empty(t, results) // expect 2 to not have finished because 1 is still pending
 
 	results = s.Vote(1, vdr2, vtx1)
-	require.Len(t, results, 0)
+	require.Empty(t, results)
 
 	results = s.Vote(1, vdr3, vtx1) // poll 1 finished, poll 2 should be finished as well
 	require.Len(t, results, 2)
@@ -129,14 +129,14 @@ func TestCreateAndFinishPollOutOfOrder_OlderFinishesFirst(t *testing.T) {
 
 	// vote out of order
 	results = s.Vote(1, vdr1, vtx1)
-	require.Len(t, results, 0)
+	require.Empty(t, results)
 	results = s.Vote(2, vdr2, vtx2)
-	require.Len(t, results, 0)
+	require.Empty(t, results)
 	results = s.Vote(2, vdr3, vtx2)
-	require.Len(t, results, 0)
+	require.Empty(t, results)
 
 	results = s.Vote(1, vdr2, vtx1)
-	require.Len(t, results, 0)
+	require.Empty(t, results)
 
 	results = s.Vote(1, vdr3, vtx1) // poll 1 finished, poll 2 still remaining
 	require.Len(t, results, 1) // because 1 is the oldest
@@ -190,25 +190,25 @@ func TestCreateAndFinishPollOutOfOrder_UnfinishedPollsGaps(t *testing.T) {
 
 	// vote out of order
 	// 2 finishes first to create a gap of finished poll between two unfinished polls 1 and 3
 	results = s.Vote(2, vdr3, vtx2)
-	require.Len(t, results, 0)
+	require.Empty(t, results)
 	results = s.Vote(2, vdr2, vtx2)
-	require.Len(t, results, 0)
+	require.Empty(t, results)
 	results = s.Vote(2, vdr1, vtx2)
-	require.Len(t, results, 0)
+	require.Empty(t, results)
 
 	// 3 finishes now, 2 has already finished but 1 is not finished so we expect to receive no results still
 	results = s.Vote(3, vdr2, vtx3)
-	require.Len(t, results, 0)
+	require.Empty(t, results)
 	results = s.Vote(3, vdr3, vtx3)
-	require.Len(t, results, 0)
+	require.Empty(t, results)
 	results = s.Vote(3, vdr1, vtx3)
-	require.Len(t, results, 0)
+	require.Empty(t, results)
 
 	// 1 finishes now, 2 and 3 have already finished so we expect 3 items in results
 	results = s.Vote(1, vdr1, vtx1)
-	require.Len(t, results, 0)
+	require.Empty(t, results)
 	results = s.Vote(1, vdr2, vtx1)
-	require.Len(t, results, 0)
+	require.Empty(t, results)
 	results = s.Vote(1, vdr3, vtx1)
 	require.Len(t, results, 3)
 	require.Equal(t, vtx1, results[0].List()[0])
diff --git a/snow/engine/snowman/block/batched_vm_test.go b/snow/engine/snowman/block/batched_vm_test.go
index be536d03e02..553490b00cb 100644
--- a/snow/engine/snowman/block/batched_vm_test.go
+++ b/snow/engine/snowman/block/batched_vm_test.go
@@ -27,7 +27,7 @@ func TestGetAncestorsDatabaseNotFound(t *testing.T) {
 	}
 	containers, err := GetAncestors(context.Background(), vm, someID, 10, 10, 1*time.Second)
 	require.NoError(t, err)
-	require.Len(t, containers, 0)
+	require.Empty(t, containers)
 }
 
 // TestGetAncestorsPropagatesErrors checks errors other than
diff --git a/snow/networking/benchlist/benchlist_test.go b/snow/networking/benchlist/benchlist_test.go
index f3f36d738ae..11372f1a501 100644
--- a/snow/networking/benchlist/benchlist_test.go
+++ b/snow/networking/benchlist/benchlist_test.go
@@ -72,7 +72,7 @@ func TestBenchlistAdd(t *testing.T) {
 	require.False(t, b.isBenched(vdrID2))
 	require.False(t, b.isBenched(vdrID3))
 	require.False(t, b.isBenched(vdrID4))
-	require.Len(t, b.failureStreaks, 0)
+	require.Empty(t, b.failureStreaks)
 	require.Equal(t, b.benchedQueue.Len(), 0)
 	require.Equal(t, b.benchlistSet.Len(), 0)
 	b.lock.Unlock()
@@ -126,7 +126,7 @@ func TestBenchlistAdd(t *testing.T) {
 	require.Equal(t, vdrID0, next.nodeID)
 	require.True(t, !next.benchedUntil.After(now.Add(duration)))
 	require.True(t, !next.benchedUntil.Before(now.Add(duration/2)))
-	require.Len(t, b.failureStreaks, 0)
+	require.Empty(t, b.failureStreaks)
 	require.True(t, benched)
 	benchable.BenchedF = nil
 	b.lock.Unlock()
@@ -146,7 +146,7 @@ func TestBenchlistAdd(t *testing.T) {
 	require.False(t, b.isBenched(vdrID1))
 	require.Equal(t, b.benchedQueue.Len(), 1)
 	require.Equal(t, b.benchlistSet.Len(), 1)
-	require.Len(t, b.failureStreaks, 0)
+	require.Empty(t, b.failureStreaks)
 	b.lock.Unlock()
 
 	// Register another failure for vdr0, who is benched
@@ -154,7 +154,7 @@ func TestBenchlistAdd(t *testing.T) {
 
 	// A failure for an already benched validator should not count against it
 	b.lock.Lock()
-	require.Len(t, b.failureStreaks, 0)
+	require.Empty(t, b.failureStreaks)
 	b.lock.Unlock()
 }
@@ -369,7 +369,7 @@ func TestBenchlistRemove(t *testing.T) {
 	require.True(t, b.isBenched(vdrID2))
 	require.Equal(t, 3, b.benchedQueue.Len())
 	require.Equal(t, 3, b.benchlistSet.Len())
-	require.Len(t, b.failureStreaks, 0)
+	require.Empty(t, b.failureStreaks)
 
 	// Ensure the benched queue root has the min end time
 	minEndTime := b.benchedQueue[0].benchedUntil
diff --git a/snow/networking/handler/message_queue_test.go b/snow/networking/handler/message_queue_test.go
index cfe5d82768a..62f29db0e96 100644
--- a/snow/networking/handler/message_queue_test.go
+++ b/snow/networking/handler/message_queue_test.go
@@ -60,7 +60,7 @@ func TestQueue(t *testing.T) {
 	require.Equal(1, u.Len())
 	_, gotMsg1, ok := u.Pop()
 	require.True(ok)
-	require.Len(u.nodeToUnprocessedMsgs, 0)
+	require.Empty(u.nodeToUnprocessedMsgs)
 	require.Zero(u.Len())
 	require.Equal(msg1, gotMsg1)
 
@@ -70,7 +70,7 @@ func TestQueue(t *testing.T) {
 	require.Equal(1, u.Len())
 	_, gotMsg1, ok = u.Pop()
 	require.True(ok)
-	require.Len(u.nodeToUnprocessedMsgs, 0)
+	require.Empty(u.nodeToUnprocessedMsgs)
 	require.Zero(u.Len())
 	require.Equal(msg1, gotMsg1)
 
@@ -80,7 +80,7 @@ func TestQueue(t *testing.T) {
 	require.Equal(1, u.Len())
 	_, gotMsg1, ok = u.Pop()
 	require.True(ok)
-	require.Len(u.nodeToUnprocessedMsgs, 0)
+	require.Empty(u.nodeToUnprocessedMsgs)
 	require.Zero(u.Len())
 	require.Equal(msg1, gotMsg1)
 
@@ -90,7 +90,7 @@ func TestQueue(t *testing.T) {
 	require.Equal(1, u.Len())
 	_, gotMsg1, ok = u.Pop()
 	require.True(ok)
-	require.Len(u.nodeToUnprocessedMsgs, 0)
+	require.Empty(u.nodeToUnprocessedMsgs)
 	require.Zero(u.Len())
 	require.Equal(msg1, gotMsg1)
 
@@ -126,7 +126,7 @@ func TestQueue(t *testing.T) {
 	_, gotMsg1, ok = u.Pop()
 	require.True(ok)
 	require.Equal(msg1, gotMsg1)
-	require.Len(u.nodeToUnprocessedMsgs, 0)
+	require.Empty(u.nodeToUnprocessedMsgs)
 	require.Zero(u.Len())
 
 	// u is now empty
diff --git a/utils/bag/bag_test.go b/utils/bag/bag_test.go
index 3ce7f8b1b26..2f28f0f049d 100644
--- a/utils/bag/bag_test.go
+++ b/utils/bag/bag_test.go
@@ -20,11 +20,11 @@ func TestBagAdd(t *testing.T) {
 	require.Zero(bag.Count(elt0))
 	require.Zero(bag.Count(elt1))
 	require.Zero(bag.Len())
-	require.Len(bag.List(), 0)
+	require.Empty(bag.List())
 	mode, freq := bag.Mode()
 	require.Equal(elt0, mode)
 	require.Zero(freq)
-	require.Len(bag.Threshold(), 0)
+	require.Empty(bag.Threshold())
 
 	bag.Add(elt0)
diff --git a/utils/set/set_test.go b/utils/set/set_test.go
index ed516cd33eb..6ecd69ca25e 100644
--- a/utils/set/set_test.go
+++ b/utils/set/set_test.go
@@ -53,11 +53,11 @@ func TestSetCappedList(t *testing.T) {
 
 	id := 0
 
-	require.Len(s.CappedList(0), 0)
+	require.Empty(s.CappedList(0))
 
 	s.Add(id)
 
-	require.Len(s.CappedList(0), 0)
+	require.Empty(s.CappedList(0))
 	require.Len(s.CappedList(1), 1)
 	require.Equal(s.CappedList(1)[0], id)
 	require.Len(s.CappedList(2), 1)
@@ -66,7 +66,7 @@ func TestSetCappedList(t *testing.T) {
 	id2 := 1
 	s.Add(id2)
 
-	require.Len(s.CappedList(0), 0)
+	require.Empty(s.CappedList(0))
 	require.Len(s.CappedList(1), 1)
 	require.Len(s.CappedList(2), 2)
 	require.Len(s.CappedList(3), 2)
@@ -82,7 +82,7 @@ func TestSetClear(t *testing.T) {
 		set.Add(i)
 	}
 	set.Clear()
-	require.Len(t, set, 0)
+	require.Empty(t, set)
 	set.Add(1337)
 	require.Len(t, set, 1)
 }
diff --git a/utils/sorting_test.go b/utils/sorting_test.go
index 86d4fecf864..8be6f26ff47 100644
--- a/utils/sorting_test.go
+++ b/utils/sorting_test.go
@@ -115,7 +115,7 @@ func TestSortByHash(t *testing.T) {
 
 	s := [][]byte{}
 	SortByHash(s)
-	require.Len(s, 0)
+	require.Empty(s)
 
 	s = [][]byte{{1}}
 	SortByHash(s)
diff --git a/vms/avm/service_test.go b/vms/avm/service_test.go
index e3087affefd..5714cf60792 100644
--- a/vms/avm/service_test.go
+++ b/vms/avm/service_test.go
@@ -519,7 +519,7 @@ func TestServiceGetAllBalances(t *testing.T) {
 	reply = &GetAllBalancesReply{}
 	err = s.GetAllBalances(nil, balanceArgs, reply)
 	require.NoError(t, err)
-	require.Len(t, reply.Balances, 0)
+	require.Empty(t, reply.Balances)
 
 	// A UTXO with a 1 out of 2 multisig
 	// where one of the addresses is [addr]
@@ -562,7 +562,7 @@ func TestServiceGetAllBalances(t *testing.T) {
 	err = s.GetAllBalances(nil, balanceArgs, reply)
 	require.NoError(t, err)
 	// The balance should not include the UTXO since it is only partly owned by [addr]
-	require.Len(t, reply.Balances, 0)
+	require.Empty(t, reply.Balances)
 
 	// A UTXO with a 1 out of 1 multisig
 	// but with a locktime in the future
@@ -607,7 +607,7 @@ func TestServiceGetAllBalances(t *testing.T) {
 	err = s.GetAllBalances(nil, balanceArgs, reply)
 	require.NoError(t, err)
 	// The balance should not include the UTXO since it is only partly owned by [addr]
-	require.Len(t, reply.Balances, 0)
+	require.Empty(t, reply.Balances)
 
 	// A UTXO for a different asset
 	otherAssetID := ids.GenerateTestID()
@@ -654,7 +654,7 @@ func TestServiceGetAllBalances(t *testing.T) {
 	err = s.GetAllBalances(nil, balanceArgs, reply)
 	require.NoError(t, err)
 	// The balance should include the UTXO since it is partly owned by [addr]
-	require.Len(t, reply.Balances, 0)
+	require.Empty(t, reply.Balances)
 }
 
 func TestServiceGetTx(t *testing.T) {
diff --git a/vms/components/chain/state_test.go b/vms/components/chain/state_test.go
index 30a14fa67d6..9fdad9d8f84 100644
--- a/vms/components/chain/state_test.go
+++ b/vms/components/chain/state_test.go
@@ -385,7 +385,7 @@ func TestBuildBlock(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
-	require.Len(t, chainState.verifiedBlocks, 0)
+	require.Empty(t, chainState.verifiedBlocks)
 
 	if err := builtBlk.Verify(context.Background()); err != nil {
 		t.Fatalf("Built block failed verification due to %s", err)
@@ -433,7 +433,7 @@ func TestStateDecideBlock(t *testing.T) {
 		t.Fatal("Bad block should have failed verification")
 	}
 	// Ensure a 
block that fails verification is not marked as processing - require.Len(t, chainState.verifiedBlocks, 0) + require.Empty(t, chainState.verifiedBlocks) // Ensure that an error during block acceptance is propagated correctly badBlk, err = chainState.ParseBlock(context.Background(), badAcceptBlk.Bytes()) diff --git a/x/merkledb/cache_test.go b/x/merkledb/cache_test.go index d0c9943c2aa..ba66268cce0 100644 --- a/x/merkledb/cache_test.go +++ b/x/merkledb/cache_test.go @@ -72,7 +72,7 @@ func TestOnEvictCacheNoOnEvictionError(t *testing.T) { require.NoError(err) require.Equal(i+1, cache.fifo.Len()) } - require.Len(evicted, 0) + require.Empty(evicted) // Cache has [0,1,2] diff --git a/x/merkledb/history_test.go b/x/merkledb/history_test.go index 0151cde1774..438be08903b 100644 --- a/x/merkledb/history_test.go +++ b/x/merkledb/history_test.go @@ -242,7 +242,7 @@ func Test_History_Bad_GetValueChanges_Input(t *testing.T) { // same start/end roots should yield an empty changelist changes, err := db.history.getValueChanges(endRoot, endRoot, nil, nil, 10) require.NoError(err) - require.Len(changes.values, 0) + require.Empty(changes.values) } func Test_History_Trigger_History_Queue_Looping(t *testing.T) { diff --git a/x/sync/syncworkheap_test.go b/x/sync/syncworkheap_test.go index 4725a838bba..03c0fbb9e30 100644 --- a/x/sync/syncworkheap_test.go +++ b/x/sync/syncworkheap_test.go @@ -38,7 +38,7 @@ func Test_SyncWorkHeap_Heap_Methods(t *testing.T) { h.Pop() require.Zero(h.Len()) - require.Len(h.priorityHeap, 0) + require.Empty(h.priorityHeap) require.Zero(h.sortedItems.Len()) item2 := &heapItem{ @@ -85,7 +85,7 @@ func Test_SyncWorkHeap_Heap_Methods(t *testing.T) { require.Equal(item2, gotItem) require.Zero(h.Len()) - require.Len(h.priorityHeap, 0) + require.Empty(h.priorityHeap) require.Zero(h.sortedItems.Len()) } @@ -158,7 +158,7 @@ func Test_SyncWorkHeap_remove(t *testing.T) { h.remove(heapItem1) require.Zero(h.Len()) - require.Len(h.priorityHeap, 0) + require.Empty(h.priorityHeap) require.Zero(h.sortedItems.Len()) item2 := &syncWorkItem{ @@ -184,7 +184,7 @@ func Test_SyncWorkHeap_remove(t *testing.T) { require.Equal(item1, heapItem1.workItem) h.remove(heapItem1) require.Zero(h.Len()) - require.Len(h.priorityHeap, 0) + require.Empty(h.priorityHeap) require.Zero(h.sortedItems.Len()) } From fd714b1f1830d4c2f68ac486ae4da8eb6a2d8f45 Mon Sep 17 00:00:00 2001 From: dhrubabasu <7675102+dhrubabasu@users.noreply.github.com> Date: Fri, 12 May 2023 13:14:55 -0400 Subject: [PATCH 03/79] add failing test --- scripts/lint.sh | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/scripts/lint.sh b/scripts/lint.sh index a181c49f045..b81a6761411 100755 --- a/scripts/lint.sh +++ b/scripts/lint.sh @@ -21,7 +21,7 @@ fi # by default, "./scripts/lint.sh" runs all lint tests # to run only "license_header" test # TESTS='license_header' ./scripts/lint.sh -TESTS=${TESTS:-"golangci_lint license_header require_error_is_no_funcs_as_params single_import interface_compliance_nil require_equal_zero require_len_zero"} +TESTS=${TESTS:-"golangci_lint license_header require_error_is_no_funcs_as_params single_import interface_compliance_nil require_equal_zero require_len_zero require_equal_len"} function test_golangci_lint { go install -v github.com/golangci/golangci-lint/cmd/golangci-lint@v1.51.2 @@ -75,6 +75,15 @@ function test_require_len_zero { fi } +function test_require_equal_len { + if grep -R -o -P 'require\.Equal\((t, )?\d+, len\(' .; then + echo "" + echo "Use require.Len instead of require.Equal when 
testing for length." + echo "" + return 1 + fi +} + # Ref: https://go.dev/doc/effective_go#blank_implements function test_interface_compliance_nil { if grep -R -o -P '_ .+? = &.+?\{\}' .; then From d4d925bcdc1dccfe4bc2d8801a7e1699cf2f21a9 Mon Sep 17 00:00:00 2001 From: dhrubabasu <7675102+dhrubabasu@users.noreply.github.com> Date: Fri, 12 May 2023 13:17:02 -0400 Subject: [PATCH 04/79] fix --- codec/test_codec.go | 2 +- snow/engine/common/queue/jobs_test.go | 2 +- utils/buffer/unbounded_deque_test.go | 38 +++++++++++++-------------- vms/platformvm/service_test.go | 2 +- x/merkledb/history_test.go | 2 +- x/merkledb/node_test.go | 2 +- x/merkledb/trie_test.go | 12 ++++----- 7 files changed, 30 insertions(+), 30 deletions(-) diff --git a/codec/test_codec.go b/codec/test_codec.go index e82593e1e3f..40e91b7df5a 100644 --- a/codec/test_codec.go +++ b/codec/test_codec.go @@ -690,7 +690,7 @@ func TestSliceWithEmptySerialization(codec GeneralCodec, t testing.TB) { version, err := manager.Unmarshal(expected, &unmarshaled) require.NoError(err) require.Zero(version) - require.Equal(1000, len(unmarshaled.Arr)) + require.Len(unmarshaled.Arr, 1000) } func TestSliceWithEmptySerializationOutOfMemory(codec GeneralCodec, t testing.TB) { diff --git a/snow/engine/common/queue/jobs_test.go b/snow/engine/common/queue/jobs_test.go index d89d01640dc..8a5205af698 100644 --- a/snow/engine/common/queue/jobs_test.go +++ b/snow/engine/common/queue/jobs_test.go @@ -411,7 +411,7 @@ func TestHandleJobWithMissingDependencyOnRunnableStack(t *testing.T) { } missingIDs := jobs.MissingIDs() - require.Equal(1, len(missingIDs)) + require.Len(missingIDs, 1) require.Equal(missingIDs[0], job0.ID()) diff --git a/utils/buffer/unbounded_deque_test.go b/utils/buffer/unbounded_deque_test.go index fa0d7761a39..22396e4504a 100644 --- a/utils/buffer/unbounded_deque_test.go +++ b/utils/buffer/unbounded_deque_test.go @@ -236,7 +236,7 @@ func TestUnboundedSliceDequePushLeftPopLeft(t *testing.T) { require.IsType(&unboundedSliceDeque[int]{}, bIntf) b := bIntf.(*unboundedSliceDeque[int]) require.Zero(bIntf.Len()) - require.Equal(2, len(b.data)) + require.Len(b.data, 2) require.Zero(b.left) require.Equal(1, b.right) require.Empty(b.List()) @@ -251,7 +251,7 @@ func TestUnboundedSliceDequePushLeftPopLeft(t *testing.T) { b.PushLeft(1) // slice is [1,EMPTY] require.Equal(1, b.Len()) - require.Equal(2, len(b.data)) + require.Len(b.data, 2) require.Equal(1, b.left) require.Equal(1, b.right) require.Equal([]int{1}, b.List()) @@ -267,7 +267,7 @@ func TestUnboundedSliceDequePushLeftPopLeft(t *testing.T) { // This causes a resize b.PushLeft(2) // slice is [2,1,EMPTY,EMPTY] require.Equal(2, b.Len()) - require.Equal(4, len(b.data)) + require.Len(b.data, 4) require.Equal(3, b.left) require.Equal(2, b.right) require.Equal([]int{2, 1}, b.List()) @@ -289,7 +289,7 @@ func TestUnboundedSliceDequePushLeftPopLeft(t *testing.T) { // Tests left moving left with no wrap around. 
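// (annotation, not part of the diff: the left/right values asserted in this
// test imply a ring-buffer convention that can be read off the checks
// themselves: b.left is the free slot just before the leftmost element and
// b.right the free slot just after the rightmost, so b.Len() equals
// (b.right - b.left - 1) mod len(b.data); PushLeft writes at b.left and then
// decrements it modulo the capacity, which is the "left moving left" step
// exercised next.)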
b.PushLeft(3) // slice is [2,1,EMPTY,3] require.Equal(3, b.Len()) - require.Equal(4, len(b.data)) + require.Len(b.data, 4) require.Equal(2, b.left) require.Equal(2, b.right) require.Equal([]int{3, 2, 1}, b.List()) @@ -318,7 +318,7 @@ func TestUnboundedSliceDequePushLeftPopLeft(t *testing.T) { require.True(ok) require.Equal(3, got) require.Equal(2, b.Len()) - require.Equal(4, len(b.data)) + require.Len(b.data, 4) require.Equal(3, b.left) require.Equal(2, b.right) require.Equal([]int{2, 1}, b.List()) @@ -342,7 +342,7 @@ func TestUnboundedSliceDequePushLeftPopLeft(t *testing.T) { require.True(ok) require.Equal(2, got) require.Equal(1, b.Len()) - require.Equal(4, len(b.data)) + require.Len(b.data, 4) require.Zero(b.left) require.Equal(2, b.right) require.Equal([]int{1}, b.List()) @@ -361,7 +361,7 @@ func TestUnboundedSliceDequePushLeftPopLeft(t *testing.T) { // Test left wrapping around to the right side. b.PushLeft(2) // slice is [2,1,EMPTY,EMPTY] require.Equal(2, b.Len()) - require.Equal(4, len(b.data)) + require.Len(b.data, 4) require.Equal(3, b.left) require.Equal(2, b.right) require.Equal([]int{2, 1}, b.List()) @@ -384,7 +384,7 @@ func TestUnboundedSliceDequePushLeftPopLeft(t *testing.T) { require.True(ok) require.Equal(2, got) require.Equal(1, b.Len()) - require.Equal(4, len(b.data)) + require.Len(b.data, 4) require.Zero(b.left) require.Equal(2, b.right) require.Equal([]int{1}, b.List()) @@ -396,7 +396,7 @@ func TestUnboundedSliceDequePushLeftPopLeft(t *testing.T) { require.True(ok) require.Equal(1, got) require.Zero(b.Len()) - require.Equal(4, len(b.data)) + require.Len(b.data, 4) require.Equal(1, b.left) require.Equal(2, b.right) require.Empty(b.List()) @@ -419,7 +419,7 @@ func TestUnboundedSliceDequePushRightPopRight(t *testing.T) { require.IsType(&unboundedSliceDeque[int]{}, bIntf) b := bIntf.(*unboundedSliceDeque[int]) require.Zero(bIntf.Len()) - require.Equal(2, len(b.data)) + require.Len(b.data, 2) require.Zero(b.left) require.Equal(1, b.right) require.Empty(b.List()) @@ -434,7 +434,7 @@ func TestUnboundedSliceDequePushRightPopRight(t *testing.T) { b.PushRight(1) // slice is [1,EMPTY] require.Equal(1, b.Len()) - require.Equal(2, len(b.data)) + require.Len(b.data, 2) require.Zero(b.left) require.Zero(b.right) require.Equal([]int{1}, b.List()) @@ -453,7 +453,7 @@ func TestUnboundedSliceDequePushRightPopRight(t *testing.T) { // This causes a resize b.PushRight(2) // slice is [1,2,EMPTY,EMPTY] require.Equal(2, b.Len()) - require.Equal(4, len(b.data)) + require.Len(b.data, 4) require.Equal(3, b.left) require.Equal(2, b.right) require.Equal([]int{1, 2}, b.List()) @@ -475,7 +475,7 @@ func TestUnboundedSliceDequePushRightPopRight(t *testing.T) { // Tests right moving right with no wrap around b.PushRight(3) // slice is [1,2,3,EMPTY] require.Equal(3, b.Len()) - require.Equal(4, len(b.data)) + require.Len(b.data, 4) require.Equal(3, b.left) require.Equal(3, b.right) require.Equal([]int{1, 2, 3}, b.List()) @@ -502,7 +502,7 @@ func TestUnboundedSliceDequePushRightPopRight(t *testing.T) { require.True(ok) require.Equal(3, got) require.Equal(2, b.Len()) - require.Equal(4, len(b.data)) + require.Len(b.data, 4) require.Equal(3, b.left) require.Equal(2, b.right) require.Equal([]int{1, 2}, b.List()) @@ -527,7 +527,7 @@ func TestUnboundedSliceDequePushRightPopRight(t *testing.T) { require.True(ok) require.Equal(2, got) require.Equal(1, b.Len()) - require.Equal(4, len(b.data)) + require.Len(b.data, 4) require.Equal(3, b.left) require.Equal(1, b.right) require.Equal([]int{1}, b.List()) @@ -549,7 
+549,7 @@ func TestUnboundedSliceDequePushRightPopRight(t *testing.T) { require.True(ok) require.Equal(1, got) require.Zero(b.Len()) - require.Equal(4, len(b.data)) + require.Len(b.data, 4) require.Equal(3, b.left) require.Zero(b.right) require.Empty(b.List()) @@ -566,7 +566,7 @@ func TestUnboundedSliceDequePushRightPopRight(t *testing.T) { b.PushLeft(1) // slice is [EMPTY,EMPTY,EMPTY,1] require.Equal(1, b.Len()) - require.Equal(4, len(b.data)) + require.Len(b.data, 4) require.Equal(2, b.left) require.Zero(b.right) require.Equal([]int{1}, b.List()) @@ -587,7 +587,7 @@ func TestUnboundedSliceDequePushRightPopRight(t *testing.T) { require.True(ok) require.Equal(1, got) require.Zero(b.Len()) - require.Equal(4, len(b.data)) + require.Len(b.data, 4) require.Equal(2, b.left) require.Equal(3, b.right) require.Empty(b.List()) @@ -604,7 +604,7 @@ func TestUnboundedSliceDequePushRightPopRight(t *testing.T) { // Tests right wrapping around to the left b.PushRight(2) // slice is [EMPTY,EMPTY,EMPTY,2] require.Equal(1, b.Len()) - require.Equal(4, len(b.data)) + require.Len(b.data, 4) require.Equal(2, b.left) require.Zero(b.right) require.Equal([]int{2}, b.List()) diff --git a/vms/platformvm/service_test.go b/vms/platformvm/service_test.go index 8195a7ac3c9..c38c044c7e0 100644 --- a/vms/platformvm/service_test.go +++ b/vms/platformvm/service_test.go @@ -676,7 +676,7 @@ func TestGetCurrentValidators(t *testing.T) { require.Equal(vdr.NodeID, innerVdr.NodeID) require.NotNil(innerVdr.Delegators) - require.Equal(1, len(*innerVdr.Delegators)) + require.Len(*innerVdr.Delegators, 1) delegator := (*innerVdr.Delegators)[0] require.Equal(delegator.NodeID, innerVdr.NodeID) require.Equal(uint64(delegator.StartTime), delegatorStartTime) diff --git a/x/merkledb/history_test.go b/x/merkledb/history_test.go index 438be08903b..704dae9bb3c 100644 --- a/x/merkledb/history_test.go +++ b/x/merkledb/history_test.go @@ -692,7 +692,7 @@ func Test_Change_List(t *testing.T) { changes, err := db.history.getValueChanges(startRoot, endRoot, nil, nil, 8) require.NoError(err) - require.Equal(8, len(changes.values)) + require.Len(changes.values, 8) } func TestHistoryRecord(t *testing.T) { diff --git a/x/merkledb/node_test.go b/x/merkledb/node_test.go index 7c7c2578eb7..a4282df12f9 100644 --- a/x/merkledb/node_test.go +++ b/x/merkledb/node_test.go @@ -27,7 +27,7 @@ func Test_Node_Marshal(t *testing.T) { require.NoError(t, err) rootParsed, err := parseNode(newPath([]byte("")), data) require.NoError(t, err) - require.Equal(t, 1, len(rootParsed.children)) + require.Len(t, rootParsed.children, 1) rootIndex := root.getSingleChildPath()[len(root.key)] parsedIndex := rootParsed.getSingleChildPath()[len(rootParsed.key)] diff --git a/x/merkledb/trie_test.go b/x/merkledb/trie_test.go index d39b6cc201e..53f69f33d09 100644 --- a/x/merkledb/trie_test.go +++ b/x/merkledb/trie_test.go @@ -688,7 +688,7 @@ func Test_Trie_ChainDeletion(t *testing.T) { require.NoError(t, err) root, err := newTrie.getEditableNode(EmptyPath) require.NoError(t, err) - require.Equal(t, 1, len(root.children)) + require.Len(t, root.children, 1) err = newTrie.Remove(context.Background(), []byte("k")) require.NoError(t, err) @@ -782,15 +782,15 @@ func Test_Trie_NodeCollapse(t *testing.T) { require.NoError(t, err) root, err := trie.getEditableNode(EmptyPath) require.NoError(t, err) - require.Equal(t, 1, len(root.children)) + require.Len(t, root.children, 1) root, err = trie.getEditableNode(EmptyPath) require.NoError(t, err) - require.Equal(t, 1, len(root.children)) + require.Len(t, 
root.children, 1) firstNode, err := trie.getEditableNode(root.getSingleChildPath()) require.NoError(t, err) - require.Equal(t, 1, len(firstNode.children)) + require.Len(t, firstNode.children, 1) // delete the middle values err = trie.Remove(context.Background(), []byte("k")) @@ -805,11 +805,11 @@ func Test_Trie_NodeCollapse(t *testing.T) { root, err = trie.getEditableNode(EmptyPath) require.NoError(t, err) - require.Equal(t, 1, len(root.children)) + require.Len(t, root.children, 1) firstNode, err = trie.getEditableNode(root.getSingleChildPath()) require.NoError(t, err) - require.Equal(t, 2, len(firstNode.children)) + require.Len(t, firstNode.children, 2) } func Test_Trie_MultipleStates(t *testing.T) { From e961d37d17dadd0ff28fdbf7e18c10b449d2e7f8 Mon Sep 17 00:00:00 2001 From: dhrubabasu <7675102+dhrubabasu@users.noreply.github.com> Date: Fri, 12 May 2023 13:19:23 -0400 Subject: [PATCH 05/79] nit --- scripts/lint.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/lint.sh b/scripts/lint.sh index a181c49f045..c330b8e234c 100755 --- a/scripts/lint.sh +++ b/scripts/lint.sh @@ -67,7 +67,7 @@ function test_require_equal_zero { } function test_require_len_zero { - if grep -R -zo -P 'require\.Len\((t, )?.+?, 0\)' .; then + if grep -R -o -P 'require\.Len\((t, )?.+?, 0\)' .; then echo "" echo "Use require.Empty instead of require.Len when testing for 0 length." echo "" From cfd7ca055437ea544540bd5678092099f101108b Mon Sep 17 00:00:00 2001 From: dhrubabasu <7675102+dhrubabasu@users.noreply.github.com> Date: Fri, 12 May 2023 13:33:57 -0400 Subject: [PATCH 06/79] push --- codec/test_codec.go | 40 +++++++++++----------- database/manager/manager_test.go | 2 +- scripts/lint.sh | 7 ++++ utils/buffer/unbounded_deque_test.go | 2 +- utils/sampler/rand_test.go | 2 +- utils/set/bits_test.go | 2 +- utils/set/set_test.go | 2 +- utils/window/window_test.go | 2 +- vms/platformvm/service_test.go | 9 ++--- vms/platformvm/txs/executor/import_test.go | 2 +- vms/platformvm/vm_test.go | 4 +-- vms/platformvm/warp/validator_test.go | 2 +- x/merkledb/proof_test.go | 2 +- 13 files changed, 43 insertions(+), 35 deletions(-) diff --git a/codec/test_codec.go b/codec/test_codec.go index 40e91b7df5a..fe2bab11860 100644 --- a/codec/test_codec.go +++ b/codec/test_codec.go @@ -143,7 +143,7 @@ func TestStruct(codec GeneralCodec, t testing.TB) { bytesLen, err := manager.Size(0, myStructInstance) require.NoError(err) - require.Equal(len(myStructBytes), bytesLen) + require.Len(myStructBytes, bytesLen) myStructUnmarshaled := &myStruct{} version, err := manager.Unmarshal(myStructBytes, myStructUnmarshaled) @@ -175,7 +175,7 @@ func TestUInt32(codec GeneralCodec, t testing.TB) { bytesLen, err := manager.Size(0, number) require.NoError(err) - require.Equal(len(bytes), bytesLen) + require.Len(bytes, bytesLen) var numberUnmarshaled uint32 version, err := manager.Unmarshal(bytes, &numberUnmarshaled) @@ -210,7 +210,7 @@ func TestSlice(codec GeneralCodec, t testing.TB) { bytesLen, err := manager.Size(0, mySlice) require.NoError(err) - require.Equal(len(bytes), bytesLen) + require.Len(bytes, bytesLen) var sliceUnmarshaled []bool version, err := manager.Unmarshal(bytes, &sliceUnmarshaled) @@ -235,7 +235,7 @@ func TestMaxSizeSlice(codec GeneralCodec, t testing.TB) { bytesLen, err := manager.Size(0, mySlice) require.NoError(err) - require.Equal(len(bytes), bytesLen) + require.Len(bytes, bytesLen) var sliceUnmarshaled []string version, err := manager.Unmarshal(bytes, &sliceUnmarshaled) @@ -258,7 +258,7 @@ func TestBool(codec 
GeneralCodec, t testing.TB) { bytesLen, err := manager.Size(0, myBool) require.NoError(err) - require.Equal(len(bytes), bytesLen) + require.Len(bytes, bytesLen) var boolUnmarshaled bool version, err := manager.Unmarshal(bytes, &boolUnmarshaled) @@ -281,7 +281,7 @@ func TestArray(codec GeneralCodec, t testing.TB) { bytesLen, err := manager.Size(0, myArr) require.NoError(err) - require.Equal(len(bytes), bytesLen) + require.Len(bytes, bytesLen) var myArrUnmarshaled [5]uint64 version, err := manager.Unmarshal(bytes, &myArrUnmarshaled) @@ -304,7 +304,7 @@ func TestBigArray(codec GeneralCodec, t testing.TB) { bytesLen, err := manager.Size(0, myArr) require.NoError(err) - require.Equal(len(bytes), bytesLen) + require.Len(bytes, bytesLen) var myArrUnmarshaled [30000]uint64 version, err := manager.Unmarshal(bytes, &myArrUnmarshaled) @@ -327,7 +327,7 @@ func TestPointerToStruct(codec GeneralCodec, t testing.TB) { bytesLen, err := manager.Size(0, myPtr) require.NoError(err) - require.Equal(len(bytes), bytesLen) + require.Len(bytes, bytesLen) var myPtrUnmarshaled *MyInnerStruct version, err := manager.Unmarshal(bytes, &myPtrUnmarshaled) @@ -364,7 +364,7 @@ func TestSliceOfStruct(codec GeneralCodec, t testing.TB) { bytesLen, err := manager.Size(0, mySlice) require.NoError(err) - require.Equal(len(bytes), bytesLen) + require.Len(bytes, bytesLen) var mySliceUnmarshaled []MyInnerStruct3 version, err := manager.Unmarshal(bytes, &mySliceUnmarshaled) @@ -390,7 +390,7 @@ func TestInterface(codec GeneralCodec, t testing.TB) { bytesLen, err := manager.Size(0, &f) require.NoError(err) - require.Equal(len(bytes), bytesLen) + require.Len(bytes, bytesLen) var unmarshaledFoo Foo version, err := manager.Unmarshal(bytes, &unmarshaledFoo) @@ -423,7 +423,7 @@ func TestSliceOfInterface(codec GeneralCodec, t testing.TB) { bytesLen, err := manager.Size(0, mySlice) require.NoError(err) - require.Equal(len(bytes), bytesLen) + require.Len(bytes, bytesLen) var mySliceUnmarshaled []Foo version, err := manager.Unmarshal(bytes, &mySliceUnmarshaled) @@ -456,7 +456,7 @@ func TestArrayOfInterface(codec GeneralCodec, t testing.TB) { bytesLen, err := manager.Size(0, myArray) require.NoError(err) - require.Equal(len(bytes), bytesLen) + require.Len(bytes, bytesLen) var myArrayUnmarshaled [2]Foo version, err := manager.Unmarshal(bytes, &myArrayUnmarshaled) @@ -484,7 +484,7 @@ func TestPointerToInterface(codec GeneralCodec, t testing.TB) { bytesLen, err := manager.Size(0, &myPtr) require.NoError(err) - require.Equal(len(bytes), bytesLen) + require.Len(bytes, bytesLen) var myPtrUnmarshaled *Foo version, err := manager.Unmarshal(bytes, &myPtrUnmarshaled) @@ -507,7 +507,7 @@ func TestString(codec GeneralCodec, t testing.TB) { bytesLen, err := manager.Size(0, myString) require.NoError(err) - require.Equal(len(bytes), bytesLen) + require.Len(bytes, bytesLen) var stringUnmarshaled string version, err := manager.Unmarshal(bytes, &stringUnmarshaled) @@ -534,7 +534,7 @@ func TestNilSlice(codec GeneralCodec, t testing.TB) { bytesLen, err := manager.Size(0, myStruct) require.NoError(err) - require.Equal(len(bytes), bytesLen) + require.Len(bytes, bytesLen) var structUnmarshaled structWithSlice version, err := manager.Unmarshal(bytes, &structUnmarshaled) @@ -591,7 +591,7 @@ func TestSerializeOfNoSerializeField(codec GeneralCodec, t testing.TB) { bytesLen, err := manager.Size(0, myS) require.NoError(err) - require.Equal(len(marshalled), bytesLen) + require.Len(marshalled, bytesLen) unmarshalled := s{} version, err := manager.Unmarshal(marshalled, 
&unmarshalled) @@ -622,7 +622,7 @@ func TestNilSliceSerialization(codec GeneralCodec, t testing.TB) { bytesLen, err := manager.Size(0, val) require.NoError(err) - require.Equal(len(result), bytesLen) + require.Len(result, bytesLen) valUnmarshaled := &simpleSliceStruct{} version, err := manager.Unmarshal(result, &valUnmarshaled) @@ -651,7 +651,7 @@ func TestEmptySliceSerialization(codec GeneralCodec, t testing.TB) { bytesLen, err := manager.Size(0, val) require.NoError(err) - require.Equal(len(result), bytesLen) + require.Len(result, bytesLen) valUnmarshaled := &simpleSliceStruct{} version, err := manager.Unmarshal(result, &valUnmarshaled) @@ -684,7 +684,7 @@ func TestSliceWithEmptySerialization(codec GeneralCodec, t testing.TB) { bytesLen, err := manager.Size(0, val) require.NoError(err) - require.Equal(len(result), bytesLen) + require.Len(result, bytesLen) unmarshaled := nestedSliceStruct{} version, err := manager.Unmarshal(expected, &unmarshaled) @@ -751,7 +751,7 @@ func TestNegativeNumbers(codec GeneralCodec, t testing.TB) { bytesLen, err := manager.Size(0, myS) require.NoError(err) - require.Equal(len(bytes), bytesLen) + require.Len(bytes, bytesLen) mySUnmarshaled := s{} version, err := manager.Unmarshal(bytes, &mySUnmarshaled) diff --git a/database/manager/manager_test.go b/database/manager/manager_test.go index 7784835ed5e..d4ca0c45e42 100644 --- a/database/manager/manager_test.go +++ b/database/manager/manager_test.go @@ -181,7 +181,7 @@ func TestNewSortsDatabases(t *testing.T) { require.Zero(cmp, "incorrect version on previous database") dbs := manager.GetDatabases() - require.Equal(len(vers), len(dbs)) + require.Len(dbs, len(vers)) for i, db := range dbs { cmp = db.Version.Compare(vers[i]) diff --git a/scripts/lint.sh b/scripts/lint.sh index 28727d90f92..28700aa1887 100755 --- a/scripts/lint.sh +++ b/scripts/lint.sh @@ -82,6 +82,13 @@ function test_require_equal_len { echo "" return 1 fi + + if grep -R -o -P 'require\.Equal\((t, )?len\(' .; then + echo "" + echo "Use require.Len instead of require.Equal when testing for length." 
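# (annotation, not part of the diff: this second pattern flags the reversed
# argument order, require.Equal(t, len(x), n), which the first pattern's
# 'require\.Equal\((t, )?\d+, len\(' cannot match because it requires literal
# digits before ', len('; the later "pr review" patch consolidates the two
# greps into the single pattern 'require\.Equal\((t, )?.*, len\('.)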
+ echo "" + return 1 + fi } # Ref: https://go.dev/doc/effective_go#blank_implements diff --git a/utils/buffer/unbounded_deque_test.go b/utils/buffer/unbounded_deque_test.go index 22396e4504a..06b35b338b7 100644 --- a/utils/buffer/unbounded_deque_test.go +++ b/utils/buffer/unbounded_deque_test.go @@ -648,7 +648,7 @@ func FuzzUnboundedSliceDeque(f *testing.F) { } list := b.List() - require.Equal(len(input), len(list)) + require.Len(input, len(list)) for i, n := range input { require.Equal(n, list[i]) } diff --git a/utils/sampler/rand_test.go b/utils/sampler/rand_test.go index 7fc5b446144..acfdb673495 100644 --- a/utils/sampler/rand_test.go +++ b/utils/sampler/rand_test.go @@ -205,6 +205,6 @@ func FuzzRNG(f *testing.F) { mathRNG := rand.New(stdSource) //#nosec G404 stdVal := mathRNG.Int63n(int64(max + 1)) require.Equal(val, uint64(stdVal)) - require.Equal(len(source.nums), len(stdSource.nums)) + require.Len(source.nums, len(stdSource.nums)) }) } diff --git a/utils/set/bits_test.go b/utils/set/bits_test.go index 5541395a5aa..0fa47cc41fb 100644 --- a/utils/set/bits_test.go +++ b/utils/set/bits_test.go @@ -499,7 +499,7 @@ func Test_Bits_Bytes(t *testing.T) { bytes := b.Bytes() fromBytes := BitsFromBytes(bytes) - require.Equal(len(tt.elts), fromBytes.Len()) + require.Len(tt.elts, fromBytes.Len()) for _, elt := range tt.elts { require.True(fromBytes.Contains(elt)) } diff --git a/utils/set/set_test.go b/utils/set/set_test.go index 6ecd69ca25e..52eca368d2f 100644 --- a/utils/set/set_test.go +++ b/utils/set/set_test.go @@ -26,7 +26,7 @@ func TestSet(t *testing.T) { s.Add(id1) require.True(s.Contains(id1)) require.Len(s.List(), 1) - require.Equal(len(s.List()), 1) + require.Len(s.List(), 1) require.Equal(id1, s.List()[0]) s.Clear() diff --git a/utils/window/window_test.go b/utils/window/window_test.go index 9e36658850b..b3809ccb119 100644 --- a/utils/window/window_test.go +++ b/utils/window/window_test.go @@ -55,7 +55,7 @@ func TestAdd(t *testing.T) { window.Add(test.newlyAdded) - require.Equal(t, len(test.window)+1, window.Length()) + require.Len(t, test.window, window.Length()-1) oldest, ok := window.Oldest() require.Equal(t, test.expectedOldest, oldest) require.True(t, ok) diff --git a/vms/platformvm/service_test.go b/vms/platformvm/service_test.go index c38c044c7e0..da17a619028 100644 --- a/vms/platformvm/service_test.go +++ b/vms/platformvm/service_test.go @@ -468,7 +468,8 @@ func TestGetStake(t *testing.T) { } response := GetStakeReply{} require.NoError(service.GetStake(nil, &args, &response)) - require.Equal(len(genesis.Validators)*int(defaultWeight), int(response.Staked)) + totalWeight := len(genesis.Validators) * int(defaultWeight) + require.Equal(totalWeight, int(response.Staked)) require.Len(response.Outputs, len(genesis.Validators)) for _, outputStr := range response.Outputs { @@ -600,7 +601,7 @@ func TestGetCurrentValidators(t *testing.T) { err := service.GetCurrentValidators(nil, &args, &response) require.NoError(err) - require.Equal(len(genesis.Validators), len(response.Validators)) + require.Len(response.Validators, len(genesis.Validators)) for _, vdr := range genesis.Validators { found := false @@ -650,7 +651,7 @@ func TestGetCurrentValidators(t *testing.T) { args = GetCurrentValidatorsArgs{SubnetID: constants.PrimaryNetworkID} err = service.GetCurrentValidators(nil, &args, &response) require.NoError(err) - require.Equal(len(genesis.Validators), len(response.Validators)) + require.Len(response.Validators, len(genesis.Validators)) // Make sure the delegator is there found := false @@ 
-696,7 +697,7 @@ func TestGetCurrentValidators(t *testing.T) { // Call getValidators response = GetCurrentValidatorsReply{} require.NoError(service.GetCurrentValidators(nil, &args, &response)) - require.Equal(len(genesis.Validators), len(response.Validators)) + require.Len(response.Validators, len(genesis.Validators)) for i := 0; i < len(response.Validators); i++ { vdr := response.Validators[i].(pchainapi.PermissionlessValidator) diff --git a/vms/platformvm/txs/executor/import_test.go b/vms/platformvm/txs/executor/import_test.go index 9c618d80bd6..8705dbec43a 100644 --- a/vms/platformvm/txs/executor/import_test.go +++ b/vms/platformvm/txs/executor/import_test.go @@ -168,7 +168,7 @@ func TestNewImportTx(t *testing.T) { unsignedTx := tx.Unsigned.(*txs.ImportTx) require.NotEmpty(unsignedTx.ImportedInputs) - require.Equal(len(tx.Creds), len(unsignedTx.Ins)+len(unsignedTx.ImportedInputs), "should have the same number of credentials as inputs") + require.Len(tx.Creds, len(unsignedTx.Ins)+len(unsignedTx.ImportedInputs), "should have the same number of credentials as inputs") totalIn := uint64(0) for _, in := range unsignedTx.Ins { diff --git a/vms/platformvm/vm_test.go b/vms/platformvm/vm_test.go index fcf7b45a8c8..0f3261fa89b 100644 --- a/vms/platformvm/vm_test.go +++ b/vms/platformvm/vm_test.go @@ -549,7 +549,7 @@ func TestGenesis(t *testing.T) { require.True(ok) currentValidators := vdrSet.List() - require.Equal(len(currentValidators), len(genesisState.Validators)) + require.Len(currentValidators, len(genesisState.Validators)) for _, key := range keys { nodeID := ids.NodeID(key.PublicKey().Address()) @@ -2776,7 +2776,7 @@ func TestVM_GetValidatorSet(t *testing.T) { if tt.expectedErr != nil { return } - require.Equal(len(tt.expectedVdrSet), len(gotVdrSet)) + require.Len(gotVdrSet, len(tt.expectedVdrSet)) for nodeID, vdr := range tt.expectedVdrSet { otherVdr, ok := gotVdrSet[nodeID] require.True(ok) diff --git a/vms/platformvm/warp/validator_test.go b/vms/platformvm/warp/validator_test.go index eeec82bffed..1631f50d312 100644 --- a/vms/platformvm/warp/validator_test.go +++ b/vms/platformvm/warp/validator_test.go @@ -147,7 +147,7 @@ func TestGetCanonicalValidatorSet(t *testing.T) { require.Equal(tt.expectedWeight, weight) // These are pointers so have to test equality like this - require.Equal(len(tt.expectedVdrs), len(vdrs)) + require.Len(vdrs, len(tt.expectedVdrs)) for i, expectedVdr := range tt.expectedVdrs { gotVdr := vdrs[i] expectedPKBytes := bls.PublicKeyToBytes(expectedVdr.PublicKey) diff --git a/x/merkledb/proof_test.go b/x/merkledb/proof_test.go index 02910f49c74..d845e4f0a34 100644 --- a/x/merkledb/proof_test.go +++ b/x/merkledb/proof_test.go @@ -122,7 +122,7 @@ func Test_Proof_Marshal_Errors(t *testing.T) { } func verifyPath(t *testing.T, path1, path2 []ProofNode) { - require.Equal(t, len(path1), len(path2)) + require.Len(t, path1, len(path2)) for i := range path1 { require.True(t, bytes.Equal(path1[i].KeyPath.Value, path2[i].KeyPath.Value)) require.Equal(t, path1[i].KeyPath.hasOddLength(), path2[i].KeyPath.hasOddLength()) From 946588af6df14154af3527c3147fdf4b87831a5e Mon Sep 17 00:00:00 2001 From: dhrubabasu <7675102+dhrubabasu@users.noreply.github.com> Date: Fri, 12 May 2023 13:42:52 -0400 Subject: [PATCH 07/79] ban usage of `nil` in require functions --- api/keystore/service_test.go | 2 +- scripts/lint.sh | 11 +++++- snow/engine/snowman/syncer/utils_test.go | 2 +- vms/avm/txs/mempool/mempool_test.go | 2 +- vms/platformvm/blocks/builder/network_test.go | 4 +-- 
.../txs/executor/create_subnet_test.go | 35 ++++++++++--------- vms/platformvm/txs/mempool/mempool_test.go | 6 ++-- 7 files changed, 36 insertions(+), 26 deletions(-) diff --git a/api/keystore/service_test.go b/api/keystore/service_test.go index 3747db8f88f..d26445878e1 100644 --- a/api/keystore/service_test.go +++ b/api/keystore/service_test.go @@ -251,7 +251,7 @@ func TestServiceExportImport(t *testing.T) { User: exportReply.User, Encoding: encoding, }, &api.EmptyReply{}) - require.ErrorIs(err, nil) + require.NoError(err) } { diff --git a/scripts/lint.sh b/scripts/lint.sh index 28700aa1887..d0475ade269 100755 --- a/scripts/lint.sh +++ b/scripts/lint.sh @@ -21,7 +21,7 @@ fi # by default, "./scripts/lint.sh" runs all lint tests # to run only "license_header" test # TESTS='license_header' ./scripts/lint.sh -TESTS=${TESTS:-"golangci_lint license_header require_error_is_no_funcs_as_params single_import interface_compliance_nil require_equal_zero require_len_zero require_equal_len"} +TESTS=${TESTS:-"golangci_lint license_header require_error_is_no_funcs_as_params single_import interface_compliance_nil require_equal_zero require_len_zero require_equal_len require_nil"} function test_golangci_lint { go install -v github.com/golangci/golangci-lint/cmd/golangci-lint@v1.51.2 @@ -91,6 +91,15 @@ function test_require_equal_len { fi } +function test_require_nil { + if grep -R -zo -P 'require.+?nil\)\n' .; then + echo "" + echo "Use a require function to test for 'nil'" + echo "" + return 1 + fi +} + # Ref: https://go.dev/doc/effective_go#blank_implements function test_interface_compliance_nil { if grep -R -o -P '_ .+? = &.+?\{\}' .; then diff --git a/snow/engine/snowman/syncer/utils_test.go b/snow/engine/snowman/syncer/utils_test.go index 0150a876d97..a8d24297927 100644 --- a/snow/engine/snowman/syncer/utils_test.go +++ b/snow/engine/snowman/syncer/utils_test.go @@ -91,7 +91,7 @@ func buildTestsObjects(t *testing.T, commonCfg *common.Config) ( }) require.IsType(t, &stateSyncer{}, commonSyncer) syncer := commonSyncer.(*stateSyncer) - require.True(t, syncer.stateSyncVM != nil) + require.NotNil(t, syncer.stateSyncVM) fullVM.GetOngoingSyncStateSummaryF = func(context.Context) (block.StateSummary, error) { return nil, database.ErrNotFound diff --git a/vms/avm/txs/mempool/mempool_test.go b/vms/avm/txs/mempool/mempool_test.go index d02abc48c36..0f633b75e87 100644 --- a/vms/avm/txs/mempool/mempool_test.go +++ b/vms/avm/txs/mempool/mempool_test.go @@ -83,7 +83,7 @@ func TestTxsInMempool(t *testing.T) { require.True(mempool.Has(txID)) retrieved := mempool.Get(txID) - require.True(retrieved != nil) + require.NotNil(retrieved) require.Equal(tx, retrieved) // tx exists in mempool diff --git a/vms/platformvm/blocks/builder/network_test.go b/vms/platformvm/blocks/builder/network_test.go index d1cdb5dc266..51777fdd824 100644 --- a/vms/platformvm/blocks/builder/network_test.go +++ b/vms/platformvm/blocks/builder/network_test.go @@ -67,7 +67,7 @@ func TestMempoolValidGossipedTxIsAddedToMempool(t *testing.T) { env.ctx.Lock.Lock() // and gossiped if it has just been discovered - require.True(gossipedBytes != nil) + require.NotNil(gossipedBytes) // show gossiped bytes can be decoded to the original tx replyIntf, err := message.Parse(gossipedBytes) @@ -129,7 +129,7 @@ func TestMempoolNewLocaTxIsGossiped(t *testing.T) { err := env.Builder.AddUnverifiedTx(tx) require.NoError(err) - require.True(gossipedBytes != nil) + require.NotNil(gossipedBytes) // show gossiped bytes can be decoded to the original tx replyIntf, err := 
message.Parse(gossipedBytes) diff --git a/vms/platformvm/txs/executor/create_subnet_test.go b/vms/platformvm/txs/executor/create_subnet_test.go index 2446d8415b4..f348d7624b3 100644 --- a/vms/platformvm/txs/executor/create_subnet_test.go +++ b/vms/platformvm/txs/executor/create_subnet_test.go @@ -14,34 +14,35 @@ import ( "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/platformvm/state" "github.com/ava-labs/avalanchego/vms/platformvm/txs" + "github.com/ava-labs/avalanchego/vms/platformvm/utxo" "github.com/ava-labs/avalanchego/vms/secp256k1fx" ) func TestCreateSubnetTxAP3FeeChange(t *testing.T) { ap3Time := defaultGenesisTime.Add(time.Hour) tests := []struct { - name string - time time.Time - fee uint64 - expectsError bool + name string + time time.Time + fee uint64 + expectedErr error }{ { - name: "pre-fork - correctly priced", - time: defaultGenesisTime, - fee: 0, - expectsError: false, + name: "pre-fork - correctly priced", + time: defaultGenesisTime, + fee: 0, + expectedErr: nil, }, { - name: "post-fork - incorrectly priced", - time: ap3Time, - fee: 100*defaultTxFee - 1*units.NanoAvax, - expectsError: true, + name: "post-fork - incorrectly priced", + time: ap3Time, + fee: 100*defaultTxFee - 1*units.NanoAvax, + expectedErr: utxo.ErrInsufficientUnlockedFunds, }, { - name: "post-fork - correctly priced", - time: ap3Time, - fee: 100 * defaultTxFee, - expectsError: false, + name: "post-fork - correctly priced", + time: ap3Time, + fee: 100 * defaultTxFee, + expectedErr: nil, }, } for _, test := range tests { @@ -82,7 +83,7 @@ func TestCreateSubnetTxAP3FeeChange(t *testing.T) { Tx: tx, } err = tx.Unsigned.Visit(&executor) - require.Equal(test.expectsError, err != nil) + require.ErrorIs(err, test.expectedErr) }) } } diff --git a/vms/platformvm/txs/mempool/mempool_test.go b/vms/platformvm/txs/mempool/mempool_test.go index ba25fac2ae8..d191fd02453 100644 --- a/vms/platformvm/txs/mempool/mempool_test.go +++ b/vms/platformvm/txs/mempool/mempool_test.go @@ -79,7 +79,7 @@ func TestDecisionTxsInMempool(t *testing.T) { require.True(mpool.Has(tx.ID())) retrieved := mpool.Get(tx.ID()) - require.True(retrieved != nil) + require.NotNil(retrieved) require.Equal(tx, retrieved) // we can peek it @@ -134,13 +134,13 @@ func TestProposalTxsInMempool(t *testing.T) { require.True(mpool.Has(tx.ID())) retrieved := mpool.Get(tx.ID()) - require.True(retrieved != nil) + require.NotNil(retrieved) require.Equal(tx, retrieved) { // we can peek it peeked := mpool.PeekStakerTx() - require.True(peeked != nil) + require.NotNil(peeked) require.Equal(tx, peeked) } From 5fcf0f0cbd1ead2afe3a3c1c6ce22827f2270979 Mon Sep 17 00:00:00 2001 From: dhrubabasu <7675102+dhrubabasu@users.noreply.github.com> Date: Fri, 12 May 2023 13:48:31 -0400 Subject: [PATCH 08/79] nit --- scripts/lint.sh | 2 +- utils/sampler/weighted_without_replacement_test.go | 4 ++-- vms/avm/service_test.go | 6 +++--- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/scripts/lint.sh b/scripts/lint.sh index c330b8e234c..334343ef268 100755 --- a/scripts/lint.sh +++ b/scripts/lint.sh @@ -67,7 +67,7 @@ function test_require_equal_zero { } function test_require_len_zero { - if grep -R -o -P 'require\.Len\((t, )?.+?, 0\)' .; then + if grep -R -o -P 'require\.Len\((t, )?.+?, 0,' .; then echo "" echo "Use require.Empty instead of require.Len when testing for 0 length." 
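# (annotation, not part of the diff: tightening the terminator from '0\)' to
# '0,' makes this check match only require.Len calls that pass a trailing
# message argument, e.g. require.Len(t, indices, 0, "..."); a bare
# require.Len(x, 0) escapes it until the "fix regex" patch below widens the
# terminator to '0(,|\))'.)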
echo "" diff --git a/utils/sampler/weighted_without_replacement_test.go b/utils/sampler/weighted_without_replacement_test.go index c62f5621525..a73cac19071 100644 --- a/utils/sampler/weighted_without_replacement_test.go +++ b/utils/sampler/weighted_without_replacement_test.go @@ -111,7 +111,7 @@ func WeightedWithoutReplacementEmptyWithoutWeightTest( indices, err := s.Sample(0) require.NoError(t, err) - require.Len(t, indices, 0, "shouldn't have selected any elements") + require.Empty(t, indices, "shouldn't have selected any elements") } func WeightedWithoutReplacementEmptyTest( @@ -123,7 +123,7 @@ func WeightedWithoutReplacementEmptyTest( indices, err := s.Sample(0) require.NoError(t, err) - require.Len(t, indices, 0, "shouldn't have selected any elements") + require.Empty(t, indices, "shouldn't have selected any elements") } func WeightedWithoutReplacementSingletonTest( diff --git a/vms/avm/service_test.go b/vms/avm/service_test.go index 5714cf60792..58e719d5ef3 100644 --- a/vms/avm/service_test.go +++ b/vms/avm/service_test.go @@ -326,7 +326,7 @@ func TestServiceGetBalanceStrict(t *testing.T) { require.NoError(t, err) // The balance should not include the UTXO since it is only partly owned by [addr] require.Zero(t, balanceReply.Balance) - require.Len(t, balanceReply.UTXOIDs, 0, "should have returned 0 utxoIDs") + require.Empty(t, balanceReply.UTXOIDs, "should have returned 0 utxoIDs") // A UTXO with a 1 out of 2 multisig // where one of the addresses is [addr] @@ -371,7 +371,7 @@ func TestServiceGetBalanceStrict(t *testing.T) { require.NoError(t, err) // The balance should not include the UTXO since it is only partly owned by [addr] require.Zero(t, balanceReply.Balance) - require.Len(t, balanceReply.UTXOIDs, 0, "should have returned 0 utxoIDs") + require.Empty(t, balanceReply.UTXOIDs, "should have returned 0 utxoIDs") // A UTXO with a 1 out of 1 multisig // but with a locktime in the future @@ -418,7 +418,7 @@ func TestServiceGetBalanceStrict(t *testing.T) { require.NoError(t, err) // The balance should not include the UTXO since it is only partly owned by [addr] require.Zero(t, balanceReply.Balance) - require.Len(t, balanceReply.UTXOIDs, 0, "should have returned 0 utxoIDs") + require.Empty(t, balanceReply.UTXOIDs, "should have returned 0 utxoIDs") } func TestServiceGetTxs(t *testing.T) { From 10dff9561e0d979502f08ed65c68cdb3d06e4852 Mon Sep 17 00:00:00 2001 From: dhrubabasu <7675102+dhrubabasu@users.noreply.github.com> Date: Fri, 12 May 2023 13:50:15 -0400 Subject: [PATCH 09/79] fix regex --- scripts/lint.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/lint.sh b/scripts/lint.sh index 334343ef268..45deb891492 100755 --- a/scripts/lint.sh +++ b/scripts/lint.sh @@ -67,7 +67,7 @@ function test_require_equal_zero { } function test_require_len_zero { - if grep -R -o -P 'require\.Len\((t, )?.+?, 0,' .; then + if grep -R -o -P 'require\.Len\((t, )?.+?, 0(,|\))' .; then echo "" echo "Use require.Empty instead of require.Len when testing for 0 length." 
echo "" From 9fdc7c56a15d9158324916898a498f6e629bd17c Mon Sep 17 00:00:00 2001 From: dhrubabasu <7675102+dhrubabasu@users.noreply.github.com> Date: Fri, 12 May 2023 14:03:11 -0400 Subject: [PATCH 10/79] nit --- scripts/lint.sh | 7 +++++++ snow/networking/benchlist/benchlist_test.go | 12 ++++++------ 2 files changed, 13 insertions(+), 6 deletions(-) diff --git a/scripts/lint.sh b/scripts/lint.sh index 47865c37e46..7ebf782e4df 100755 --- a/scripts/lint.sh +++ b/scripts/lint.sh @@ -64,6 +64,13 @@ function test_require_equal_zero { echo "" return 1 fi + + if grep -R -zo -P 'require\.Equal\(.+?, (u?int\d+\(0\)|0)\)\n' .; then + echo "" + echo "Use require.Zero instead of require.Equal when testing for 0." + echo "" + return 1 + fi } # Ref: https://go.dev/doc/effective_go#blank_implements diff --git a/snow/networking/benchlist/benchlist_test.go b/snow/networking/benchlist/benchlist_test.go index f3f36d738ae..3ebb375cbdc 100644 --- a/snow/networking/benchlist/benchlist_test.go +++ b/snow/networking/benchlist/benchlist_test.go @@ -73,8 +73,8 @@ func TestBenchlistAdd(t *testing.T) { require.False(t, b.isBenched(vdrID3)) require.False(t, b.isBenched(vdrID4)) require.Len(t, b.failureStreaks, 0) - require.Equal(t, b.benchedQueue.Len(), 0) - require.Equal(t, b.benchlistSet.Len(), 0) + require.Zero(t, b.benchedQueue.Len()) + require.Zero(t, b.benchlistSet.Len()) b.lock.Unlock() // Register [threshold - 1] failures in a row for vdr0 @@ -84,8 +84,8 @@ func TestBenchlistAdd(t *testing.T) { // Still shouldn't be benched due to not enough consecutive failure require.False(t, b.isBenched(vdrID0)) - require.Equal(t, b.benchedQueue.Len(), 0) - require.Equal(t, b.benchlistSet.Len(), 0) + require.Zero(t, b.benchedQueue.Len()) + require.Zero(t, b.benchlistSet.Len()) require.Len(t, b.failureStreaks, 1) fs := b.failureStreaks[vdrID0] require.Equal(t, threshold-1, fs.consecutive) @@ -98,8 +98,8 @@ func TestBenchlistAdd(t *testing.T) { // has passed since the first failure b.lock.Lock() require.False(t, b.isBenched(vdrID0)) - require.Equal(t, b.benchedQueue.Len(), 0) - require.Equal(t, b.benchlistSet.Len(), 0) + require.Zero(t, b.benchedQueue.Len()) + require.Zero(t, b.benchlistSet.Len()) b.lock.Unlock() // Move the time up From 421b72d79b196df3318fadf1c4bf9788005c9063 Mon Sep 17 00:00:00 2001 From: dhrubabasu <7675102+dhrubabasu@users.noreply.github.com> Date: Fri, 12 May 2023 14:44:03 -0400 Subject: [PATCH 11/79] add comment --- scripts/lint.sh | 2 ++ 1 file changed, 2 insertions(+) diff --git a/scripts/lint.sh b/scripts/lint.sh index 7ebf782e4df..4362cad88f3 100755 --- a/scripts/lint.sh +++ b/scripts/lint.sh @@ -58,6 +58,7 @@ function test_require_error_is_no_funcs_as_params { } function test_require_equal_zero { + # check if the first arg, other than t, is 0 if grep -R -o -P 'require\.Equal\((t, )?(u?int\d+\(0\)|0)' .; then echo "" echo "Use require.Zero instead of require.Equal when testing for 0." @@ -65,6 +66,7 @@ function test_require_equal_zero { return 1 fi + # check if the last arg is 0 if grep -R -zo -P 'require\.Equal\(.+?, (u?int\d+\(0\)|0)\)\n' .; then echo "" echo "Use require.Zero instead of require.Equal when testing for 0." 
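(Annotation, not part of the patch series: in the two patterns above, `u?int\d+` requires at least one digit after the type name, so sized casts such as `uint16(0)` match but the unsized casts `int(0)` and `uint(0)` do not; the next patch, "match uint(0)", relaxes `\d+` to `\d*` to close that gap. A minimal sketch of what each form catches, using a hypothetical test and variable names that are not from this repository:

    package example

    import (
        "testing"

        "github.com/stretchr/testify/require"
    )

    func TestZeroForms(t *testing.T) {
        var height uint    // hypothetical value, zero by default
        var version uint16 // hypothetical value, zero by default

        require.Equal(t, uint16(0), version) // matched by both u?int\d+\(0\) and u?int\d*\(0\)
        require.Equal(t, uint(0), height)    // matched only once \d+ becomes \d*
        require.Zero(t, version)             // the form the lint check asks for
        require.Zero(t, height)              // the form the lint check asks for
    }

Both flagged lines pass as written; the lint rule is purely about preferring require.Zero over spelling out a typed zero.)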
From 0c919fd81ddd5f80d09ab6a0bc90006d0d3b13fb Mon Sep 17 00:00:00 2001 From: dhrubabasu <7675102+dhrubabasu@users.noreply.github.com> Date: Fri, 12 May 2023 14:58:27 -0400 Subject: [PATCH 12/79] match uint(0) --- scripts/lint.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/scripts/lint.sh b/scripts/lint.sh index 4362cad88f3..55a5df29f64 100755 --- a/scripts/lint.sh +++ b/scripts/lint.sh @@ -59,7 +59,7 @@ function test_require_error_is_no_funcs_as_params { function test_require_equal_zero { # check if the first arg, other than t, is 0 - if grep -R -o -P 'require\.Equal\((t, )?(u?int\d+\(0\)|0)' .; then + if grep -R -o -P 'require\.Equal\((t, )?(u?int\d*\(0\)|0)' .; then echo "" echo "Use require.Zero instead of require.Equal when testing for 0." echo "" @@ -67,7 +67,7 @@ function test_require_equal_zero { fi # check if the last arg is 0 - if grep -R -zo -P 'require\.Equal\(.+?, (u?int\d+\(0\)|0)\)\n' .; then + if grep -R -zo -P 'require\.Equal\(.+?, (u?int\d*\(0\)|0)\)\n' .; then echo "" echo "Use require.Zero instead of require.Equal when testing for 0." echo "" From f8c4359c1e61f1755bdfb3d09e40c252da467b9b Mon Sep 17 00:00:00 2001 From: Dhruba Basu <7675102+dhrubabasu@users.noreply.github.com> Date: Fri, 12 May 2023 16:39:28 -0400 Subject: [PATCH 13/79] Update scripts/lint.sh Co-authored-by: Stephen Buttolph Signed-off-by: Dhruba Basu <7675102+dhrubabasu@users.noreply.github.com> --- scripts/lint.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/lint.sh b/scripts/lint.sh index f896931bbe2..80e7ff1e7b5 100755 --- a/scripts/lint.sh +++ b/scripts/lint.sh @@ -74,7 +74,7 @@ function test_require_equal_zero { } function test_require_len_zero { - if grep -R -o -P 'require\.Len\((t, )?.+?, 0(,|\))' .; then + if grep -R -o -P 'require\.Len\((t, )?.+, 0(,|\))' .; then echo "" echo "Use require.Empty instead of require.Len when testing for 0 length." echo "" From 7e40d3894396256746deae567e9bf05fdf65ba17 Mon Sep 17 00:00:00 2001 From: dhrubabasu <7675102+dhrubabasu@users.noreply.github.com> Date: Fri, 12 May 2023 16:42:59 -0400 Subject: [PATCH 14/79] nit --- utils/set/set_test.go | 1 - 1 file changed, 1 deletion(-) diff --git a/utils/set/set_test.go b/utils/set/set_test.go index 52eca368d2f..a0ae3eb9de1 100644 --- a/utils/set/set_test.go +++ b/utils/set/set_test.go @@ -26,7 +26,6 @@ func TestSet(t *testing.T) { s.Add(id1) require.True(s.Contains(id1)) require.Len(s.List(), 1) - require.Len(s.List(), 1) require.Equal(id1, s.List()[0]) s.Clear() From fb10a1a1fb7cfdf3dd7abe1ca7adf8281798d177 Mon Sep 17 00:00:00 2001 From: dhrubabasu <7675102+dhrubabasu@users.noreply.github.com> Date: Fri, 12 May 2023 16:57:40 -0400 Subject: [PATCH 15/79] pr review --- scripts/lint.sh | 9 +-------- utils/buffer/unbounded_deque_test.go | 2 +- utils/sampler/rand_test.go | 2 +- utils/window/window_test.go | 2 +- vms/platformvm/service_test.go | 8 ++++---- vms/platformvm/txs/executor/import_test.go | 3 ++- vms/platformvm/vm_test.go | 2 +- x/merkledb/proof_test.go | 2 +- 8 files changed, 12 insertions(+), 18 deletions(-) diff --git a/scripts/lint.sh b/scripts/lint.sh index fae196ccfee..27e0b148625 100755 --- a/scripts/lint.sh +++ b/scripts/lint.sh @@ -85,14 +85,7 @@ function test_require_len_zero { } function test_require_equal_len { - if grep -R -o -P 'require\.Equal\((t, )?\d+, len\(' .; then - echo "" - echo "Use require.Len instead of require.Equal when testing for length." 
- echo "" - return 1 - fi - - if grep -R -o -P 'require\.Equal\((t, )?len\(' .; then + if grep -R -o -P 'require\.Equal\((t, )?.*, len\(' .; then echo "" echo "Use require.Len instead of require.Equal when testing for length." echo "" diff --git a/utils/buffer/unbounded_deque_test.go b/utils/buffer/unbounded_deque_test.go index 06b35b338b7..5b759da1c0e 100644 --- a/utils/buffer/unbounded_deque_test.go +++ b/utils/buffer/unbounded_deque_test.go @@ -648,7 +648,7 @@ func FuzzUnboundedSliceDeque(f *testing.F) { } list := b.List() - require.Len(input, len(list)) + require.Len(list, len(input)) for i, n := range input { require.Equal(n, list[i]) } diff --git a/utils/sampler/rand_test.go b/utils/sampler/rand_test.go index acfdb673495..b2ef3dfb0f6 100644 --- a/utils/sampler/rand_test.go +++ b/utils/sampler/rand_test.go @@ -205,6 +205,6 @@ func FuzzRNG(f *testing.F) { mathRNG := rand.New(stdSource) //#nosec G404 stdVal := mathRNG.Int63n(int64(max + 1)) require.Equal(val, uint64(stdVal)) - require.Len(source.nums, len(stdSource.nums)) + require.Len(stdSource.nums, len(source.nums)) }) } diff --git a/utils/window/window_test.go b/utils/window/window_test.go index b3809ccb119..24cb57d478e 100644 --- a/utils/window/window_test.go +++ b/utils/window/window_test.go @@ -55,7 +55,7 @@ func TestAdd(t *testing.T) { window.Add(test.newlyAdded) - require.Len(t, test.window, window.Length()-1) + require.Equal(t, window.Length(), len(test.window)+1) oldest, ok := window.Oldest() require.Equal(t, test.expectedOldest, oldest) require.True(t, ok) diff --git a/vms/platformvm/service_test.go b/vms/platformvm/service_test.go index da17a619028..3df928a523b 100644 --- a/vms/platformvm/service_test.go +++ b/vms/platformvm/service_test.go @@ -699,12 +699,12 @@ func TestGetCurrentValidators(t *testing.T) { require.NoError(service.GetCurrentValidators(nil, &args, &response)) require.Len(response.Validators, len(genesis.Validators)) - for i := 0; i < len(response.Validators); i++ { - vdr := response.Validators[i].(pchainapi.PermissionlessValidator) - if vdr.NodeID != validatorNodeID { + for _, vdr := range response.Validators { + castVdr := vdr.(pchainapi.PermissionlessValidator) + if castVdr.NodeID != validatorNodeID { continue } - require.Equal(uint64(100000), uint64(*vdr.AccruedDelegateeReward)) + require.Equal(uint64(100000), uint64(*castVdr.AccruedDelegateeReward)) } } diff --git a/vms/platformvm/txs/executor/import_test.go b/vms/platformvm/txs/executor/import_test.go index 8705dbec43a..7a17dbd7b04 100644 --- a/vms/platformvm/txs/executor/import_test.go +++ b/vms/platformvm/txs/executor/import_test.go @@ -168,7 +168,8 @@ func TestNewImportTx(t *testing.T) { unsignedTx := tx.Unsigned.(*txs.ImportTx) require.NotEmpty(unsignedTx.ImportedInputs) - require.Len(tx.Creds, len(unsignedTx.Ins)+len(unsignedTx.ImportedInputs), "should have the same number of credentials as inputs") + numInputs := len(unsignedTx.Ins) + len(unsignedTx.ImportedInputs) + require.Equal(len(tx.Creds), numInputs, "should have the same number of credentials as inputs") totalIn := uint64(0) for _, in := range unsignedTx.Ins { diff --git a/vms/platformvm/vm_test.go b/vms/platformvm/vm_test.go index 0f3261fa89b..fca28a74d58 100644 --- a/vms/platformvm/vm_test.go +++ b/vms/platformvm/vm_test.go @@ -549,7 +549,7 @@ func TestGenesis(t *testing.T) { require.True(ok) currentValidators := vdrSet.List() - require.Len(currentValidators, len(genesisState.Validators)) + require.Len(genesisState.Validators, len(currentValidators)) for _, key := range keys { nodeID := 
ids.NodeID(key.PublicKey().Address()) diff --git a/x/merkledb/proof_test.go b/x/merkledb/proof_test.go index d845e4f0a34..87014af854a 100644 --- a/x/merkledb/proof_test.go +++ b/x/merkledb/proof_test.go @@ -122,7 +122,7 @@ func Test_Proof_Marshal_Errors(t *testing.T) { } func verifyPath(t *testing.T, path1, path2 []ProofNode) { - require.Len(t, path1, len(path2)) + require.Len(t, path2, len(path1)) for i := range path1 { require.True(t, bytes.Equal(path1[i].KeyPath.Value, path2[i].KeyPath.Value)) require.Equal(t, path1[i].KeyPath.hasOddLength(), path2[i].KeyPath.hasOddLength()) From 7aadaf43b4eef1856179d8fa6dd78ba34f3d23c8 Mon Sep 17 00:00:00 2001 From: dhrubabasu <7675102+dhrubabasu@users.noreply.github.com> Date: Fri, 12 May 2023 17:03:51 -0400 Subject: [PATCH 16/79] revert --- codec/test_codec.go | 42 +++++++++++++++++++++--------------------- 1 file changed, 21 insertions(+), 21 deletions(-) diff --git a/codec/test_codec.go b/codec/test_codec.go index fe2bab11860..e82593e1e3f 100644 --- a/codec/test_codec.go +++ b/codec/test_codec.go @@ -143,7 +143,7 @@ func TestStruct(codec GeneralCodec, t testing.TB) { bytesLen, err := manager.Size(0, myStructInstance) require.NoError(err) - require.Len(myStructBytes, bytesLen) + require.Equal(len(myStructBytes), bytesLen) myStructUnmarshaled := &myStruct{} version, err := manager.Unmarshal(myStructBytes, myStructUnmarshaled) @@ -175,7 +175,7 @@ func TestUInt32(codec GeneralCodec, t testing.TB) { bytesLen, err := manager.Size(0, number) require.NoError(err) - require.Len(bytes, bytesLen) + require.Equal(len(bytes), bytesLen) var numberUnmarshaled uint32 version, err := manager.Unmarshal(bytes, &numberUnmarshaled) @@ -210,7 +210,7 @@ func TestSlice(codec GeneralCodec, t testing.TB) { bytesLen, err := manager.Size(0, mySlice) require.NoError(err) - require.Len(bytes, bytesLen) + require.Equal(len(bytes), bytesLen) var sliceUnmarshaled []bool version, err := manager.Unmarshal(bytes, &sliceUnmarshaled) @@ -235,7 +235,7 @@ func TestMaxSizeSlice(codec GeneralCodec, t testing.TB) { bytesLen, err := manager.Size(0, mySlice) require.NoError(err) - require.Len(bytes, bytesLen) + require.Equal(len(bytes), bytesLen) var sliceUnmarshaled []string version, err := manager.Unmarshal(bytes, &sliceUnmarshaled) @@ -258,7 +258,7 @@ func TestBool(codec GeneralCodec, t testing.TB) { bytesLen, err := manager.Size(0, myBool) require.NoError(err) - require.Len(bytes, bytesLen) + require.Equal(len(bytes), bytesLen) var boolUnmarshaled bool version, err := manager.Unmarshal(bytes, &boolUnmarshaled) @@ -281,7 +281,7 @@ func TestArray(codec GeneralCodec, t testing.TB) { bytesLen, err := manager.Size(0, myArr) require.NoError(err) - require.Len(bytes, bytesLen) + require.Equal(len(bytes), bytesLen) var myArrUnmarshaled [5]uint64 version, err := manager.Unmarshal(bytes, &myArrUnmarshaled) @@ -304,7 +304,7 @@ func TestBigArray(codec GeneralCodec, t testing.TB) { bytesLen, err := manager.Size(0, myArr) require.NoError(err) - require.Len(bytes, bytesLen) + require.Equal(len(bytes), bytesLen) var myArrUnmarshaled [30000]uint64 version, err := manager.Unmarshal(bytes, &myArrUnmarshaled) @@ -327,7 +327,7 @@ func TestPointerToStruct(codec GeneralCodec, t testing.TB) { bytesLen, err := manager.Size(0, myPtr) require.NoError(err) - require.Len(bytes, bytesLen) + require.Equal(len(bytes), bytesLen) var myPtrUnmarshaled *MyInnerStruct version, err := manager.Unmarshal(bytes, &myPtrUnmarshaled) @@ -364,7 +364,7 @@ func TestSliceOfStruct(codec GeneralCodec, t testing.TB) { bytesLen, err := 
manager.Size(0, mySlice) require.NoError(err) - require.Len(bytes, bytesLen) + require.Equal(len(bytes), bytesLen) var mySliceUnmarshaled []MyInnerStruct3 version, err := manager.Unmarshal(bytes, &mySliceUnmarshaled) @@ -390,7 +390,7 @@ func TestInterface(codec GeneralCodec, t testing.TB) { bytesLen, err := manager.Size(0, &f) require.NoError(err) - require.Len(bytes, bytesLen) + require.Equal(len(bytes), bytesLen) var unmarshaledFoo Foo version, err := manager.Unmarshal(bytes, &unmarshaledFoo) @@ -423,7 +423,7 @@ func TestSliceOfInterface(codec GeneralCodec, t testing.TB) { bytesLen, err := manager.Size(0, mySlice) require.NoError(err) - require.Len(bytes, bytesLen) + require.Equal(len(bytes), bytesLen) var mySliceUnmarshaled []Foo version, err := manager.Unmarshal(bytes, &mySliceUnmarshaled) @@ -456,7 +456,7 @@ func TestArrayOfInterface(codec GeneralCodec, t testing.TB) { bytesLen, err := manager.Size(0, myArray) require.NoError(err) - require.Len(bytes, bytesLen) + require.Equal(len(bytes), bytesLen) var myArrayUnmarshaled [2]Foo version, err := manager.Unmarshal(bytes, &myArrayUnmarshaled) @@ -484,7 +484,7 @@ func TestPointerToInterface(codec GeneralCodec, t testing.TB) { bytesLen, err := manager.Size(0, &myPtr) require.NoError(err) - require.Len(bytes, bytesLen) + require.Equal(len(bytes), bytesLen) var myPtrUnmarshaled *Foo version, err := manager.Unmarshal(bytes, &myPtrUnmarshaled) @@ -507,7 +507,7 @@ func TestString(codec GeneralCodec, t testing.TB) { bytesLen, err := manager.Size(0, myString) require.NoError(err) - require.Len(bytes, bytesLen) + require.Equal(len(bytes), bytesLen) var stringUnmarshaled string version, err := manager.Unmarshal(bytes, &stringUnmarshaled) @@ -534,7 +534,7 @@ func TestNilSlice(codec GeneralCodec, t testing.TB) { bytesLen, err := manager.Size(0, myStruct) require.NoError(err) - require.Len(bytes, bytesLen) + require.Equal(len(bytes), bytesLen) var structUnmarshaled structWithSlice version, err := manager.Unmarshal(bytes, &structUnmarshaled) @@ -591,7 +591,7 @@ func TestSerializeOfNoSerializeField(codec GeneralCodec, t testing.TB) { bytesLen, err := manager.Size(0, myS) require.NoError(err) - require.Len(marshalled, bytesLen) + require.Equal(len(marshalled), bytesLen) unmarshalled := s{} version, err := manager.Unmarshal(marshalled, &unmarshalled) @@ -622,7 +622,7 @@ func TestNilSliceSerialization(codec GeneralCodec, t testing.TB) { bytesLen, err := manager.Size(0, val) require.NoError(err) - require.Len(result, bytesLen) + require.Equal(len(result), bytesLen) valUnmarshaled := &simpleSliceStruct{} version, err := manager.Unmarshal(result, &valUnmarshaled) @@ -651,7 +651,7 @@ func TestEmptySliceSerialization(codec GeneralCodec, t testing.TB) { bytesLen, err := manager.Size(0, val) require.NoError(err) - require.Len(result, bytesLen) + require.Equal(len(result), bytesLen) valUnmarshaled := &simpleSliceStruct{} version, err := manager.Unmarshal(result, &valUnmarshaled) @@ -684,13 +684,13 @@ func TestSliceWithEmptySerialization(codec GeneralCodec, t testing.TB) { bytesLen, err := manager.Size(0, val) require.NoError(err) - require.Len(result, bytesLen) + require.Equal(len(result), bytesLen) unmarshaled := nestedSliceStruct{} version, err := manager.Unmarshal(expected, &unmarshaled) require.NoError(err) require.Zero(version) - require.Len(unmarshaled.Arr, 1000) + require.Equal(1000, len(unmarshaled.Arr)) } func TestSliceWithEmptySerializationOutOfMemory(codec GeneralCodec, t testing.TB) { @@ -751,7 +751,7 @@ func TestNegativeNumbers(codec GeneralCodec, t 
testing.TB) { bytesLen, err := manager.Size(0, myS) require.NoError(err) - require.Len(bytes, bytesLen) + require.Equal(len(bytes), bytesLen) mySUnmarshaled := s{} version, err := manager.Unmarshal(bytes, &mySUnmarshaled) From af4382d4d2fdd01c2bdab8d0cf60b8f63cd2a0b1 Mon Sep 17 00:00:00 2001 From: dhrubabasu <7675102+dhrubabasu@users.noreply.github.com> Date: Fri, 12 May 2023 17:04:38 -0400 Subject: [PATCH 17/79] nit --- utils/window/window_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/window/window_test.go b/utils/window/window_test.go index 24cb57d478e..9e36658850b 100644 --- a/utils/window/window_test.go +++ b/utils/window/window_test.go @@ -55,7 +55,7 @@ func TestAdd(t *testing.T) { window.Add(test.newlyAdded) - require.Equal(t, window.Length(), len(test.window)+1) + require.Equal(t, len(test.window)+1, window.Length()) oldest, ok := window.Oldest() require.Equal(t, test.expectedOldest, oldest) require.True(t, ok) From 5301a7dec8ac1c0fbad8c67da02dbcb97e755747 Mon Sep 17 00:00:00 2001 From: dhrubabasu <7675102+dhrubabasu@users.noreply.github.com> Date: Fri, 12 May 2023 17:07:18 -0400 Subject: [PATCH 18/79] nits --- codec/test_codec.go | 2 +- utils/window/window_test.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/codec/test_codec.go b/codec/test_codec.go index e82593e1e3f..40e91b7df5a 100644 --- a/codec/test_codec.go +++ b/codec/test_codec.go @@ -690,7 +690,7 @@ func TestSliceWithEmptySerialization(codec GeneralCodec, t testing.TB) { version, err := manager.Unmarshal(expected, &unmarshaled) require.NoError(err) require.Zero(version) - require.Equal(1000, len(unmarshaled.Arr)) + require.Len(unmarshaled.Arr, 1000) } func TestSliceWithEmptySerializationOutOfMemory(codec GeneralCodec, t testing.TB) { diff --git a/utils/window/window_test.go b/utils/window/window_test.go index 9e36658850b..24cb57d478e 100644 --- a/utils/window/window_test.go +++ b/utils/window/window_test.go @@ -55,7 +55,7 @@ func TestAdd(t *testing.T) { window.Add(test.newlyAdded) - require.Equal(t, len(test.window)+1, window.Length()) + require.Equal(t, window.Length(), len(test.window)+1) oldest, ok := window.Oldest() require.Equal(t, test.expectedOldest, oldest) require.True(t, ok) From e249d2b6fda68502a2c021703a8d28b8e98c2d59 Mon Sep 17 00:00:00 2001 From: dhrubabasu <7675102+dhrubabasu@users.noreply.github.com> Date: Fri, 12 May 2023 17:14:33 -0400 Subject: [PATCH 19/79] reduce diff --- vms/platformvm/service_test.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/vms/platformvm/service_test.go b/vms/platformvm/service_test.go index 3df928a523b..124b159ea29 100644 --- a/vms/platformvm/service_test.go +++ b/vms/platformvm/service_test.go @@ -468,8 +468,7 @@ func TestGetStake(t *testing.T) { } response := GetStakeReply{} require.NoError(service.GetStake(nil, &args, &response)) - totalWeight := len(genesis.Validators) * int(defaultWeight) - require.Equal(totalWeight, int(response.Staked)) + require.Equal(len(genesis.Validators)*int(defaultWeight), int(response.Staked)) require.Len(response.Outputs, len(genesis.Validators)) for _, outputStr := range response.Outputs { From be8230553099fdc671590f1cff51d24a9f9a085e Mon Sep 17 00:00:00 2001 From: dhrubabasu <7675102+dhrubabasu@users.noreply.github.com> Date: Fri, 12 May 2023 17:16:02 -0400 Subject: [PATCH 20/79] reduce diff --- utils/set/bits_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/set/bits_test.go b/utils/set/bits_test.go index 0fa47cc41fb..5541395a5aa 100644 
--- a/utils/set/bits_test.go +++ b/utils/set/bits_test.go @@ -499,7 +499,7 @@ func Test_Bits_Bytes(t *testing.T) { bytes := b.Bytes() fromBytes := BitsFromBytes(bytes) - require.Len(tt.elts, fromBytes.Len()) + require.Equal(len(tt.elts), fromBytes.Len()) for _, elt := range tt.elts { require.True(fromBytes.Contains(elt)) } From 8aa012d3a5e8636fbd915dc380f52d752551c1fe Mon Sep 17 00:00:00 2001 From: dhrubabasu <7675102+dhrubabasu@users.noreply.github.com> Date: Fri, 12 May 2023 17:18:30 -0400 Subject: [PATCH 21/79] reduce diff --- utils/window/window_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utils/window/window_test.go b/utils/window/window_test.go index 24cb57d478e..9e36658850b 100644 --- a/utils/window/window_test.go +++ b/utils/window/window_test.go @@ -55,7 +55,7 @@ func TestAdd(t *testing.T) { window.Add(test.newlyAdded) - require.Equal(t, window.Length(), len(test.window)+1) + require.Equal(t, len(test.window)+1, window.Length()) oldest, ok := window.Oldest() require.Equal(t, test.expectedOldest, oldest) require.True(t, ok) From 24fb051d349e781023c08a9e31f2e3c1ce29b947 Mon Sep 17 00:00:00 2001 From: dhrubabasu <7675102+dhrubabasu@users.noreply.github.com> Date: Fri, 12 May 2023 17:31:15 -0400 Subject: [PATCH 22/79] nit --- database/test_database.go | 3 ++- scripts/lint.sh | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/database/test_database.go b/database/test_database.go index ef99302e1e2..1fcaeb49da6 100644 --- a/database/test_database.go +++ b/database/test_database.go @@ -921,7 +921,8 @@ func TestCompactNoPanic(t *testing.T, db Database) { require.NoError(db.Compact(nil, nil)) require.NoError(db.Close()) - require.Equal(ErrClosed, db.Compact(nil, nil)) + err := db.Compact(nil, nil) + require.ErrorIs(err, ErrClosed) } // TestClear tests to make sure the deletion helper works as expected. diff --git a/scripts/lint.sh b/scripts/lint.sh index a52ce11625c..aa3a276df33 100755 --- a/scripts/lint.sh +++ b/scripts/lint.sh @@ -99,7 +99,7 @@ function test_require_equal_len { } function test_require_nil { - if grep -R -zo -P 'require.+?nil\)\n' .; then + if grep -R -o -P 'require\..+?(!|=)= nil' .; then echo "" echo "Use a require function to test for 'nil'" echo "" From dfeb84be7aca8675c9cfeab759299799e76e97b0 Mon Sep 17 00:00:00 2001 From: dhrubabasu <7675102+dhrubabasu@users.noreply.github.com> Date: Fri, 12 May 2023 17:36:23 -0400 Subject: [PATCH 23/79] more nil checks --- scripts/lint.sh | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) diff --git a/scripts/lint.sh b/scripts/lint.sh index d5253effb8d..9f125a6ffd1 100755 --- a/scripts/lint.sh +++ b/scripts/lint.sh @@ -94,9 +94,23 @@ function test_require_equal_len { } function test_require_nil { - if grep -R -o -P 'require\..+?(!|=)= nil' .; then + if grep -R -o -P 'require\..+?!= nil' .; then echo "" - echo "Use a require function to test for 'nil'" + echo "Use require.NotNil for nil inequality." + echo "" + return 1 + fi + + if grep -R -o -P 'require\..+?== nil' .; then + echo "" + echo "Use require.Nil to test for nil equality." + echo "" + return 1 + fi + + if grep -R -zo -P 'require\.ErrorIs.+?nil\)\n' .; then + echo "" + echo "Use require.NoError instead of require.ErrorIs nil." 
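  # Editorial aside, not part of the committed patch: this check flags
  # assertions such as
  #   require.ErrorIs(err, nil)
  # which are equivalent to, and should be written as,
  #   require.NoError(err)
  # The -z flag treats the file as a single record so the pattern can match
  # through the trailing newline of the offending line.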
echo "" return 1 fi From 7901762dbfa21d3501122da0f8fb79a06b7ab100 Mon Sep 17 00:00:00 2001 From: dhrubabasu <7675102+dhrubabasu@users.noreply.github.com> Date: Fri, 12 May 2023 17:37:02 -0400 Subject: [PATCH 24/79] wording --- scripts/lint.sh | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/scripts/lint.sh b/scripts/lint.sh index 9f125a6ffd1..e55607a4823 100755 --- a/scripts/lint.sh +++ b/scripts/lint.sh @@ -96,21 +96,21 @@ function test_require_equal_len { function test_require_nil { if grep -R -o -P 'require\..+?!= nil' .; then echo "" - echo "Use require.NotNil for nil inequality." + echo "Use require.NotNil when testing for nil inequality." echo "" return 1 fi if grep -R -o -P 'require\..+?== nil' .; then echo "" - echo "Use require.Nil to test for nil equality." + echo "Use require.Nil when testing for nil equality." echo "" return 1 fi if grep -R -zo -P 'require\.ErrorIs.+?nil\)\n' .; then echo "" - echo "Use require.NoError instead of require.ErrorIs nil." + echo "Use require.NoError instead of require.ErrorIs when testing for nil error." echo "" return 1 fi From 2f1ba2d7dcd8d4e9c1b9a0fc47c4f1722458b0e3 Mon Sep 17 00:00:00 2001 From: dhrubabasu <7675102+dhrubabasu@users.noreply.github.com> Date: Fri, 12 May 2023 18:19:31 -0400 Subject: [PATCH 25/79] regex nit --- scripts/lint.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/lint.sh b/scripts/lint.sh index 27e0b148625..ac9c1e4c365 100755 --- a/scripts/lint.sh +++ b/scripts/lint.sh @@ -85,7 +85,7 @@ function test_require_len_zero { } function test_require_equal_len { - if grep -R -o -P 'require\.Equal\((t, )?.*, len\(' .; then + if grep -R -o -P 'require\.Equal\((t, )?.*, len\([^,]*$' .; then echo "" echo "Use require.Len instead of require.Equal when testing for length." echo "" From 4680df511f73aa3f6fbf33b50bc57fa7d90ae066 Mon Sep 17 00:00:00 2001 From: dhrubabasu <7675102+dhrubabasu@users.noreply.github.com> Date: Sat, 13 May 2023 15:54:16 -0400 Subject: [PATCH 26/79] add linter --- scripts/lint.sh | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/scripts/lint.sh b/scripts/lint.sh index f294ac188a5..16b02973e52 100755 --- a/scripts/lint.sh +++ b/scripts/lint.sh @@ -21,7 +21,7 @@ fi # by default, "./scripts/lint.sh" runs all lint tests # to run only "license_header" test # TESTS='license_header' ./scripts/lint.sh -TESTS=${TESTS:-"golangci_lint license_header require_error_is_no_funcs_as_params single_import interface_compliance_nil require_equal_zero require_len_zero require_equal_len require_nil"} +TESTS=${TESTS:-"golangci_lint license_header require_error_is_no_funcs_as_params single_import interface_compliance_nil require_equal_zero require_len_zero require_equal_len require_nil require_no_error_inline_func"} function test_golangci_lint { go install -v github.com/golangci/golangci-lint/cmd/golangci-lint@v1.51.2 @@ -116,6 +116,15 @@ function test_require_nil { fi } +function test_require_no_error_inline_func { + if grep -R -zo -P '\t+err :?=.*\n\t+require\.NoError\(err\)' .; then + echo "" + echo "Checking that a function with a single error return doesn't error should be done in-line." + echo "" + return 1 + fi +} + # Ref: https://go.dev/doc/effective_go#blank_implements function test_interface_compliance_nil { if grep -R -o -P '_ .+? 
= &.+?\{\}' .; then From e9677711a86d60d70b9ab39e608f24a8bd226721 Mon Sep 17 00:00:00 2001 From: dhrubabasu <7675102+dhrubabasu@users.noreply.github.com> Date: Sat, 13 May 2023 16:12:23 -0400 Subject: [PATCH 27/79] fix --- api/auth/auth_test.go | 3 +- .../gsharedmemory/shared_memory_test.go | 7 +- chains/atomic/test_shared_memory.go | 109 ++---- codec/test_codec.go | 115 ++---- database/linkeddb/linkeddb_test.go | 117 ++---- database/manager/manager_test.go | 24 +- ids/aliases_test.go | 7 +- ids/test_aliases.go | 39 +- indexer/index_test.go | 15 +- network/network_test.go | 32 +- network/peer/peer_test.go | 24 +- .../throttling/inbound_conn_throttler_test.go | 5 +- pubsub/filter_test.go | 3 +- snow/consensus/snowman/consensus_test.go | 21 +- snow/engine/common/queue/jobs_test.go | 26 +- snow/engine/common/tracker/peers_test.go | 6 +- snow/engine/snowman/transitive_test.go | 37 +- snow/networking/router/chain_router_test.go | 3 +- snow/networking/sender/sender_test.go | 9 +- snow/uptime/manager_test.go | 114 ++---- snow/validators/manager_test.go | 24 +- snow/validators/set_test.go | 131 +++---- staking/tls_test.go | 3 +- utils/beacon/set_test.go | 17 +- utils/crypto/secp256k1/secp256k1_test.go | 3 +- utils/profiler/profiler_test.go | 14 +- utils/sampler/weighted_heap_test.go | 3 +- utils/set/set_test.go | 21 +- vms/avm/blocks/builder/builder_test.go | 3 +- vms/avm/blocks/executor/block_test.go | 3 +- vms/avm/blocks/executor/manager_test.go | 6 +- vms/avm/network/network_test.go | 3 +- vms/avm/pubsub_filterer_test.go | 3 +- vms/avm/service_test.go | 25 +- vms/avm/states/state_test.go | 9 +- vms/avm/txs/mempool/mempool_test.go | 3 +- vms/components/avax/utxo_fetching_test.go | 12 +- vms/components/avax/utxo_state_test.go | 12 +- vms/components/keystore/user_test.go | 9 +- vms/components/message/handler_test.go | 3 +- vms/platformvm/blocks/builder/builder_test.go | 3 +- vms/platformvm/blocks/builder/network_test.go | 9 +- .../blocks/executor/acceptor_test.go | 15 +- .../blocks/executor/rejector_test.go | 3 +- .../blocks/executor/verifier_test.go | 30 +- vms/platformvm/health_test.go | 12 +- vms/platformvm/service_test.go | 78 ++-- .../state/validator_metadata_test.go | 18 +- .../status/blockchain_status_test.go | 6 +- vms/platformvm/status/status_test.go | 6 +- .../txs/executor/advance_time_test.go | 24 +- .../txs/executor/create_chain_test.go | 6 +- vms/platformvm/txs/executor/export_test.go | 3 +- vms/platformvm/txs/executor/import_test.go | 3 +- .../txs/executor/proposal_tx_executor_test.go | 21 +- .../txs/executor/reward_validator_test.go | 9 +- .../txs/executor/standard_tx_executor_test.go | 18 +- vms/platformvm/txs/txheap/by_end_time_test.go | 9 +- .../txs/txheap/by_start_time_test.go | 9 +- vms/platformvm/vm_regression_test.go | 99 ++--- vms/platformvm/vm_test.go | 90 ++--- vms/proposervm/block/build_test.go | 6 +- vms/proposervm/tree/tree_test.go | 9 +- vms/proposervm/vm_test.go | 65 +-- vms/rpcchainvm/batched_vm_test.go | 3 +- vms/rpcchainvm/ghttp/http_test.go | 5 +- vms/rpcchainvm/grpcutils/client_test.go | 3 +- vms/rpcchainvm/vm_test.go | 3 +- vms/rpcchainvm/with_context_vm_test.go | 6 +- x/merkledb/cache_test.go | 21 +- x/merkledb/codec_test.go | 21 +- x/merkledb/db_test.go | 45 +-- x/merkledb/history_test.go | 369 ++++++------------ x/merkledb/trie_test.go | 87 ++--- x/sync/client_test.go | 18 +- x/sync/sync_test.go | 24 +- 76 files changed, 735 insertions(+), 1444 deletions(-) diff --git a/api/auth/auth_test.go b/api/auth/auth_test.go index 0094f2bec5b..e691b11d868 100644 --- 
a/api/auth/auth_test.go +++ b/api/auth/auth_test.go @@ -144,8 +144,7 @@ func TestRevokeToken(t *testing.T) { tokenStr, err := auth.NewToken(testPassword, defaultTokenLifespan, endpoints) require.NoError(err) - err = auth.RevokeToken(tokenStr, testPassword) - require.NoError(err) + require.NoError(auth.RevokeToken(tokenStr, testPassword)) require.Len(auth.revoked, 1) } diff --git a/chains/atomic/gsharedmemory/shared_memory_test.go b/chains/atomic/gsharedmemory/shared_memory_test.go index 715e0e43e0d..f8ae05ad620 100644 --- a/chains/atomic/gsharedmemory/shared_memory_test.go +++ b/chains/atomic/gsharedmemory/shared_memory_test.go @@ -37,11 +37,8 @@ func TestInterface(t *testing.T) { test(t, chainID0, chainID1, sm0, sm1, testDB) - err := conn0.Close() - require.NoError(err) - - err = conn1.Close() - require.NoError(err) + require.NoError(conn0.Close()) + require.NoError(conn1.Close()) } } diff --git a/chains/atomic/test_shared_memory.go b/chains/atomic/test_shared_memory.go index 7d3c9ded1a7..d89940c31c2 100644 --- a/chains/atomic/test_shared_memory.go +++ b/chains/atomic/test_shared_memory.go @@ -31,12 +31,10 @@ var SharedMemoryTests = []func(t *testing.T, chainID0, chainID1 ids.ID, sm0, sm1 func TestSharedMemoryPutAndGet(t *testing.T, chainID0, chainID1 ids.ID, sm0, sm1 SharedMemory, _ database.Database) { require := require.New(t) - err := sm0.Apply(map[ids.ID]*Requests{chainID1: {PutRequests: []*Element{{ + require.NoError(sm0.Apply(map[ids.ID]*Requests{chainID1: {PutRequests: []*Element{{ Key: []byte{0}, Value: []byte{1}, - }}}}) - - require.NoError(err) + }}}})) values, err := sm1.Get(chainID0, [][]byte{{0}}) require.NoError(err) @@ -73,12 +71,11 @@ func TestSharedMemoryLargePutGetAndRemove(t *testing.T, chainID0, chainID1 ids.I keys = append(keys, key) } - err = sm0.Apply(map[ids.ID]*Requests{ + require.NoError(sm0.Apply(map[ids.ID]*Requests{ chainID1: { PutRequests: elems, }, - }) - require.NoError(err) + })) values, err := sm1.Get( chainID0, @@ -89,37 +86,33 @@ func TestSharedMemoryLargePutGetAndRemove(t *testing.T, chainID0, chainID1 ids.I require.Equal(elems[i].Value, value) } - err = sm1.Apply(map[ids.ID]*Requests{ + require.NoError(sm1.Apply(map[ids.ID]*Requests{ chainID0: { RemoveRequests: keys, }, - }) - - require.NoError(err) + })) } func TestSharedMemoryIndexed(t *testing.T, chainID0, chainID1 ids.ID, sm0, sm1 SharedMemory, _ database.Database) { require := require.New(t) - err := sm0.Apply(map[ids.ID]*Requests{chainID1: {PutRequests: []*Element{{ + require.NoError(sm0.Apply(map[ids.ID]*Requests{chainID1: {PutRequests: []*Element{{ Key: []byte{0}, Value: []byte{1}, Traits: [][]byte{ {2}, {3}, }, - }}}}) - require.NoError(err) + }}}})) - err = sm0.Apply(map[ids.ID]*Requests{chainID1: {PutRequests: []*Element{{ + require.NoError(sm0.Apply(map[ids.ID]*Requests{chainID1: {PutRequests: []*Element{{ Key: []byte{4}, Value: []byte{5}, Traits: [][]byte{ {2}, {3}, }, - }}}}) - require.NoError(err) + }}}})) values, _, _, err := sm0.Indexed(chainID1, [][]byte{{2}}, nil, nil, 1) require.NoError(err) @@ -183,8 +176,7 @@ func TestSharedMemoryLargeIndexed(t *testing.T, chainID0, chainID1 ids.ID, sm0, }) } - err = sm0.Apply(map[ids.ID]*Requests{chainID1: {PutRequests: elems}}) - require.NoError(err) + require.NoError(sm0.Apply(map[ids.ID]*Requests{chainID1: {PutRequests: elems}})) values, _, _, err := sm1.Indexed(chainID0, allTraits, nil, nil, len(elems)+1) require.NoError(err) @@ -207,11 +199,10 @@ func TestSharedMemoryCantDuplicatePut(t *testing.T, _, chainID1 ids.ID, sm0, _ S // TODO: 
require error to be errDuplicatedOperation require.Error(err) //nolint:forbidigo // currently returns grpc errors too - err = sm0.Apply(map[ids.ID]*Requests{chainID1: {PutRequests: []*Element{{ + require.NoError(sm0.Apply(map[ids.ID]*Requests{chainID1: {PutRequests: []*Element{{ Key: []byte{0}, Value: []byte{1}, - }}}}) - require.NoError(err) + }}}})) err = sm0.Apply(map[ids.ID]*Requests{chainID1: {PutRequests: []*Element{{ Key: []byte{0}, @@ -224,10 +215,9 @@ func TestSharedMemoryCantDuplicatePut(t *testing.T, _, chainID1 ids.ID, sm0, _ S func TestSharedMemoryCantDuplicateRemove(t *testing.T, _, chainID1 ids.ID, sm0, _ SharedMemory, _ database.Database) { require := require.New(t) - err := sm0.Apply(map[ids.ID]*Requests{chainID1: {RemoveRequests: [][]byte{{0}}}}) - require.NoError(err) + require.NoError(sm0.Apply(map[ids.ID]*Requests{chainID1: {RemoveRequests: [][]byte{{0}}}})) - err = sm0.Apply(map[ids.ID]*Requests{chainID1: {RemoveRequests: [][]byte{{0}}}}) + err := sm0.Apply(map[ids.ID]*Requests{chainID1: {RemoveRequests: [][]byte{{0}}}}) // TODO: require error to be errDuplicatedOperation require.Error(err) //nolint:forbidigo // currently returns grpc errors too } @@ -235,25 +225,21 @@ func TestSharedMemoryCantDuplicateRemove(t *testing.T, _, chainID1 ids.ID, sm0, func TestSharedMemoryCommitOnPut(t *testing.T, _, chainID1 ids.ID, sm0, _ SharedMemory, db database.Database) { require := require.New(t) - err := db.Put([]byte{1}, []byte{2}) - require.NoError(err) + require.NoError(db.Put([]byte{1}, []byte{2})) batch := db.NewBatch() - err = batch.Put([]byte{0}, []byte{1}) - require.NoError(err) + require.NoError(batch.Put([]byte{0}, []byte{1})) - err = batch.Delete([]byte{1}) - require.NoError(err) + require.NoError(batch.Delete([]byte{1})) - err = sm0.Apply( + require.NoError(sm0.Apply( map[ids.ID]*Requests{chainID1: {PutRequests: []*Element{{ Key: []byte{0}, Value: []byte{1}, }}}}, batch, - ) - require.NoError(err) + )) val, err := db.Get([]byte{0}) require.NoError(err) @@ -267,22 +253,18 @@ func TestSharedMemoryCommitOnPut(t *testing.T, _, chainID1 ids.ID, sm0, _ Shared func TestSharedMemoryCommitOnRemove(t *testing.T, _, chainID1 ids.ID, sm0, _ SharedMemory, db database.Database) { require := require.New(t) - err := db.Put([]byte{1}, []byte{2}) - require.NoError(err) + require.NoError(db.Put([]byte{1}, []byte{2})) batch := db.NewBatch() - err = batch.Put([]byte{0}, []byte{1}) - require.NoError(err) + require.NoError(batch.Put([]byte{0}, []byte{1})) - err = batch.Delete([]byte{1}) - require.NoError(err) + require.NoError(batch.Delete([]byte{1})) - err = sm0.Apply( + require.NoError(sm0.Apply( map[ids.ID]*Requests{chainID1: {RemoveRequests: [][]byte{{0}}}}, batch, - ) - require.NoError(err) + )) val, err := db.Get([]byte{0}) require.NoError(err) @@ -299,8 +281,7 @@ func TestPutAndRemoveBatch(t *testing.T, chainID0, _ ids.ID, _, sm1 SharedMemory batch := db.NewBatch() - err := batch.Put([]byte{0}, []byte{1}) - require.NoError(err) + require.NoError(batch.Put([]byte{0}, []byte{1})) batchChainsAndInputs := make(map[ids.ID]*Requests) @@ -314,9 +295,7 @@ func TestPutAndRemoveBatch(t *testing.T, chainID0, _ ids.ID, _, sm1 SharedMemory RemoveRequests: byteArr, } - err = sm1.Apply(batchChainsAndInputs, batch) - - require.NoError(err) + require.NoError(sm1.Apply(batchChainsAndInputs, batch)) val, err := db.Get([]byte{0}) require.NoError(err) @@ -348,24 +327,19 @@ func TestSharedMemoryLargeBatchSize(t *testing.T, _, chainID1 ids.ID, sm0, _ Sha value := bytes[:elementSize] bytes = 
bytes[elementSize:] - err := batch.Put(key, value) - require.NoError(err) + require.NoError(batch.Put(key, value)) } - err = db.Put([]byte{1}, []byte{2}) - require.NoError(err) + require.NoError(db.Put([]byte{1}, []byte{2})) - err = batch.Put([]byte{0}, []byte{1}) - require.NoError(err) + require.NoError(batch.Put([]byte{0}, []byte{1})) - err = batch.Delete([]byte{1}) - require.NoError(err) + require.NoError(batch.Delete([]byte{1})) - err = sm0.Apply( + require.NoError(sm0.Apply( map[ids.ID]*Requests{chainID1: {RemoveRequests: [][]byte{{0}}}}, batch, - ) - require.NoError(err) + )) val, err := db.Get([]byte{0}) require.NoError(err) @@ -382,16 +356,13 @@ func TestSharedMemoryLargeBatchSize(t *testing.T, _, chainID1 ids.ID, sm0, _ Sha key := bytes[:elementSize] bytes = bytes[pairSize:] - err := batch.Delete(key) - require.NoError(err) + require.NoError(batch.Delete(key)) } - err = sm0.Apply( + require.NoError(sm0.Apply( map[ids.ID]*Requests{chainID1: {RemoveRequests: [][]byte{{1}}}}, batch, - ) - - require.NoError(err) + )) batch.Reset() @@ -400,8 +371,7 @@ func TestSharedMemoryLargeBatchSize(t *testing.T, _, chainID1 ids.ID, sm0, _ Sha key := bytes[:elementSize] bytes = bytes[pairSize:] - err := batch.Delete(key) - require.NoError(err) + require.NoError(batch.Delete(key)) } batchChainsAndInputs := make(map[ids.ID]*Requests) @@ -416,9 +386,8 @@ func TestSharedMemoryLargeBatchSize(t *testing.T, _, chainID1 ids.ID, sm0, _ Sha RemoveRequests: byteArr, } - err = sm0.Apply( + require.NoError(sm0.Apply( batchChainsAndInputs, batch, - ) - require.NoError(err) + )) } diff --git a/codec/test_codec.go b/codec/test_codec.go index 40e91b7df5a..13d81d1eca6 100644 --- a/codec/test_codec.go +++ b/codec/test_codec.go @@ -167,8 +167,7 @@ func TestUInt32(codec GeneralCodec, t testing.TB) { number := uint32(500) manager := NewDefaultManager() - err := manager.RegisterCodec(0, codec) - require.NoError(err) + require.NoError(manager.RegisterCodec(0, codec)) bytes, err := manager.Marshal(0, number) require.NoError(err) @@ -189,11 +188,10 @@ func TestUIntPtr(codec GeneralCodec, t testing.TB) { manager := NewDefaultManager() - err := manager.RegisterCodec(0, codec) - require.NoError(err) + require.NoError(manager.RegisterCodec(0, codec)) number := uintptr(500) - _, err = manager.Marshal(0, number) + _, err := manager.Marshal(0, number) require.ErrorIs(err, ErrUnsupportedType) } @@ -202,8 +200,7 @@ func TestSlice(codec GeneralCodec, t testing.TB) { mySlice := []bool{true, false, true, true} manager := NewDefaultManager() - err := manager.RegisterCodec(0, codec) - require.NoError(err) + require.NoError(manager.RegisterCodec(0, codec)) bytes, err := manager.Marshal(0, mySlice) require.NoError(err) @@ -227,8 +224,7 @@ func TestMaxSizeSlice(codec GeneralCodec, t testing.TB) { mySlice[0] = "first!" mySlice[math.MaxUint16-1] = "last!" 
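// Editorial sketch, not part of the original diff: patch 26 added the
// test_require_no_error_inline_func linter, which is why this patch collapses
// the two-step pattern
//
//	err := manager.RegisterCodec(0, codec)
//	require.NoError(err)
//
// into the single in-line assertion used below:
//
//	require.NoError(manager.RegisterCodec(0, codec))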
manager := NewDefaultManager() - err := manager.RegisterCodec(0, codec) - require.NoError(err) + require.NoError(manager.RegisterCodec(0, codec)) bytes, err := manager.Marshal(0, mySlice) require.NoError(err) @@ -250,8 +246,7 @@ func TestBool(codec GeneralCodec, t testing.TB) { myBool := true manager := NewDefaultManager() - err := manager.RegisterCodec(0, codec) - require.NoError(err) + require.NoError(manager.RegisterCodec(0, codec)) bytes, err := manager.Marshal(0, myBool) require.NoError(err) @@ -273,8 +268,7 @@ func TestArray(codec GeneralCodec, t testing.TB) { myArr := [5]uint64{5, 6, 7, 8, 9} manager := NewDefaultManager() - err := manager.RegisterCodec(0, codec) - require.NoError(err) + require.NoError(manager.RegisterCodec(0, codec)) bytes, err := manager.Marshal(0, myArr) require.NoError(err) @@ -296,8 +290,7 @@ func TestBigArray(codec GeneralCodec, t testing.TB) { myArr := [30000]uint64{5, 6, 7, 8, 9} manager := NewDefaultManager() - err := manager.RegisterCodec(0, codec) - require.NoError(err) + require.NoError(manager.RegisterCodec(0, codec)) bytes, err := manager.Marshal(0, myArr) require.NoError(err) @@ -319,8 +312,7 @@ func TestPointerToStruct(codec GeneralCodec, t testing.TB) { myPtr := &MyInnerStruct{Str: "Hello!"} manager := NewDefaultManager() - err := manager.RegisterCodec(0, codec) - require.NoError(err) + require.NoError(manager.RegisterCodec(0, codec)) bytes, err := manager.Marshal(0, myPtr) require.NoError(err) @@ -352,12 +344,10 @@ func TestSliceOfStruct(codec GeneralCodec, t testing.TB) { F: &MyInnerStruct{"Six"}, }, } - err := codec.RegisterType(&MyInnerStruct{}) - require.NoError(err) + require.NoError(codec.RegisterType(&MyInnerStruct{})) manager := NewDefaultManager() - err = manager.RegisterCodec(0, codec) - require.NoError(err) + require.NoError(manager.RegisterCodec(0, codec)) bytes, err := manager.Marshal(0, mySlice) require.NoError(err) @@ -377,12 +367,10 @@ func TestSliceOfStruct(codec GeneralCodec, t testing.TB) { func TestInterface(codec GeneralCodec, t testing.TB) { require := require.New(t) - err := codec.RegisterType(&MyInnerStruct2{}) - require.NoError(err) + require.NoError(codec.RegisterType(&MyInnerStruct2{})) manager := NewDefaultManager() - err = manager.RegisterCodec(0, codec) - require.NoError(err) + require.NoError(manager.RegisterCodec(0, codec)) var f Foo = &MyInnerStruct2{true} bytes, err := manager.Marshal(0, &f) @@ -411,12 +399,10 @@ func TestSliceOfInterface(codec GeneralCodec, t testing.TB) { Str: ", World!", }, } - err := codec.RegisterType(&MyInnerStruct{}) - require.NoError(err) + require.NoError(codec.RegisterType(&MyInnerStruct{})) manager := NewDefaultManager() - err = manager.RegisterCodec(0, codec) - require.NoError(err) + require.NoError(manager.RegisterCodec(0, codec)) bytes, err := manager.Marshal(0, mySlice) require.NoError(err) @@ -444,12 +430,10 @@ func TestArrayOfInterface(codec GeneralCodec, t testing.TB) { Str: ", World!", }, } - err := codec.RegisterType(&MyInnerStruct{}) - require.NoError(err) + require.NoError(codec.RegisterType(&MyInnerStruct{})) manager := NewDefaultManager() - err = manager.RegisterCodec(0, codec) - require.NoError(err) + require.NoError(manager.RegisterCodec(0, codec)) bytes, err := manager.Marshal(0, myArray) require.NoError(err) @@ -472,12 +456,10 @@ func TestPointerToInterface(codec GeneralCodec, t testing.TB) { var myinnerStruct Foo = &MyInnerStruct{Str: "Hello!"} myPtr := &myinnerStruct - err := codec.RegisterType(&MyInnerStruct{}) - require.NoError(err) + 
require.NoError(codec.RegisterType(&MyInnerStruct{})) manager := NewDefaultManager() - err = manager.RegisterCodec(0, codec) - require.NoError(err) + require.NoError(manager.RegisterCodec(0, codec)) bytes, err := manager.Marshal(0, &myPtr) require.NoError(err) @@ -499,8 +481,7 @@ func TestString(codec GeneralCodec, t testing.TB) { myString := "Ayy" manager := NewDefaultManager() - err := manager.RegisterCodec(0, codec) - require.NoError(err) + require.NoError(manager.RegisterCodec(0, codec)) bytes, err := manager.Marshal(0, myString) require.NoError(err) @@ -526,8 +507,7 @@ func TestNilSlice(codec GeneralCodec, t testing.TB) { myStruct := structWithSlice{Slice: nil} manager := NewDefaultManager() - err := manager.RegisterCodec(0, codec) - require.NoError(err) + require.NoError(manager.RegisterCodec(0, codec)) bytes, err := manager.Marshal(0, myStruct) require.NoError(err) @@ -559,10 +539,9 @@ func TestSerializeUnexportedField(codec GeneralCodec, t testing.TB) { } manager := NewDefaultManager() - err := manager.RegisterCodec(0, codec) - require.NoError(err) + require.NoError(manager.RegisterCodec(0, codec)) - _, err = manager.Marshal(0, myS) + _, err := manager.Marshal(0, myS) require.ErrorIs(err, ErrUnexportedField) _, err = manager.Size(0, myS) @@ -583,8 +562,7 @@ func TestSerializeOfNoSerializeField(codec GeneralCodec, t testing.TB) { UnmarkedField: "No declared serialize", } manager := NewDefaultManager() - err := manager.RegisterCodec(0, codec) - require.NoError(err) + require.NoError(manager.RegisterCodec(0, codec)) marshalled, err := manager.Marshal(0, myS) require.NoError(err) @@ -611,8 +589,7 @@ func TestNilSliceSerialization(codec GeneralCodec, t testing.TB) { } manager := NewDefaultManager() - err := manager.RegisterCodec(0, codec) - require.NoError(err) + require.NoError(manager.RegisterCodec(0, codec)) val := &simpleSliceStruct{} expected := []byte{0, 0, 0, 0, 0, 0} // 0 for codec version, then nil slice marshaled as 0 length slice @@ -640,8 +617,7 @@ func TestEmptySliceSerialization(codec GeneralCodec, t testing.TB) { } manager := NewDefaultManager() - err := manager.RegisterCodec(0, codec) - require.NoError(err) + require.NoError(manager.RegisterCodec(0, codec)) val := &simpleSliceStruct{Arr: make([]uint32, 0, 1)} expected := []byte{0, 0, 0, 0, 0, 0} // 0 for codec version (uint16) and 0 for size (uint32) @@ -671,8 +647,7 @@ func TestSliceWithEmptySerialization(codec GeneralCodec, t testing.TB) { } manager := NewDefaultManager() - err := manager.RegisterCodec(0, codec) - require.NoError(err) + require.NoError(manager.RegisterCodec(0, codec)) val := &nestedSliceStruct{ Arr: make([]emptyStruct, 1000), @@ -703,13 +678,12 @@ func TestSliceWithEmptySerializationOutOfMemory(codec GeneralCodec, t testing.TB } manager := NewDefaultManager() - err := manager.RegisterCodec(0, codec) - require.NoError(err) + require.NoError(manager.RegisterCodec(0, codec)) val := &nestedSliceStruct{ Arr: make([]emptyStruct, math.MaxInt32), } - _, err = manager.Marshal(0, val) + _, err := manager.Marshal(0, val) require.ErrorIs(err, ErrMaxSliceLenExceeded) bytesLen, err := manager.Size(0, val) @@ -721,12 +695,11 @@ func TestSliceTooLarge(codec GeneralCodec, t testing.TB) { require := require.New(t) manager := NewDefaultManager() - err := manager.RegisterCodec(0, codec) - require.NoError(err) + require.NoError(manager.RegisterCodec(0, codec)) val := []struct{}{} b := []byte{0x00, 0x00, 0xff, 0xff, 0xff, 0xff} - _, err = manager.Unmarshal(b, &val) + _, err := manager.Unmarshal(b, &val) require.ErrorIs(err, 
ErrMaxSliceLenExceeded) } @@ -742,8 +715,7 @@ func TestNegativeNumbers(codec GeneralCodec, t testing.TB) { } manager := NewDefaultManager() - err := manager.RegisterCodec(0, codec) - require.NoError(err) + require.NoError(manager.RegisterCodec(0, codec)) myS := s{-1, -2, -3, -4} bytes, err := manager.Marshal(0, myS) @@ -770,11 +742,10 @@ func TestTooLargeUnmarshal(codec GeneralCodec, t testing.TB) { bytes := []byte{0, 0, 0, 0} manager := NewManager(3) - err := manager.RegisterCodec(0, codec) - require.NoError(err) + require.NoError(manager.RegisterCodec(0, codec)) s := inner{} - _, err = manager.Unmarshal(bytes, &s) + _, err := manager.Unmarshal(bytes, &s) require.ErrorIs(err, errUnmarshalTooBig) } @@ -828,11 +799,10 @@ func TestRestrictedSlice(codec GeneralCodec, t testing.TB) { bytes := []byte{0, 0, 0, 0, 0, 3, 0, 1, 2} manager := NewDefaultManager() - err := manager.RegisterCodec(0, codec) - require.NoError(err) + require.NoError(manager.RegisterCodec(0, codec)) s := inner{} - _, err = manager.Unmarshal(bytes, &s) + _, err := manager.Unmarshal(bytes, &s) require.ErrorIs(err, ErrMaxSliceLenExceeded) s.Bytes = []byte{0, 1, 2} @@ -845,13 +815,12 @@ func TestExtraSpace(codec GeneralCodec, t testing.TB) { require := require.New(t) manager := NewDefaultManager() - err := manager.RegisterCodec(0, codec) - require.NoError(err) + require.NoError(manager.RegisterCodec(0, codec)) // codec version 0x0000 then 0x01 for b then 0x02 as extra data. byteSlice := []byte{0x00, 0x00, 0x01, 0x02} var b byte - _, err = manager.Unmarshal(byteSlice, &b) + _, err := manager.Unmarshal(byteSlice, &b) require.ErrorIs(err, ErrExtraSpace) } @@ -870,11 +839,10 @@ func TestSliceLengthOverflow(codec GeneralCodec, t testing.TB) { } manager := NewDefaultManager() - err := manager.RegisterCodec(0, codec) - require.NoError(err) + require.NoError(manager.RegisterCodec(0, codec)) s := inner{} - _, err = manager.Unmarshal(bytes, &s) + _, err := manager.Unmarshal(bytes, &s) require.ErrorIs(err, ErrMaxSliceLenExceeded) } @@ -902,8 +870,7 @@ func TestMultipleTags(codec GeneralCodec, t testing.TB) { for _, codecVersion := range []uint16{0, 1, 2022} { require := require.New(t) - err := manager.RegisterCodec(codecVersion, codec) - require.NoError(err) + require.NoError(manager.RegisterCodec(codecVersion, codec)) bytes, err := manager.Marshal(codecVersion, inputs) require.NoError(err) diff --git a/database/linkeddb/linkeddb_test.go b/database/linkeddb/linkeddb_test.go index 9ee698dcb23..c7b24693eaa 100644 --- a/database/linkeddb/linkeddb_test.go +++ b/database/linkeddb/linkeddb_test.go @@ -28,11 +28,9 @@ func TestLinkedDB(t *testing.T) { _, err = ldb.Get(key) require.Equal(database.ErrNotFound, err, "Expected db.Get to return a Not Found error.") - err = ldb.Delete(key) - require.NoError(err) + require.NoError(ldb.Delete(key)) - err = ldb.Put(key, value) - require.NoError(err) + require.NoError(ldb.Put(key, value)) has, err = ldb.Has(key) require.NoError(err) @@ -42,8 +40,7 @@ func TestLinkedDB(t *testing.T) { require.NoError(err) require.Equal(value, v) - err = ldb.Delete(key) - require.NoError(err) + require.NoError(ldb.Delete(key)) has, err = ldb.Has(key) require.NoError(err) @@ -68,18 +65,15 @@ func TestLinkedDBDuplicatedPut(t *testing.T) { value1 := []byte("world1") value2 := []byte("world2") - err := ldb.Put(key, value1) - require.NoError(err) + require.NoError(ldb.Put(key, value1)) - err = ldb.Put(key, value2) - require.NoError(err) + require.NoError(ldb.Put(key, value2)) v, err := ldb.Get(key) require.NoError(err) 
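// Editorial note, not part of the original diff: the in-line rewrite applies
// only when the call returns nothing but an error. Calls with a second return
// value, like the preceding
//
//	v, err := ldb.Get(key)
//	require.NoError(err)
//
// keep the two-step form, since require.NoError accepts only the error.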
require.Equal(value2, v) - err = ldb.Delete(key) - require.NoError(err) + require.NoError(ldb.Delete(key)) iterator := db.NewIterator() next := iterator.Next() @@ -100,11 +94,9 @@ func TestLinkedDBMultiplePuts(t *testing.T) { value2 := []byte("world2") value3 := []byte("world3") - err := ldb.Put(key1, value1) - require.NoError(err) + require.NoError(ldb.Put(key1, value1)) - err = ldb.Put(key2, value2) - require.NoError(err) + require.NoError(ldb.Put(key2, value2)) v, err := ldb.Get(key1) require.NoError(err) @@ -114,23 +106,17 @@ func TestLinkedDBMultiplePuts(t *testing.T) { require.NoError(err) require.Equal(value2, v) - err = ldb.Delete(key2) - require.NoError(err) + require.NoError(ldb.Delete(key2)) - err = ldb.Put(key2, value2) - require.NoError(err) + require.NoError(ldb.Put(key2, value2)) - err = ldb.Put(key3, value3) - require.NoError(err) + require.NoError(ldb.Put(key3, value3)) - err = ldb.Delete(key2) - require.NoError(err) + require.NoError(ldb.Delete(key2)) - err = ldb.Delete(key1) - require.NoError(err) + require.NoError(ldb.Delete(key1)) - err = ldb.Delete(key3) - require.NoError(err) + require.NoError(ldb.Delete(key3)) iterator := db.NewIterator() next := iterator.Next() @@ -154,8 +140,7 @@ func TestEmptyLinkedDBIterator(t *testing.T) { v := iterator.Value() require.Nil(v, "The iterator returned the wrong value") - err := iterator.Error() - require.NoError(err) + require.NoError(iterator.Error()) iterator.Release() } @@ -169,8 +154,7 @@ func TestLinkedDBLoadHeadKey(t *testing.T) { key := []byte("hello") value := []byte("world") - err := ldb.Put(key, value) - require.NoError(err) + require.NoError(ldb.Put(key, value)) ldb = NewDefault(db) @@ -193,8 +177,7 @@ func TestLinkedDBLoadHeadKey(t *testing.T) { v = iterator.Value() require.Nil(v, "The iterator returned the wrong value") - err = iterator.Error() - require.NoError(err) + require.NoError(iterator.Error()) iterator.Release() } @@ -208,8 +191,7 @@ func TestSingleLinkedDBIterator(t *testing.T) { key := []byte("hello") value := []byte("world") - err := ldb.Put(key, value) - require.NoError(err) + require.NoError(ldb.Put(key, value)) iterator := ldb.NewIterator() next := iterator.Next() @@ -230,8 +212,7 @@ func TestSingleLinkedDBIterator(t *testing.T) { v = iterator.Value() require.Nil(v, "The iterator returned the wrong value") - err = iterator.Error() - require.NoError(err) + require.NoError(iterator.Error()) iterator.Release() } @@ -247,11 +228,9 @@ func TestMultipleLinkedDBIterator(t *testing.T) { value0 := []byte("world0") value1 := []byte("world1") - err := ldb.Put(key0, value0) - require.NoError(err) + require.NoError(ldb.Put(key0, value0)) - err = ldb.Put(key1, value1) - require.NoError(err) + require.NoError(ldb.Put(key1, value1)) iterator := ldb.NewIterator() next := iterator.Next() @@ -275,8 +254,7 @@ func TestMultipleLinkedDBIterator(t *testing.T) { next = iterator.Next() require.False(next, "The iterator should now be exhausted") - err = iterator.Error() - require.NoError(err) + require.NoError(iterator.Error()) iterator.Release() } @@ -292,11 +270,9 @@ func TestMultipleLinkedDBIteratorStart(t *testing.T) { value0 := []byte("world0") value1 := []byte("world1") - err := ldb.Put(key0, value0) - require.NoError(err) + require.NoError(ldb.Put(key0, value0)) - err = ldb.Put(key1, value1) - require.NoError(err) + require.NoError(ldb.Put(key1, value1)) iterator := ldb.NewIteratorWithStart(key1) next := iterator.Next() @@ -320,8 +296,7 @@ func TestMultipleLinkedDBIteratorStart(t *testing.T) { next = iterator.Next() 
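// Editorial note, not part of the original diff: the linter pattern
// '\t+err :?=.*\n\t+require\.NoError\(err\)' makes the colon optional, so it
// catches reassignments such as the removed
//
//	err = iterator.Error()
//	require.NoError(err)
//
// as well as fresh declarations written with err := ... .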
require.False(next, "The iterator should now be exhausted") - err = iterator.Error() - require.NoError(err) + require.NoError(iterator.Error()) iterator.Release() } @@ -337,11 +312,9 @@ func TestSingleLinkedDBIteratorStart(t *testing.T) { value0 := []byte("world0") value1 := []byte("world1") - err := ldb.Put(key0, value0) - require.NoError(err) + require.NoError(ldb.Put(key0, value0)) - err = ldb.Put(key1, value1) - require.NoError(err) + require.NoError(ldb.Put(key1, value1)) iterator := ldb.NewIteratorWithStart(key0) @@ -357,8 +330,7 @@ func TestSingleLinkedDBIteratorStart(t *testing.T) { next = iterator.Next() require.False(next, "The iterator should now be exhausted") - err = iterator.Error() - require.NoError(err) + require.NoError(iterator.Error()) iterator.Release() } @@ -377,11 +349,9 @@ func TestEmptyLinkedDBIteratorStart(t *testing.T) { value0 := []byte("world0") value1 := []byte("world1") - err := ldb.Put(key0, value0) - require.NoError(err) + require.NoError(ldb.Put(key0, value0)) - err = ldb.Put(key1, value1) - require.NoError(err) + require.NoError(ldb.Put(key1, value1)) iter := ldb.NewIteratorWithStart(key2) @@ -393,8 +363,7 @@ func TestEmptyLinkedDBIteratorStart(t *testing.T) { } require.Equal(2, i) - err = iter.Error() - require.NoError(err) + require.NoError(iter.Error()) iter.Release() } @@ -412,15 +381,13 @@ func TestLinkedDBIsEmpty(t *testing.T) { key := []byte("hello") value := []byte("world") - err = ldb.Put(key, value) - require.NoError(err) + require.NoError(ldb.Put(key, value)) isEmpty, err = ldb.IsEmpty() require.NoError(err) require.False(isEmpty) - err = ldb.Delete(key) - require.NoError(err) + require.NoError(ldb.Delete(key)) isEmpty, err = ldb.IsEmpty() require.NoError(err) @@ -441,22 +408,19 @@ func TestLinkedDBHeadKey(t *testing.T) { key1 := []byte("hello1") value1 := []byte("world1") - err = ldb.Put(key0, value0) - require.NoError(err) + require.NoError(ldb.Put(key0, value0)) headKey, err := ldb.HeadKey() require.NoError(err) require.Equal(key0, headKey) - err = ldb.Put(key1, value1) - require.NoError(err) + require.NoError(ldb.Put(key1, value1)) headKey, err = ldb.HeadKey() require.NoError(err) require.Equal(key1, headKey) - err = ldb.Delete(key1) - require.NoError(err) + require.NoError(ldb.Delete(key1)) headKey, err = ldb.HeadKey() require.NoError(err) @@ -477,24 +441,21 @@ func TestLinkedDBHead(t *testing.T) { key1 := []byte("hello1") value1 := []byte("world1") - err = ldb.Put(key0, value0) - require.NoError(err) + require.NoError(ldb.Put(key0, value0)) headKey, headVal, err := ldb.Head() require.NoError(err) require.Equal(key0, headKey) require.Equal(value0, headVal) - err = ldb.Put(key1, value1) - require.NoError(err) + require.NoError(ldb.Put(key1, value1)) headKey, headVal, err = ldb.Head() require.NoError(err) require.Equal(key1, headKey) require.Equal(value1, headVal) - err = ldb.Delete(key1) - require.NoError(err) + require.NoError(ldb.Delete(key1)) headKey, headVal, err = ldb.Head() require.NoError(err) diff --git a/database/manager/manager_test.go b/database/manager/manager_test.go index d4ca0c45e42..2e309fcc278 100644 --- a/database/manager/manager_test.go +++ b/database/manager/manager_test.go @@ -31,8 +31,7 @@ func TestNewSingleLevelDB(t *testing.T) { db, err := leveldb.New(dbPath, nil, logging.NoLog{}, "", prometheus.NewRegistry()) require.NoError(err) - err = db.Close() - require.NoError(err) + require.NoError(db.Close()) manager, err := NewLevelDB(dir, nil, logging.NoLog{}, v1, "", prometheus.NewRegistry()) require.NoError(err) @@ -47,8 
+46,7 @@ func TestNewSingleLevelDB(t *testing.T) { dbs := manager.GetDatabases() require.Len(dbs, 1) - err = manager.Close() - require.NoError(err) + require.NoError(manager.Close()) } func TestNewCreatesSingleDB(t *testing.T) { @@ -71,8 +69,7 @@ func TestNewCreatesSingleDB(t *testing.T) { dbs := manager.GetDatabases() require.Len(dbs, 1) - err = manager.Close() - require.NoError(err) + require.NoError(manager.Close()) } func TestNewInvalidMemberPresent(t *testing.T) { @@ -99,26 +96,22 @@ func TestNewInvalidMemberPresent(t *testing.T) { db2, err := leveldb.New(dbPath2, nil, logging.NoLog{}, "", prometheus.NewRegistry()) require.NoError(err) - err = db2.Close() - require.NoError(err) + require.NoError(db2.Close()) _, err = NewLevelDB(dir, nil, logging.NoLog{}, v2, "", prometheus.NewRegistry()) require.ErrorIs(err, leveldb.ErrCouldNotOpen) - err = db1.Close() - require.NoError(err) + require.NoError(db1.Close()) f, err := os.Create(filepath.Join(dir, "dummy")) require.NoError(err) - err = f.Close() - require.NoError(err) + require.NoError(f.Close()) db, err := NewLevelDB(dir, nil, logging.NoLog{}, v1, "", prometheus.NewRegistry()) require.NoError(err, "expected not to error with a non-directory file being present") - err = db.Close() - require.NoError(err) + require.NoError(db.Close()) } func TestNewSortsDatabases(t *testing.T) { @@ -159,8 +152,7 @@ func TestNewSortsDatabases(t *testing.T) { db, err := leveldb.New(dbPath, nil, logging.NoLog{}, "", prometheus.NewRegistry()) require.NoError(err) - err = db.Close() - require.NoError(err) + require.NoError(db.Close()) } manager, err := NewLevelDB(dir, nil, logging.NoLog{}, vers[0], "", prometheus.NewRegistry()) diff --git a/ids/aliases_test.go b/ids/aliases_test.go index 624d40eb139..2e50b992aaf 100644 --- a/ids/aliases_test.go +++ b/ids/aliases_test.go @@ -22,17 +22,14 @@ func TestPrimaryAliasOrDefaultTest(t *testing.T) { aliaser := NewAliaser() id1 := ID{'J', 'a', 'm', 'e', 's', ' ', 'G', 'o', 'r', 'd', 'o', 'n'} id2 := ID{'B', 'r', 'u', 'c', 'e', ' ', 'W', 'a', 'y', 'n', 'e'} - err := aliaser.Alias(id2, "Batman") - require.NoError(err) + require.NoError(aliaser.Alias(id2, "Batman")) - err = aliaser.Alias(id2, "Dark Knight") - require.NoError(err) + require.NoError(aliaser.Alias(id2, "Dark Knight")) res := aliaser.PrimaryAliasOrDefault(id1) require.Equal(res, id1.String()) expected := "Batman" res = aliaser.PrimaryAliasOrDefault(id2) - require.NoError(err) require.Equal(expected, res) } diff --git a/ids/test_aliases.go b/ids/test_aliases.go index 04d5e5d139c..5a029972001 100644 --- a/ids/test_aliases.go +++ b/ids/test_aliases.go @@ -23,8 +23,7 @@ func AliaserLookupErrorTest(require *require.Assertions, r AliaserReader, _ Alia func AliaserLookupTest(require *require.Assertions, r AliaserReader, w AliaserWriter) { id := ID{'K', 'a', 't', 'e', ' ', 'K', 'a', 'n', 'e'} - err := w.Alias(id, "Batwoman") - require.NoError(err) + require.NoError(w.Alias(id, "Batwoman")) res, err := r.Lookup("Batwoman") require.NoError(err) @@ -41,11 +40,9 @@ func AliaserAliasesEmptyTest(require *require.Assertions, r AliaserReader, _ Ali func AliaserAliasesTest(require *require.Assertions, r AliaserReader, w AliaserWriter) { id := ID{'B', 'r', 'u', 'c', 'e', ' ', 'W', 'a', 'y', 'n', 'e'} - err := w.Alias(id, "Batman") - require.NoError(err) + require.NoError(w.Alias(id, "Batman")) - err = w.Alias(id, "Dark Knight") - require.NoError(err) + require.NoError(w.Alias(id, "Dark Knight")) aliases, err := r.Aliases(id) require.NoError(err) @@ -57,13 +54,11 @@ func 
AliaserAliasesTest(require *require.Assertions, r AliaserReader, w AliaserW func AliaserPrimaryAliasTest(require *require.Assertions, r AliaserReader, w AliaserWriter) { id1 := ID{'J', 'a', 'm', 'e', 's', ' ', 'G', 'o', 'r', 'd', 'o', 'n'} id2 := ID{'B', 'r', 'u', 'c', 'e', ' ', 'W', 'a', 'y', 'n', 'e'} - err := w.Alias(id2, "Batman") - require.NoError(err) + require.NoError(w.Alias(id2, "Batman")) - err = w.Alias(id2, "Dark Knight") - require.NoError(err) + require.NoError(w.Alias(id2, "Dark Knight")) - _, err = r.PrimaryAlias(id1) + _, err := r.PrimaryAlias(id1) // TODO: require error to be errNoAliasForID require.Error(err) //nolint:forbidigo // currently returns grpc errors too @@ -76,10 +71,9 @@ func AliaserPrimaryAliasTest(require *require.Assertions, r AliaserReader, w Ali func AliaserAliasClashTest(require *require.Assertions, _ AliaserReader, w AliaserWriter) { id1 := ID{'B', 'r', 'u', 'c', 'e', ' ', 'W', 'a', 'y', 'n', 'e'} id2 := ID{'D', 'i', 'c', 'k', ' ', 'G', 'r', 'a', 'y', 's', 'o', 'n'} - err := w.Alias(id1, "Batman") - require.NoError(err) + require.NoError(w.Alias(id1, "Batman")) - err = w.Alias(id2, "Batman") + err := w.Alias(id2, "Batman") // TODO: require error to be errAliasAlreadyMapped require.Error(err) //nolint:forbidigo // currently returns grpc errors too } @@ -87,24 +81,19 @@ func AliaserAliasClashTest(require *require.Assertions, _ AliaserReader, w Alias func AliaserRemoveAliasTest(require *require.Assertions, r AliaserReader, w AliaserWriter) { id1 := ID{'B', 'r', 'u', 'c', 'e', ' ', 'W', 'a', 'y', 'n', 'e'} id2 := ID{'J', 'a', 'm', 'e', 's', ' ', 'G', 'o', 'r', 'd', 'o', 'n'} - err := w.Alias(id1, "Batman") - require.NoError(err) + require.NoError(w.Alias(id1, "Batman")) - err = w.Alias(id1, "Dark Knight") - require.NoError(err) + require.NoError(w.Alias(id1, "Dark Knight")) w.RemoveAliases(id1) - _, err = r.PrimaryAlias(id1) + _, err := r.PrimaryAlias(id1) // TODO: require error to be errNoAliasForID require.Error(err) //nolint:forbidigo // currently returns grpc errors too - err = w.Alias(id2, "Batman") - require.NoError(err) + require.NoError(w.Alias(id2, "Batman")) - err = w.Alias(id2, "Dark Knight") - require.NoError(err) + require.NoError(w.Alias(id2, "Dark Knight")) - err = w.Alias(id1, "Dark Night Rises") - require.NoError(err) + require.NoError(w.Alias(id1, "Dark Night Rises")) } diff --git a/indexer/index_test.go b/indexer/index_test.go index 9a25bebcc8e..760700ab05f 100644 --- a/indexer/index_test.go +++ b/indexer/index_test.go @@ -25,8 +25,7 @@ func TestIndex(t *testing.T) { pageSize := uint64(64) require := require.New(t) codec := codec.NewDefaultManager() - err := codec.RegisterCodec(codecVersion, linearcodec.NewDefault()) - require.NoError(err) + require.NoError(codec.RegisterCodec(codecVersion, linearcodec.NewDefault())) baseDB := memdb.New() db := versiondb.New(baseDB) ctx := snow.DefaultConsensusContextTest() @@ -44,8 +43,7 @@ func TestIndex(t *testing.T) { // Accept each container and after each, make assertions i := uint64(0) for containerID, containerBytes := range containers { - err = idx.Accept(ctx, containerID, containerBytes) - require.NoError(err) + require.NoError(idx.Accept(ctx, containerID, containerBytes)) lastAcceptedIndex, ok := idx.lastAcceptedIndex() require.True(ok) @@ -116,8 +114,7 @@ func TestIndexGetContainerByRangeMaxPageSize(t *testing.T) { // Setup require := require.New(t) codec := codec.NewDefaultManager() - err := codec.RegisterCodec(codecVersion, linearcodec.NewDefault()) - require.NoError(err) + 
require.NoError(codec.RegisterCodec(codecVersion, linearcodec.NewDefault())) db := memdb.New() ctx := snow.DefaultConsensusContextTest() indexIntf, err := newIndex(db, logging.NoLog{}, codec, mockable.Clock{}) @@ -126,8 +123,7 @@ func TestIndexGetContainerByRangeMaxPageSize(t *testing.T) { // Insert [MaxFetchedByRange] + 1 containers for i := uint64(0); i < MaxFetchedByRange+1; i++ { - err = idx.Accept(ctx, ids.GenerateTestID(), utils.RandomBytes(32)) - require.NoError(err) + require.NoError(idx.Accept(ctx, ids.GenerateTestID(), utils.RandomBytes(32))) } // Page size too large @@ -158,8 +154,7 @@ func TestDontIndexSameContainerTwice(t *testing.T) { // Setup require := require.New(t) codec := codec.NewDefaultManager() - err := codec.RegisterCodec(codecVersion, linearcodec.NewDefault()) - require.NoError(err) + require.NoError(codec.RegisterCodec(codecVersion, linearcodec.NewDefault())) db := memdb.New() ctx := snow.DefaultConsensusContextTest() idx, err := newIndex(db, logging.NoLog{}, codec, mockable.Clock{}) diff --git a/network/network_test.go b/network/network_test.go index 0afab859d80..ac4c98e1768 100644 --- a/network/network_test.go +++ b/network/network_test.go @@ -222,14 +222,12 @@ func newFullyConnectedTestNetwork(t *testing.T, handlers []router.InboundHandler } beacons := validators.NewSet() - err = beacons.Add(nodeIDs[0], nil, ids.GenerateTestID(), 1) - require.NoError(err) + require.NoError(beacons.Add(nodeIDs[0], nil, ids.GenerateTestID(), 1)) primaryVdrs := validators.NewSet() primaryVdrs.RegisterCallbackListener(&gossipTrackerCallback) for _, nodeID := range nodeIDs { - err := primaryVdrs.Add(nodeID, nil, ids.GenerateTestID(), 1) - require.NoError(err) + require.NoError(primaryVdrs.Add(nodeID, nil, ids.GenerateTestID(), 1)) } vdrs := validators.NewManager() @@ -293,8 +291,7 @@ func newFullyConnectedTestNetwork(t *testing.T, handlers []router.InboundHandler go func(net Network) { defer wg.Done() - err := net.Dispatch() - require.NoError(err) + require.NoError(net.Dispatch()) }(net) } @@ -408,10 +405,9 @@ func TestTrackVerifiesSignatures(t *testing.T) { network := networks[0].(*network) nodeID, tlsCert, _ := getTLS(t, 1) - err := validators.Add(network.config.Validators, constants.PrimaryNetworkID, nodeID, nil, ids.Empty, 1) - require.NoError(err) + require.NoError(validators.Add(network.config.Validators, constants.PrimaryNetworkID, nodeID, nil, ids.Empty, 1)) - _, err = network.Track(ids.EmptyNodeID, []*ips.ClaimedIPPort{{ + _, err := network.Track(ids.EmptyNodeID, []*ips.ClaimedIPPort{{ Cert: tlsCert.Leaf, IPPort: ips.IPPort{ IP: net.IPv4(123, 132, 123, 123), @@ -453,14 +449,12 @@ func TestTrackDoesNotDialPrivateIPs(t *testing.T) { } beacons := validators.NewSet() - err = beacons.Add(nodeIDs[0], nil, ids.GenerateTestID(), 1) - require.NoError(err) + require.NoError(beacons.Add(nodeIDs[0], nil, ids.GenerateTestID(), 1)) primaryVdrs := validators.NewSet() primaryVdrs.RegisterCallbackListener(&gossipTrackerCallback) for _, nodeID := range nodeIDs { - err := primaryVdrs.Add(nodeID, nil, ids.GenerateTestID(), 1) - require.NoError(err) + require.NoError(primaryVdrs.Add(nodeID, nil, ids.GenerateTestID(), 1)) } vdrs := validators.NewManager() @@ -503,8 +497,7 @@ func TestTrackDoesNotDialPrivateIPs(t *testing.T) { go func(net Network) { defer wg.Done() - err := net.Dispatch() - require.NoError(err) + require.NoError(net.Dispatch()) }(net) } @@ -536,8 +529,7 @@ func TestDialDeletesNonValidators(t *testing.T) { primaryVdrs := validators.NewSet() for _, nodeID := range nodeIDs { - err 
:= primaryVdrs.Add(nodeID, nil, ids.GenerateTestID(), 1) - require.NoError(err) + require.NoError(primaryVdrs.Add(nodeID, nil, ids.GenerateTestID(), 1)) } networks := make([]Network, len(configs)) @@ -555,8 +547,7 @@ func TestDialDeletesNonValidators(t *testing.T) { } beacons := validators.NewSet() - err = beacons.Add(nodeIDs[0], nil, ids.GenerateTestID(), 1) - require.NoError(err) + require.NoError(beacons.Add(nodeIDs[0], nil, ids.GenerateTestID(), 1)) primaryVdrs.RegisterCallbackListener(&gossipTrackerCallback) @@ -613,8 +604,7 @@ func TestDialDeletesNonValidators(t *testing.T) { go func(net Network) { defer wg.Done() - err := net.Dispatch() - require.NoError(err) + require.NoError(net.Dispatch()) }(net) } diff --git a/network/peer/peer_test.go b/network/peer/peer_test.go index 653a0c616e2..cdb9f33c937 100644 --- a/network/peer/peer_test.go +++ b/network/peer/peer_test.go @@ -188,13 +188,11 @@ func makeReadyTestPeers(t *testing.T) (*testPeer, *testPeer) { peer0, peer1 := makeTestPeers(t) - err := peer0.AwaitReady(context.Background()) - require.NoError(err) + require.NoError(peer0.AwaitReady(context.Background())) isReady := peer0.Ready() require.True(isReady) - err = peer1.AwaitReady(context.Background()) - require.NoError(err) + require.NoError(peer1.AwaitReady(context.Background())) isReady = peer1.Ready() require.True(isReady) @@ -235,21 +233,17 @@ func TestReady(t *testing.T) { ), ) - err := peer0.AwaitReady(context.Background()) - require.NoError(err) + require.NoError(peer0.AwaitReady(context.Background())) isReady = peer0.Ready() require.True(isReady) - err = peer1.AwaitReady(context.Background()) - require.NoError(err) + require.NoError(peer1.AwaitReady(context.Background())) isReady = peer1.Ready() require.True(isReady) peer0.StartClose() - err = peer0.AwaitClosed(context.Background()) - require.NoError(err) - err = peer1.AwaitClosed(context.Background()) - require.NoError(err) + require.NoError(peer0.AwaitClosed(context.Background())) + require.NoError(peer1.AwaitClosed(context.Background())) } func TestSend(t *testing.T) { @@ -268,8 +262,6 @@ func TestSend(t *testing.T) { require.Equal(message.GetOp, inboundGetMsg.Op()) peer1.StartClose() - err = peer0.AwaitClosed(context.Background()) - require.NoError(err) - err = peer1.AwaitClosed(context.Background()) - require.NoError(err) + require.NoError(peer0.AwaitClosed(context.Background())) + require.NoError(peer1.AwaitClosed(context.Background())) } diff --git a/network/throttling/inbound_conn_throttler_test.go b/network/throttling/inbound_conn_throttler_test.go index 14f68167c3d..5c28a45da20 100644 --- a/network/throttling/inbound_conn_throttler_test.go +++ b/network/throttling/inbound_conn_throttler_test.go @@ -56,8 +56,7 @@ func TestInboundConnThrottlerClose(t *testing.T) { }, } wrappedL := NewThrottledListener(l, 1) - err := wrappedL.Close() - require.NoError(err) + require.NoError(wrappedL.Close()) require.True(closed) select { @@ -67,7 +66,7 @@ func TestInboundConnThrottlerClose(t *testing.T) { } // Accept() should return an error because the context is cancelled - _, err = wrappedL.Accept() + _, err := wrappedL.Accept() require.ErrorIs(err, context.Canceled) } diff --git a/pubsub/filter_test.go b/pubsub/filter_test.go index 051ad94c7a4..088d0ecee2d 100644 --- a/pubsub/filter_test.go +++ b/pubsub/filter_test.go @@ -31,8 +31,7 @@ func TestAddAddressesParseAddresses(t *testing.T) { }, }} - err = msg.parseAddresses() - require.NoError(err) + require.NoError(msg.parseAddresses()) require.Len(msg.addressIds, 1) 
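// Editorial note, not part of the original diff: require.Len takes the object
// first and the expected length second, as in require.Len(msg.addressIds, 1)
// above; that argument order is what the test_require_equal_len check added
// earlier in this series enforces over require.Equal(1, len(msg.addressIds)).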
require.Equal(addrID[:], msg.addressIds[0]) diff --git a/snow/consensus/snowman/consensus_test.go b/snow/consensus/snowman/consensus_test.go index 99abfbf3173..0affcf87155 100644 --- a/snow/consensus/snowman/consensus_test.go +++ b/snow/consensus/snowman/consensus_test.go @@ -1080,8 +1080,7 @@ func RecordPollDivergedVotingTest(t *testing.T, factory Factory) { MaxOutstandingItems: 1, MaxItemProcessingTime: 1, } - err := sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp) - require.NoError(err) + require.NoError(sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp)) block0 := &TestBlock{ TestDecidable: choices.TestDecidable{ @@ -1116,31 +1115,26 @@ func RecordPollDivergedVotingTest(t *testing.T, factory Factory) { HeightV: block2.HeightV + 1, } - err = sm.Add(context.Background(), block0) - require.NoError(err) + require.NoError(sm.Add(context.Background(), block0)) - err = sm.Add(context.Background(), block1) - require.NoError(err) + require.NoError(sm.Add(context.Background(), block1)) // The first bit is contested as either 0 or 1. When voting for [block0] and // when the first bit is 1, the following bits have been decided to follow // the 255 remaining bits of [block0]. votes0 := bag.Bag[ids.ID]{} votes0.Add(block0.ID()) - err = sm.RecordPoll(context.Background(), votes0) - require.NoError(err) + require.NoError(sm.RecordPoll(context.Background(), votes0)) // Although we are adding in [block2] here - the underlying snowball // instance has already decided it is rejected. Snowman doesn't actually // know that though, because that is an implementation detail of the // Snowball trie that is used. - err = sm.Add(context.Background(), block2) - require.NoError(err) + require.NoError(sm.Add(context.Background(), block2)) // Because [block2] is effectively rejected, [block3] is also effectively // rejected. - err = sm.Add(context.Background(), block3) - require.NoError(err) + require.NoError(sm.Add(context.Background(), block3)) require.Equal(block0.ID(), sm.Preference()) require.Equal(choices.Processing, block0.Status(), "should not be accepted yet") @@ -1166,8 +1160,7 @@ func RecordPollDivergedVotingTest(t *testing.T, factory Factory) { // transitively. 
votes3 := bag.Bag[ids.ID]{} votes3.Add(block3.ID()) - err = sm.RecordPoll(context.Background(), votes3) - require.NoError(err) + require.NoError(sm.RecordPoll(context.Background(), votes3)) require.True(sm.Finalized(), "finalized too late") require.Equal(choices.Accepted, block0.Status(), "should be accepted") diff --git a/snow/engine/common/queue/jobs_test.go b/snow/engine/common/queue/jobs_test.go index 8a5205af698..68bc8a1b7d8 100644 --- a/snow/engine/common/queue/jobs_test.go +++ b/snow/engine/common/queue/jobs_test.go @@ -104,8 +104,7 @@ func TestPushAndExecute(t *testing.T) { require.NoError(err) require.True(has) - err = jobs.Commit() - require.NoError(err) + require.NoError(jobs.Commit()) jobs, err = New(db, "", prometheus.NewRegistry()) require.NoError(err) @@ -233,8 +232,7 @@ func TestDuplicatedExecutablePush(t *testing.T) { require.False(pushed) require.NoError(err) - err = jobs.Commit() - require.NoError(err) + require.NoError(jobs.Commit()) jobs, err = New(db, "", prometheus.NewRegistry()) require.NoError(err) @@ -267,8 +265,7 @@ func TestDuplicatedNotExecutablePush(t *testing.T) { require.False(pushed) require.NoError(err) - err = jobs.Commit() - require.NoError(err) + require.NoError(jobs.Commit()) jobs, err = New(db, "", prometheus.NewRegistry()) require.NoError(err) @@ -296,8 +293,7 @@ func TestMissingJobs(t *testing.T) { jobs.AddMissingID(job0ID) jobs.AddMissingID(job1ID) - err = jobs.Commit() - require.NoError(err) + require.NoError(jobs.Commit()) numMissingIDs := jobs.NumMissingIDs() require.Equal(2, numMissingIDs) @@ -313,8 +309,7 @@ func TestMissingJobs(t *testing.T) { jobs.RemoveMissingID(job1ID) - err = jobs.Commit() - require.NoError(err) + require.NoError(jobs.Commit()) jobs, err = NewWithMissing(db, "", prometheus.NewRegistry()) require.NoError(err) @@ -489,14 +484,9 @@ func TestInitializeNumJobs(t *testing.T) { require.NoError(err) require.Equal(uint64(2), jobs.state.numJobs) - err = jobs.Commit() - require.NoError(err) - - err = database.Clear(jobs.state.metadataDB, jobs.state.metadataDB) - require.NoError(err) - - err = jobs.Commit() - require.NoError(err) + require.NoError(jobs.Commit()) + require.NoError(database.Clear(jobs.state.metadataDB, jobs.state.metadataDB)) + require.NoError(jobs.Commit()) jobs, err = NewWithMissing(db, "", prometheus.NewRegistry()) if err != nil { diff --git a/snow/engine/common/tracker/peers_test.go b/snow/engine/common/tracker/peers_test.go index 802386f8468..bae131cbae0 100644 --- a/snow/engine/common/tracker/peers_test.go +++ b/snow/engine/common/tracker/peers_test.go @@ -27,8 +27,7 @@ func TestPeers(t *testing.T) { require.Zero(p.ConnectedWeight()) require.Empty(p.PreferredPeers()) - err := p.Connected(context.Background(), nodeID, version.CurrentApp) - require.NoError(err) + require.NoError(p.Connected(context.Background(), nodeID, version.CurrentApp)) require.Equal(uint64(5), p.ConnectedWeight()) require.Contains(p.PreferredPeers(), nodeID) @@ -44,8 +43,7 @@ func TestPeers(t *testing.T) { require.Equal(uint64(5), p.ConnectedWeight()) require.Contains(p.PreferredPeers(), nodeID) - err = p.Disconnected(context.Background(), nodeID) - require.NoError(err) + require.NoError(p.Disconnected(context.Background(), nodeID)) require.Zero(p.ConnectedWeight()) require.Empty(p.PreferredPeers()) } diff --git a/snow/engine/snowman/transitive_test.go b/snow/engine/snowman/transitive_test.go index 30a23baf0c0..52f345e5e39 100644 --- a/snow/engine/snowman/transitive_test.go +++ b/snow/engine/snowman/transitive_test.go @@ -1190,8 +1190,7 @@ 
func TestEngineAbandonChit(t *testing.T) { reqID = requestID } - err := te.issue(context.Background(), blk) - require.NoError(err) + require.NoError(te.issue(context.Background(), blk)) fakeBlkID := ids.GenerateTestID() vm.GetBlockF = func(_ context.Context, id ids.ID) (snowman.Block, error) { @@ -1204,14 +1203,12 @@ func TestEngineAbandonChit(t *testing.T) { } // Register a voter dependency on an unknown block. - err = te.Chits(context.Background(), vdr, reqID, []ids.ID{fakeBlkID}, nil) - require.NoError(err) + require.NoError(te.Chits(context.Background(), vdr, reqID, []ids.ID{fakeBlkID}, nil)) require.Len(te.blocked, 1) sender.CantSendPullQuery = false - err = te.GetFailed(context.Background(), vdr, reqID) - require.NoError(err) + require.NoError(te.GetFailed(context.Background(), vdr, reqID)) require.Empty(te.blocked) } @@ -1248,8 +1245,7 @@ func TestEngineAbandonChitWithUnexpectedPutBlock(t *testing.T) { reqID = requestID } - err := te.issue(context.Background(), blk) - require.NoError(err) + require.NoError(te.issue(context.Background(), blk)) fakeBlkID := ids.GenerateTestID() vm.GetBlockF = func(_ context.Context, id ids.ID) (snowman.Block, error) { @@ -1262,8 +1258,7 @@ func TestEngineAbandonChitWithUnexpectedPutBlock(t *testing.T) { } // Register a voter dependency on an unknown block. - err = te.Chits(context.Background(), vdr, reqID, []ids.ID{fakeBlkID}, nil) - require.NoError(err) + require.NoError(te.Chits(context.Background(), vdr, reqID, []ids.ID{fakeBlkID}, nil)) require.Len(te.blocked, 1) sender.CantSendPullQuery = false @@ -1276,8 +1271,7 @@ func TestEngineAbandonChitWithUnexpectedPutBlock(t *testing.T) { // Respond with an unexpected block and verify that the request is correctly // cleared. - err = te.Put(context.Background(), vdr, reqID, gBlkBytes) - require.NoError(err) + require.NoError(te.Put(context.Background(), vdr, reqID, gBlkBytes)) require.Empty(te.blocked) } @@ -3331,8 +3325,7 @@ func TestEngineBuildBlockWithCachedNonVerifiedParent(t *testing.T) { } // Give the engine the grandparent - err := te.Put(context.Background(), vdr, 0, grandParentBlk.BytesV) - require.NoError(err) + require.NoError(te.Put(context.Background(), vdr, 0, grandParentBlk.BytesV)) vm.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { require.Equal(parentBlkA.BytesV, b) @@ -3342,8 +3335,7 @@ func TestEngineBuildBlockWithCachedNonVerifiedParent(t *testing.T) { // Give the node [parentBlkA]/[parentBlkB]. // When it's parsed we get [parentBlkA] (not [parentBlkB]). // [parentBlkA] fails verification and gets put into [te.nonVerifiedCache]. - err = te.Put(context.Background(), vdr, 0, parentBlkA.BytesV) - require.NoError(err) + require.NoError(te.Put(context.Background(), vdr, 0, parentBlkA.BytesV)) vm.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { require.Equal(parentBlkB.BytesV, b) @@ -3375,15 +3367,11 @@ func TestEngineBuildBlockWithCachedNonVerifiedParent(t *testing.T) { // When we fetch it using [GetBlockF] we get [parentBlkB]. // Note that [parentBlkB] doesn't fail verification and is issued into consensus. // This evicts [parentBlkA] from [te.nonVerifiedCache]. 
- err = te.Put(context.Background(), vdr, 0, parentBlkA.BytesV) - require.NoError(err) + require.NoError(te.Put(context.Background(), vdr, 0, parentBlkA.BytesV)) // Give 2 chits for [parentBlkA]/[parentBlkB] - err = te.Chits(context.Background(), vdr, *queryRequestAID, []ids.ID{parentBlkB.IDV}, nil) - require.NoError(err) - - err = te.Chits(context.Background(), vdr, *queryRequestGPID, []ids.ID{parentBlkB.IDV}, nil) - require.NoError(err) + require.NoError(te.Chits(context.Background(), vdr, *queryRequestAID, []ids.ID{parentBlkB.IDV}, nil)) + require.NoError(te.Chits(context.Background(), vdr, *queryRequestGPID, []ids.ID{parentBlkB.IDV}, nil)) // Assert that the blocks' statuses are correct. // The evicted [parentBlkA] shouldn't be changed. @@ -3400,8 +3388,7 @@ func TestEngineBuildBlockWithCachedNonVerifiedParent(t *testing.T) { } // Should issue a new block and send a query for it. - err = te.Notify(context.Background(), common.PendingTxs) - require.NoError(err) + require.NoError(te.Notify(context.Background(), common.PendingTxs)) require.True(*sentQuery) } diff --git a/snow/networking/router/chain_router_test.go b/snow/networking/router/chain_router_test.go index 17d6cec196a..5c3ede1e4bd 100644 --- a/snow/networking/router/chain_router_test.go +++ b/snow/networking/router/chain_router_test.go @@ -369,8 +369,7 @@ func TestRouterTimeout(t *testing.T) { ctx := snow.DefaultConsensusContextTest() vdrs := validators.NewSet() - err = vdrs.Add(ids.GenerateTestNodeID(), nil, ids.Empty, 1) - require.NoError(err) + require.NoError(vdrs.Add(ids.GenerateTestNodeID(), nil, ids.Empty, 1)) resourceTracker, err := tracker.NewResourceTracker( prometheus.NewRegistry(), diff --git a/snow/networking/sender/sender_test.go b/snow/networking/sender/sender_test.go index 2950bbc8697..104eb6e4f4e 100644 --- a/snow/networking/sender/sender_test.go +++ b/snow/networking/sender/sender_test.go @@ -51,8 +51,7 @@ var defaultSubnetConfig = subnets.Config{ func TestTimeout(t *testing.T) { require := require.New(t) vdrs := validators.NewSet() - err := vdrs.Add(ids.GenerateTestNodeID(), nil, ids.Empty, 1) - require.NoError(err) + require.NoError(vdrs.Add(ids.GenerateTestNodeID(), nil, ids.Empty, 1)) benchlist := benchlist.NewNoBenchlist() tm, err := timeout.NewManager( &timer.AdaptiveTimeoutConfig{ @@ -290,16 +289,14 @@ func TestTimeout(t *testing.T) { vdrIDs.Union(nodeIDs) wg.Add(1) requestID++ - err := sender.SendAppRequest(cancelledCtx, nodeIDs, requestID, nil) - require.NoError(err) + require.NoError(sender.SendAppRequest(cancelledCtx, nodeIDs, requestID, nil)) } { chainID := ids.GenerateTestID() chains.Add(chainID) wg.Add(1) requestID++ - err := sender.SendCrossChainAppRequest(cancelledCtx, chainID, requestID, nil) - require.NoError(err) + require.NoError(sender.SendCrossChainAppRequest(cancelledCtx, chainID, requestID, nil)) } } diff --git a/snow/uptime/manager_test.go b/snow/uptime/manager_test.go index 1f5a854e819..f33953db4bc 100644 --- a/snow/uptime/manager_test.go +++ b/snow/uptime/manager_test.go @@ -31,8 +31,7 @@ func TestStartTracking(t *testing.T) { currentTime := startTime.Add(time.Second) up.clock.Set(currentTime) - err := up.StartTracking([]ids.NodeID{nodeID0}, subnetID) - require.NoError(err) + require.NoError(up.StartTracking([]ids.NodeID{nodeID0}, subnetID)) duration, lastUpdated, err := up.CalculateUptime(nodeID0, subnetID) require.NoError(err) @@ -88,8 +87,7 @@ func TestStartTrackingInThePast(t *testing.T) { currentTime := startTime.Add(-time.Second) up.clock.Set(currentTime) - err := 
up.StartTracking([]ids.NodeID{nodeID0}, subnetID) - require.NoError(err) + require.NoError(up.StartTracking([]ids.NodeID{nodeID0}, subnetID)) duration, lastUpdated, err := up.CalculateUptime(nodeID0, subnetID) require.NoError(err) @@ -111,20 +109,17 @@ func TestStopTrackingDecreasesUptime(t *testing.T) { up := NewManager(s).(*manager) up.clock.Set(currentTime) - err := up.StartTracking([]ids.NodeID{nodeID0}, subnetID) - require.NoError(err) + require.NoError(up.StartTracking([]ids.NodeID{nodeID0}, subnetID)) currentTime = startTime.Add(time.Second) up.clock.Set(currentTime) - err = up.StopTracking([]ids.NodeID{nodeID0}, subnetID) - require.NoError(err) + require.NoError(up.StopTracking([]ids.NodeID{nodeID0}, subnetID)) up = NewManager(s).(*manager) up.clock.Set(currentTime) - err = up.StartTracking([]ids.NodeID{nodeID0}, subnetID) - require.NoError(err) + require.NoError(up.StartTracking([]ids.NodeID{nodeID0}, subnetID)) duration, lastUpdated, err := up.CalculateUptime(nodeID0, subnetID) require.NoError(err) @@ -146,23 +141,19 @@ func TestStopTrackingIncreasesUptime(t *testing.T) { up := NewManager(s).(*manager) up.clock.Set(currentTime) - err := up.StartTracking([]ids.NodeID{nodeID0}, subnetID) - require.NoError(err) + require.NoError(up.StartTracking([]ids.NodeID{nodeID0}, subnetID)) - err = up.Connect(nodeID0, subnetID) - require.NoError(err) + require.NoError(up.Connect(nodeID0, subnetID)) currentTime = startTime.Add(time.Second) up.clock.Set(currentTime) - err = up.StopTracking([]ids.NodeID{nodeID0}, subnetID) - require.NoError(err) + require.NoError(up.StopTracking([]ids.NodeID{nodeID0}, subnetID)) up = NewManager(s).(*manager) up.clock.Set(currentTime) - err = up.StartTracking([]ids.NodeID{nodeID0}, subnetID) - require.NoError(err) + require.NoError(up.StartTracking([]ids.NodeID{nodeID0}, subnetID)) duration, lastUpdated, err := up.CalculateUptime(nodeID0, subnetID) require.NoError(err) @@ -179,10 +170,9 @@ func TestStopTrackingDisconnectedNonValidator(t *testing.T) { s := NewTestState() up := NewManager(s).(*manager) - err := up.StartTracking(nil, subnetID) - require.NoError(err) + require.NoError(up.StartTracking(nil, subnetID)) - err = up.StopTracking([]ids.NodeID{nodeID0}, subnetID) + err := up.StopTracking([]ids.NodeID{nodeID0}, subnetID) require.ErrorIs(err, database.ErrNotFound) } @@ -197,14 +187,12 @@ func TestStopTrackingConnectedDBError(t *testing.T) { s.AddNode(nodeID0, subnetID, startTime) up := NewManager(s).(*manager) - err := up.StartTracking(nil, subnetID) - require.NoError(err) + require.NoError(up.StartTracking(nil, subnetID)) - err = up.Connect(nodeID0, subnetID) - require.NoError(err) + require.NoError(up.Connect(nodeID0, subnetID)) s.dbReadError = errTest - err = up.StopTracking([]ids.NodeID{nodeID0}, subnetID) + err := up.StopTracking([]ids.NodeID{nodeID0}, subnetID) require.ErrorIs(err, errTest) } @@ -221,14 +209,12 @@ func TestStopTrackingNonConnectedPast(t *testing.T) { up := NewManager(s).(*manager) up.clock.Set(currentTime) - err := up.StartTracking([]ids.NodeID{nodeID0}, subnetID) - require.NoError(err) + require.NoError(up.StartTracking([]ids.NodeID{nodeID0}, subnetID)) currentTime = currentTime.Add(-time.Second) up.clock.Set(currentTime) - err = up.StopTracking([]ids.NodeID{nodeID0}, subnetID) - require.NoError(err) + require.NoError(up.StopTracking([]ids.NodeID{nodeID0}, subnetID)) duration, lastUpdated, err := s.GetUptime(nodeID0, subnetID) require.NoError(err) @@ -249,14 +235,13 @@ func TestStopTrackingNonConnectedDBError(t *testing.T) { up := 
NewManager(s).(*manager) up.clock.Set(currentTime) - err := up.StartTracking([]ids.NodeID{nodeID0}, subnetID) - require.NoError(err) + require.NoError(up.StartTracking([]ids.NodeID{nodeID0}, subnetID)) currentTime = currentTime.Add(time.Second) up.clock.Set(currentTime) s.dbWriteError = errTest - err = up.StopTracking([]ids.NodeID{nodeID0}, subnetID) + err := up.StopTracking([]ids.NodeID{nodeID0}, subnetID) require.ErrorIs(err, errTest) } @@ -292,8 +277,7 @@ func TestConnectAndDisconnect(t *testing.T) { connected := up.IsConnected(nodeID0, subnetID) require.False(connected) - err := up.StartTracking([]ids.NodeID{nodeID0}, subnetID) - require.NoError(err) + require.NoError(up.StartTracking([]ids.NodeID{nodeID0}, subnetID)) connected = up.IsConnected(nodeID0, subnetID) require.False(connected) @@ -303,8 +287,7 @@ func TestConnectAndDisconnect(t *testing.T) { require.Equal(time.Duration(0), duration) require.Equal(up.clock.UnixTime(), lastUpdated) - err = up.Connect(nodeID0, subnetID) - require.NoError(err) + require.NoError(up.Connect(nodeID0, subnetID)) connected = up.IsConnected(nodeID0, subnetID) require.True(connected) @@ -320,8 +303,7 @@ func TestConnectAndDisconnect(t *testing.T) { require.Equal(up.clock.UnixTime(), lastUpdated) } - err := up.Disconnect(nodeID0) - require.NoError(err) + require.NoError(up.Disconnect(nodeID0)) for _, subnetID := range tt.subnetIDs { connected := up.IsConnected(nodeID0, subnetID) @@ -356,17 +338,14 @@ func TestConnectAndDisconnectBeforeTracking(t *testing.T) { currentTime = currentTime.Add(time.Second) up.clock.Set(currentTime) - err := up.Connect(nodeID0, subnetID) - require.NoError(err) + require.NoError(up.Connect(nodeID0, subnetID)) currentTime = currentTime.Add(time.Second) up.clock.Set(currentTime) - err = up.Disconnect(nodeID0) - require.NoError(err) + require.NoError(up.Disconnect(nodeID0)) - err = up.StartTracking([]ids.NodeID{nodeID0}, subnetID) - require.NoError(err) + require.NoError(up.StartTracking([]ids.NodeID{nodeID0}, subnetID)) duration, lastUpdated, err := up.CalculateUptime(nodeID0, subnetID) require.NoError(err) @@ -389,19 +368,16 @@ func TestUnrelatedNodeDisconnect(t *testing.T) { up := NewManager(s).(*manager) up.clock.Set(currentTime) - err := up.StartTracking([]ids.NodeID{nodeID0}, subnetID) - require.NoError(err) + require.NoError(up.StartTracking([]ids.NodeID{nodeID0}, subnetID)) duration, lastUpdated, err := up.CalculateUptime(nodeID0, subnetID) require.NoError(err) require.Equal(time.Duration(0), duration) require.Equal(up.clock.UnixTime(), lastUpdated) - err = up.Connect(nodeID0, subnetID) - require.NoError(err) + require.NoError(up.Connect(nodeID0, subnetID)) - err = up.Connect(nodeID1, subnetID) - require.NoError(err) + require.NoError(up.Connect(nodeID1, subnetID)) currentTime = currentTime.Add(time.Second) up.clock.Set(currentTime) @@ -411,8 +387,7 @@ func TestUnrelatedNodeDisconnect(t *testing.T) { require.Equal(time.Second, duration) require.Equal(up.clock.UnixTime(), lastUpdated) - err = up.Disconnect(nodeID1) - require.NoError(err) + require.NoError(up.Disconnect(nodeID1)) currentTime = currentTime.Add(time.Second) up.clock.Set(currentTime) @@ -459,8 +434,7 @@ func TestCalculateUptimeWhenNeverConnected(t *testing.T) { up := NewManager(s).(*manager) - err := up.StartTracking([]ids.NodeID{}, subnetID) - require.NoError(err) + require.NoError(up.StartTracking([]ids.NodeID{}, subnetID)) s.AddNode(nodeID0, subnetID, startTime) @@ -491,14 +465,12 @@ func TestCalculateUptimeWhenConnectedBeforeTracking(t *testing.T) { up := 
NewManager(s).(*manager) up.clock.Set(currentTime) - err := up.Connect(nodeID0, subnetID) - require.NoError(err) + require.NoError(up.Connect(nodeID0, subnetID)) currentTime = currentTime.Add(time.Second) up.clock.Set(currentTime) - err = up.StartTracking([]ids.NodeID{nodeID0}, subnetID) - require.NoError(err) + require.NoError(up.StartTracking([]ids.NodeID{nodeID0}, subnetID)) currentTime = currentTime.Add(time.Second) up.clock.Set(currentTime) @@ -523,14 +495,12 @@ func TestCalculateUptimeWhenConnectedInFuture(t *testing.T) { up := NewManager(s).(*manager) up.clock.Set(currentTime) - err := up.StartTracking([]ids.NodeID{nodeID0}, subnetID) - require.NoError(err) + require.NoError(up.StartTracking([]ids.NodeID{nodeID0}, subnetID)) currentTime = currentTime.Add(2 * time.Second) up.clock.Set(currentTime) - err = up.Connect(nodeID0, subnetID) - require.NoError(err) + require.NoError(up.Connect(nodeID0, subnetID)) currentTime = currentTime.Add(-time.Second) up.clock.Set(currentTime) @@ -588,8 +558,7 @@ func TestCalculateUptimePercentage(t *testing.T) { up := NewManager(s).(*manager) - err := up.StartTracking([]ids.NodeID{nodeID0}, subnetID) - require.NoError(err) + require.NoError(up.StartTracking([]ids.NodeID{nodeID0}, subnetID)) currentTime = currentTime.Add(time.Second) up.clock.Set(currentTime) @@ -613,17 +582,14 @@ func TestStopTrackingUnixTimeRegression(t *testing.T) { up := NewManager(s).(*manager) up.clock.Set(currentTime) - err := up.StartTracking([]ids.NodeID{nodeID0}, subnetID) - require.NoError(err) + require.NoError(up.StartTracking([]ids.NodeID{nodeID0}, subnetID)) - err = up.Connect(nodeID0, subnetID) - require.NoError(err) + require.NoError(up.Connect(nodeID0, subnetID)) currentTime = startTime.Add(time.Second) up.clock.Set(currentTime) - err = up.StopTracking([]ids.NodeID{nodeID0}, subnetID) - require.NoError(err) + require.NoError(up.StopTracking([]ids.NodeID{nodeID0}, subnetID)) currentTime = startTime.Add(time.Second) up.clock.Set(currentTime) @@ -633,11 +599,9 @@ func TestStopTrackingUnixTimeRegression(t *testing.T) { currentTime = startTime.Add(time.Second) up.clock.Set(currentTime) - err = up.StartTracking([]ids.NodeID{nodeID0}, subnetID) - require.NoError(err) + require.NoError(up.StartTracking([]ids.NodeID{nodeID0}, subnetID)) - err = up.Connect(nodeID0, subnetID) - require.NoError(err) + require.NoError(up.Connect(nodeID0, subnetID)) currentTime = startTime.Add(time.Second) up.clock.Set(currentTime) diff --git a/snow/validators/manager_test.go b/snow/validators/manager_test.go index 1b500d58d06..d5f26c8d587 100644 --- a/snow/validators/manager_test.go +++ b/snow/validators/manager_test.go @@ -25,8 +25,7 @@ func TestAdd(t *testing.T) { s := NewSet() m.Add(subnetID, s) - err = Add(m, subnetID, nodeID, nil, ids.Empty, 1) - require.NoError(err) + require.NoError(Add(m, subnetID, nodeID, nil, ids.Empty, 1)) require.Equal(uint64(1), s.Weight()) } @@ -48,11 +47,9 @@ func TestAddWeight(t *testing.T) { err = AddWeight(m, subnetID, nodeID, 1) require.ErrorIs(err, errMissingValidator) - err = Add(m, subnetID, nodeID, nil, ids.Empty, 1) - require.NoError(err) + require.NoError(Add(m, subnetID, nodeID, nil, ids.Empty, 1)) - err = AddWeight(m, subnetID, nodeID, 1) - require.NoError(err) + require.NoError(AddWeight(m, subnetID, nodeID, 1)) require.Equal(uint64(2), s.Weight()) } @@ -71,16 +68,13 @@ func TestRemoveWeight(t *testing.T) { s := NewSet() m.Add(subnetID, s) - err = Add(m, subnetID, nodeID, nil, ids.Empty, 2) - require.NoError(err) + require.NoError(Add(m, subnetID, 
nodeID, nil, ids.Empty, 2)) - err = RemoveWeight(m, subnetID, nodeID, 1) - require.NoError(err) + require.NoError(RemoveWeight(m, subnetID, nodeID, 1)) require.Equal(uint64(1), s.Weight()) - err = RemoveWeight(m, subnetID, nodeID, 1) - require.NoError(err) + require.NoError(RemoveWeight(m, subnetID, nodeID, 1)) require.Zero(s.Weight()) } @@ -102,14 +96,12 @@ func TestContains(t *testing.T) { contains = Contains(m, subnetID, nodeID) require.False(contains) - err := Add(m, subnetID, nodeID, nil, ids.Empty, 1) - require.NoError(err) + require.NoError(Add(m, subnetID, nodeID, nil, ids.Empty, 1)) contains = Contains(m, subnetID, nodeID) require.True(contains) - err = RemoveWeight(m, subnetID, nodeID, 1) - require.NoError(err) + require.NoError(RemoveWeight(m, subnetID, nodeID, 1)) contains = Contains(m, subnetID, nodeID) require.False(contains) diff --git a/snow/validators/set_test.go b/snow/validators/set_test.go index cebd565ea65..fe90c051c44 100644 --- a/snow/validators/set_test.go +++ b/snow/validators/set_test.go @@ -31,10 +31,9 @@ func TestSetAddDuplicate(t *testing.T) { s := NewSet() nodeID := ids.GenerateTestNodeID() - err := s.Add(nodeID, nil, ids.Empty, 1) - require.NoError(err) + require.NoError(s.Add(nodeID, nil, ids.Empty, 1)) - err = s.Add(nodeID, nil, ids.Empty, 1) + err := s.Add(nodeID, nil, ids.Empty, 1) require.ErrorIs(err, errDuplicateValidator) } @@ -42,10 +41,9 @@ func TestSetAddOverflow(t *testing.T) { require := require.New(t) s := NewSet() - err := s.Add(ids.GenerateTestNodeID(), nil, ids.Empty, 1) - require.NoError(err) + require.NoError(s.Add(ids.GenerateTestNodeID(), nil, ids.Empty, 1)) - err = s.Add(ids.GenerateTestNodeID(), nil, ids.Empty, stdmath.MaxUint64) + err := s.Add(ids.GenerateTestNodeID(), nil, ids.Empty, stdmath.MaxUint64) require.ErrorIs(err, math.ErrOverflow) require.Equal(uint64(1), s.Weight()) @@ -57,10 +55,9 @@ func TestSetAddWeightZeroWeight(t *testing.T) { s := NewSet() nodeID := ids.GenerateTestNodeID() - err := s.Add(nodeID, nil, ids.Empty, 1) - require.NoError(err) + require.NoError(s.Add(nodeID, nil, ids.Empty, 1)) - err = s.AddWeight(nodeID, 0) + err := s.AddWeight(nodeID, 0) require.ErrorIs(err, errZeroWeight) } @@ -69,14 +66,12 @@ func TestSetAddWeightOverflow(t *testing.T) { s := NewSet() - err := s.Add(ids.GenerateTestNodeID(), nil, ids.Empty, 1) - require.NoError(err) + require.NoError(s.Add(ids.GenerateTestNodeID(), nil, ids.Empty, 1)) nodeID := ids.GenerateTestNodeID() - err = s.Add(nodeID, nil, ids.Empty, 1) - require.NoError(err) + require.NoError(s.Add(nodeID, nil, ids.Empty, 1)) - err = s.AddWeight(nodeID, stdmath.MaxUint64-1) + err := s.AddWeight(nodeID, stdmath.MaxUint64-1) require.ErrorIs(err, math.ErrOverflow) require.Equal(uint64(2), s.Weight()) @@ -91,8 +86,7 @@ func TestSetGetWeight(t *testing.T) { weight := s.GetWeight(nodeID) require.Zero(weight) - err := s.Add(nodeID, nil, ids.Empty, 1) - require.NoError(err) + require.NoError(s.Add(nodeID, nil, ids.Empty, 1)) require.Equal(uint64(1), s.GetWeight(nodeID)) } @@ -114,14 +108,11 @@ func TestSetSubsetWeight(t *testing.T) { s := NewSet() - err := s.Add(nodeID0, nil, ids.Empty, weight0) - require.NoError(err) + require.NoError(s.Add(nodeID0, nil, ids.Empty, weight0)) - err = s.Add(nodeID1, nil, ids.Empty, weight1) - require.NoError(err) + require.NoError(s.Add(nodeID1, nil, ids.Empty, weight1)) - err = s.Add(nodeID2, nil, ids.Empty, weight2) - require.NoError(err) + require.NoError(s.Add(nodeID2, nil, ids.Empty, weight2)) expectedWeight := weight0 + weight1 subsetWeight := 
s.SubsetWeight(subset) @@ -134,10 +125,9 @@ func TestSetRemoveWeightZeroWeight(t *testing.T) { s := NewSet() nodeID := ids.GenerateTestNodeID() - err := s.Add(nodeID, nil, ids.Empty, 1) - require.NoError(err) + require.NoError(s.Add(nodeID, nil, ids.Empty, 1)) - err = s.RemoveWeight(nodeID, 0) + err := s.RemoveWeight(nodeID, 0) require.ErrorIs(err, errZeroWeight) } @@ -146,10 +136,9 @@ func TestSetRemoveWeightMissingValidator(t *testing.T) { s := NewSet() - err := s.Add(ids.GenerateTestNodeID(), nil, ids.Empty, 1) - require.NoError(err) + require.NoError(s.Add(ids.GenerateTestNodeID(), nil, ids.Empty, 1)) - err = s.RemoveWeight(ids.GenerateTestNodeID(), 1) + err := s.RemoveWeight(ids.GenerateTestNodeID(), 1) require.ErrorIs(err, errMissingValidator) } @@ -158,14 +147,12 @@ func TestSetRemoveWeightUnderflow(t *testing.T) { s := NewSet() - err := s.Add(ids.GenerateTestNodeID(), nil, ids.Empty, 1) - require.NoError(err) + require.NoError(s.Add(ids.GenerateTestNodeID(), nil, ids.Empty, 1)) nodeID := ids.GenerateTestNodeID() - err = s.Add(nodeID, nil, ids.Empty, 1) - require.NoError(err) + require.NoError(s.Add(nodeID, nil, ids.Empty, 1)) - err = s.RemoveWeight(nodeID, 2) + err := s.RemoveWeight(nodeID, 2) require.ErrorIs(err, math.ErrUnderflow) require.Equal(uint64(2), s.Weight()) @@ -184,8 +171,7 @@ func TestSetGet(t *testing.T) { require.NoError(err) pk := bls.PublicFromSecretKey(sk) - err = s.Add(nodeID, pk, ids.Empty, 1) - require.NoError(err) + require.NoError(s.Add(nodeID, pk, ids.Empty, 1)) vdr0, ok := s.Get(nodeID) require.True(ok) @@ -193,8 +179,7 @@ func TestSetGet(t *testing.T) { require.Equal(pk, vdr0.PublicKey) require.Equal(uint64(1), vdr0.Weight) - err = s.AddWeight(nodeID, 1) - require.NoError(err) + require.NoError(s.AddWeight(nodeID, 1)) vdr1, ok := s.Get(nodeID) require.True(ok) @@ -215,14 +200,12 @@ func TestSetContains(t *testing.T) { contains := s.Contains(nodeID) require.False(contains) - err := s.Add(nodeID, nil, ids.Empty, 1) - require.NoError(err) + require.NoError(s.Add(nodeID, nil, ids.Empty, 1)) contains = s.Contains(nodeID) require.True(contains) - err = s.RemoveWeight(nodeID, 1) - require.NoError(err) + require.NoError(s.RemoveWeight(nodeID, 1)) contains = s.Contains(nodeID) require.False(contains) @@ -237,27 +220,23 @@ func TestSetLen(t *testing.T) { require.Zero(len) nodeID0 := ids.GenerateTestNodeID() - err := s.Add(nodeID0, nil, ids.Empty, 1) - require.NoError(err) + require.NoError(s.Add(nodeID0, nil, ids.Empty, 1)) len = s.Len() require.Equal(1, len) nodeID1 := ids.GenerateTestNodeID() - err = s.Add(nodeID1, nil, ids.Empty, 1) - require.NoError(err) + require.NoError(s.Add(nodeID1, nil, ids.Empty, 1)) len = s.Len() require.Equal(2, len) - err = s.RemoveWeight(nodeID1, 1) - require.NoError(err) + require.NoError(s.RemoveWeight(nodeID1, 1)) len = s.Len() require.Equal(1, len) - err = s.RemoveWeight(nodeID0, 1) - require.NoError(err) + require.NoError(s.RemoveWeight(nodeID0, 1)) len = s.Len() require.Zero(len) @@ -276,8 +255,7 @@ func TestSetList(t *testing.T) { pk := bls.PublicFromSecretKey(sk) nodeID0 := ids.GenerateTestNodeID() - err = s.Add(nodeID0, pk, ids.Empty, 2) - require.NoError(err) + require.NoError(s.Add(nodeID0, pk, ids.Empty, 2)) list = s.List() require.Len(list, 1) @@ -288,8 +266,7 @@ func TestSetList(t *testing.T) { require.Equal(uint64(2), node0.Weight) nodeID1 := ids.GenerateTestNodeID() - err = s.Add(nodeID1, nil, ids.Empty, 1) - require.NoError(err) + require.NoError(s.Add(nodeID1, nil, ids.Empty, 1)) list = s.List() require.Len(list, 2) 
@@ -304,8 +281,7 @@ func TestSetList(t *testing.T) { require.Nil(node1.PublicKey) require.Equal(uint64(1), node1.Weight) - err = s.RemoveWeight(nodeID0, 1) - require.NoError(err) + require.NoError(s.RemoveWeight(nodeID0, 1)) require.Equal(nodeID0, node0.NodeID) require.Equal(pk, node0.PublicKey) require.Equal(uint64(2), node0.Weight) @@ -323,8 +299,7 @@ func TestSetList(t *testing.T) { require.Nil(node1.PublicKey) require.Equal(uint64(1), node1.Weight) - err = s.RemoveWeight(nodeID0, 1) - require.NoError(err) + require.NoError(s.RemoveWeight(nodeID0, 1)) list = s.List() require.Len(list, 1) @@ -334,8 +309,7 @@ func TestSetList(t *testing.T) { require.Nil(node0.PublicKey) require.Equal(uint64(1), node0.Weight) - err = s.RemoveWeight(nodeID1, 1) - require.NoError(err) + require.NoError(s.RemoveWeight(nodeID1, 1)) list = s.List() require.Empty(list) @@ -350,11 +324,9 @@ func TestSetWeight(t *testing.T) { weight1 := uint64(123) s := NewSet() - err := s.Add(vdr0, nil, ids.Empty, weight0) - require.NoError(err) + require.NoError(s.Add(vdr0, nil, ids.Empty, weight0)) - err = s.Add(vdr1, nil, ids.Empty, weight1) - require.NoError(err) + require.NoError(s.Add(vdr1, nil, ids.Empty, weight1)) setWeight := s.Weight() expectedWeight := weight0 + weight1 @@ -375,8 +347,7 @@ func TestSetSample(t *testing.T) { nodeID0 := ids.GenerateTestNodeID() pk := bls.PublicFromSecretKey(sk) - err = s.Add(nodeID0, pk, ids.Empty, 1) - require.NoError(err) + require.NoError(s.Add(nodeID0, pk, ids.Empty, 1)) sampled, err = s.Sample(1) require.NoError(err) @@ -386,8 +357,7 @@ func TestSetSample(t *testing.T) { require.ErrorIs(err, sampler.ErrOutOfRange) nodeID1 := ids.GenerateTestNodeID() - err = s.Add(nodeID1, nil, ids.Empty, stdmath.MaxInt64-1) - require.NoError(err) + require.NoError(s.Add(nodeID1, nil, ids.Empty, stdmath.MaxInt64-1)) sampled, err = s.Sample(1) require.NoError(err) @@ -412,11 +382,9 @@ func TestSetString(t *testing.T) { } s := NewSet() - err := s.Add(nodeID0, nil, ids.Empty, 1) - require.NoError(err) + require.NoError(s.Add(nodeID0, nil, ids.Empty, 1)) - err = s.Add(nodeID1, nil, ids.Empty, stdmath.MaxInt64-1) - require.NoError(err) + require.NoError(s.Add(nodeID1, nil, ids.Empty, stdmath.MaxInt64-1)) expected := "Validator Set: (Size = 2, Weight = 9223372036854775807)\n" + " Validator[0]: NodeID-111111111111111111116DBWJs, 1\n" + @@ -480,8 +448,7 @@ func TestSetAddCallback(t *testing.T) { callCount++ }, }) - err = s.Add(nodeID0, pk0, txID0, weight0) - require.NoError(err) + require.NoError(s.Add(nodeID0, pk0, txID0, weight0)) require.Equal(1, callCount) } @@ -494,8 +461,7 @@ func TestSetAddWeightCallback(t *testing.T) { weight1 := uint64(93) s := NewSet() - err := s.Add(nodeID0, nil, txID0, weight0) - require.NoError(err) + require.NoError(s.Add(nodeID0, nil, txID0, weight0)) callCount := 0 s.RegisterCallbackListener(&callbackListener{ @@ -514,8 +480,7 @@ func TestSetAddWeightCallback(t *testing.T) { callCount++ }, }) - err = s.AddWeight(nodeID0, weight1) - require.NoError(err) + require.NoError(s.AddWeight(nodeID0, weight1)) require.Equal(2, callCount) } @@ -528,8 +493,7 @@ func TestSetRemoveWeightCallback(t *testing.T) { weight1 := uint64(92) s := NewSet() - err := s.Add(nodeID0, nil, txID0, weight0) - require.NoError(err) + require.NoError(s.Add(nodeID0, nil, txID0, weight0)) callCount := 0 s.RegisterCallbackListener(&callbackListener{ @@ -548,8 +512,7 @@ func TestSetRemoveWeightCallback(t *testing.T) { callCount++ }, }) - err = s.RemoveWeight(nodeID0, weight1) - require.NoError(err) + 
require.NoError(s.RemoveWeight(nodeID0, weight1)) require.Equal(2, callCount) } @@ -561,8 +524,7 @@ func TestSetValidatorRemovedCallback(t *testing.T) { weight0 := uint64(93) s := NewSet() - err := s.Add(nodeID0, nil, txID0, weight0) - require.NoError(err) + require.NoError(s.Add(nodeID0, nil, txID0, weight0)) callCount := 0 s.RegisterCallbackListener(&callbackListener{ @@ -580,7 +542,6 @@ func TestSetValidatorRemovedCallback(t *testing.T) { callCount++ }, }) - err = s.RemoveWeight(nodeID0, weight0) - require.NoError(err) + require.NoError(s.RemoveWeight(nodeID0, weight0)) require.Equal(2, callCount) } diff --git a/staking/tls_test.go b/staking/tls_test.go index bcab2c98b45..2282090b4c8 100644 --- a/staking/tls_test.go +++ b/staking/tls_test.go @@ -27,6 +27,5 @@ func TestMakeKeys(t *testing.T) { sig, err := cert.PrivateKey.(crypto.Signer).Sign(rand.Reader, msgHash, crypto.SHA256) require.NoError(err) - err = cert.Leaf.CheckSignature(cert.Leaf.SignatureAlgorithm, msg, sig) - require.NoError(err) + require.NoError(cert.Leaf.CheckSignature(cert.Leaf.SignatureAlgorithm, msg, sig)) } diff --git a/utils/beacon/set_test.go b/utils/beacon/set_test.go index da396631a98..2d4d3d7f240 100644 --- a/utils/beacon/set_test.go +++ b/utils/beacon/set_test.go @@ -46,8 +46,7 @@ func TestSet(t *testing.T) { len := s.Len() require.Zero(len) - err := s.Add(b0) - require.NoError(err) + require.NoError(s.Add(b0)) idsArg = s.IDsArg() require.Equal("NodeID-111111111111111111116DBWJs", idsArg) @@ -56,7 +55,7 @@ func TestSet(t *testing.T) { len = s.Len() require.Equal(1, len) - err = s.Add(b0) + err := s.Add(b0) require.ErrorIs(err, errDuplicateID) idsArg = s.IDsArg() @@ -66,8 +65,7 @@ func TestSet(t *testing.T) { len = s.Len() require.Equal(1, len) - err = s.Add(b1) - require.NoError(err) + require.NoError(s.Add(b1)) idsArg = s.IDsArg() require.Equal("NodeID-111111111111111111116DBWJs,NodeID-6HgC8KRBEhXYbF4riJyJFLSHt37UNuRt", idsArg) @@ -76,8 +74,7 @@ func TestSet(t *testing.T) { len = s.Len() require.Equal(2, len) - err = s.Add(b2) - require.NoError(err) + require.NoError(s.Add(b2)) idsArg = s.IDsArg() require.Equal("NodeID-111111111111111111116DBWJs,NodeID-6HgC8KRBEhXYbF4riJyJFLSHt37UNuRt,NodeID-BaMPFdqMUQ46BV8iRcwbVfsam55kMqcp", idsArg) @@ -86,8 +83,7 @@ func TestSet(t *testing.T) { len = s.Len() require.Equal(3, len) - err = s.RemoveByID(b0.ID()) - require.NoError(err) + require.NoError(s.RemoveByID(b0.ID())) idsArg = s.IDsArg() require.Equal("NodeID-BaMPFdqMUQ46BV8iRcwbVfsam55kMqcp,NodeID-6HgC8KRBEhXYbF4riJyJFLSHt37UNuRt", idsArg) @@ -96,8 +92,7 @@ func TestSet(t *testing.T) { len = s.Len() require.Equal(2, len) - err = s.RemoveByIP(b1.IP()) - require.NoError(err) + require.NoError(s.RemoveByIP(b1.IP())) idsArg = s.IDsArg() require.Equal("NodeID-BaMPFdqMUQ46BV8iRcwbVfsam55kMqcp", idsArg) diff --git a/utils/crypto/secp256k1/secp256k1_test.go b/utils/crypto/secp256k1/secp256k1_test.go index 8f5e3879d38..b9c47ac4e8b 100644 --- a/utils/crypto/secp256k1/secp256k1_test.go +++ b/utils/crypto/secp256k1/secp256k1_test.go @@ -115,8 +115,7 @@ func TestPrivateKeySECP256K1RUnmarshalJSON(t *testing.T) { require.NoError(err) key2 := PrivateKey{} - err = key2.UnmarshalJSON(keyJSON) - require.NoError(err) + require.NoError(key2.UnmarshalJSON(keyJSON)) require.Equal(key.PublicKey(), key2.PublicKey()) } diff --git a/utils/profiler/profiler_test.go b/utils/profiler/profiler_test.go index 8f04c940532..2d7d864aca6 100644 --- a/utils/profiler/profiler_test.go +++ b/utils/profiler/profiler_test.go @@ -19,13 +19,11 @@ func 
TestProfiler(t *testing.T) { p := New(dir) // Test Start and Stop CPU Profiler - err := p.StartCPUProfiler() - require.NoError(err) + require.NoError(p.StartCPUProfiler()) - err = p.StopCPUProfiler() - require.NoError(err) + require.NoError(p.StopCPUProfiler()) - _, err = os.Stat(filepath.Join(dir, cpuProfileFile)) + _, err := os.Stat(filepath.Join(dir, cpuProfileFile)) require.NoError(err) // Test Stop CPU Profiler without it running @@ -33,15 +31,13 @@ func TestProfiler(t *testing.T) { require.ErrorIs(err, errCPUProfilerNotRunning) // Test Memory Profiler - err = p.MemoryProfile() - require.NoError(err) + require.NoError(p.MemoryProfile()) _, err = os.Stat(filepath.Join(dir, memProfileFile)) require.NoError(err) // Test Lock Profiler - err = p.LockProfile() - require.NoError(err) + require.NoError(p.LockProfile()) _, err = os.Stat(filepath.Join(dir, lockProfileFile)) require.NoError(err) diff --git a/utils/sampler/weighted_heap_test.go b/utils/sampler/weighted_heap_test.go index 098f431b734..4d409198ce5 100644 --- a/utils/sampler/weighted_heap_test.go +++ b/utils/sampler/weighted_heap_test.go @@ -14,8 +14,7 @@ func TestWeightedHeapInitialize(t *testing.T) { h := weightedHeap{} - err := h.Initialize([]uint64{2, 2, 1, 3, 3, 1, 3}) - require.NoError(err) + require.NoError(h.Initialize([]uint64{2, 2, 1, 3, 3, 1, 3})) expectedOrdering := []int{3, 4, 6, 0, 1, 2, 5} for i, elem := range h.heap { diff --git a/utils/set/set_test.go b/utils/set/set_test.go index a0ae3eb9de1..603c46b5a35 100644 --- a/utils/set/set_test.go +++ b/utils/set/set_test.go @@ -145,8 +145,7 @@ func TestSetUnmarshalJSON(t *testing.T) { require := require.New(t) set := Set[int]{} { - err := set.UnmarshalJSON([]byte("[]")) - require.NoError(err) + require.NoError(set.UnmarshalJSON([]byte("[]"))) require.Empty(set) } id1, id2 := 1, 2 @@ -155,29 +154,25 @@ func TestSetUnmarshalJSON(t *testing.T) { id2JSON, err := json.Marshal(id2) require.NoError(err) { - err := set.UnmarshalJSON([]byte(fmt.Sprintf("[%s]", string(id1JSON)))) - require.NoError(err) + require.NoError(set.UnmarshalJSON([]byte(fmt.Sprintf("[%s]", string(id1JSON))))) require.Len(set, 1) require.Contains(set, id1) } { - err := set.UnmarshalJSON([]byte(fmt.Sprintf("[%s,%s]", string(id1JSON), string(id2JSON)))) - require.NoError(err) + require.NoError(set.UnmarshalJSON([]byte(fmt.Sprintf("[%s,%s]", string(id1JSON), string(id2JSON))))) require.Len(set, 2) require.Contains(set, id1) require.Contains(set, id2) } { - err := set.UnmarshalJSON([]byte(fmt.Sprintf("[%d,%d,%d]", 3, 4, 5))) - require.NoError(err) + require.NoError(set.UnmarshalJSON([]byte(fmt.Sprintf("[%d,%d,%d]", 3, 4, 5)))) require.Len(set, 3) require.Contains(set, 3) require.Contains(set, 4) require.Contains(set, 5) } { - err := set.UnmarshalJSON([]byte(fmt.Sprintf("[%d,%d,%d, %d]", 3, 4, 5, 3))) - require.NoError(err) + require.NoError(set.UnmarshalJSON([]byte(fmt.Sprintf("[%d,%d,%d, %d]", 3, 4, 5, 3)))) require.Len(set, 3) require.Contains(set, 3) require.Contains(set, 4) @@ -186,10 +181,8 @@ func TestSetUnmarshalJSON(t *testing.T) { { set1 := Set[int]{} set2 := Set[int]{} - err := set1.UnmarshalJSON([]byte(fmt.Sprintf("[%s,%s]", string(id1JSON), string(id2JSON)))) - require.NoError(err) - err = set2.UnmarshalJSON([]byte(fmt.Sprintf("[%s,%s]", string(id2JSON), string(id1JSON)))) - require.NoError(err) + require.NoError(set1.UnmarshalJSON([]byte(fmt.Sprintf("[%s,%s]", string(id1JSON), string(id2JSON))))) + require.NoError(set2.UnmarshalJSON([]byte(fmt.Sprintf("[%s,%s]", string(id2JSON), string(id1JSON))))) 
require.Equal(set1, set2) } } diff --git a/vms/avm/blocks/builder/builder_test.go b/vms/avm/blocks/builder/builder_test.go index a1501a0cddb..2a504cd9255 100644 --- a/vms/avm/blocks/builder/builder_test.go +++ b/vms/avm/blocks/builder/builder_test.go @@ -510,8 +510,7 @@ func TestBlockBuilderAddLocalTx(t *testing.T) { // add a tx to the mempool tx := transactions[0] txID := tx.ID() - err = mempool.Add(tx) - require.NoError(err) + require.NoError(mempool.Add(tx)) has := mempool.Has(txID) require.True(has) diff --git a/vms/avm/blocks/executor/block_test.go b/vms/avm/blocks/executor/block_test.go index 781ede461ea..d58f78825ca 100644 --- a/vms/avm/blocks/executor/block_test.go +++ b/vms/avm/blocks/executor/block_test.go @@ -961,8 +961,7 @@ func TestBlockReject(t *testing.T) { defer ctrl.Finish() b := tt.blockFunc(ctrl) - err := b.Reject(context.Background()) - require.NoError(err) + require.NoError(b.Reject(context.Background())) require.True(b.rejected) _, ok := b.manager.blkIDToState[b.ID()] require.False(ok) diff --git a/vms/avm/blocks/executor/manager_test.go b/vms/avm/blocks/executor/manager_test.go index f5c43d1f59f..4e7d9e3c466 100644 --- a/vms/avm/blocks/executor/manager_test.go +++ b/vms/avm/blocks/executor/manager_test.go @@ -317,8 +317,7 @@ func TestVerifyUniqueInputs(t *testing.T) { // Case: No inputs { m := &manager{} - err := m.VerifyUniqueInputs(ids.GenerateTestID(), set.Set[ids.ID]{}) - require.NoError(err) + require.NoError(m.VerifyUniqueInputs(ids.GenerateTestID(), set.Set[ids.ID]{})) } // blk0 is blk1's parent @@ -344,6 +343,5 @@ func TestVerifyUniqueInputs(t *testing.T) { err := m.VerifyUniqueInputs(blk1ID, set.Set[ids.ID]{inputID: struct{}{}}) require.ErrorIs(err, ErrConflictingParentTxs) - err = m.VerifyUniqueInputs(blk1ID, set.Set[ids.ID]{ids.GenerateTestID(): struct{}{}}) - require.NoError(err) + require.NoError(m.VerifyUniqueInputs(blk1ID, set.Set[ids.ID]{ids.GenerateTestID(): struct{}{}})) } diff --git a/vms/avm/network/network_test.go b/vms/avm/network/network_test.go index 0e0cb76aa34..351975063ab 100644 --- a/vms/avm/network/network_test.go +++ b/vms/avm/network/network_test.go @@ -158,8 +158,7 @@ func TestNetworkAppGossip(t *testing.T) { tt.mempoolFunc(ctrl), tt.appSenderFunc(ctrl), ) - err = n.AppGossip(context.Background(), ids.GenerateTestNodeID(), tt.msgBytesFunc()) - require.NoError(err) + require.NoError(n.AppGossip(context.Background(), ids.GenerateTestNodeID(), tt.msgBytesFunc())) }) } } diff --git a/vms/avm/pubsub_filterer_test.go b/vms/avm/pubsub_filterer_test.go index 9d5fb5c231c..95f4fc3cd22 100644 --- a/vms/avm/pubsub_filterer_test.go +++ b/vms/avm/pubsub_filterer_test.go @@ -42,8 +42,7 @@ func TestFilter(t *testing.T) { addrBytes := addrID[:] fp := pubsub.NewFilterParam() - err := fp.Add(addrBytes) - require.NoError(err) + require.NoError(fp.Add(addrBytes)) parser := NewPubSubFilterer(&tx) fr, _ := parser.Filter([]pubsub.Filter{&mockFilter{addr: addrBytes}}) diff --git a/vms/avm/service_test.go b/vms/avm/service_test.go index 58e719d5ef3..f2b95b9b056 100644 --- a/vms/avm/service_test.go +++ b/vms/avm/service_test.go @@ -898,8 +898,7 @@ func TestServiceGetTxJSON_OperationTxWithNftxMintOp(t *testing.T) { require.NoError(err) mintNFTTx := buildOperationTxWithOp(buildNFTxMintOp(createAssetTx, key, 2, 1)) - err = mintNFTTx.SignNFTFx(vm.parser.Codec(), [][]*secp256k1.PrivateKey{{key}}) - require.NoError(err) + require.NoError(mintNFTTx.SignNFTFx(vm.parser.Codec(), [][]*secp256k1.PrivateKey{{key}})) txID, err := vm.IssueTx(mintNFTTx.Bytes()) 
require.NoError(err) @@ -954,7 +953,7 @@ func TestServiceGetTxJSON_OperationTxWithMultipleNftxMintOp(t *testing.T) { genesisBytes := BuildGenesisTest(t) issuer := make(chan common.Message, 1) - err := vm.Initialize( + require.NoError(vm.Initialize( context.Background(), ctx, baseDBManager, @@ -977,8 +976,7 @@ func TestServiceGetTxJSON_OperationTxWithMultipleNftxMintOp(t *testing.T) { }, }, &common.SenderTest{T: t}, - ) - require.NoError(err) + )) vm.batchTimeout = 0 require.NoError(vm.SetState(context.Background(), snow.Bootstrapping)) @@ -986,15 +984,14 @@ func TestServiceGetTxJSON_OperationTxWithMultipleNftxMintOp(t *testing.T) { key := keys[0] createAssetTx := newAvaxCreateAssetTxWithOutputs(t, vm) - _, err = vm.IssueTx(createAssetTx.Bytes()) + _, err := vm.IssueTx(createAssetTx.Bytes()) require.NoError(err) mintOp1 := buildNFTxMintOp(createAssetTx, key, 2, 1) mintOp2 := buildNFTxMintOp(createAssetTx, key, 3, 2) mintNFTTx := buildOperationTxWithOp(mintOp1, mintOp2) - err = mintNFTTx.SignNFTFx(vm.parser.Codec(), [][]*secp256k1.PrivateKey{{key}, {key}}) - require.NoError(err) + require.NoError(mintNFTTx.SignNFTFx(vm.parser.Codec(), [][]*secp256k1.PrivateKey{{key}, {key}})) txID, err := vm.IssueTx(mintNFTTx.Bytes()) require.NoError(err) @@ -1084,8 +1081,7 @@ func TestServiceGetTxJSON_OperationTxWithSecpMintOp(t *testing.T) { require.NoError(err) mintSecpOpTx := buildOperationTxWithOp(buildSecpMintOp(createAssetTx, key, 0)) - err = mintSecpOpTx.SignSECP256K1Fx(vm.parser.Codec(), [][]*secp256k1.PrivateKey{{key}}) - require.NoError(err) + require.NoError(mintSecpOpTx.SignSECP256K1Fx(vm.parser.Codec(), [][]*secp256k1.PrivateKey{{key}})) txID, err := vm.IssueTx(mintSecpOpTx.Bytes()) require.NoError(err) @@ -1181,8 +1177,7 @@ func TestServiceGetTxJSON_OperationTxWithMultipleSecpMintOp(t *testing.T) { op2 := buildSecpMintOp(createAssetTx, key, 1) mintSecpOpTx := buildOperationTxWithOp(op1, op2) - err = mintSecpOpTx.SignSECP256K1Fx(vm.parser.Codec(), [][]*secp256k1.PrivateKey{{key}, {key}}) - require.NoError(err) + require.NoError(mintSecpOpTx.SignSECP256K1Fx(vm.parser.Codec(), [][]*secp256k1.PrivateKey{{key}, {key}})) txID, err := vm.IssueTx(mintSecpOpTx.Bytes()) require.NoError(err) @@ -1273,8 +1268,7 @@ func TestServiceGetTxJSON_OperationTxWithPropertyFxMintOp(t *testing.T) { require.NoError(err) mintPropertyFxOpTx := buildOperationTxWithOp(buildPropertyFxMintOp(createAssetTx, key, 4)) - err = mintPropertyFxOpTx.SignPropertyFx(vm.parser.Codec(), [][]*secp256k1.PrivateKey{{key}}) - require.NoError(err) + require.NoError(mintPropertyFxOpTx.SignPropertyFx(vm.parser.Codec(), [][]*secp256k1.PrivateKey{{key}})) txID, err := vm.IssueTx(mintPropertyFxOpTx.Bytes()) require.NoError(err) @@ -1369,8 +1363,7 @@ func TestServiceGetTxJSON_OperationTxWithPropertyFxMintOpMultiple(t *testing.T) op2 := buildPropertyFxMintOp(createAssetTx, key, 5) mintPropertyFxOpTx := buildOperationTxWithOp(op1, op2) - err = mintPropertyFxOpTx.SignPropertyFx(vm.parser.Codec(), [][]*secp256k1.PrivateKey{{key}, {key}}) - require.NoError(err) + require.NoError(mintPropertyFxOpTx.SignPropertyFx(vm.parser.Codec(), [][]*secp256k1.PrivateKey{{key}, {key}})) txID, err := vm.IssueTx(mintPropertyFxOpTx.Bytes()) require.NoError(err) diff --git a/vms/avm/states/state_test.go b/vms/avm/states/state_test.go index 35533a84bb3..149ad0927f6 100644 --- a/vms/avm/states/state_test.go +++ b/vms/avm/states/state_test.go @@ -281,8 +281,7 @@ func TestInitializeChainState(t *testing.T) { stopVertexID := ids.GenerateTestID() genesisTimestamp := 
version.CortinaDefaultTime - err = s.InitializeChainState(stopVertexID, genesisTimestamp) - require.NoError(err) + require.NoError(s.InitializeChainState(stopVertexID, genesisTimestamp)) lastAcceptedID := s.GetLastAccepted() genesis, err := s.GetBlock(lastAcceptedID) @@ -301,11 +300,9 @@ func TestInitializeChainState(t *testing.T) { s.AddBlock(childBlock) s.SetLastAccepted(childBlock.ID()) - err = s.Commit() - require.NoError(err) + require.NoError(s.Commit()) - err = s.InitializeChainState(stopVertexID, genesisTimestamp) - require.NoError(err) + require.NoError(s.InitializeChainState(stopVertexID, genesisTimestamp)) lastAcceptedID = s.GetLastAccepted() lastAccepted, err := s.GetBlock(lastAcceptedID) diff --git a/vms/avm/txs/mempool/mempool_test.go b/vms/avm/txs/mempool/mempool_test.go index 0f633b75e87..5e69304fe4a 100644 --- a/vms/avm/txs/mempool/mempool_test.go +++ b/vms/avm/txs/mempool/mempool_test.go @@ -50,8 +50,7 @@ func TestBlockBuilderMaxMempoolSizeHandling(t *testing.T) { // shortcut to simulated almost filled mempool mempool.bytesAvailable = len(tx.Bytes()) - err = mempool.Add(tx) - require.NoError(err) + require.NoError(mempool.Add(tx)) } func TestTxsInMempool(t *testing.T) { diff --git a/vms/components/avax/utxo_fetching_test.go b/vms/components/avax/utxo_fetching_test.go index 0b8f4f71f8c..e483508ef66 100644 --- a/vms/components/avax/utxo_fetching_test.go +++ b/vms/components/avax/utxo_fetching_test.go @@ -54,8 +54,7 @@ func TestFetchUTXOs(t *testing.T) { db := memdb.New() s := NewUTXOState(db, manager) - err := s.PutUTXO(utxo) - require.NoError(err) + require.NoError(s.PutUTXO(utxo)) utxos, err := GetAllUTXOs(s, addrs) require.NoError(err) @@ -111,8 +110,7 @@ func TestGetPaginatedUTXOs(t *testing.T) { }, }, } - err := s.PutUTXO(utxo0) - require.NoError(err) + require.NoError(s.PutUTXO(utxo0)) utxo1 := &UTXO{ UTXOID: UTXOID{ @@ -129,8 +127,7 @@ func TestGetPaginatedUTXOs(t *testing.T) { }, }, } - err = s.PutUTXO(utxo1) - require.NoError(err) + require.NoError(s.PutUTXO(utxo1)) utxo2 := &UTXO{ UTXOID: UTXOID{ @@ -147,8 +144,7 @@ func TestGetPaginatedUTXOs(t *testing.T) { }, }, } - err = s.PutUTXO(utxo2) - require.NoError(err) + require.NoError(s.PutUTXO(utxo2)) } var ( diff --git a/vms/components/avax/utxo_state_test.go b/vms/components/avax/utxo_state_test.go index 993fab7606f..864d31f3565 100644 --- a/vms/components/avax/utxo_state_test.go +++ b/vms/components/avax/utxo_state_test.go @@ -63,11 +63,9 @@ func TestUTXOState(t *testing.T) { _, err = s.GetUTXO(utxoID) require.Equal(database.ErrNotFound, err) - err = s.DeleteUTXO(utxoID) - require.NoError(err) + require.NoError(s.DeleteUTXO(utxoID)) - err = s.PutUTXO(utxo) - require.NoError(err) + require.NoError(s.PutUTXO(utxo)) utxoIDs, err := s.UTXOIDs(addr[:], ids.Empty, 5) require.NoError(err) @@ -77,14 +75,12 @@ func TestUTXOState(t *testing.T) { require.NoError(err) require.Equal(utxo, readUTXO) - err = s.DeleteUTXO(utxoID) - require.NoError(err) + require.NoError(s.DeleteUTXO(utxoID)) _, err = s.GetUTXO(utxoID) require.Equal(database.ErrNotFound, err) - err = s.PutUTXO(utxo) - require.NoError(err) + require.NoError(s.PutUTXO(utxo)) s = NewUTXOState(db, manager) diff --git a/vms/components/keystore/user_test.go b/vms/components/keystore/user_test.go index 379897e9d2a..a06c13a340a 100644 --- a/vms/components/keystore/user_test.go +++ b/vms/components/keystore/user_test.go @@ -24,8 +24,7 @@ func TestUserClosedDB(t *testing.T) { db, err := encdb.New([]byte(testPassword), memdb.New()) require.NoError(err) - err = db.Close() - 
require.NoError(err) + require.NoError(db.Close()) u := NewUserFromDB(db) @@ -62,12 +61,10 @@ func TestUser(t *testing.T) { sk, err := factory.NewPrivateKey() require.NoError(err) - err = u.PutKeys(sk) - require.NoError(err) + require.NoError(u.PutKeys(sk)) // Putting the same key multiple times should be a noop - err = u.PutKeys(sk) - require.NoError(err) + require.NoError(u.PutKeys(sk)) addr := sk.PublicKey().Address() diff --git a/vms/components/message/handler_test.go b/vms/components/message/handler_test.go index cd6c5173eaa..6af6bbed867 100644 --- a/vms/components/message/handler_test.go +++ b/vms/components/message/handler_test.go @@ -27,8 +27,7 @@ func TestHandleTx(t *testing.T) { handler := CounterHandler{} msg := Tx{} - err := msg.Handle(&handler, ids.EmptyNodeID, 0) - require.NoError(err) + require.NoError(msg.Handle(&handler, ids.EmptyNodeID, 0)) require.Equal(1, handler.Tx) } diff --git a/vms/platformvm/blocks/builder/builder_test.go b/vms/platformvm/blocks/builder/builder_test.go index 3f3efe80aff..eea7e161293 100644 --- a/vms/platformvm/blocks/builder/builder_test.go +++ b/vms/platformvm/blocks/builder/builder_test.go @@ -51,8 +51,7 @@ func TestBlockBuilderAddLocalTx(t *testing.T) { env.sender.SendAppGossipF = func(context.Context, []byte) error { return nil } - err := env.Builder.AddUnverifiedTx(tx) - require.NoError(err) + require.NoError(env.Builder.AddUnverifiedTx(tx)) has := env.mempool.Has(txID) require.True(has) diff --git a/vms/platformvm/blocks/builder/network_test.go b/vms/platformvm/blocks/builder/network_test.go index 51777fdd824..29793b4b544 100644 --- a/vms/platformvm/blocks/builder/network_test.go +++ b/vms/platformvm/blocks/builder/network_test.go @@ -60,8 +60,7 @@ func TestMempoolValidGossipedTxIsAddedToMempool(t *testing.T) { // Free lock because [AppGossip] waits for the context lock env.ctx.Lock.Unlock() // show that unknown tx is added to mempool - err = env.AppGossip(context.Background(), nodeID, msgBytes) - require.NoError(err) + require.NoError(env.AppGossip(context.Background(), nodeID, msgBytes)) require.True(env.Builder.Has(txID)) // Grab lock back env.ctx.Lock.Lock() @@ -127,8 +126,7 @@ func TestMempoolNewLocaTxIsGossiped(t *testing.T) { tx := getValidTx(env.txBuilder, t) txID := tx.ID() - err := env.Builder.AddUnverifiedTx(tx) - require.NoError(err) + require.NoError(env.Builder.AddUnverifiedTx(tx)) require.NotNil(gossipedBytes) // show gossiped bytes can be decoded to the original tx @@ -144,8 +142,7 @@ func TestMempoolNewLocaTxIsGossiped(t *testing.T) { // show that transaction is not re-gossiped is recently added to mempool gossipedBytes = nil env.Builder.Remove([]*txs.Tx{tx}) - err = env.Builder.Add(tx) - require.NoError(err) + require.NoError(env.Builder.Add(tx)) require.Nil(gossipedBytes) } diff --git a/vms/platformvm/blocks/executor/acceptor_test.go b/vms/platformvm/blocks/executor/acceptor_test.go index 3f382d248a2..e4f73ea9038 100644 --- a/vms/platformvm/blocks/executor/acceptor_test.go +++ b/vms/platformvm/blocks/executor/acceptor_test.go @@ -65,8 +65,7 @@ func TestAcceptorVisitProposalBlock(t *testing.T) { recentlyAccepted: nil, } - err = acceptor.ApricotProposalBlock(blk) - require.NoError(err) + require.NoError(acceptor.ApricotProposalBlock(blk)) require.Equal(blkID, acceptor.backend.lastAccepted) @@ -159,8 +158,7 @@ func TestAcceptorVisitAtomicBlock(t *testing.T) { onAcceptState.EXPECT().Apply(s).Times(1) sharedMemory.EXPECT().Apply(atomicRequests, batch).Return(nil).Times(1) - err = acceptor.ApricotAtomicBlock(blk) - 
require.NoError(err) + require.NoError(acceptor.ApricotAtomicBlock(blk)) } func TestAcceptorVisitStandardBlock(t *testing.T) { @@ -253,8 +251,7 @@ func TestAcceptorVisitStandardBlock(t *testing.T) { onAcceptState.EXPECT().Apply(s).Times(1) sharedMemory.EXPECT().Apply(atomicRequests, batch).Return(nil).Times(1) - err = acceptor.BanffStandardBlock(blk) - require.NoError(err) + require.NoError(acceptor.BanffStandardBlock(blk)) require.True(calledOnAcceptFunc) require.Equal(blk.ID(), acceptor.backend.lastAccepted) } @@ -350,8 +347,7 @@ func TestAcceptorVisitCommitBlock(t *testing.T) { s.EXPECT().Commit().Return(nil).Times(1), ) - err = acceptor.ApricotCommitBlock(blk) - require.NoError(err) + require.NoError(acceptor.ApricotCommitBlock(blk)) require.Equal(blk.ID(), acceptor.backend.lastAccepted) } @@ -447,7 +443,6 @@ func TestAcceptorVisitAbortBlock(t *testing.T) { s.EXPECT().Commit().Return(nil).Times(1), ) - err = acceptor.ApricotAbortBlock(blk) - require.NoError(err) + require.NoError(acceptor.ApricotAbortBlock(blk)) require.Equal(blk.ID(), acceptor.backend.lastAccepted) } diff --git a/vms/platformvm/blocks/executor/rejector_test.go b/vms/platformvm/blocks/executor/rejector_test.go index 3c909e6bddf..538fdf7775c 100644 --- a/vms/platformvm/blocks/executor/rejector_test.go +++ b/vms/platformvm/blocks/executor/rejector_test.go @@ -147,8 +147,7 @@ func TestRejectBlock(t *testing.T) { state.EXPECT().Commit().Return(nil).Times(1), ) - err = tt.rejectFunc(rejector, blk) - require.NoError(err) + require.NoError(tt.rejectFunc(rejector, blk)) // Make sure block and its parent are removed from the state map. require.NotContains(rejector.blkIDToState, blk.ID()) }) diff --git a/vms/platformvm/blocks/executor/verifier_test.go b/vms/platformvm/blocks/executor/verifier_test.go index 098fb9eee8c..fd12dec6021 100644 --- a/vms/platformvm/blocks/executor/verifier_test.go +++ b/vms/platformvm/blocks/executor/verifier_test.go @@ -98,8 +98,7 @@ func TestVerifierVisitProposalBlock(t *testing.T) { // Visit the block blk := manager.NewBlock(apricotBlk) - err = blk.Verify(context.Background()) - require.NoError(err) + require.NoError(blk.Verify(context.Background())) require.Contains(verifier.backend.blkIDToState, apricotBlk.ID()) gotBlkState := verifier.backend.blkIDToState[apricotBlk.ID()] require.Equal(apricotBlk, gotBlkState.statelessBlock) @@ -115,8 +114,7 @@ func TestVerifierVisitProposalBlock(t *testing.T) { require.Equal(status.Aborted, gotStatus) // Visiting again should return nil without using dependencies. - err = blk.Verify(context.Background()) - require.NoError(err) + require.NoError(blk.Verify(context.Background())) } func TestVerifierVisitAtomicBlock(t *testing.T) { @@ -195,8 +193,7 @@ func TestVerifierVisitAtomicBlock(t *testing.T) { onAccept.EXPECT().GetTimestamp().Return(timestamp).Times(1) blk := manager.NewBlock(apricotBlk) - err = blk.Verify(context.Background()) - require.NoError(err) + require.NoError(blk.Verify(context.Background())) require.Contains(verifier.backend.blkIDToState, apricotBlk.ID()) gotBlkState := verifier.backend.blkIDToState[apricotBlk.ID()] @@ -206,8 +203,7 @@ func TestVerifierVisitAtomicBlock(t *testing.T) { require.Equal(timestamp, gotBlkState.timestamp) // Visiting again should return nil without using dependencies. 
- err = blk.Verify(context.Background()) - require.NoError(err) + require.NoError(blk.Verify(context.Background())) } func TestVerifierVisitStandardBlock(t *testing.T) { @@ -297,8 +293,7 @@ func TestVerifierVisitStandardBlock(t *testing.T) { mempool.EXPECT().Remove(apricotBlk.Txs()).Times(1) blk := manager.NewBlock(apricotBlk) - err = blk.Verify(context.Background()) - require.NoError(err) + require.NoError(blk.Verify(context.Background())) // Assert expected state. require.Contains(verifier.backend.blkIDToState, apricotBlk.ID()) @@ -308,8 +303,7 @@ func TestVerifierVisitStandardBlock(t *testing.T) { require.Equal(timestamp, gotBlkState.timestamp) // Visiting again should return nil without using dependencies. - err = blk.Verify(context.Background()) - require.NoError(err) + require.NoError(blk.Verify(context.Background())) } func TestVerifierVisitCommitBlock(t *testing.T) { @@ -371,8 +365,7 @@ func TestVerifierVisitCommitBlock(t *testing.T) { // Verify the block. blk := manager.NewBlock(apricotBlk) - err = blk.Verify(context.Background()) - require.NoError(err) + require.NoError(blk.Verify(context.Background())) // Assert expected state. require.Contains(verifier.backend.blkIDToState, apricotBlk.ID()) @@ -381,8 +374,7 @@ func TestVerifierVisitCommitBlock(t *testing.T) { require.Equal(timestamp, gotBlkState.timestamp) // Visiting again should return nil without using dependencies. - err = blk.Verify(context.Background()) - require.NoError(err) + require.NoError(blk.Verify(context.Background())) } func TestVerifierVisitAbortBlock(t *testing.T) { @@ -444,8 +436,7 @@ func TestVerifierVisitAbortBlock(t *testing.T) { // Verify the block. blk := manager.NewBlock(apricotBlk) - err = blk.Verify(context.Background()) - require.NoError(err) + require.NoError(blk.Verify(context.Background())) // Assert expected state. require.Contains(verifier.backend.blkIDToState, apricotBlk.ID()) @@ -454,8 +445,7 @@ func TestVerifierVisitAbortBlock(t *testing.T) { require.Equal(timestamp, gotBlkState.timestamp) // Visiting again should return nil without using dependencies. - err = blk.Verify(context.Background()) - require.NoError(err) + require.NoError(blk.Verify(context.Background())) } // Assert that a block with an unverified parent fails verification. 
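Note on the pattern applied throughout this patch: wherever an error is checked immediately and never reused, the call is inlined into require.NoError, and zero-value assertions use require.Zero / require.Empty instead of comparing against a literal 0. A minimal sketch of the resulting style, assuming hypothetical helpers doThing, countThings, and listThings (none of these names come from this repository):

	package example

	import (
		"testing"

		"github.com/stretchr/testify/require"
	)

	// Illustrative stand-ins for the functions under test.
	func doThing() error            { return nil }
	func countThings() (int, error) { return 0, nil }
	func listThings() []string      { return nil }

	func TestSketch(t *testing.T) {
		require := require.New(t)

		// The error is checked once and never reused: inline the call.
		require.NoError(doThing())

		// Keep the two-step form when other return values are used later.
		n, err := countThings()
		require.NoError(err)

		// Zero-value assertions, in place of require.Equal(0, n) and
		// require.Equal(0, len(listThings())).
		require.Zero(n)
		require.Empty(listThings())
	}

Inlining also keeps each err's scope as small as possible, so later require.NoError calls in the same test cannot accidentally pick up a stale err value.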
diff --git a/vms/platformvm/health_test.go b/vms/platformvm/health_test.go index 7a7d67b4bc3..40ef4142ccc 100644 --- a/vms/platformvm/health_test.go +++ b/vms/platformvm/health_test.go @@ -29,8 +29,7 @@ func TestHealthCheckPrimaryNetwork(t *testing.T) { }() genesisState, _ := defaultGenesis() for index, validator := range genesisState.Validators { - err := vm.Connected(context.Background(), validator.NodeID, version.CurrentApp) - require.NoError(err) + require.NoError(vm.Connected(context.Background(), validator.NodeID, version.CurrentApp)) details, err := vm.HealthCheck(context.Background()) if float64((index+1)*20) >= defaultMinConnectedStake*100 { require.NoError(err) @@ -72,8 +71,7 @@ func TestHealthCheckSubnet(t *testing.T) { testVdrCount := 4 for i := 0; i < testVdrCount; i++ { subnetVal := ids.GenerateTestNodeID() - err := subnetVdrs.Add(subnetVal, nil, ids.Empty, 100) - require.NoError(err) + require.NoError(subnetVdrs.Add(subnetVal, nil, ids.Empty, 100)) } ok := vm.Validators.Add(subnetID, subnetVdrs) require.True(ok) @@ -81,8 +79,7 @@ func TestHealthCheckSubnet(t *testing.T) { // connect to all primary network validators first genesisState, _ := defaultGenesis() for _, validator := range genesisState.Validators { - err := vm.Connected(context.Background(), validator.NodeID, version.CurrentApp) - require.NoError(err) + require.NoError(vm.Connected(context.Background(), validator.NodeID, version.CurrentApp)) } var expectedMinStake float64 if test.useDefault { @@ -94,8 +91,7 @@ func TestHealthCheckSubnet(t *testing.T) { } } for index, vdr := range subnetVdrs.List() { - err := vm.ConnectedSubnet(context.Background(), vdr.NodeID, subnetID) - require.NoError(err) + require.NoError(vm.ConnectedSubnet(context.Background(), vdr.NodeID, subnetID)) details, err := vm.HealthCheck(context.Background()) connectedPerc := float64((index + 1) * (100 / testVdrCount)) if connectedPerc >= expectedMinStake*100 { diff --git a/vms/platformvm/service_test.go b/vms/platformvm/service_test.go index 124b159ea29..158035ebfe8 100644 --- a/vms/platformvm/service_test.go +++ b/vms/platformvm/service_test.go @@ -121,21 +121,18 @@ func TestExportKey(t *testing.T) { require := require.New(t) jsonString := `{"username":"ScoobyUser","password":"ShaggyPassword1Zoinks!","address":"` + testAddress + `"}` args := ExportKeyArgs{} - err := stdjson.Unmarshal([]byte(jsonString), &args) - require.NoError(err) + require.NoError(stdjson.Unmarshal([]byte(jsonString), &args)) service, _ := defaultService(t) defaultAddress(t, service) service.vm.ctx.Lock.Lock() defer func() { - err := service.vm.Shutdown(context.Background()) - require.NoError(err) + require.NoError(service.vm.Shutdown(context.Background())) service.vm.ctx.Lock.Unlock() }() reply := ExportKeyReply{} - err = service.ExportKey(nil, &args, &reply) - require.NoError(err) + require.NoError(service.ExportKey(nil, &args, &reply)) require.Equal(testPrivateKey, reply.PrivateKey.Bytes()) } @@ -144,20 +141,17 @@ func TestImportKey(t *testing.T) { require := require.New(t) jsonString := `{"username":"ScoobyUser","password":"ShaggyPassword1Zoinks!","privateKey":"PrivateKey-ewoqjP7PxY4yr3iLTpLisriqt94hdyDFNgchSxGGztUrTXtNN"}` args := ImportKeyArgs{} - err := stdjson.Unmarshal([]byte(jsonString), &args) - require.NoError(err) + require.NoError(stdjson.Unmarshal([]byte(jsonString), &args)) service, _ := defaultService(t) service.vm.ctx.Lock.Lock() defer func() { - err := service.vm.Shutdown(context.Background()) - require.NoError(err) + 
require.NoError(service.vm.Shutdown(context.Background())) service.vm.ctx.Lock.Unlock() }() reply := api.JSONAddress{} - err = service.ImportKey(nil, &args, &reply) - require.NoError(err) + require.NoError(service.ImportKey(nil, &args, &reply)) require.Equal(testAddress, reply.Address) } @@ -168,8 +162,7 @@ func TestGetTxStatus(t *testing.T) { defaultAddress(t, service) service.vm.ctx.Lock.Lock() defer func() { - err := service.vm.Shutdown(context.Background()) - require.NoError(err) + require.NoError(service.vm.Shutdown(context.Background())) service.vm.ctx.Lock.Unlock() }() @@ -229,8 +222,7 @@ func TestGetTxStatus(t *testing.T) { arg = &GetTxStatusArgs{TxID: tx.ID()} resp GetTxStatusResponse ) - err = service.GetTxStatus(nil, arg, &resp) - require.NoError(err) + require.NoError(service.GetTxStatus(nil, arg, &resp)) require.Equal(status.Unknown, resp.Status) require.Zero(resp.Reason) @@ -240,22 +232,18 @@ func TestGetTxStatus(t *testing.T) { mutableSharedMemory.SharedMemory = sm - err = service.vm.Builder.AddUnverifiedTx(tx) - require.NoError(err) + require.NoError(service.vm.Builder.AddUnverifiedTx(tx)) block, err := service.vm.BuildBlock(context.Background()) require.NoError(err) blk := block.(*blockexecutor.Block) - err = blk.Verify(context.Background()) - require.NoError(err) + require.NoError(blk.Verify(context.Background())) - err = blk.Accept(context.Background()) - require.NoError(err) + require.NoError(blk.Accept(context.Background())) resp = GetTxStatusResponse{} // reset - err = service.GetTxStatus(nil, arg, &resp) - require.NoError(err) + require.NoError(service.GetTxStatus(nil, arg, &resp)) require.Equal(status.Committed, resp.Status) require.Zero(resp.Reason) } @@ -334,17 +322,14 @@ func TestGetTx(t *testing.T) { err = service.GetTx(nil, arg, &response) require.ErrorIs(err, database.ErrNotFound) // We haven't issued the tx yet - err = service.vm.Builder.AddUnverifiedTx(tx) - require.NoError(err) + require.NoError(service.vm.Builder.AddUnverifiedTx(tx)) block, err := service.vm.BuildBlock(context.Background()) require.NoError(err) - err = block.Verify(context.Background()) - require.NoError(err) + require.NoError(block.Verify(context.Background())) - err = block.Accept(context.Background()) - require.NoError(err) + require.NoError(block.Accept(context.Background())) if blk, ok := block.(snowman.OracleBlock); ok { // For proposal blocks, commit them options, err := blk.Options(context.Background()) @@ -354,16 +339,13 @@ func TestGetTx(t *testing.T) { commit := options[0].(*blockexecutor.Block) require.IsType(&blocks.BanffCommitBlock{}, commit.Block) - err := commit.Verify(context.Background()) - require.NoError(err) + require.NoError(commit.Verify(context.Background())) - err = commit.Accept(context.Background()) - require.NoError(err) + require.NoError(commit.Accept(context.Background())) } } - err = service.GetTx(nil, arg, &response) - require.NoError(err) + require.NoError(service.GetTx(nil, arg, &response)) switch encoding { case formatting.Hex: @@ -376,8 +358,7 @@ func TestGetTx(t *testing.T) { require.Equal(tx, response.Tx) } - err = service.vm.Shutdown(context.Background()) - require.NoError(err) + require.NoError(service.vm.Shutdown(context.Background())) service.vm.ctx.Lock.Unlock() }) } @@ -391,8 +372,7 @@ func TestGetBalance(t *testing.T) { defaultAddress(t, service) service.vm.ctx.Lock.Lock() defer func() { - err := service.vm.Shutdown(context.Background()) - require.NoError(err) + require.NoError(service.vm.Shutdown(context.Background())) 
service.vm.ctx.Lock.Unlock() }() @@ -587,8 +567,7 @@ func TestGetCurrentValidators(t *testing.T) { defaultAddress(t, service) service.vm.ctx.Lock.Lock() defer func() { - err := service.vm.Shutdown(context.Background()) - require.NoError(err) + require.NoError(service.vm.Shutdown(context.Background())) service.vm.ctx.Lock.Unlock() }() @@ -598,8 +577,7 @@ func TestGetCurrentValidators(t *testing.T) { args := GetCurrentValidatorsArgs{SubnetID: constants.PrimaryNetworkID} response := GetCurrentValidatorsReply{} - err := service.GetCurrentValidators(nil, &args, &response) - require.NoError(err) + require.NoError(service.GetCurrentValidators(nil, &args, &response)) require.Len(response.Validators, len(genesis.Validators)) for _, vdr := range genesis.Validators { @@ -643,13 +621,11 @@ func TestGetCurrentValidators(t *testing.T) { service.vm.state.PutCurrentDelegator(staker) service.vm.state.AddTx(delTx, status.Committed) - err = service.vm.state.Commit() - require.NoError(err) + require.NoError(service.vm.state.Commit()) // Call getCurrentValidators args = GetCurrentValidatorsArgs{SubnetID: constants.PrimaryNetworkID} - err = service.GetCurrentValidators(nil, &args, &response) - require.NoError(err) + require.NoError(service.GetCurrentValidators(nil, &args, &response)) require.Len(response.Validators, len(genesis.Validators)) // Make sure the delegator is there @@ -668,8 +644,7 @@ func TestGetCurrentValidators(t *testing.T) { NodeIDs: []ids.NodeID{vdr.NodeID}, } innerResponse := GetCurrentValidatorsReply{} - err = service.GetCurrentValidators(nil, &innerArgs, &innerResponse) - require.NoError(err) + require.NoError(service.GetCurrentValidators(nil, &innerArgs, &innerResponse)) require.Len(innerResponse.Validators, 1) innerVdr := innerResponse.Validators[0].(pchainapi.PermissionlessValidator) @@ -784,8 +759,7 @@ func TestGetBlock(t *testing.T) { Encoding: test.encoding, } response := api.GetBlockResponse{} - err = service.GetBlock(nil, &args, &response) - require.NoError(err) + require.NoError(service.GetBlock(nil, &args, &response)) switch { case test.encoding == formatting.JSON: diff --git a/vms/platformvm/state/validator_metadata_test.go b/vms/platformvm/state/validator_metadata_test.go index 09d4a681870..15fa1964952 100644 --- a/vms/platformvm/state/validator_metadata_test.go +++ b/vms/platformvm/state/validator_metadata_test.go @@ -46,8 +46,7 @@ func TestValidatorUptimes(t *testing.T) { // set uptime newUpDuration := testMetadata.UpDuration + 1 newLastUpdated := testMetadata.lastUpdated.Add(time.Hour) - err = state.SetUptime(nodeID, subnetID, newUpDuration, newLastUpdated) - require.NoError(err) + require.NoError(state.SetUptime(nodeID, subnetID, newUpDuration, newLastUpdated)) // get new uptime upDuration, lastUpdated, err = state.GetUptime(nodeID, subnetID) @@ -83,8 +82,7 @@ func TestWriteValidatorMetadata(t *testing.T) { primaryDB := memdb.New() subnetDB := memdb.New() // write empty uptimes - err := state.WriteValidatorMetadata(primaryDB, subnetDB) - require.NoError(err) + require.NoError(state.WriteValidatorMetadata(primaryDB, subnetDB)) // load uptime nodeID := ids.GenerateTestNodeID() @@ -98,8 +96,7 @@ func TestWriteValidatorMetadata(t *testing.T) { state.LoadValidatorMetadata(nodeID, subnetID, testUptimeReward) // write state, should not reflect to DB yet - err = state.WriteValidatorMetadata(primaryDB, subnetDB) - require.NoError(err) + require.NoError(state.WriteValidatorMetadata(primaryDB, subnetDB)) require.False(primaryDB.Has(testUptimeReward.txID[:])) 
require.False(subnetDB.Has(testUptimeReward.txID[:])) @@ -112,12 +109,10 @@ func TestWriteValidatorMetadata(t *testing.T) { // update uptimes newUpDuration := testUptimeReward.UpDuration + 1 newLastUpdated := testUptimeReward.lastUpdated.Add(time.Hour) - err = state.SetUptime(nodeID, subnetID, newUpDuration, newLastUpdated) - require.NoError(err) + require.NoError(state.SetUptime(nodeID, subnetID, newUpDuration, newLastUpdated)) // write uptimes, should reflect to subnet DB - err = state.WriteValidatorMetadata(primaryDB, subnetDB) - require.NoError(err) + require.NoError(state.WriteValidatorMetadata(primaryDB, subnetDB)) require.False(primaryDB.Has(testUptimeReward.txID[:])) require.True(subnetDB.Has(testUptimeReward.txID[:])) } @@ -149,8 +144,7 @@ func TestValidatorDelegateeRewards(t *testing.T) { // set delegatee reward newDelegateeReward := testMetadata.PotentialDelegateeReward + 100000 - err = state.SetDelegateeReward(subnetID, nodeID, newDelegateeReward) - require.NoError(err) + require.NoError(state.SetDelegateeReward(subnetID, nodeID, newDelegateeReward)) // get new delegatee reward delegateeReward, err = state.GetDelegateeReward(subnetID, nodeID) diff --git a/vms/platformvm/status/blockchain_status_test.go b/vms/platformvm/status/blockchain_status_test.go index 08216ad77f6..97b96badcb9 100644 --- a/vms/platformvm/status/blockchain_status_test.go +++ b/vms/platformvm/status/blockchain_status_test.go @@ -26,8 +26,7 @@ func TestBlockchainStatusJSON(t *testing.T) { require.NoError(err) var parsedStatus BlockchainStatus - err = json.Unmarshal(statusJSON, &parsedStatus) - require.NoError(err) + require.NoError(json.Unmarshal(statusJSON, &parsedStatus)) require.Equal(status, parsedStatus) } @@ -39,8 +38,7 @@ func TestBlockchainStatusJSON(t *testing.T) { { status := Validating - err := json.Unmarshal([]byte("null"), &status) - require.NoError(err) + require.NoError(json.Unmarshal([]byte("null"), &status)) require.Equal(Validating, status) } diff --git a/vms/platformvm/status/status_test.go b/vms/platformvm/status/status_test.go index 78670a01fe4..59316f98372 100644 --- a/vms/platformvm/status/status_test.go +++ b/vms/platformvm/status/status_test.go @@ -26,8 +26,7 @@ func TestStatusJSON(t *testing.T) { require.NoError(err) var parsedStatus Status - err = json.Unmarshal(statusJSON, &parsedStatus) - require.NoError(err) + require.NoError(json.Unmarshal(statusJSON, &parsedStatus)) require.Equal(status, parsedStatus) } @@ -39,8 +38,7 @@ func TestStatusJSON(t *testing.T) { { status := Committed - err := json.Unmarshal([]byte("null"), &status) - require.NoError(err) + require.NoError(json.Unmarshal([]byte("null"), &status)) require.Equal(Committed, status) } diff --git a/vms/platformvm/txs/executor/advance_time_test.go b/vms/platformvm/txs/executor/advance_time_test.go index 1a8a26accd5..4e658a623dc 100644 --- a/vms/platformvm/txs/executor/advance_time_test.go +++ b/vms/platformvm/txs/executor/advance_time_test.go @@ -141,8 +141,7 @@ func TestAdvanceTimeTxTimestampTooLate(t *testing.T) { require.ErrorIs(err, ErrChildBlockAfterStakerChangeTime) } - err = shutdownEnvironment(env) - require.NoError(err) + require.NoError(shutdownEnvironment(env)) // Case: Timestamp is after next validator end time env = newEnvironment(false /*=postBanff*/, false /*=postCortina*/) @@ -589,8 +588,7 @@ func TestTrackedSubnet(t *testing.T) { env.state.PutPendingValidator(staker) env.state.AddTx(tx, status.Committed) env.state.SetHeight(dummyHeight) - err = env.state.Commit() - require.NoError(err) + 
require.NoError(env.state.Commit()) // Advance time to the staker's start time. env.clk.Set(subnetVdr1StartTime) @@ -609,8 +607,7 @@ func TestTrackedSubnet(t *testing.T) { Backend: &env.backend, Tx: tx, } - err = tx.Unsigned.Visit(&executor) - require.NoError(err) + require.NoError(tx.Unsigned.Visit(&executor)) require.NoError(executor.OnCommitState.Apply(env.state)) @@ -659,8 +656,7 @@ func TestAdvanceTimeTxDelegatorStakerWeight(t *testing.T) { Backend: &env.backend, Tx: tx, } - err = tx.Unsigned.Visit(&executor) - require.NoError(err) + require.NoError(tx.Unsigned.Visit(&executor)) require.NoError(executor.OnCommitState.Apply(env.state)) @@ -719,8 +715,7 @@ func TestAdvanceTimeTxDelegatorStakerWeight(t *testing.T) { Backend: &env.backend, Tx: tx, } - err = tx.Unsigned.Visit(&executor) - require.NoError(err) + require.NoError(tx.Unsigned.Visit(&executor)) require.NoError(executor.OnCommitState.Apply(env.state)) @@ -764,8 +759,7 @@ func TestAdvanceTimeTxDelegatorStakers(t *testing.T) { Backend: &env.backend, Tx: tx, } - err = tx.Unsigned.Visit(&executor) - require.NoError(err) + require.NoError(tx.Unsigned.Visit(&executor)) require.NoError(executor.OnCommitState.Apply(env.state)) @@ -819,8 +813,7 @@ func TestAdvanceTimeTxDelegatorStakers(t *testing.T) { Backend: &env.backend, Tx: tx, } - err = tx.Unsigned.Visit(&executor) - require.NoError(err) + require.NoError(tx.Unsigned.Visit(&executor)) require.NoError(executor.OnCommitState.Apply(env.state)) @@ -858,8 +851,7 @@ func TestAdvanceTimeTxInitiallyPrefersCommit(t *testing.T) { Backend: &env.backend, Tx: tx, } - err = tx.Unsigned.Visit(&executor) - require.NoError(err) + require.NoError(tx.Unsigned.Visit(&executor)) require.True(executor.PrefersCommit, "should prefer to commit this tx because its proposed timestamp it's within sync bound") } diff --git a/vms/platformvm/txs/executor/create_chain_test.go b/vms/platformvm/txs/executor/create_chain_test.go index 4ca3ae84270..97ccb8d6013 100644 --- a/vms/platformvm/txs/executor/create_chain_test.go +++ b/vms/platformvm/txs/executor/create_chain_test.go @@ -161,8 +161,7 @@ func TestCreateChainTxValid(t *testing.T) { State: stateDiff, Tx: tx, } - err = tx.Unsigned.Visit(&executor) - require.NoError(err) + require.NoError(tx.Unsigned.Visit(&executor)) } func TestCreateChainTxAP3FeeChange(t *testing.T) { @@ -224,8 +223,7 @@ func TestCreateChainTxAP3FeeChange(t *testing.T) { SubnetAuth: subnetAuth, } tx := &txs.Tx{Unsigned: utx} - err = tx.Sign(txs.Codec, signers) - require.NoError(err) + require.NoError(tx.Sign(txs.Codec, signers)) stateDiff, err := state.NewDiff(lastAcceptedID, env) require.NoError(err) diff --git a/vms/platformvm/txs/executor/export_test.go b/vms/platformvm/txs/executor/export_test.go index 547f933c307..15f8b87fc37 100644 --- a/vms/platformvm/txs/executor/export_test.go +++ b/vms/platformvm/txs/executor/export_test.go @@ -77,8 +77,7 @@ func TestNewExportTx(t *testing.T) { StateVersions: env, Tx: tx, } - err = tx.Unsigned.Visit(&verifier) - require.NoError(err) + require.NoError(tx.Unsigned.Visit(&verifier)) }) } } diff --git a/vms/platformvm/txs/executor/import_test.go b/vms/platformvm/txs/executor/import_test.go index 7a17dbd7b04..3300751b6fa 100644 --- a/vms/platformvm/txs/executor/import_test.go +++ b/vms/platformvm/txs/executor/import_test.go @@ -199,8 +199,7 @@ func TestNewImportTx(t *testing.T) { StateVersions: env, Tx: tx, } - err = tx.Unsigned.Visit(&verifier) - require.NoError(err) + require.NoError(tx.Unsigned.Visit(&verifier)) }) } } diff --git 
a/vms/platformvm/txs/executor/proposal_tx_executor_test.go b/vms/platformvm/txs/executor/proposal_tx_executor_test.go index 458aceb1776..517251e0206 100644 --- a/vms/platformvm/txs/executor/proposal_tx_executor_test.go +++ b/vms/platformvm/txs/executor/proposal_tx_executor_test.go @@ -352,8 +352,7 @@ func TestProposalTxExecuteAddSubnetValidator(t *testing.T) { Backend: &env.backend, Tx: tx, } - err = tx.Unsigned.Visit(&executor) - require.NoError(err) + require.NoError(tx.Unsigned.Visit(&executor)) } // Add a validator to pending validator set of primary network @@ -417,8 +416,7 @@ func TestProposalTxExecuteAddSubnetValidator(t *testing.T) { env.state.AddTx(addDSTx, status.Committed) dummyHeight := uint64(1) env.state.SetHeight(dummyHeight) - err = env.state.Commit() - require.NoError(err) + require.NoError(env.state.Commit()) // Node with ID key.PublicKey().Address() now a pending validator for primary network @@ -508,8 +506,7 @@ func TestProposalTxExecuteAddSubnetValidator(t *testing.T) { Backend: &env.backend, Tx: tx, } - err = tx.Unsigned.Visit(&executor) - require.NoError(err) + require.NoError(tx.Unsigned.Visit(&executor)) } // Case: Proposed validator start validating at/before current timestamp @@ -571,8 +568,7 @@ func TestProposalTxExecuteAddSubnetValidator(t *testing.T) { env.state.PutCurrentValidator(staker) env.state.AddTx(subnetTx, status.Committed) env.state.SetHeight(dummyHeight) - err = env.state.Commit() - require.NoError(err) + require.NoError(env.state.Commit()) { // Node with ID nodeIDKey.PublicKey().Address() now validating subnet with ID testSubnet1.ID @@ -605,8 +601,7 @@ func TestProposalTxExecuteAddSubnetValidator(t *testing.T) { env.state.DeleteCurrentValidator(staker) env.state.SetHeight(dummyHeight) - err = env.state.Commit() - require.NoError(err) + require.NoError(env.state.Commit()) { // Case: Too few signatures @@ -702,8 +697,7 @@ func TestProposalTxExecuteAddSubnetValidator(t *testing.T) { env.state.PutCurrentValidator(staker) env.state.AddTx(tx, status.Committed) env.state.SetHeight(dummyHeight) - err = env.state.Commit() - require.NoError(err) + require.NoError(env.state.Commit()) onCommitState, err := state.NewDiff(lastAcceptedID, env) require.NoError(err) @@ -847,8 +841,7 @@ func TestProposalTxExecuteAddValidator(t *testing.T) { env.state.AddTx(tx, status.Committed) dummyHeight := uint64(1) env.state.SetHeight(dummyHeight) - err = env.state.Commit() - require.NoError(err) + require.NoError(env.state.Commit()) onCommitState, err := state.NewDiff(lastAcceptedID, env) require.NoError(err) diff --git a/vms/platformvm/txs/executor/reward_validator_test.go b/vms/platformvm/txs/executor/reward_validator_test.go index 9434a4ab147..3e1ce2162ff 100644 --- a/vms/platformvm/txs/executor/reward_validator_test.go +++ b/vms/platformvm/txs/executor/reward_validator_test.go @@ -308,8 +308,7 @@ func TestRewardDelegatorTxExecuteOnCommitPreDelegateeDeferral(t *testing.T) { Backend: &env.backend, Tx: tx, } - err = tx.Unsigned.Visit(&txExecutor) - require.NoError(err) + require.NoError(tx.Unsigned.Visit(&txExecutor)) vdrDestSet := set.Set[ids.ShortID]{} vdrDestSet.Add(vdrRewardAddress) @@ -446,8 +445,7 @@ func TestRewardDelegatorTxExecuteOnCommitPostDelegateeDeferral(t *testing.T) { Backend: &env.backend, Tx: tx, } - err = tx.Unsigned.Visit(&txExecutor) - require.NoError(err) + require.NoError(tx.Unsigned.Visit(&txExecutor)) // The delegator should be rewarded if the ProposalTx is committed. 
Since the // delegatee's share is 25%, we expect the delegator to receive 75% of the reward. @@ -809,8 +807,7 @@ func TestRewardDelegatorTxExecuteOnAbort(t *testing.T) { Backend: &env.backend, Tx: tx, } - err = tx.Unsigned.Visit(&txExecutor) - require.NoError(err) + require.NoError(tx.Unsigned.Visit(&txExecutor)) vdrDestSet := set.Set[ids.ShortID]{} vdrDestSet.Add(vdrRewardAddress) diff --git a/vms/platformvm/txs/executor/standard_tx_executor_test.go b/vms/platformvm/txs/executor/standard_tx_executor_test.go index c88358d7c8e..06df97f05f4 100644 --- a/vms/platformvm/txs/executor/standard_tx_executor_test.go +++ b/vms/platformvm/txs/executor/standard_tx_executor_test.go @@ -438,8 +438,7 @@ func TestStandardTxExecutorAddSubnetValidator(t *testing.T) { State: onAcceptState, Tx: tx, } - err = tx.Unsigned.Visit(&executor) - require.NoError(err) + require.NoError(tx.Unsigned.Visit(&executor)) } // Add a validator to pending validator set of primary network @@ -500,8 +499,7 @@ func TestStandardTxExecutorAddSubnetValidator(t *testing.T) { env.state.AddTx(addDSTx, status.Committed) dummyHeight := uint64(1) env.state.SetHeight(dummyHeight) - err = env.state.Commit() - require.NoError(err) + require.NoError(env.state.Commit()) // Node with ID key.PublicKey().Address() now a pending validator for primary network @@ -578,8 +576,7 @@ func TestStandardTxExecutorAddSubnetValidator(t *testing.T) { State: onAcceptState, Tx: tx, } - err = tx.Unsigned.Visit(&executor) - require.NoError(err) + require.NoError(tx.Unsigned.Visit(&executor)) } // Case: Proposed validator start validating at/before current timestamp @@ -637,8 +634,7 @@ func TestStandardTxExecutorAddSubnetValidator(t *testing.T) { env.state.PutCurrentValidator(staker) env.state.AddTx(subnetTx, status.Committed) env.state.SetHeight(dummyHeight) - err = env.state.Commit() - require.NoError(err) + require.NoError(env.state.Commit()) { // Node with ID nodeIDKey.PublicKey().Address() now validating subnet with ID testSubnet1.ID @@ -668,8 +664,7 @@ func TestStandardTxExecutorAddSubnetValidator(t *testing.T) { env.state.DeleteCurrentValidator(staker) env.state.SetHeight(dummyHeight) - err = env.state.Commit() - require.NoError(err) + require.NoError(env.state.Commit()) { // Case: Duplicate signatures @@ -793,8 +788,7 @@ func TestStandardTxExecutorAddSubnetValidator(t *testing.T) { env.state.PutCurrentValidator(staker) env.state.AddTx(tx, status.Committed) env.state.SetHeight(dummyHeight) - err = env.state.Commit() - require.NoError(err) + require.NoError(env.state.Commit()) onAcceptState, err := state.NewDiff(lastAcceptedID, env) require.NoError(err) diff --git a/vms/platformvm/txs/txheap/by_end_time_test.go b/vms/platformvm/txs/txheap/by_end_time_test.go index 05995683de9..33ddc3cc3d1 100644 --- a/vms/platformvm/txs/txheap/by_end_time_test.go +++ b/vms/platformvm/txs/txheap/by_end_time_test.go @@ -30,8 +30,7 @@ func TestByStopTime(t *testing.T) { RewardsOwner: &secp256k1fx.OutputOwners{}, } tx0 := &txs.Tx{Unsigned: utx0} - err := tx0.Initialize(txs.Codec) - require.NoError(err) + require.NoError(tx0.Initialize(txs.Codec)) utx1 := &txs.AddValidatorTx{ Validator: txs.Validator{ @@ -42,8 +41,7 @@ func TestByStopTime(t *testing.T) { RewardsOwner: &secp256k1fx.OutputOwners{}, } tx1 := &txs.Tx{Unsigned: utx1} - err = tx1.Initialize(txs.Codec) - require.NoError(err) + require.NoError(tx1.Initialize(txs.Codec)) utx2 := &txs.AddValidatorTx{ Validator: txs.Validator{ @@ -54,8 +52,7 @@ func TestByStopTime(t *testing.T) { RewardsOwner: &secp256k1fx.OutputOwners{}, } 
tx2 := &txs.Tx{Unsigned: utx2} - err = tx2.Initialize(txs.Codec) - require.NoError(err) + require.NoError(tx2.Initialize(txs.Codec)) txHeap.Add(tx2) require.Equal(utx2.EndTime(), txHeap.Timestamp()) diff --git a/vms/platformvm/txs/txheap/by_start_time_test.go b/vms/platformvm/txs/txheap/by_start_time_test.go index fe9180d0c15..164e2ec35e5 100644 --- a/vms/platformvm/txs/txheap/by_start_time_test.go +++ b/vms/platformvm/txs/txheap/by_start_time_test.go @@ -30,8 +30,7 @@ func TestByStartTime(t *testing.T) { RewardsOwner: &secp256k1fx.OutputOwners{}, } tx0 := &txs.Tx{Unsigned: utx0} - err := tx0.Initialize(txs.Codec) - require.NoError(err) + require.NoError(tx0.Initialize(txs.Codec)) utx1 := &txs.AddValidatorTx{ Validator: txs.Validator{ @@ -42,8 +41,7 @@ func TestByStartTime(t *testing.T) { RewardsOwner: &secp256k1fx.OutputOwners{}, } tx1 := &txs.Tx{Unsigned: utx1} - err = tx1.Initialize(txs.Codec) - require.NoError(err) + require.NoError(tx1.Initialize(txs.Codec)) utx2 := &txs.AddValidatorTx{ Validator: txs.Validator{ @@ -54,8 +52,7 @@ func TestByStartTime(t *testing.T) { RewardsOwner: &secp256k1fx.OutputOwners{}, } tx2 := &txs.Tx{Unsigned: utx2} - err = tx2.Initialize(txs.Codec) - require.NoError(err) + require.NoError(tx2.Initialize(txs.Codec)) txHeap.Add(tx2) require.Equal(utx2.EndTime(), txHeap.Timestamp()) diff --git a/vms/platformvm/vm_regression_test.go b/vms/platformvm/vm_regression_test.go index d8bedcd7853..2a4aeac748d 100644 --- a/vms/platformvm/vm_regression_test.go +++ b/vms/platformvm/vm_regression_test.go @@ -205,8 +205,7 @@ func TestAddDelegatorTxHeapCorruption(t *testing.T) { vm.ctx.Lock.Lock() defer func() { - err := vm.Shutdown(context.Background()) - require.NoError(err) + require.NoError(vm.Shutdown(context.Background())) vm.ctx.Lock.Unlock() }() @@ -231,8 +230,7 @@ func TestAddDelegatorTxHeapCorruption(t *testing.T) { require.NoError(err) // issue the add validator tx - err = vm.Builder.AddUnverifiedTx(addValidatorTx) - require.NoError(err) + require.NoError(vm.Builder.AddUnverifiedTx(addValidatorTx)) // trigger block creation for the validator tx addValidatorBlock, err := vm.Builder.BuildBlock(context.Background()) @@ -254,8 +252,7 @@ func TestAddDelegatorTxHeapCorruption(t *testing.T) { require.NoError(err) // issue the first add delegator tx - err = vm.Builder.AddUnverifiedTx(addFirstDelegatorTx) - require.NoError(err) + require.NoError(vm.Builder.AddUnverifiedTx(addFirstDelegatorTx)) // trigger block creation for the first add delegator tx addFirstDelegatorBlock, err := vm.Builder.BuildBlock(context.Background()) @@ -277,8 +274,7 @@ func TestAddDelegatorTxHeapCorruption(t *testing.T) { require.NoError(err) // issue the second add delegator tx - err = vm.Builder.AddUnverifiedTx(addSecondDelegatorTx) - require.NoError(err) + require.NoError(vm.Builder.AddUnverifiedTx(addSecondDelegatorTx)) // trigger block creation for the second add delegator tx addSecondDelegatorBlock, err := vm.Builder.BuildBlock(context.Background()) @@ -300,8 +296,7 @@ func TestAddDelegatorTxHeapCorruption(t *testing.T) { require.NoError(err) // issue the third add delegator tx - err = vm.Builder.AddUnverifiedTx(addThirdDelegatorTx) - require.NoError(err) + require.NoError(vm.Builder.AddUnverifiedTx(addThirdDelegatorTx)) // trigger block creation for the third add delegator tx addThirdDelegatorBlock, err := vm.Builder.BuildBlock(context.Background()) @@ -323,8 +318,7 @@ func TestAddDelegatorTxHeapCorruption(t *testing.T) { require.NoError(err) // issue the fourth add delegator tx - err = 
vm.Builder.AddUnverifiedTx(addFourthDelegatorTx) - require.NoError(err) + require.NoError(vm.Builder.AddUnverifiedTx(addFourthDelegatorTx)) // trigger block creation for the fourth add delegator tx addFourthDelegatorBlock, err := vm.Builder.BuildBlock(context.Background()) @@ -471,8 +465,7 @@ func TestRejectedStateRegressionInvalidValidatorTimestamp(t *testing.T) { vm, baseDB, mutableSharedMemory := defaultVM() vm.ctx.Lock.Lock() defer func() { - err := vm.Shutdown(context.Background()) - require.NoError(err) + require.NoError(vm.Shutdown(context.Background())) vm.ctx.Lock.Unlock() }() @@ -515,8 +508,7 @@ func TestRejectedStateRegressionInvalidValidatorTimestamp(t *testing.T) { require.NoError(err) addValidatorStandardBlk := vm.manager.NewBlock(statelessBlk) - err = addValidatorStandardBlk.Verify(context.Background()) - require.NoError(err) + require.NoError(addValidatorStandardBlk.Verify(context.Background())) // Verify that the new validator now in pending validator set { @@ -616,8 +608,7 @@ func TestRejectedStateRegressionInvalidValidatorTimestamp(t *testing.T) { // Because the shared memory UTXO has now been populated, the block should // pass verification. - err = importBlk.Verify(context.Background()) - require.NoError(err) + require.NoError(importBlk.Verify(context.Background())) // The status shouldn't have been changed during a successful verification. importBlkStatus = importBlk.Status() @@ -641,8 +632,7 @@ func TestRejectedStateRegressionInvalidValidatorTimestamp(t *testing.T) { require.NoError(err) advanceTimeStandardBlk := vm.manager.NewBlock(statelessAdvanceTimeStandardBlk) - err = advanceTimeStandardBlk.Verify(context.Background()) - require.NoError(err) + require.NoError(advanceTimeStandardBlk.Verify(context.Background())) // Accept all the blocks allBlocks := []snowman.Block{ @@ -651,8 +641,7 @@ func TestRejectedStateRegressionInvalidValidatorTimestamp(t *testing.T) { advanceTimeStandardBlk, } for _, blk := range allBlocks { - err = blk.Accept(context.Background()) - require.NoError(err) + require.NoError(blk.Accept(context.Background())) status := blk.Status() require.Equal(choices.Accepted, status) @@ -693,8 +682,7 @@ func TestRejectedStateRegressionInvalidValidatorReward(t *testing.T) { vm, baseDB, mutableSharedMemory := defaultVM() vm.ctx.Lock.Lock() defer func() { - err := vm.Shutdown(context.Background()) - require.NoError(err) + require.NoError(vm.Shutdown(context.Background())) vm.ctx.Lock.Unlock() }() @@ -736,8 +724,7 @@ func TestRejectedStateRegressionInvalidValidatorReward(t *testing.T) { require.NoError(err) addValidatorStandardBlk0 := vm.manager.NewBlock(statelessAddValidatorStandardBlk0) - err = addValidatorStandardBlk0.Verify(context.Background()) - require.NoError(err) + require.NoError(addValidatorStandardBlk0.Verify(context.Background())) // Verify that first new validator now in pending validator set { @@ -766,8 +753,7 @@ func TestRejectedStateRegressionInvalidValidatorReward(t *testing.T) { require.NoError(err) advanceTimeStandardBlk0 := vm.manager.NewBlock(statelessAdvanceTimeStandardBlk0) - err = advanceTimeStandardBlk0.Verify(context.Background()) - require.NoError(err) + require.NoError(advanceTimeStandardBlk0.Verify(context.Background())) // Verify that the first new validator is now in the current validator set. { @@ -872,8 +858,7 @@ func TestRejectedStateRegressionInvalidValidatorReward(t *testing.T) { // Because the shared memory UTXO has now been populated, the block should // pass verification. 
- err = importBlk.Verify(context.Background()) - require.NoError(err) + require.NoError(importBlk.Verify(context.Background())) // The status shouldn't have been changed during a successful verification. importBlkStatus = importBlk.Status() @@ -912,8 +897,7 @@ func TestRejectedStateRegressionInvalidValidatorReward(t *testing.T) { addValidatorStandardBlk1 := vm.manager.NewBlock(statelessAddValidatorStandardBlk1) - err = addValidatorStandardBlk1.Verify(context.Background()) - require.NoError(err) + require.NoError(addValidatorStandardBlk1.Verify(context.Background())) // Verify that the second new validator now in pending validator set { @@ -942,8 +926,7 @@ func TestRejectedStateRegressionInvalidValidatorReward(t *testing.T) { require.NoError(err) advanceTimeStandardBlk1 := vm.manager.NewBlock(statelessAdvanceTimeStandardBlk1) - err = advanceTimeStandardBlk1.Verify(context.Background()) - require.NoError(err) + require.NoError(advanceTimeStandardBlk1.Verify(context.Background())) // Verify that the second new validator is now in the current validator set. { @@ -969,8 +952,7 @@ func TestRejectedStateRegressionInvalidValidatorReward(t *testing.T) { advanceTimeStandardBlk1, } for _, blk := range allBlocks { - err = blk.Accept(context.Background()) - require.NoError(err) + require.NoError(blk.Accept(context.Background())) status := blk.Status() require.Equal(choices.Accepted, status) @@ -1020,8 +1002,7 @@ func TestValidatorSetAtCacheOverwriteRegression(t *testing.T) { vm, _, _ := defaultVM() vm.ctx.Lock.Lock() defer func() { - err := vm.Shutdown(context.Background()) - require.NoError(err) + require.NoError(vm.Shutdown(context.Background())) vm.ctx.Lock.Unlock() }() @@ -1168,8 +1149,7 @@ func TestAddDelegatorTxAddBeforeRemove(t *testing.T) { vm.ctx.Lock.Lock() defer func() { - err := vm.Shutdown(context.Background()) - require.NoError(err) + require.NoError(vm.Shutdown(context.Background())) vm.ctx.Lock.Unlock() }() @@ -1194,8 +1174,7 @@ func TestAddDelegatorTxAddBeforeRemove(t *testing.T) { require.NoError(err) // issue the add validator tx - err = vm.Builder.AddUnverifiedTx(addValidatorTx) - require.NoError(err) + require.NoError(vm.Builder.AddUnverifiedTx(addValidatorTx)) // trigger block creation for the validator tx addValidatorBlock, err := vm.Builder.BuildBlock(context.Background()) @@ -1217,8 +1196,7 @@ func TestAddDelegatorTxAddBeforeRemove(t *testing.T) { require.NoError(err) // issue the first add delegator tx - err = vm.Builder.AddUnverifiedTx(addFirstDelegatorTx) - require.NoError(err) + require.NoError(vm.Builder.AddUnverifiedTx(addFirstDelegatorTx)) // trigger block creation for the first add delegator tx addFirstDelegatorBlock, err := vm.Builder.BuildBlock(context.Background()) @@ -1255,8 +1233,7 @@ func TestRemovePermissionedValidatorDuringPendingToCurrentTransitionNotTracked(t vm.ctx.Lock.Lock() defer func() { - err := vm.Shutdown(context.Background()) - require.NoError(err) + require.NoError(vm.Shutdown(context.Background())) vm.ctx.Lock.Unlock() }() @@ -1279,8 +1256,7 @@ func TestRemovePermissionedValidatorDuringPendingToCurrentTransitionNotTracked(t ) require.NoError(err) - err = vm.Builder.AddUnverifiedTx(addValidatorTx) - require.NoError(err) + require.NoError(vm.Builder.AddUnverifiedTx(addValidatorTx)) // trigger block creation for the validator tx addValidatorBlock, err := vm.Builder.BuildBlock(context.Background()) @@ -1297,8 +1273,7 @@ func TestRemovePermissionedValidatorDuringPendingToCurrentTransitionNotTracked(t ) require.NoError(err) - err = 
vm.Builder.AddUnverifiedTx(createSubnetTx) - require.NoError(err) + require.NoError(vm.Builder.AddUnverifiedTx(createSubnetTx)) // trigger block creation for the subnet tx createSubnetBlock, err := vm.Builder.BuildBlock(context.Background()) @@ -1318,8 +1293,7 @@ func TestRemovePermissionedValidatorDuringPendingToCurrentTransitionNotTracked(t ) require.NoError(err) - err = vm.Builder.AddUnverifiedTx(addSubnetValidatorTx) - require.NoError(err) + require.NoError(vm.Builder.AddUnverifiedTx(addSubnetValidatorTx)) // trigger block creation for the validator tx addSubnetValidatorBlock, err := vm.Builder.BuildBlock(context.Background()) @@ -1348,8 +1322,7 @@ func TestRemovePermissionedValidatorDuringPendingToCurrentTransitionNotTracked(t // validator set into the current validator set. vm.clock.Set(validatorStartTime) - err = vm.Builder.AddUnverifiedTx(removeSubnetValidatorTx) - require.NoError(err) + require.NoError(vm.Builder.AddUnverifiedTx(removeSubnetValidatorTx)) // trigger block creation for the validator tx removeSubnetValidatorBlock, err := vm.Builder.BuildBlock(context.Background()) @@ -1377,8 +1350,7 @@ func TestRemovePermissionedValidatorDuringPendingToCurrentTransitionTracked(t *t vm.ctx.Lock.Lock() defer func() { - err := vm.Shutdown(context.Background()) - require.NoError(err) + require.NoError(vm.Shutdown(context.Background())) vm.ctx.Lock.Unlock() }() @@ -1401,8 +1373,7 @@ func TestRemovePermissionedValidatorDuringPendingToCurrentTransitionTracked(t *t ) require.NoError(err) - err = vm.Builder.AddUnverifiedTx(addValidatorTx) - require.NoError(err) + require.NoError(vm.Builder.AddUnverifiedTx(addValidatorTx)) // trigger block creation for the validator tx addValidatorBlock, err := vm.Builder.BuildBlock(context.Background()) @@ -1419,8 +1390,7 @@ func TestRemovePermissionedValidatorDuringPendingToCurrentTransitionTracked(t *t ) require.NoError(err) - err = vm.Builder.AddUnverifiedTx(createSubnetTx) - require.NoError(err) + require.NoError(vm.Builder.AddUnverifiedTx(createSubnetTx)) // trigger block creation for the subnet tx createSubnetBlock, err := vm.Builder.BuildBlock(context.Background()) @@ -1431,8 +1401,7 @@ func TestRemovePermissionedValidatorDuringPendingToCurrentTransitionTracked(t *t vm.TrackedSubnets.Add(createSubnetTx.ID()) subnetValidators := validators.NewSet() - err = vm.state.ValidatorSet(createSubnetTx.ID(), subnetValidators) - require.NoError(err) + require.NoError(vm.state.ValidatorSet(createSubnetTx.ID(), subnetValidators)) added := vm.Validators.Add(createSubnetTx.ID(), subnetValidators) require.True(added) @@ -1448,8 +1417,7 @@ func TestRemovePermissionedValidatorDuringPendingToCurrentTransitionTracked(t *t ) require.NoError(err) - err = vm.Builder.AddUnverifiedTx(addSubnetValidatorTx) - require.NoError(err) + require.NoError(vm.Builder.AddUnverifiedTx(addSubnetValidatorTx)) // trigger block creation for the validator tx addSubnetValidatorBlock, err := vm.Builder.BuildBlock(context.Background()) @@ -1470,8 +1438,7 @@ func TestRemovePermissionedValidatorDuringPendingToCurrentTransitionTracked(t *t // validator set into the current validator set. 
vm.clock.Set(validatorStartTime) - err = vm.Builder.AddUnverifiedTx(removeSubnetValidatorTx) - require.NoError(err) + require.NoError(vm.Builder.AddUnverifiedTx(removeSubnetValidatorTx)) // trigger block creation for the validator tx removeSubnetValidatorBlock, err := vm.Builder.BuildBlock(context.Background()) diff --git a/vms/platformvm/vm_test.go b/vms/platformvm/vm_test.go index fca28a74d58..73d62845458 100644 --- a/vms/platformvm/vm_test.go +++ b/vms/platformvm/vm_test.go @@ -308,8 +308,7 @@ func BuildGenesisTestWithArgs(t *testing.T, args *api.BuildGenesisArgs) (*api.Bu buildGenesisResponse := api.BuildGenesisReply{} platformvmSS := api.StaticService{} - err := platformvmSS.BuildGenesis(nil, &buildGenesisArgs, &buildGenesisResponse) - require.NoError(err) + require.NoError(platformvmSS.BuildGenesis(nil, &buildGenesisArgs, &buildGenesisResponse)) genesisBytes, err := formatting.Decode(buildGenesisResponse.Encoding, buildGenesisResponse.Bytes) require.NoError(err) @@ -470,8 +469,7 @@ func GenesisVMWithArgs(t *testing.T, args *api.BuildGenesisArgs) ([]byte, chan c ) require.NoError(err) - err = vm.SetState(context.Background(), snow.NormalOp) - require.NoError(err) + require.NoError(vm.SetState(context.Background(), snow.NormalOp)) // Create a subnet and store it in testSubnet1 testSubnet1, err = vm.txBuilder.NewCreateSubnetTx( @@ -483,17 +481,14 @@ func GenesisVMWithArgs(t *testing.T, args *api.BuildGenesisArgs) ([]byte, chan c ) require.NoError(err) - err = vm.Builder.AddUnverifiedTx(testSubnet1) - require.NoError(err) + require.NoError(vm.Builder.AddUnverifiedTx(testSubnet1)) blk, err := vm.Builder.BuildBlock(context.Background()) require.NoError(err) - err = blk.Verify(context.Background()) - require.NoError(err) + require.NoError(blk.Verify(context.Background())) - err = blk.Accept(context.Background()) - require.NoError(err) + require.NoError(blk.Accept(context.Background())) return genesisBytes, msgChan, vm, m } @@ -504,8 +499,7 @@ func TestGenesis(t *testing.T) { vm, _, _ := defaultVM() vm.ctx.Lock.Lock() defer func() { - err := vm.Shutdown(context.Background()) - require.NoError(err) + require.NoError(vm.Shutdown(context.Background())) vm.ctx.Lock.Unlock() }() @@ -613,8 +607,7 @@ func TestInvalidAddValidatorCommit(t *testing.T) { vm, _, _ := defaultVM() vm.ctx.Lock.Lock() defer func() { - err := vm.Shutdown(context.Background()) - require.NoError(err) + require.NoError(vm.Shutdown(context.Background())) vm.ctx.Lock.Unlock() }() @@ -712,8 +705,7 @@ func TestAddValidatorInvalidNotReissued(t *testing.T) { vm, _, _ := defaultVM() vm.ctx.Lock.Lock() defer func() { - err := vm.Shutdown(context.Background()) - require.NoError(err) + require.NoError(vm.Shutdown(context.Background())) vm.ctx.Lock.Unlock() }() @@ -1116,8 +1108,7 @@ func TestUnneededBuildBlock(t *testing.T) { vm, _, _ := defaultVM() vm.ctx.Lock.Lock() defer func() { - err := vm.Shutdown(context.Background()) - require.NoError(err) + require.NoError(vm.Shutdown(context.Background())) vm.ctx.Lock.Unlock() }() _, err := vm.Builder.BuildBlock(context.Background()) @@ -1130,8 +1121,7 @@ func TestCreateChain(t *testing.T) { vm, _, _ := defaultVM() vm.ctx.Lock.Lock() defer func() { - err := vm.Shutdown(context.Background()) - require.NoError(err) + require.NoError(vm.Shutdown(context.Background())) vm.ctx.Lock.Unlock() }() @@ -1146,17 +1136,14 @@ func TestCreateChain(t *testing.T) { ) require.NoError(err) - err = vm.Builder.AddUnverifiedTx(tx) - require.NoError(err) + require.NoError(vm.Builder.AddUnverifiedTx(tx)) blk, err := 
vm.Builder.BuildBlock(context.Background()) require.NoError(err) // should contain proposal to create chain - err = blk.Verify(context.Background()) - require.NoError(err) + require.NoError(blk.Verify(context.Background())) - err = blk.Accept(context.Background()) - require.NoError(err) + require.NoError(blk.Accept(context.Background())) _, txStatus, err := vm.state.GetTx(tx.ID()) require.NoError(err) @@ -1296,8 +1283,7 @@ func TestAtomicImport(t *testing.T) { vm, baseDB, mutableSharedMemory := defaultVM() vm.ctx.Lock.Lock() defer func() { - err := vm.Shutdown(context.Background()) - require.NoError(err) + require.NoError(vm.Shutdown(context.Background())) vm.ctx.Lock.Unlock() }() @@ -1362,17 +1348,14 @@ func TestAtomicImport(t *testing.T) { ) require.NoError(err) - err = vm.Builder.AddUnverifiedTx(tx) - require.NoError(err) + require.NoError(vm.Builder.AddUnverifiedTx(tx)) blk, err := vm.Builder.BuildBlock(context.Background()) require.NoError(err) - err = blk.Verify(context.Background()) - require.NoError(err) + require.NoError(blk.Verify(context.Background())) - err = blk.Accept(context.Background()) - require.NoError(err) + require.NoError(blk.Accept(context.Background())) _, txStatus, err := vm.state.GetTx(tx.ID()) require.NoError(err) @@ -1389,8 +1372,7 @@ func TestOptimisticAtomicImport(t *testing.T) { vm, _, _ := defaultVM() vm.ctx.Lock.Lock() defer func() { - err := vm.Shutdown(context.Background()) - require.NoError(err) + require.NoError(vm.Shutdown(context.Background())) vm.ctx.Lock.Unlock() }() @@ -1411,8 +1393,7 @@ func TestOptimisticAtomicImport(t *testing.T) { }, }}, }} - err := tx.Initialize(txs.Codec) - require.NoError(err) + require.NoError(tx.Initialize(txs.Codec)) preferred, err := vm.Builder.Preferred() require.NoError(err) @@ -1432,17 +1413,13 @@ func TestOptimisticAtomicImport(t *testing.T) { err = blk.Verify(context.Background()) require.ErrorIs(err, database.ErrNotFound) // erred due to missing shared memory UTXOs - err = vm.SetState(context.Background(), snow.Bootstrapping) - require.NoError(err) + require.NoError(vm.SetState(context.Background(), snow.Bootstrapping)) - err = blk.Verify(context.Background()) - require.NoError(err) // skips shared memory UTXO verification during bootstrapping + require.NoError(blk.Verify(context.Background())) // skips shared memory UTXO verification during bootstrapping - err = blk.Accept(context.Background()) - require.NoError(err) + require.NoError(blk.Accept(context.Background())) - err = vm.SetState(context.Background(), snow.NormalOp) - require.NoError(err) + require.NoError(vm.SetState(context.Background(), snow.NormalOp)) _, txStatus, err := vm.state.GetTx(tx.ID()) require.NoError(err) @@ -1564,8 +1541,7 @@ func TestRestartFullyAccepted(t *testing.T) { secondVM.clock.Set(initialClkTime) secondCtx.Lock.Lock() defer func() { - err := secondVM.Shutdown(context.Background()) - require.NoError(err) + require.NoError(secondVM.Shutdown(context.Background())) secondCtx.Lock.Unlock() }() @@ -1873,8 +1849,7 @@ func TestBootstrapPartiallyAccepted(t *testing.T) { h.Start(context.Background(), false) ctx.Lock.Lock() - err = bootstrapper.Connected(context.Background(), peerID, version.CurrentApp) - require.NoError(err) + require.NoError(bootstrapper.Connected(context.Background(), peerID, version.CurrentApp)) externalSender.SendF = func(msg message.OutboundMessage, nodeIDs set.Set[ids.NodeID], _ ids.ID, _ subnets.Allower) set.Set[ids.NodeID] { inMsgIntf, err := mc.Parse(msg.Bytes(), ctx.NodeID, func() {}) @@ -1887,8 +1862,7 @@ func 
TestBootstrapPartiallyAccepted(t *testing.T) { } frontier := []ids.ID{advanceTimeBlkID} - err = bootstrapper.AcceptedFrontier(context.Background(), peerID, reqID, frontier) - require.NoError(err) + require.NoError(bootstrapper.AcceptedFrontier(context.Background(), peerID, reqID, frontier)) externalSender.SendF = func(msg message.OutboundMessage, nodeIDs set.Set[ids.NodeID], _ ids.ID, _ subnets.Allower) set.Set[ids.NodeID] { inMsgIntf, err := mc.Parse(msg.Bytes(), ctx.NodeID, func() {}) @@ -1995,8 +1969,7 @@ func TestUnverifiedParent(t *testing.T) { ) require.NoError(err) firstAdvanceTimeBlk := vm.manager.NewBlock(statelessBlk) - err = firstAdvanceTimeBlk.Verify(context.Background()) - require.NoError(err) + require.NoError(firstAdvanceTimeBlk.Verify(context.Background())) // include a tx1 to make the block be accepted tx2 := &txs.Tx{Unsigned: &txs.ImportTx{ @@ -2811,8 +2784,7 @@ func TestRemovePermissionedValidatorDuringAddPending(t *testing.T) { vm.ctx.Lock.Lock() defer func() { - err := vm.Shutdown(context.Background()) - require.NoError(err) + require.NoError(vm.Shutdown(context.Background())) vm.ctx.Lock.Unlock() }() @@ -2834,8 +2806,7 @@ func TestRemovePermissionedValidatorDuringAddPending(t *testing.T) { ) require.NoError(err) - err = vm.Builder.AddUnverifiedTx(addValidatorTx) - require.NoError(err) + require.NoError(vm.Builder.AddUnverifiedTx(addValidatorTx)) // trigger block creation for the validator tx addValidatorBlock, err := vm.Builder.BuildBlock(context.Background()) @@ -2852,8 +2823,7 @@ func TestRemovePermissionedValidatorDuringAddPending(t *testing.T) { ) require.NoError(err) - err = vm.Builder.AddUnverifiedTx(createSubnetTx) - require.NoError(err) + require.NoError(vm.Builder.AddUnverifiedTx(createSubnetTx)) // trigger block creation for the subnet tx createSubnetBlock, err := vm.Builder.BuildBlock(context.Background()) diff --git a/vms/proposervm/block/build_test.go b/vms/proposervm/block/build_test.go index a4136c7089c..e6ec488eb48 100644 --- a/vms/proposervm/block/build_test.go +++ b/vms/proposervm/block/build_test.go @@ -45,8 +45,7 @@ func TestBuild(t *testing.T) { require.Equal(timestamp, builtBlock.Timestamp()) require.Equal(innerBlockBytes, builtBlock.Block()) - err = builtBlock.Verify(true, chainID) - require.NoError(err) + require.NoError(builtBlock.Verify(true, chainID)) err = builtBlock.Verify(false, chainID) require.ErrorIs(err, errUnexpectedProposer) @@ -69,8 +68,7 @@ func TestBuildUnsigned(t *testing.T) { require.Equal(innerBlockBytes, builtBlock.Block()) require.Equal(ids.EmptyNodeID, builtBlock.Proposer()) - err = builtBlock.Verify(false, ids.Empty) - require.NoError(err) + require.NoError(builtBlock.Verify(false, ids.Empty)) err = builtBlock.Verify(true, ids.Empty) require.ErrorIs(err, errMissingProposer) diff --git a/vms/proposervm/tree/tree_test.go b/vms/proposervm/tree/tree_test.go index 979943b8677..4bd992cd221 100644 --- a/vms/proposervm/tree/tree_test.go +++ b/vms/proposervm/tree/tree_test.go @@ -43,8 +43,7 @@ func TestAcceptSingleBlock(t *testing.T) { _, contains = tr.Get(block) require.True(contains) - err := tr.Accept(context.Background(), block) - require.NoError(err) + require.NoError(tr.Accept(context.Background(), block)) require.Equal(choices.Accepted, block.Status()) } @@ -78,8 +77,7 @@ func TestAcceptBlockConflict(t *testing.T) { _, contains = tr.Get(blockToReject) require.True(contains) - err := tr.Accept(context.Background(), blockToAccept) - require.NoError(err) + require.NoError(tr.Accept(context.Background(), blockToAccept)) 
require.Equal(choices.Accepted, blockToAccept.Status()) require.Equal(choices.Rejected, blockToReject.Status()) } @@ -126,8 +124,7 @@ func TestAcceptChainConflict(t *testing.T) { _, contains = tr.Get(blockToRejectChild) require.True(contains) - err := tr.Accept(context.Background(), blockToAccept) - require.NoError(err) + require.NoError(tr.Accept(context.Background(), blockToAccept)) require.Equal(choices.Accepted, blockToAccept.Status()) require.Equal(choices.Rejected, blockToReject.Status()) require.Equal(choices.Rejected, blockToRejectChild.Status()) diff --git a/vms/proposervm/vm_test.go b/vms/proposervm/vm_test.go index a9b45f505ab..423cb7439a9 100644 --- a/vms/proposervm/vm_test.go +++ b/vms/proposervm/vm_test.go @@ -2022,11 +2022,9 @@ func TestRejectedHeightNotIndexed(t *testing.T) { // Initialize shouldn't be called again coreVM.InitializeF = nil - err = proVM.SetState(context.Background(), snow.NormalOp) - require.NoError(err) + require.NoError(proVM.SetState(context.Background(), snow.NormalOp)) - err = proVM.SetPreference(context.Background(), coreGenBlk.IDV) - require.NoError(err) + require.NoError(proVM.SetPreference(context.Background(), coreGenBlk.IDV)) ctx.Lock.Lock() for proVM.VerifyHeightIndex(context.Background()) != nil { @@ -2055,8 +2053,7 @@ func TestRejectedHeightNotIndexed(t *testing.T) { require.NoError(err) coreVM.BuildBlockF = nil - err = aBlock.Verify(context.Background()) - require.NoError(err) + require.NoError(aBlock.Verify(context.Background())) // use a different way to construct inner block Y and outer block B yBlock := &snowman.TestBlock{ @@ -2087,12 +2084,10 @@ func TestRejectedHeightNotIndexed(t *testing.T) { }, } - err = bBlock.Verify(context.Background()) - require.NoError(err) + require.NoError(bBlock.Verify(context.Background())) // accept A - err = aBlock.Accept(context.Background()) - require.NoError(err) + require.NoError(aBlock.Accept(context.Background())) coreHeights = append(coreHeights, xBlock.ID()) blkID, err := proVM.GetBlockIDAtHeight(context.Background(), aBlock.Height()) @@ -2100,8 +2095,7 @@ func TestRejectedHeightNotIndexed(t *testing.T) { require.Equal(aBlock.ID(), blkID) // reject B - err = bBlock.Reject(context.Background()) - require.NoError(err) + require.NoError(bBlock.Reject(context.Background())) blkID, err = proVM.GetBlockIDAtHeight(context.Background(), aBlock.Height()) require.NoError(err) @@ -2237,11 +2231,9 @@ func TestRejectedOptionHeightNotIndexed(t *testing.T) { // Initialize shouldn't be called again coreVM.InitializeF = nil - err = proVM.SetState(context.Background(), snow.NormalOp) - require.NoError(err) + require.NoError(proVM.SetState(context.Background(), snow.NormalOp)) - err = proVM.SetPreference(context.Background(), coreGenBlk.IDV) - require.NoError(err) + require.NoError(proVM.SetPreference(context.Background(), coreGenBlk.IDV)) ctx.Lock.Lock() for proVM.VerifyHeightIndex(context.Background()) != nil { @@ -2296,20 +2288,16 @@ func TestRejectedOptionHeightNotIndexed(t *testing.T) { opts, err := aBlock.Options(context.Background()) require.NoError(err) - err = aBlock.Verify(context.Background()) - require.NoError(err) + require.NoError(aBlock.Verify(context.Background())) bBlock := opts[0] - err = bBlock.Verify(context.Background()) - require.NoError(err) + require.NoError(bBlock.Verify(context.Background())) cBlock := opts[1] - err = cBlock.Verify(context.Background()) - require.NoError(err) + require.NoError(cBlock.Verify(context.Background())) // accept A - err = aBlock.Accept(context.Background()) - 
require.NoError(err) + require.NoError(aBlock.Accept(context.Background())) coreHeights = append(coreHeights, xBlock.ID()) blkID, err := proVM.GetBlockIDAtHeight(context.Background(), aBlock.Height()) @@ -2317,8 +2305,7 @@ func TestRejectedOptionHeightNotIndexed(t *testing.T) { require.Equal(aBlock.ID(), blkID) // accept B - err = bBlock.Accept(context.Background()) - require.NoError(err) + require.NoError(bBlock.Accept(context.Background())) coreHeights = append(coreHeights, xBlock.opts[0].ID()) blkID, err = proVM.GetBlockIDAtHeight(context.Background(), bBlock.Height()) @@ -2326,8 +2313,7 @@ func TestRejectedOptionHeightNotIndexed(t *testing.T) { require.Equal(bBlock.ID(), blkID) // reject C - err = cBlock.Reject(context.Background()) - require.NoError(err) + require.NoError(cBlock.Reject(context.Background())) blkID, err = proVM.GetBlockIDAtHeight(context.Background(), cBlock.Height()) require.NoError(err) @@ -2369,7 +2355,7 @@ func TestVMInnerBlkCache(t *testing.T) { ctx := snow.DefaultContextTest() ctx.NodeID = ids.NodeIDFromCert(pTestCert.Leaf) - err := vm.Initialize( + require.NoError(vm.Initialize( context.Background(), ctx, dummyDBManager, @@ -2379,8 +2365,7 @@ func TestVMInnerBlkCache(t *testing.T) { nil, nil, nil, - ) - require.NoError(err) + )) state := state.NewMockState(ctrl) // mock state vm.State = state @@ -2479,17 +2464,10 @@ func TestVMInnerBlkCacheDeduplicationRegression(t *testing.T) { bBlock, err := proVM.ParseBlock(context.Background(), bBlockBytes) require.NoError(err) - err = aBlock.Verify(context.Background()) - require.NoError(err) - - err = bBlock.Verify(context.Background()) - require.NoError(err) - - err = aBlock.Accept(context.Background()) - require.NoError(err) - - err = bBlock.Reject(context.Background()) - require.NoError(err) + require.NoError(aBlock.Verify(context.Background())) + require.NoError(bBlock.Verify(context.Background())) + require.NoError(aBlock.Accept(context.Background())) + require.NoError(bBlock.Reject(context.Background())) require.Equal( choices.Accepted, @@ -2651,7 +2629,6 @@ func TestVM_VerifyBlockWithContext(t *testing.T) { blk.EXPECT().getInnerBlk().Return(innerBlk).AnyTimes() blkID := ids.GenerateTestID() blk.EXPECT().ID().Return(blkID).AnyTimes() - err = vm.verifyAndRecordInnerBlk(context.Background(), nil, blk) - require.NoError(err) + require.NoError(vm.verifyAndRecordInnerBlk(context.Background(), nil, blk)) } } diff --git a/vms/rpcchainvm/batched_vm_test.go b/vms/rpcchainvm/batched_vm_test.go index b0de0f5656e..480be0d4c39 100644 --- a/vms/rpcchainvm/batched_vm_test.go +++ b/vms/rpcchainvm/batched_vm_test.go @@ -90,8 +90,7 @@ func TestBatchedParseBlockCaching(t *testing.T) { ctx := snow.DefaultContextTest() dbManager := manager.NewMemDB(version.Semantic1_0_0) - err := vm.Initialize(context.Background(), ctx, dbManager, nil, nil, nil, nil, nil, nil) - require.NoError(err) + require.NoError(vm.Initialize(context.Background(), ctx, dbManager, nil, nil, nil, nil, nil, nil)) // Call should parse the first block blk, err := vm.ParseBlock(context.Background(), blkBytes1) diff --git a/vms/rpcchainvm/ghttp/http_test.go b/vms/rpcchainvm/ghttp/http_test.go index 7cafe62a56e..2bcf5f3150d 100644 --- a/vms/rpcchainvm/ghttp/http_test.go +++ b/vms/rpcchainvm/ghttp/http_test.go @@ -13,8 +13,6 @@ import ( ) func TestConvertWriteResponse(t *testing.T) { - require := require.New(t) - scenerios := map[string]struct { resp *httppb.HandleSimpleHTTPResponse }{ @@ -47,8 +45,7 @@ func TestConvertWriteResponse(t *testing.T) { for testName, scenerio := 
range scenerios { t.Run(testName, func(t *testing.T) { w := httptest.NewRecorder() - err := convertWriteResponse(w, scenerio.resp) - require.NoError(err) + require.NoError(t, convertWriteResponse(w, scenerio.resp)) }) } } diff --git a/vms/rpcchainvm/grpcutils/client_test.go b/vms/rpcchainvm/grpcutils/client_test.go index 0e48a837c4e..c19fa498944 100644 --- a/vms/rpcchainvm/grpcutils/client_test.go +++ b/vms/rpcchainvm/grpcutils/client_test.go @@ -61,8 +61,7 @@ func TestWaitForReady(t *testing.T) { db := rpcdb.NewClient(pb.NewDatabaseClient(conn)) - err = db.Put([]byte("foo"), []byte("bar")) - require.NoError(err) + require.NoError(db.Put([]byte("foo"), []byte("bar"))) noWaitListener, err := NewListener() require.NoError(err) diff --git a/vms/rpcchainvm/vm_test.go b/vms/rpcchainvm/vm_test.go index 5c1953d3216..968a04fe0b0 100644 --- a/vms/rpcchainvm/vm_test.go +++ b/vms/rpcchainvm/vm_test.go @@ -181,8 +181,7 @@ func TestRuntimeSubprocessBootstrap(t *testing.T) { listener, err := grpcutils.NewListener() require.NoError(err) - err = os.Setenv(runtime.EngineAddressKey, listener.Addr().String()) - require.NoError(err) + require.NoError(os.Setenv(runtime.EngineAddressKey, listener.Addr().String())) ctx, cancel := context.WithCancel(context.Background()) defer cancel() diff --git a/vms/rpcchainvm/with_context_vm_test.go b/vms/rpcchainvm/with_context_vm_test.go index 8fd85f3067a..28b73dcb5d0 100644 --- a/vms/rpcchainvm/with_context_vm_test.go +++ b/vms/rpcchainvm/with_context_vm_test.go @@ -102,8 +102,7 @@ func TestContextVMSummary(t *testing.T) { ctx := snow.DefaultContextTest() dbManager := manager.NewMemDB(version.Semantic1_0_0) - err := vm.Initialize(context.Background(), ctx, dbManager, nil, nil, nil, nil, nil, nil) - require.NoError(err) + require.NoError(vm.Initialize(context.Background(), ctx, dbManager, nil, nil, nil, nil, nil, nil)) blkIntf, err := vm.BuildBlockWithContext(context.Background(), blockContext) require.NoError(err) @@ -115,6 +114,5 @@ func TestContextVMSummary(t *testing.T) { require.NoError(err) require.True(shouldVerify) - err = blk.VerifyWithContext(context.Background(), blockContext) - require.NoError(err) + require.NoError(blk.VerifyWithContext(context.Background(), blockContext)) } diff --git a/x/merkledb/cache_test.go b/x/merkledb/cache_test.go index ba66268cce0..2aa136afb02 100644 --- a/x/merkledb/cache_test.go +++ b/x/merkledb/cache_test.go @@ -28,8 +28,7 @@ func TestNewOnEvictCache(t *testing.T) { require.Zero(cache.fifo.Len()) // Can't test function equality directly so do this // to make sure it was assigned correctly - err := cache.onEviction(0) - require.NoError(err) + require.NoError(cache.onEviction(0)) require.True(called) } @@ -53,8 +52,7 @@ func TestOnEvictCacheNoOnEvictionError(t *testing.T) { require.False(ok) // Put key - err := cache.Put(0, 0) - require.NoError(err) + require.NoError(cache.Put(0, 0)) require.Equal(1, cache.fifo.Len()) // Get key @@ -68,8 +66,7 @@ func TestOnEvictCacheNoOnEvictionError(t *testing.T) { // Fill the cache for i := 1; i < maxSize; i++ { - err := cache.Put(i, i) - require.NoError(err) + require.NoError(cache.Put(i, i)) require.Equal(i+1, cache.fifo.Len()) } require.Empty(evicted) @@ -77,8 +74,7 @@ func TestOnEvictCacheNoOnEvictionError(t *testing.T) { // Cache has [0,1,2] // Put another key. This should evict the oldest inserted key (0). 
- err = cache.Put(maxSize, maxSize) - require.NoError(err) + require.NoError(cache.Put(maxSize, maxSize)) require.Equal(maxSize, cache.fifo.Len()) require.Len(evicted, 1) require.Zero(evicted[0]) @@ -121,8 +117,7 @@ func TestOnEvictCacheNoOnEvictionError(t *testing.T) { require.False(iter.Next()) // Put another key to evict the oldest inserted key (1). - err = cache.Put(maxSize+1, maxSize+1) - require.NoError(err) + require.NoError(cache.Put(maxSize+1, maxSize+1)) require.Equal(maxSize, cache.fifo.Len()) require.Len(evicted, 2) require.Equal(1, evicted[1]) @@ -144,8 +139,7 @@ func TestOnEvictCacheNoOnEvictionError(t *testing.T) { _, ok = cache.Get(1) require.False(ok) - err = cache.Flush() - require.NoError(err) + require.NoError(cache.Flush()) // Cache should be empty require.Zero(cache.fifo.Len()) @@ -177,8 +171,7 @@ func TestOnEvictCacheOnEvictionError(t *testing.T) { // Fill the cache for i := 0; i < maxSize; i++ { - err := cache.Put(i, i) - require.NoError(err) + require.NoError(cache.Put(i, i)) require.Equal(i+1, cache.fifo.Len()) } diff --git a/x/merkledb/codec_test.go b/x/merkledb/codec_test.go index 157f87f97fb..50790b87df4 100644 --- a/x/merkledb/codec_test.go +++ b/x/merkledb/codec_test.go @@ -138,8 +138,7 @@ func FuzzCodecBool(f *testing.F) { // Encoding [got] should be the same as [b]. var buf bytes.Buffer - err = codec.encodeBool(&buf, got) - require.NoError(err) + require.NoError(codec.encodeBool(&buf, got)) bufBytes := buf.Bytes() require.Len(bufBytes, numRead) require.Equal(b[:numRead], bufBytes) @@ -167,8 +166,7 @@ func FuzzCodecInt(f *testing.F) { // Encoding [got] should be the same as [b]. var buf bytes.Buffer - err = codec.encodeInt(&buf, got) - require.NoError(err) + require.NoError(codec.encodeInt(&buf, got)) bufBytes := buf.Bytes() require.Len(bufBytes, numRead) require.Equal(b[:numRead], bufBytes) @@ -196,8 +194,7 @@ func FuzzCodecSerializedPath(f *testing.F) { // Encoding [got] should be the same as [b]. 
var buf bytes.Buffer - err = codec.encodeSerializedPath(got, &buf) - require.NoError(err) + require.NoError(codec.encodeSerializedPath(got, &buf)) bufBytes := buf.Bytes() require.Len(bufBytes, numRead) require.Equal(b[:numRead], bufBytes) @@ -601,8 +598,7 @@ func TestCodec_DecodeChangeProof(t *testing.T) { // Put key-values length of -1 proofBytesBuf := bytes.NewBuffer(proofBytes) - err = Codec.(*codecImpl).encodeInt(proofBytesBuf, -1) - require.NoError(err) + require.NoError(Codec.(*codecImpl).encodeInt(proofBytesBuf, -1)) _, err = Codec.DecodeChangeProof(proofBytesBuf.Bytes(), &parsedProof) require.ErrorIs(err, errNegativeNumKeyValues) @@ -634,8 +630,7 @@ func TestCodec_DecodeRangeProof(t *testing.T) { proofBytes = proofBytes[:len(proofBytes)-minVarIntLen] proofBytesBuf := bytes.NewBuffer(proofBytes) // Put key-value length (-1) at end - err = Codec.(*codecImpl).encodeInt(proofBytesBuf, -1) - require.NoError(err) + require.NoError(Codec.(*codecImpl).encodeInt(proofBytesBuf, -1)) _, err = Codec.DecodeRangeProof(proofBytesBuf.Bytes(), &parsedProof) require.ErrorIs(err, errNegativeNumKeyValues) @@ -666,8 +661,7 @@ func TestCodec_DecodeDBNode(t *testing.T) { nodeBytes = nodeBytes[:len(nodeBytes)-minVarIntLen] proofBytesBuf := bytes.NewBuffer(nodeBytes) // Put num children -1 at end - err = Codec.(*codecImpl).encodeInt(proofBytesBuf, -1) - require.NoError(err) + require.NoError(Codec.(*codecImpl).encodeInt(proofBytesBuf, -1)) _, err = Codec.decodeDBNode(proofBytesBuf.Bytes(), &parsedDBNode) require.ErrorIs(err, errNegativeNumChildren) @@ -677,8 +671,7 @@ func TestCodec_DecodeDBNode(t *testing.T) { nodeBytes = nodeBytes[:len(nodeBytes)-minVarIntLen] proofBytesBuf = bytes.NewBuffer(nodeBytes) // Put num children NodeBranchFactor+1 at end - err = Codec.(*codecImpl).encodeInt(proofBytesBuf, NodeBranchFactor+1) - require.NoError(err) + require.NoError(Codec.(*codecImpl).encodeInt(proofBytesBuf, NodeBranchFactor+1)) _, err = Codec.decodeDBNode(proofBytesBuf.Bytes(), &parsedDBNode) require.ErrorIs(err, errTooManyChildren) diff --git a/x/merkledb/db_test.go b/x/merkledb/db_test.go index 940b1f55ee4..9465e65d161 100644 --- a/x/merkledb/db_test.go +++ b/x/merkledb/db_test.go @@ -384,8 +384,7 @@ func Test_MerkleDB_InsertAndRetrieve(t *testing.T) { require.ErrorIs(err, database.ErrNotFound) require.Nil(value) - err = db.Put([]byte("key"), []byte("value")) - require.NoError(err) + require.NoError(db.Put([]byte("key"), []byte("value"))) value, err = db.Get([]byte("key")) require.NoError(err) @@ -459,12 +458,10 @@ func TestDatabaseNewUntrackedView(t *testing.T) { require.Empty(db.childViews) // Write to the untracked view. - err = view.Insert(context.Background(), []byte{1}, []byte{1}) - require.NoError(err) + require.NoError(view.Insert(context.Background(), []byte{1}, []byte{1})) // Commit the view - err = view.CommitToDB(context.Background()) - require.NoError(err) + require.NoError(view.CommitToDB(context.Background())) // The untracked view should not be tracked by the parent database. require.Empty(db.childViews) @@ -483,12 +480,10 @@ func TestDatabaseNewPreallocatedViewTracked(t *testing.T) { require.Len(db.childViews, 1) // Write to the view. 
- err = view.Insert(context.Background(), []byte{1}, []byte{1}) - require.NoError(err) + require.NoError(view.Insert(context.Background(), []byte{1}, []byte{1})) // Commit the view - err = view.CommitToDB(context.Background()) - require.NoError(err) + require.NoError(view.CommitToDB(context.Background())) // The view should be tracked by the parent database. require.Contains(db.childViews, view) @@ -503,8 +498,7 @@ func TestDatabaseCommitChanges(t *testing.T) { dbRoot := db.getMerkleRoot() // Committing a nil view should be a no-op. - err = db.commitToDB(context.Background()) - require.NoError(err) + require.NoError(db.commitToDB(context.Background())) require.Equal(dbRoot, db.getMerkleRoot()) // Root didn't change // Committing an invalid view should fail. @@ -515,20 +509,16 @@ func TestDatabaseCommitChanges(t *testing.T) { require.ErrorIs(err, ErrInvalid) // Add key-value pairs to the database - err = db.Put([]byte{1}, []byte{1}) - require.NoError(err) - err = db.Put([]byte{2}, []byte{2}) - require.NoError(err) + require.NoError(db.Put([]byte{1}, []byte{1})) + require.NoError(db.Put([]byte{2}, []byte{2})) // Make a view and insert/delete a key-value pair. view1Intf, err := db.NewView() require.NoError(err) require.IsType(&trieView{}, view1Intf) view1 := view1Intf.(*trieView) - err = view1.Insert(context.Background(), []byte{3}, []byte{3}) - require.NoError(err) - err = view1.Remove(context.Background(), []byte{1}) - require.NoError(err) + require.NoError(view1.Insert(context.Background(), []byte{3}, []byte{3})) + require.NoError(view1.Remove(context.Background(), []byte{1})) view1Root, err := view1.getMerkleRoot(context.Background()) require.NoError(err) @@ -551,8 +541,7 @@ func TestDatabaseCommitChanges(t *testing.T) { // db // Commit view1 - err = view1.commitToDB(context.Background()) - require.NoError(err) + require.NoError(view1.commitToDB(context.Background())) // Make sure the key-value pairs are correct. 
_, err = db.Get([]byte{1}) @@ -773,13 +762,11 @@ func runRandDBTest(require *require.Assertions, r *rand.Rand, rt randTest) { require.LessOrEqual(i, len(rt)) switch step.op { case opUpdate: - err := currentBatch.Put(step.key, step.value) - require.NoError(err) + require.NoError(currentBatch.Put(step.key, step.value)) currentValues[newPath(step.key)] = step.value delete(deleteValues, newPath(step.key)) case opDelete: - err := currentBatch.Delete(step.key) - require.NoError(err) + require.NoError(currentBatch.Delete(step.key)) deleteValues[newPath(step.key)] = struct{}{} delete(currentValues, newPath(step.key)) case opGenerateRangeProof: @@ -824,8 +811,7 @@ func runRandDBTest(require *require.Assertions, r *rand.Rand, rt randTest) { case opWriteBatch: oldRoot, err := db.GetMerkleRoot(context.Background()) require.NoError(err) - err = currentBatch.Write() - require.NoError(err) + require.NoError(currentBatch.Write()) for key, value := range currentValues { values[key] = value } @@ -873,8 +859,7 @@ func runRandDBTest(require *require.Assertions, r *rand.Rand, rt randTest) { require.NoError(err) localTrie := Trie(dbTrie) for key, value := range values { - err := localTrie.Insert(context.Background(), key.Serialize().Value, value) - require.NoError(err) + require.NoError(localTrie.Insert(context.Background(), key.Serialize().Value, value)) } calculatedRoot, err := localTrie.GetMerkleRoot(context.Background()) require.NoError(err) diff --git a/x/merkledb/history_test.go b/x/merkledb/history_test.go index 704dae9bb3c..becdf85f6df 100644 --- a/x/merkledb/history_test.go +++ b/x/merkledb/history_test.go @@ -28,10 +28,8 @@ func Test_History_Simple(t *testing.T) { ) require.NoError(err) batch := db.NewBatch() - err = batch.Put([]byte("key"), []byte("value")) - require.NoError(err) - err = batch.Write() - require.NoError(err) + require.NoError(batch.Put([]byte("key"), []byte("value"))) + require.NoError(batch.Write()) val, err := db.Get([]byte("key")) require.NoError(err) @@ -41,71 +39,49 @@ func Test_History_Simple(t *testing.T) { require.NoError(err) require.NotNil(origProof) origRootID := db.root.id - err = origProof.Verify(context.Background(), []byte("k"), []byte("key3"), origRootID) - require.NoError(err) + require.NoError(origProof.Verify(context.Background(), []byte("k"), []byte("key3"), origRootID)) batch = db.NewBatch() - err = batch.Put([]byte("key"), []byte("value0")) - require.NoError(err) - err = batch.Write() - require.NoError(err) + require.NoError(batch.Put([]byte("key"), []byte("value0"))) + require.NoError(batch.Write()) newProof, err := db.GetRangeProofAtRoot(context.Background(), origRootID, []byte("k"), []byte("key3"), 10) require.NoError(err) require.NotNil(newProof) - err = newProof.Verify(context.Background(), []byte("k"), []byte("key3"), origRootID) - require.NoError(err) + require.NoError(newProof.Verify(context.Background(), []byte("k"), []byte("key3"), origRootID)) batch = db.NewBatch() - err = batch.Put([]byte("key1"), []byte("value1")) - require.NoError(err) - err = batch.Put([]byte("key8"), []byte("value8")) - require.NoError(err) - err = batch.Write() - require.NoError(err) + require.NoError(batch.Put([]byte("key1"), []byte("value1"))) + require.NoError(batch.Put([]byte("key8"), []byte("value8"))) + require.NoError(batch.Write()) newProof, err = db.GetRangeProofAtRoot(context.Background(), origRootID, []byte("k"), []byte("key3"), 10) require.NoError(err) require.NotNil(newProof) - err = newProof.Verify(context.Background(), []byte("k"), []byte("key3"), origRootID) - 
require.NoError(err) + require.NoError(newProof.Verify(context.Background(), []byte("k"), []byte("key3"), origRootID)) batch = db.NewBatch() - err = batch.Put([]byte("k"), []byte("v")) - require.NoError(err) - require.NoError(err) - err = batch.Write() - require.NoError(err) + require.NoError(batch.Put([]byte("k"), []byte("v"))) + require.NoError(batch.Write()) newProof, err = db.GetRangeProofAtRoot(context.Background(), origRootID, []byte("k"), []byte("key3"), 10) require.NoError(err) require.NotNil(newProof) - err = newProof.Verify(context.Background(), []byte("k"), []byte("key3"), origRootID) - require.NoError(err) + require.NoError(newProof.Verify(context.Background(), []byte("k"), []byte("key3"), origRootID)) batch = db.NewBatch() - err = batch.Delete([]byte("k")) - require.NoError(err) - err = batch.Delete([]byte("ke")) - require.NoError(err) - err = batch.Delete([]byte("key")) - require.NoError(err) - err = batch.Delete([]byte("key1")) - require.NoError(err) - err = batch.Put([]byte("key2"), []byte("value2")) - require.NoError(err) - err = batch.Delete([]byte("key3")) - require.NoError(err) - err = batch.Delete([]byte("key4")) - require.NoError(err) - err = batch.Delete([]byte("key5")) - require.NoError(err) - err = batch.Delete([]byte("key8")) - require.NoError(err) - err = batch.Write() - require.NoError(err) + require.NoError(batch.Delete([]byte("k"))) + require.NoError(batch.Delete([]byte("ke"))) + require.NoError(batch.Delete([]byte("key"))) + require.NoError(batch.Delete([]byte("key1"))) + require.NoError(batch.Put([]byte("key2"), []byte("value2"))) + require.NoError(batch.Delete([]byte("key3"))) + require.NoError(batch.Delete([]byte("key4"))) + require.NoError(batch.Delete([]byte("key5"))) + require.NoError(batch.Delete([]byte("key8"))) + require.NoError(batch.Write()) newProof, err = db.GetRangeProofAtRoot(context.Background(), origRootID, []byte("k"), []byte("key3"), 10) require.NoError(err) require.NotNil(newProof) - err = newProof.Verify(context.Background(), []byte("k"), []byte("key3"), origRootID) - require.NoError(err) + require.NoError(newProof.Verify(context.Background(), []byte("k"), []byte("key3"), origRootID)) } func Test_History_Large(t *testing.T) { @@ -133,14 +109,12 @@ func Test_History_Large(t *testing.T) { _, err = r.Read(val) require.NoError(err) - err = db.Put(addkey, val) - require.NoError(err) + require.NoError(db.Put(addkey, val)) addNilkey := make([]byte, r.Intn(50)) _, err = r.Read(addNilkey) require.NoError(err) - err = db.Put(addNilkey, nil) - require.NoError(err) + require.NoError(db.Put(addNilkey, nil)) deleteKeyStart := make([]byte, r.Intn(50)) _, err = r.Read(deleteKeyStart) require.NoError(err) it := db.NewIteratorWithStart(deleteKeyStart) if it.Next() { - err = db.Delete(it.Key()) - require.NoError(err) + require.NoError(db.Delete(it.Key())) } require.NoError(it.Error()) it.Release() @@ -162,8 +135,7 @@ func Test_History_Large(t *testing.T) { require.NoError(err) require.NotNil(proof) - err = proof.Verify(context.Background(), nil, nil, roots[0]) - require.NoError(err) + require.NoError(proof.Verify(context.Background(), nil, nil, roots[0])) } } @@ -181,38 +153,28 @@ func Test_History_Bad_GetValueChanges_Input(t *testing.T) { ) require.NoError(err) batch := db.NewBatch() - err = batch.Put([]byte("key"), []byte("value")) - require.NoError(err) - err = batch.Write() - require.NoError(err) + require.NoError(batch.Put([]byte("key"), []byte("value"))) + require.NoError(batch.Write()) toBeDeletedRoot := 
db.getMerkleRoot() batch = db.NewBatch() - err = batch.Put([]byte("key"), []byte("value0")) - require.NoError(err) - err = batch.Write() - require.NoError(err) + require.NoError(batch.Put([]byte("key"), []byte("value0"))) + require.NoError(batch.Write()) startRoot := db.getMerkleRoot() batch = db.NewBatch() - err = batch.Put([]byte("key1"), []byte("value0")) - require.NoError(err) - err = batch.Write() - require.NoError(err) + require.NoError(batch.Put([]byte("key1"), []byte("value0"))) + require.NoError(batch.Write()) batch = db.NewBatch() - err = batch.Put([]byte("key1"), []byte("value1")) - require.NoError(err) - err = batch.Write() - require.NoError(err) + require.NoError(batch.Put([]byte("key1"), []byte("value1"))) + require.NoError(batch.Write()) batch = db.NewBatch() - err = batch.Put([]byte("key2"), []byte("value3")) - require.NoError(err) - err = batch.Write() - require.NoError(err) + require.NoError(batch.Put([]byte("key2"), []byte("value3"))) + require.NoError(batch.Write()) endRoot := db.getMerkleRoot() @@ -230,10 +192,8 @@ func Test_History_Bad_GetValueChanges_Input(t *testing.T) { // trigger the first root to be deleted by exiting the lookback window batch = db.NewBatch() - err = batch.Put([]byte("key2"), []byte("value4")) - require.NoError(err) - err = batch.Write() - require.NoError(err) + require.NoError(batch.Put([]byte("key2"), []byte("value4"))) + require.NoError(batch.Write()) // now this root should no longer be present _, err = db.history.getValueChanges(toBeDeletedRoot, endRoot, nil, nil, 1) @@ -259,10 +219,8 @@ func Test_History_Trigger_History_Queue_Looping(t *testing.T) { ) require.NoError(err) batch := db.NewBatch() - err = batch.Put([]byte("key"), []byte("value")) - require.NoError(err) - err = batch.Write() - require.NoError(err) + require.NoError(batch.Put([]byte("key"), []byte("value"))) + require.NoError(batch.Write()) origRootID := db.getMerkleRoot() origProof, err := db.GetRangeProof(context.Background(), []byte("k"), []byte("key3"), 10) @@ -278,10 +236,8 @@ func Test_History_Trigger_History_Queue_Looping(t *testing.T) { // write a new value into the db, now there should be 2 roots in the history batch = db.NewBatch() - err = batch.Put([]byte("key"), []byte("value0")) - require.NoError(err) - err = batch.Write() - require.NoError(err) + require.NoError(batch.Put([]byte("key"), []byte("value0"))) + require.NoError(batch.Write()) // ensure that previous root is still present and generates a valid proof newProof, err := db.GetRangeProofAtRoot(context.Background(), origRootID, []byte("k"), []byte("key3"), 10) @@ -297,10 +253,8 @@ func Test_History_Trigger_History_Queue_Looping(t *testing.T) { // trigger a new root to be added to the history, which should cause rollover since there can only be 2 batch = db.NewBatch() - err = batch.Put([]byte("key1"), []byte("value1")) - require.NoError(err) - err = batch.Write() - require.NoError(err) + require.NoError(batch.Put([]byte("key1"), []byte("value1"))) + require.NoError(batch.Write()) // proof from first root shouldn't be generatable since it should have been removed from the history _, err = db.GetRangeProofAtRoot(context.Background(), origRootID, []byte("k"), []byte("key3"), 10) @@ -321,40 +275,30 @@ func Test_History_Values_Lookup_Over_Queue_Break(t *testing.T) { ) require.NoError(err) batch := db.NewBatch() - err = batch.Put([]byte("key"), []byte("value")) - require.NoError(err) - err = batch.Write() - require.NoError(err) + require.NoError(batch.Put([]byte("key"), []byte("value"))) + 
require.NoError(batch.Write()) // write a new value into the db batch = db.NewBatch() - err = batch.Put([]byte("key"), []byte("value0")) - require.NoError(err) - err = batch.Write() - require.NoError(err) + require.NoError(batch.Put([]byte("key"), []byte("value0"))) + require.NoError(batch.Write()) startRoot := db.getMerkleRoot() // write a new value into the db batch = db.NewBatch() - err = batch.Put([]byte("key1"), []byte("value0")) - require.NoError(err) - err = batch.Write() - require.NoError(err) + require.NoError(batch.Put([]byte("key1"), []byte("value0"))) + require.NoError(batch.Write()) // write a new value into the db that overwrites key1 batch = db.NewBatch() - err = batch.Put([]byte("key1"), []byte("value1")) - require.NoError(err) - err = batch.Write() - require.NoError(err) + require.NoError(batch.Put([]byte("key1"), []byte("value1"))) + require.NoError(batch.Write()) // trigger a new root to be added to the history, which should cause rollover since there can only be 3 batch = db.NewBatch() - err = batch.Put([]byte("key2"), []byte("value3")) - require.NoError(err) - err = batch.Write() - require.NoError(err) + require.NoError(batch.Put([]byte("key2"), []byte("value3"))) + require.NoError(batch.Write()) endRoot := db.getMerkleRoot() @@ -381,53 +325,38 @@ func Test_History_RepeatedRoot(t *testing.T) { ) require.NoError(err) batch := db.NewBatch() - err = batch.Put([]byte("key1"), []byte("value1")) - require.NoError(err) - err = batch.Put([]byte("key2"), []byte("value2")) - require.NoError(err) - err = batch.Put([]byte("key3"), []byte("value3")) - require.NoError(err) - err = batch.Write() - require.NoError(err) + require.NoError(batch.Put([]byte("key1"), []byte("value1"))) + require.NoError(batch.Put([]byte("key2"), []byte("value2"))) + require.NoError(batch.Put([]byte("key3"), []byte("value3"))) + require.NoError(batch.Write()) origProof, err := db.GetRangeProof(context.Background(), []byte("k"), []byte("key3"), 10) require.NoError(err) require.NotNil(origProof) origRootID := db.root.id - err = origProof.Verify(context.Background(), []byte("k"), []byte("key3"), origRootID) - require.NoError(err) + require.NoError(origProof.Verify(context.Background(), []byte("k"), []byte("key3"), origRootID)) batch = db.NewBatch() - err = batch.Put([]byte("key1"), []byte("other")) - require.NoError(err) - err = batch.Put([]byte("key2"), []byte("other")) - require.NoError(err) - err = batch.Put([]byte("key3"), []byte("other")) - require.NoError(err) - err = batch.Write() - require.NoError(err) + require.NoError(batch.Put([]byte("key1"), []byte("other"))) + require.NoError(batch.Put([]byte("key2"), []byte("other"))) + require.NoError(batch.Put([]byte("key3"), []byte("other"))) + require.NoError(batch.Write()) newProof, err := db.GetRangeProofAtRoot(context.Background(), origRootID, []byte("k"), []byte("key3"), 10) require.NoError(err) require.NotNil(newProof) - err = newProof.Verify(context.Background(), []byte("k"), []byte("key3"), origRootID) - require.NoError(err) + require.NoError(newProof.Verify(context.Background(), []byte("k"), []byte("key3"), origRootID)) // revert state to be the same as in orig proof batch = db.NewBatch() - err = batch.Put([]byte("key1"), []byte("value1")) - require.NoError(err) - err = batch.Put([]byte("key2"), []byte("value2")) - require.NoError(err) - err = batch.Put([]byte("key3"), []byte("value3")) - require.NoError(err) - err = batch.Write() - require.NoError(err) + require.NoError(batch.Put([]byte("key1"), []byte("value1"))) + 
require.NoError(batch.Put([]byte("key2"), []byte("value2"))) + require.NoError(batch.Put([]byte("key3"), []byte("value3"))) + require.NoError(batch.Write()) newProof, err = db.GetRangeProofAtRoot(context.Background(), origRootID, []byte("k"), []byte("key3"), 10) require.NoError(err) require.NotNil(newProof) - err = newProof.Verify(context.Background(), []byte("k"), []byte("key3"), origRootID) - require.NoError(err) + require.NoError(newProof.Verify(context.Background(), []byte("k"), []byte("key3"), origRootID)) } func Test_History_ExcessDeletes(t *testing.T) { @@ -444,36 +373,26 @@ func Test_History_ExcessDeletes(t *testing.T) { ) require.NoError(err) batch := db.NewBatch() - err = batch.Put([]byte("key"), []byte("value")) - require.NoError(err) - err = batch.Write() - require.NoError(err) + require.NoError(batch.Put([]byte("key"), []byte("value"))) + require.NoError(batch.Write()) origProof, err := db.GetRangeProof(context.Background(), []byte("k"), []byte("key3"), 10) require.NoError(err) require.NotNil(origProof) origRootID := db.root.id - err = origProof.Verify(context.Background(), []byte("k"), []byte("key3"), origRootID) - require.NoError(err) + require.NoError(origProof.Verify(context.Background(), []byte("k"), []byte("key3"), origRootID)) batch = db.NewBatch() - err = batch.Delete([]byte("key1")) - require.NoError(err) - err = batch.Delete([]byte("key2")) - require.NoError(err) - err = batch.Delete([]byte("key3")) - require.NoError(err) - err = batch.Delete([]byte("key4")) - require.NoError(err) - err = batch.Delete([]byte("key5")) - require.NoError(err) - err = batch.Write() - require.NoError(err) + require.NoError(batch.Delete([]byte("key1"))) + require.NoError(batch.Delete([]byte("key2"))) + require.NoError(batch.Delete([]byte("key3"))) + require.NoError(batch.Delete([]byte("key4"))) + require.NoError(batch.Delete([]byte("key5"))) + require.NoError(batch.Write()) newProof, err := db.GetRangeProofAtRoot(context.Background(), origRootID, []byte("k"), []byte("key3"), 10) require.NoError(err) require.NotNil(newProof) - err = newProof.Verify(context.Background(), []byte("k"), []byte("key3"), origRootID) - require.NoError(err) + require.NoError(newProof.Verify(context.Background(), []byte("k"), []byte("key3"), origRootID)) } func Test_History_DontIncludeAllNodes(t *testing.T) { @@ -490,28 +409,22 @@ func Test_History_DontIncludeAllNodes(t *testing.T) { ) require.NoError(err) batch := db.NewBatch() - err = batch.Put([]byte("key"), []byte("value")) - require.NoError(err) - err = batch.Write() - require.NoError(err) + require.NoError(batch.Put([]byte("key"), []byte("value"))) + require.NoError(batch.Write()) origProof, err := db.GetRangeProof(context.Background(), []byte("k"), []byte("key3"), 10) require.NoError(err) require.NotNil(origProof) origRootID := db.root.id - err = origProof.Verify(context.Background(), []byte("k"), []byte("key3"), origRootID) - require.NoError(err) + require.NoError(origProof.Verify(context.Background(), []byte("k"), []byte("key3"), origRootID)) batch = db.NewBatch() - err = batch.Put([]byte("z"), []byte("z")) - require.NoError(err) - err = batch.Write() - require.NoError(err) + require.NoError(batch.Put([]byte("z"), []byte("z"))) + require.NoError(batch.Write()) newProof, err := db.GetRangeProofAtRoot(context.Background(), origRootID, []byte("k"), []byte("key3"), 10) require.NoError(err) require.NotNil(newProof) - err = newProof.Verify(context.Background(), []byte("k"), []byte("key3"), origRootID) - require.NoError(err) + 
require.NoError(newProof.Verify(context.Background(), []byte("k"), []byte("key3"), origRootID)) } func Test_History_Branching2Nodes(t *testing.T) { @@ -528,28 +441,22 @@ func Test_History_Branching2Nodes(t *testing.T) { ) require.NoError(err) batch := db.NewBatch() - err = batch.Put([]byte("key"), []byte("value")) - require.NoError(err) - err = batch.Write() - require.NoError(err) + require.NoError(batch.Put([]byte("key"), []byte("value"))) + require.NoError(batch.Write()) origProof, err := db.GetRangeProof(context.Background(), []byte("k"), []byte("key3"), 10) require.NoError(err) require.NotNil(origProof) origRootID := db.root.id - err = origProof.Verify(context.Background(), []byte("k"), []byte("key3"), origRootID) - require.NoError(err) + require.NoError(origProof.Verify(context.Background(), []byte("k"), []byte("key3"), origRootID)) batch = db.NewBatch() - err = batch.Put([]byte("k"), []byte("v")) - require.NoError(err) - err = batch.Write() - require.NoError(err) + require.NoError(batch.Put([]byte("k"), []byte("v"))) + require.NoError(batch.Write()) newProof, err := db.GetRangeProofAtRoot(context.Background(), origRootID, []byte("k"), []byte("key3"), 10) require.NoError(err) require.NotNil(newProof) - err = newProof.Verify(context.Background(), []byte("k"), []byte("key3"), origRootID) - require.NoError(err) + require.NoError(newProof.Verify(context.Background(), []byte("k"), []byte("key3"), origRootID)) } func Test_History_Branching3Nodes(t *testing.T) { @@ -566,28 +473,22 @@ func Test_History_Branching3Nodes(t *testing.T) { ) require.NoError(err) batch := db.NewBatch() - err = batch.Put([]byte("key123"), []byte("value123")) - require.NoError(err) - err = batch.Write() - require.NoError(err) + require.NoError(batch.Put([]byte("key123"), []byte("value123"))) + require.NoError(batch.Write()) origProof, err := db.GetRangeProof(context.Background(), []byte("k"), []byte("key3"), 10) require.NoError(err) require.NotNil(origProof) origRootID := db.root.id - err = origProof.Verify(context.Background(), []byte("k"), []byte("key3"), origRootID) - require.NoError(err) + require.NoError(origProof.Verify(context.Background(), []byte("k"), []byte("key3"), origRootID)) batch = db.NewBatch() - err = batch.Put([]byte("key321"), []byte("value321")) - require.NoError(err) - err = batch.Write() - require.NoError(err) + require.NoError(batch.Put([]byte("key321"), []byte("value321"))) + require.NoError(batch.Write()) newProof, err := db.GetRangeProofAtRoot(context.Background(), origRootID, []byte("k"), []byte("key3"), 10) require.NoError(err) require.NotNil(newProof) - err = newProof.Verify(context.Background(), []byte("k"), []byte("key3"), origRootID) - require.NoError(err) + require.NoError(newProof.Verify(context.Background(), []byte("k"), []byte("key3"), origRootID)) } func Test_History_MaxLength(t *testing.T) { @@ -605,27 +506,21 @@ func Test_History_MaxLength(t *testing.T) { require.NoError(err) batch := db.NewBatch() - err = batch.Put([]byte("key"), []byte("value")) - require.NoError(err) - err = batch.Write() - require.NoError(err) + require.NoError(batch.Put([]byte("key"), []byte("value"))) + require.NoError(batch.Write()) oldRoot, err := db.GetMerkleRoot(context.Background()) require.NoError(err) batch = db.NewBatch() - err = batch.Put([]byte("k"), []byte("v")) - require.NoError(err) - err = batch.Write() - require.NoError(err) + require.NoError(batch.Put([]byte("k"), []byte("v"))) + require.NoError(batch.Write()) require.Contains(db.history.lastChanges, oldRoot) batch = db.NewBatch() - err = 
batch.Put([]byte("k1"), []byte("v2")) - require.NoError(err) - err = batch.Write() - require.NoError(err) + require.NoError(batch.Put([]byte("k1"), []byte("v2"))) + require.NoError(batch.Write()) require.NotContains(db.history.lastChanges, oldRoot) } @@ -644,48 +539,30 @@ func Test_Change_List(t *testing.T) { ) require.NoError(err) batch := db.NewBatch() - err = batch.Put([]byte("key20"), []byte("value20")) - require.NoError(err) - err = batch.Put([]byte("key21"), []byte("value21")) - require.NoError(err) - err = batch.Put([]byte("key22"), []byte("value22")) - require.NoError(err) - err = batch.Put([]byte("key23"), []byte("value23")) - require.NoError(err) - err = batch.Put([]byte("key24"), []byte("value24")) - require.NoError(err) - err = batch.Write() - require.NoError(err) + require.NoError(batch.Put([]byte("key20"), []byte("value20"))) + require.NoError(batch.Put([]byte("key21"), []byte("value21"))) + require.NoError(batch.Put([]byte("key22"), []byte("value22"))) + require.NoError(batch.Put([]byte("key23"), []byte("value23"))) + require.NoError(batch.Put([]byte("key24"), []byte("value24"))) + require.NoError(batch.Write()) startRoot, err := db.GetMerkleRoot(context.Background()) require.NoError(err) batch = db.NewBatch() - err = batch.Put([]byte("key25"), []byte("value25")) - require.NoError(err) - err = batch.Put([]byte("key26"), []byte("value26")) - require.NoError(err) - err = batch.Put([]byte("key27"), []byte("value27")) - require.NoError(err) - err = batch.Put([]byte("key28"), []byte("value28")) - require.NoError(err) - err = batch.Put([]byte("key29"), []byte("value29")) - require.NoError(err) - err = batch.Write() - require.NoError(err) + require.NoError(batch.Put([]byte("key25"), []byte("value25"))) + require.NoError(batch.Put([]byte("key26"), []byte("value26"))) + require.NoError(batch.Put([]byte("key27"), []byte("value27"))) + require.NoError(batch.Put([]byte("key28"), []byte("value28"))) + require.NoError(batch.Put([]byte("key29"), []byte("value29"))) + require.NoError(batch.Write()) batch = db.NewBatch() - err = batch.Put([]byte("key30"), []byte("value30")) - require.NoError(err) - err = batch.Put([]byte("key31"), []byte("value31")) - require.NoError(err) - err = batch.Put([]byte("key32"), []byte("value32")) - require.NoError(err) - err = batch.Delete([]byte("key21")) - require.NoError(err) - err = batch.Delete([]byte("key22")) - require.NoError(err) - err = batch.Write() - require.NoError(err) + require.NoError(batch.Put([]byte("key30"), []byte("value30"))) + require.NoError(batch.Put([]byte("key31"), []byte("value31"))) + require.NoError(batch.Put([]byte("key32"), []byte("value32"))) + require.NoError(batch.Delete([]byte("key21"))) + require.NoError(batch.Delete([]byte("key22"))) + require.NoError(batch.Write()) endRoot, err := db.GetMerkleRoot(context.Background()) require.NoError(err) diff --git a/x/merkledb/trie_test.go b/x/merkledb/trie_test.go index 53f69f33d09..3163f8db082 100644 --- a/x/merkledb/trie_test.go +++ b/x/merkledb/trie_test.go @@ -121,10 +121,8 @@ func TestTrieViewGetPathTo(t *testing.T) { // Insert a key key1 := []byte{0} - err = trie.Insert(context.Background(), key1, []byte("value")) - require.NoError(err) - err = trie.calculateNodeIDs(context.Background()) - require.NoError(err) + require.NoError(trie.Insert(context.Background(), key1, []byte("value"))) + require.NoError(trie.calculateNodeIDs(context.Background())) path, err = trie.getPathTo(newPath(key1)) require.NoError(err) @@ -136,10 +134,8 @@ func TestTrieViewGetPathTo(t *testing.T) { // Insert 
another key which is a child of the first key2 := []byte{0, 1} - err = trie.Insert(context.Background(), key2, []byte("value")) - require.NoError(err) - err = trie.calculateNodeIDs(context.Background()) - require.NoError(err) + require.NoError(trie.Insert(context.Background(), key2, []byte("value"))) + require.NoError(trie.calculateNodeIDs(context.Background())) path, err = trie.getPathTo(newPath(key2)) require.NoError(err) @@ -150,10 +146,8 @@ func TestTrieViewGetPathTo(t *testing.T) { // Insert a key which shares no prefix with the others key3 := []byte{255} - err = trie.Insert(context.Background(), key3, []byte("value")) - require.NoError(err) - err = trie.calculateNodeIDs(context.Background()) - require.NoError(err) + require.NoError(trie.Insert(context.Background(), key3, []byte("value"))) + require.NoError(trie.calculateNodeIDs(context.Background())) path, err = trie.getPathTo(newPath(key3)) require.NoError(err) @@ -265,15 +259,13 @@ func Test_Trie_WriteToDB(t *testing.T) { require.ErrorIs(err, database.ErrNotFound) require.Nil(value) - err = trie.Insert(context.Background(), []byte("key"), []byte("value")) - require.NoError(err) + require.NoError(trie.Insert(context.Background(), []byte("key"), []byte("value"))) value, err = getNodeValue(trie, "key") require.NoError(err) require.Equal([]byte("value"), value) - err = trie.CommitToDB(context.Background()) - require.NoError(err) + require.NoError(trie.CommitToDB(context.Background())) p := newPath([]byte("key")) rawBytes, err := dbTrie.nodeDB.Get(p.Bytes()) @@ -298,8 +290,7 @@ func Test_Trie_InsertAndRetrieve(t *testing.T) { require.ErrorIs(err, database.ErrNotFound) require.Nil(value) - err = trie.Insert(context.Background(), []byte("key"), []byte("value")) - require.NoError(err) + require.NoError(trie.Insert(context.Background(), []byte("key"), []byte("value"))) value, err = getNodeValue(trie, "key") require.NoError(err) @@ -552,8 +543,7 @@ func Test_Trie_CommitChanges(t *testing.T) { require.IsType(&trieView{}, view1Intf) view1 := view1Intf.(*trieView) - err = view1.Insert(context.Background(), []byte{1}, []byte{1}) - require.NoError(err) + require.NoError(view1.Insert(context.Background(), []byte{1}, []byte{1})) // view1 // | @@ -568,8 +558,7 @@ func Test_Trie_CommitChanges(t *testing.T) { // Case: Committing a nil view is a no-op oldRoot, err := view1.getMerkleRoot(context.Background()) require.NoError(err) - err = view1.commitChanges(context.Background(), nil) - require.NoError(err) + require.NoError(view1.commitChanges(context.Background(), nil)) newRoot, err := view1.getMerkleRoot(context.Background()) require.NoError(err) require.Equal(oldRoot, newRoot) @@ -591,10 +580,8 @@ func Test_Trie_CommitChanges(t *testing.T) { require.IsType(&trieView{}, view2Intf) view2 := view2Intf.(*trieView) - err = view2.Insert(context.Background(), []byte{2}, []byte{2}) - require.NoError(err) - err = view2.Remove(context.Background(), []byte{1}) - require.NoError(err) + require.NoError(view2.Insert(context.Background(), []byte{2}, []byte{2})) + require.NoError(view2.Remove(context.Background(), []byte{1})) view2Root, err := view2.getMerkleRoot(context.Background()) require.NoError(err) @@ -621,8 +608,7 @@ func Test_Trie_CommitChanges(t *testing.T) { // db // Commit view2 to view1 - err = view1.commitChanges(context.Background(), view2) - require.NoError(err) + require.NoError(view1.commitChanges(context.Background(), view2)) // All siblings of view2 should be invalidated require.True(view3.invalidated) @@ -925,12 +911,10 @@ func 
TestNewViewOnCommittedView(t *testing.T) { require.Contains(db.childViews, view1) require.Equal(db, view1.parentTrie) - err = view1.Insert(context.Background(), []byte{1}, []byte{1}) - require.NoError(err) + require.NoError(view1.Insert(context.Background(), []byte{1}, []byte{1})) // Commit the view - err = view1.CommitToDB(context.Background()) - require.NoError(err) + require.NoError(view1.CommitToDB(context.Background())) // view1 (committed) // | @@ -984,8 +968,7 @@ func TestNewViewOnCommittedView(t *testing.T) { require.Len(db.childViews, 2) // Commit view2 - err = view2.CommitToDB(context.Background()) - require.NoError(err) + require.NoError(view2.CommitToDB(context.Background())) // view3 // | @@ -1003,8 +986,7 @@ func TestNewViewOnCommittedView(t *testing.T) { require.Equal(db, view3.parentTrie) // Commit view3 - err = view3.CommitToDB(context.Background()) - require.NoError(err) + require.NoError(view3.CommitToDB(context.Background())) // view3 being committed invalidates view2 require.True(view2.invalidated) @@ -1043,8 +1025,7 @@ func Test_TrieView_NewView(t *testing.T) { require.Len(view1.childViews, 1) // Commit view1 - err = view1.CommitToDB(context.Background()) - require.NoError(err) + require.NoError(view1.CommitToDB(context.Background())) // Make another view atop view1 view3Intf, err := view1.NewView() @@ -1307,8 +1288,7 @@ func Test_Trie_ConcurrentReadWrite(t *testing.T) { wg.Add(1) go func() { defer wg.Done() - err := newTrie.Insert(context.Background(), []byte("key"), []byte("value")) - require.NoError(err) + require.NoError(newTrie.Insert(context.Background(), []byte("key"), []byte("value"))) }() require.Eventually( @@ -1337,8 +1317,7 @@ func Test_Trie_ConcurrentNewViewAndCommit(t *testing.T) { newTrie, err := trie.NewView() require.NoError(err) - err = newTrie.Insert(context.Background(), []byte("key"), []byte("value0")) - require.NoError(err) + require.NoError(newTrie.Insert(context.Background(), []byte("key"), []byte("value0"))) var wg sync.WaitGroup defer wg.Wait() @@ -1346,8 +1325,7 @@ func Test_Trie_ConcurrentNewViewAndCommit(t *testing.T) { wg.Add(1) go func() { defer wg.Done() - err := newTrie.CommitToDB(context.Background()) - require.NoError(err) + require.NoError(newTrie.CommitToDB(context.Background())) }() newView, err := newTrie.NewView() @@ -1364,8 +1342,7 @@ func Test_Trie_ConcurrentDeleteAndMerkleRoot(t *testing.T) { newTrie, err := trie.NewView() require.NoError(err) - err = newTrie.Insert(context.Background(), []byte("key"), []byte("value0")) - require.NoError(err) + require.NoError(newTrie.Insert(context.Background(), []byte("key"), []byte("value0"))) var wg sync.WaitGroup defer wg.Wait() @@ -1373,8 +1350,7 @@ func Test_Trie_ConcurrentDeleteAndMerkleRoot(t *testing.T) { wg.Add(1) go func() { defer wg.Done() - err := newTrie.Remove(context.Background(), []byte("key")) - require.NoError(err) + require.NoError(newTrie.Remove(context.Background(), []byte("key"))) }() rootID, err := newTrie.GetMerkleRoot(context.Background()) @@ -1398,8 +1374,7 @@ func Test_Trie_ConcurrentInsertProveCommit(t *testing.T) { wg.Add(1) go func() { defer wg.Done() - err := newTrie.Insert(context.Background(), []byte("key2"), []byte("value2")) - require.NoError(err) + require.NoError(newTrie.Insert(context.Background(), []byte("key2"), []byte("value2"))) }() require.Eventually( @@ -1415,8 +1390,7 @@ func Test_Trie_ConcurrentInsertProveCommit(t *testing.T) { } require.Equal([]byte("value2"), proof.Value.value) - err = newTrie.CommitToDB(context.Background()) - 
require.NoError(err) + require.NoError(newTrie.CommitToDB(context.Background())) return true }, time.Second, @@ -1433,8 +1407,7 @@ func Test_Trie_ConcurrentInsertAndRangeProof(t *testing.T) { newTrie, err := trie.NewView() require.NoError(err) - err = newTrie.Insert(context.Background(), []byte("key1"), []byte("value1")) - require.NoError(err) + require.NoError(newTrie.Insert(context.Background(), []byte("key1"), []byte("value1"))) var wg sync.WaitGroup defer wg.Wait() @@ -1442,10 +1415,8 @@ func Test_Trie_ConcurrentInsertAndRangeProof(t *testing.T) { wg.Add(1) go func() { defer wg.Done() - err := newTrie.Insert(context.Background(), []byte("key2"), []byte("value2")) - require.NoError(err) - err = newTrie.Insert(context.Background(), []byte("key3"), []byte("value3")) - require.NoError(err) + require.NoError(newTrie.Insert(context.Background(), []byte("key2"), []byte("value2"))) + require.NoError(newTrie.Insert(context.Background(), []byte("key3"), []byte("value3"))) }() require.Eventually( diff --git a/x/sync/client_test.go b/x/sync/client_test.go index 5fb24503368..55b5f049e8b 100644 --- a/x/sync/client_test.go +++ b/x/sync/client_test.go @@ -45,8 +45,7 @@ func sendRangeRequest( handler := NewNetworkServer(sender, db, logging.NoLog{}) clientNodeID, serverNodeID := ids.GenerateTestNodeID(), ids.GenerateTestNodeID() networkClient := NewNetworkClient(sender, clientNodeID, 1, logging.NoLog{}) - err := networkClient.Connected(context.Background(), serverNodeID, version.CurrentApp) - require.NoError(err) + require.NoError(networkClient.Connected(context.Background(), serverNodeID, version.CurrentApp)) client := NewClient(&ClientConfig{ NetworkClient: networkClient, Metrics: &mockMetrics{}, @@ -75,8 +74,7 @@ func sendRangeRequest( wg.Add(1) go func() { defer wg.Done() - err := handler.AppRequest(ctx, clientNodeID, requestID, deadline, requestBytes) - require.NoError(err) + require.NoError(handler.AppRequest(ctx, clientNodeID, requestID, deadline, requestBytes)) }() // should be on a goroutine so the test can make progress. return nil }, @@ -101,8 +99,7 @@ func sendRangeRequest( // reserialize the response and pass it to the client to complete the handling. responseBytes, err = merkledb.Codec.EncodeRangeProof(merkledb.Version, response) require.NoError(err) - err = networkClient.AppResponse(context.Background(), serverNodeID, requestID, responseBytes) - require.NoError(err) + require.NoError(networkClient.AppResponse(context.Background(), serverNodeID, requestID, responseBytes)) return nil }, ).AnyTimes() @@ -301,8 +298,7 @@ func sendChangeRequest( handler := NewNetworkServer(sender, db, logging.NoLog{}) clientNodeID, serverNodeID := ids.GenerateTestNodeID(), ids.GenerateTestNodeID() networkClient := NewNetworkClient(sender, clientNodeID, 1, logging.NoLog{}) - err := networkClient.Connected(context.Background(), serverNodeID, version.CurrentApp) - require.NoError(err) + require.NoError(networkClient.Connected(context.Background(), serverNodeID, version.CurrentApp)) client := NewClient(&ClientConfig{ NetworkClient: networkClient, Metrics: &mockMetrics{}, @@ -331,8 +327,7 @@ func sendChangeRequest( wg.Add(1) go func() { defer wg.Done() - err := handler.AppRequest(ctx, clientNodeID, requestID, deadline, requestBytes) - require.NoError(err) + require.NoError(handler.AppRequest(ctx, clientNodeID, requestID, deadline, requestBytes)) }() // should be on a goroutine so the test can make progress. 
return nil }, @@ -357,8 +352,7 @@ func sendChangeRequest( // reserialize the response and pass it to the client to complete the handling. responseBytes, err = merkledb.Codec.EncodeChangeProof(merkledb.Version, response) require.NoError(err) - err = networkClient.AppResponse(context.Background(), serverNodeID, requestID, responseBytes) - require.NoError(err) + require.NoError(networkClient.AppResponse(context.Background(), serverNodeID, requestID, responseBytes)) return nil }, ).AnyTimes() diff --git a/x/sync/sync_test.go b/x/sync/sync_test.go index 2d2fb33bac3..de31e57d682 100644 --- a/x/sync/sync_test.go +++ b/x/sync/sync_test.go @@ -563,8 +563,7 @@ func TestFindNextKeyRandom(t *testing.T) { key := make([]byte, rand.Intn(maxKeyLen)) _, _ = rand.Read(key) val := make([]byte, rand.Intn(maxValLen)) - err := db.Put(key, val) - require.NoError(err) + require.NoError(db.Put(key, val)) } } @@ -936,8 +935,7 @@ func Test_Sync_Error_During_Sync(t *testing.T) { require.NoError(err) require.NotNil(t, syncer) - err = syncer.StartSyncing(context.Background()) - require.NoError(err) + require.NoError(syncer.StartSyncing(context.Background())) err = syncer.Wait(context.Background()) require.ErrorIs(err, errInvalidRangeProof) @@ -1009,8 +1007,7 @@ func Test_Sync_Result_Correct_Root_Update_Root_During(t *testing.T) { _, err = r.Read(val) require.NoError(err) - err = dbToSync.Put(key, val) - require.NoError(err) + require.NoError(dbToSync.Put(key, val)) deleteKeyStart := make([]byte, r.Intn(50)) _, err = r.Read(deleteKeyStart) @@ -1018,8 +1015,7 @@ func Test_Sync_Result_Correct_Root_Update_Root_During(t *testing.T) { it := dbToSync.NewIteratorWithStart(deleteKeyStart) if it.Next() { - err = dbToSync.Delete(it.Key()) - require.NoError(err) + require.NoError(dbToSync.Delete(it.Key())) } require.NoError(it.Error()) it.Release() @@ -1028,8 +1024,7 @@ func Test_Sync_Result_Correct_Root_Update_Root_During(t *testing.T) { syncRoot, err = dbToSync.GetMerkleRoot(context.Background()) require.NoError(err) - err = syncer.StartSyncing(context.Background()) - require.NoError(err) + require.NoError(syncer.StartSyncing(context.Background())) // Wait until we've processed some work // before updating the sync target. 
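A note on the lint check adjusted by the `fix regex` commit below: `test_require_no_error_inline_func` greps for an error assignment immediately followed by a `require.NoError(err)` line, and the widened pattern also matches the `require.NoError(t, err)` form used by tests that pass `*testing.T` directly rather than a `require.New(t)` value. A minimal Go sketch of the shapes involved, with `db.Close()` standing in for any call that returns only an error (the receiver and call here are illustrative, not from the patch):

    // Flagged: assign on one line, assert on the next (require.New(t) style).
    err := db.Close()
    require.NoError(err)

    // Flagged only once the regex allows an optional "t, " argument.
    err = db.Close()
    require.NoError(t, err)

    // The inline form the check pushes tests toward.
    require.NoError(db.Close())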
@@ -1043,12 +1038,10 @@ func Test_Sync_Result_Correct_Root_Update_Root_During(t *testing.T) { 3*time.Second, 10*time.Millisecond, ) - err = syncer.UpdateSyncTarget(syncRoot) - require.NoError(err) + require.NoError(syncer.UpdateSyncTarget(syncRoot)) close(updatedRootChan) - err = syncer.Wait(context.Background()) - require.NoError(err) + require.NoError(syncer.Wait(context.Background())) require.NoError(syncer.Error()) newRoot, err := db.GetMerkleRoot(context.Background()) @@ -1095,8 +1088,7 @@ func Test_Sync_UpdateSyncTarget(t *testing.T) { <-startedWaiting newSyncRoot := ids.GenerateTestID() - err = m.UpdateSyncTarget(newSyncRoot) - require.NoError(err) + require.NoError(m.UpdateSyncTarget(newSyncRoot)) <-gotSignalChan require.Equal(newSyncRoot, m.config.TargetRoot) From ff945913a91f0b78fe8f66950efa17bd8ee752e6 Mon Sep 17 00:00:00 2001 From: dhrubabasu <7675102+dhrubabasu@users.noreply.github.com> Date: Sat, 13 May 2023 16:32:12 -0400 Subject: [PATCH 28/79] fix regex --- scripts/lint.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/lint.sh b/scripts/lint.sh index 16b02973e52..95f1a12bc54 100755 --- a/scripts/lint.sh +++ b/scripts/lint.sh @@ -117,7 +117,7 @@ function test_require_nil { } function test_require_no_error_inline_func { - if grep -R -zo -P '\t+err :?=.*\n\t+require\.NoError\(err\)' .; then + if grep -R -zo -P '\t+err :?=.*\n\t+require\.NoError\((t, )?err\)' .; then echo "" echo "Checking that a function with a single error return doesn't error should be done in-line." echo "" From 0aa2d366494d6ef9a60619dd1d40ae7f2a15ed4c Mon Sep 17 00:00:00 2001 From: dhrubabasu <7675102+dhrubabasu@users.noreply.github.com> Date: Sat, 13 May 2023 16:33:57 -0400 Subject: [PATCH 29/79] moar --- api/admin/service_test.go | 4 +- api/keystore/service_test.go | 20 +- database/corruptabledb/db_test.go | 3 +- message/messages_benchmark_test.go | 4 +- network/throttling/dial_throttler_test.go | 12 +- snow/networking/handler/handler_test.go | 18 +- snow/networking/router/chain_router_test.go | 15 +- snow/networking/sender/sender_test.go | 6 +- utils/sampler/weighted_test.go | 14 +- .../weighted_without_replacement_test.go | 20 +- vms/avm/index_test.go | 22 +- vms/avm/service_test.go | 119 ++++------- .../txs/executor/semantic_verifier_test.go | 10 +- vms/avm/vm_test.go | 115 ++++------- vms/components/chain/state_test.go | 6 +- vms/components/message/handler_test.go | 3 +- vms/platformvm/service_test.go | 11 +- .../txs/executor/proposal_tx_executor_test.go | 9 +- .../txs/executor/standard_tx_executor_test.go | 9 +- vms/platformvm/vm_regression_test.go | 15 +- vms/platformvm/vm_test.go | 23 +-- vms/proposervm/state_syncable_vm_test.go | 5 +- vms/proposervm/vm_test.go | 20 +- x/merkledb/db_test.go | 90 +++----- x/merkledb/metrics_test.go | 12 +- x/merkledb/node_test.go | 9 +- x/merkledb/proof_test.go | 192 ++++++------------ x/merkledb/trie_test.go | 186 ++++++----------- x/sync/client_test.go | 6 +- x/sync/network_server_test.go | 6 +- x/sync/sync_test.go | 75 +++---- 31 files changed, 363 insertions(+), 696 deletions(-) diff --git a/api/admin/service_test.go b/api/admin/service_test.go index b32928bcad5..80d3c188c1c 100644 --- a/api/admin/service_test.go +++ b/api/admin/service_test.go @@ -75,10 +75,8 @@ func TestLoadVMsSuccess(t *testing.T) { // execute test reply := LoadVMsReply{} - err := resources.admin.LoadVMs(&http.Request{}, nil, &reply) - + require.NoError(resources.admin.LoadVMs(&http.Request{}, nil, &reply)) require.Equal(expectedVMRegistry, reply.NewVMs) - 
require.NoError(err) } // Tests behavior for LoadVMs if we fail to reload vms. diff --git a/api/keystore/service_test.go b/api/keystore/service_test.go index d26445878e1..5f0592a75ce 100644 --- a/api/keystore/service_test.go +++ b/api/keystore/service_test.go @@ -40,11 +40,10 @@ func TestServiceCreateUser(t *testing.T) { s := service{ks: ks.(*keystore)} { - err := s.CreateUser(nil, &api.UserPass{ + require.NoError(s.CreateUser(nil, &api.UserPass{ Username: "bob", Password: strongPassword, - }, &api.EmptyReply{}) - require.NoError(err) + }, &api.EmptyReply{})) } { @@ -123,11 +122,10 @@ func TestServiceCreateDuplicate(t *testing.T) { s := service{ks: ks.(*keystore)} { - err := s.CreateUser(nil, &api.UserPass{ + require.NoError(s.CreateUser(nil, &api.UserPass{ Username: "bob", Password: strongPassword, - }, &api.EmptyReply{}) - require.NoError(err) + }, &api.EmptyReply{})) } { @@ -161,11 +159,10 @@ func TestServiceUseBlockchainDB(t *testing.T) { s := service{ks: ks.(*keystore)} { - err := s.CreateUser(nil, &api.UserPass{ + require.NoError(s.CreateUser(nil, &api.UserPass{ Username: "bob", Password: strongPassword, - }, &api.EmptyReply{}) - require.NoError(err) + }, &api.EmptyReply{})) } { @@ -193,11 +190,10 @@ func TestServiceExportImport(t *testing.T) { s := service{ks: ks.(*keystore)} { - err := s.CreateUser(nil, &api.UserPass{ + require.NoError(s.CreateUser(nil, &api.UserPass{ Username: "bob", Password: strongPassword, - }, &api.EmptyReply{}) - require.NoError(err) + }, &api.EmptyReply{})) } { diff --git a/database/corruptabledb/db_test.go b/database/corruptabledb/db_test.go index 6c05036e9c6..6e0aff1785a 100644 --- a/database/corruptabledb/db_test.go +++ b/database/corruptabledb/db_test.go @@ -55,8 +55,7 @@ func TestCorruption(t *testing.T) { corruptableBatch := db.NewBatch() require.NotNil(t, corruptableBatch) - err := corruptableBatch.Put(key, value) - require.NoError(t, err) + require.NoError(t, corruptableBatch.Put(key, value)) return corruptableBatch.Write() }, diff --git a/message/messages_benchmark_test.go b/message/messages_benchmark_test.go index f87493fc024..8d0939a6734 100644 --- a/message/messages_benchmark_test.go +++ b/message/messages_benchmark_test.go @@ -128,10 +128,10 @@ func BenchmarkUnmarshalVersion(b *testing.B) { for i := 0; i < b.N; i++ { if useBuilder { _, err = codec.parseInbound(rawMsg, dummyNodeID, dummyOnFinishedHandling) + require.NoError(err) } else { var msg p2p.Message - err = proto.Unmarshal(rawMsg, &msg) + require.NoError(proto.Unmarshal(rawMsg, &msg)) } - require.NoError(err) } } diff --git a/network/throttling/dial_throttler_test.go b/network/throttling/dial_throttler_test.go index fdc491cf898..2eaa32206ab 100644 --- a/network/throttling/dial_throttler_test.go +++ b/network/throttling/dial_throttler_test.go @@ -21,8 +21,7 @@ func TestDialThrottler(t *testing.T) { acquiredChan := make(chan struct{}, 1) // Should return immediately because < 5 taken this second go func() { - err := throttler.Acquire(context.Background()) - require.NoError(t, err) + require.NoError(t, throttler.Acquire(context.Background())) acquiredChan <- struct{}{} }() select { @@ -36,8 +35,7 @@ func TestDialThrottler(t *testing.T) { acquiredChan := make(chan struct{}, 1) go func() { // Should block because 5 already taken within last second - err := throttler.Acquire(context.Background()) - require.NoError(t, err) + require.NoError(t, throttler.Acquire(context.Background())) acquiredChan <- struct{}{} }() @@ -68,8 +66,7 @@ func TestDialThrottlerCancel(t *testing.T) { acquiredChan := 
make(chan struct{}, 1) // Should return immediately because < 5 taken this second go func() { - err := throttler.Acquire(context.Background()) - require.NoError(t, err) + require.NoError(t, throttler.Acquire(context.Background())) acquiredChan <- struct{}{} }() select { @@ -105,8 +102,7 @@ func TestNoDialThrottler(t *testing.T) { throttler := NewNoDialThrottler() for i := 0; i < 250; i++ { startTime := time.Now() - err := throttler.Acquire(context.Background()) // Should always immediately return - require.NoError(t, err) + require.NoError(t, throttler.Acquire(context.Background())) // Should always immediately return require.WithinDuration(t, time.Now(), startTime, 25*time.Millisecond) } } diff --git a/snow/networking/handler/handler_test.go index b9e757b7d99..ccc694ae199 100644 --- a/snow/networking/handler/handler_test.go +++ b/snow/networking/handler/handler_test.go @@ -39,8 +39,7 @@ func TestHandlerDropsTimedOutMessages(t *testing.T) { vdrs := validators.NewSet() vdr0 := ids.GenerateTestNodeID() - err := vdrs.Add(vdr0, nil, ids.Empty, 1) - require.NoError(t, err) + require.NoError(t, vdrs.Add(vdr0, nil, ids.Empty, 1)) resourceTracker, err := tracker.NewResourceTracker( prometheus.NewRegistry(), @@ -134,8 +133,7 @@ func TestHandlerClosesOnError(t *testing.T) { ctx := snow.DefaultConsensusContextTest() vdrs := validators.NewSet() - err := vdrs.Add(ids.GenerateTestNodeID(), nil, ids.Empty, 1) - require.NoError(t, err) + require.NoError(t, vdrs.Add(ids.GenerateTestNodeID(), nil, ids.Empty, 1)) resourceTracker, err := tracker.NewResourceTracker( prometheus.NewRegistry(), @@ -225,8 +223,7 @@ func TestHandlerDropsGossipDuringBootstrapping(t *testing.T) { closed := make(chan struct{}, 1) ctx := snow.DefaultConsensusContextTest() vdrs := validators.NewSet() - err := vdrs.Add(ids.GenerateTestNodeID(), nil, ids.Empty, 1) - require.NoError(t, err) + require.NoError(t, vdrs.Add(ids.GenerateTestNodeID(), nil, ids.Empty, 1)) resourceTracker, err := tracker.NewResourceTracker( prometheus.NewRegistry(), @@ -305,8 +302,7 @@ func TestHandlerDispatchInternal(t *testing.T) { ctx := snow.DefaultConsensusContextTest() msgFromVMChan := make(chan common.Message) vdrs := validators.NewSet() - err := vdrs.Add(ids.GenerateTestNodeID(), nil, ids.Empty, 1) - require.NoError(t, err) + require.NoError(t, vdrs.Add(ids.GenerateTestNodeID(), nil, ids.Empty, 1)) resourceTracker, err := tracker.NewResourceTracker( prometheus.NewRegistry(), @@ -376,8 +372,7 @@ func TestHandlerDispatchInternal(t *testing.T) { func TestHandlerSubnetConnector(t *testing.T) { ctx := snow.DefaultConsensusContextTest() vdrs := validators.NewSet() - err := vdrs.Add(ids.GenerateTestNodeID(), nil, ids.Empty, 1) - require.NoError(t, err) + require.NoError(t, vdrs.Add(ids.GenerateTestNodeID(), nil, ids.Empty, 1)) resourceTracker, err := tracker.NewResourceTracker( prometheus.NewRegistry(), @@ -553,8 +548,7 @@ func TestDynamicEngineTypeDispatch(t *testing.T) { messageReceived := make(chan struct{}) ctx := snow.DefaultConsensusContextTest() vdrs := validators.NewSet() - err := vdrs.Add(ids.GenerateTestNodeID(), nil, ids.Empty, 1) - require.NoError(t, err) + require.NoError(t, vdrs.Add(ids.GenerateTestNodeID(), nil, ids.Empty, 1)) resourceTracker, err := tracker.NewResourceTracker( prometheus.NewRegistry(), diff --git a/snow/networking/router/chain_router_test.go index 5c3ede1e4bd..42ac9ab1af3 100644 --- a/snow/networking/router/chain_router_test.go +++ 
b/snow/networking/router/chain_router_test.go @@ -43,8 +43,7 @@ const ( func TestShutdown(t *testing.T) { vdrs := validators.NewSet() - err := vdrs.Add(ids.GenerateTestNodeID(), nil, ids.Empty, 1) - require.NoError(t, err) + require.NoError(t, vdrs.Add(ids.GenerateTestNodeID(), nil, ids.Empty, 1)) benchlist := benchlist.NewNoBenchlist() tm, err := timeout.NewManager( &timer.AdaptiveTimeoutConfig{ @@ -178,8 +177,7 @@ func TestShutdown(t *testing.T) { func TestShutdownTimesOut(t *testing.T) { nodeID := ids.EmptyNodeID vdrs := validators.NewSet() - err := vdrs.Add(ids.GenerateTestNodeID(), nil, ids.Empty, 1) - require.NoError(t, err) + require.NoError(t, vdrs.Add(ids.GenerateTestNodeID(), nil, ids.Empty, 1)) benchlist := benchlist.NewNoBenchlist() metrics := prometheus.NewRegistry() // Ensure that the Ancestors request does not timeout @@ -840,8 +838,7 @@ func TestRouterClearTimeouts(t *testing.T) { // Create bootstrapper, engine and handler ctx := snow.DefaultConsensusContextTest() vdrs := validators.NewSet() - err = vdrs.Add(ids.GenerateTestNodeID(), nil, ids.Empty, 1) - require.NoError(t, err) + require.NoError(t, vdrs.Add(ids.GenerateTestNodeID(), nil, ids.Empty, 1)) resourceTracker, err := tracker.NewResourceTracker( prometheus.NewRegistry(), @@ -1132,8 +1129,7 @@ func TestValidatorOnlyMessageDrops(t *testing.T) { sb := subnets.New(ctx.NodeID, subnets.Config{ValidatorOnly: true}) vdrs := validators.NewSet() vID := ids.GenerateTestNodeID() - err = vdrs.Add(vID, nil, ids.Empty, 1) - require.NoError(t, err) + require.NoError(t, vdrs.Add(vID, nil, ids.Empty, 1)) resourceTracker, err := tracker.NewResourceTracker( prometheus.NewRegistry(), resource.NoUsage, @@ -1547,8 +1543,7 @@ func TestValidatorOnlyAllowedNodeMessageDrops(t *testing.T) { vdrs := validators.NewSet() vID := ids.GenerateTestNodeID() - err = vdrs.Add(vID, nil, ids.Empty, 1) - require.NoError(t, err) + require.NoError(t, vdrs.Add(vID, nil, ids.Empty, 1)) resourceTracker, err := tracker.NewResourceTracker( prometheus.NewRegistry(), diff --git a/snow/networking/sender/sender_test.go b/snow/networking/sender/sender_test.go index 104eb6e4f4e..ea80a621189 100644 --- a/snow/networking/sender/sender_test.go +++ b/snow/networking/sender/sender_test.go @@ -320,8 +320,7 @@ func TestTimeout(t *testing.T) { func TestReliableMessages(t *testing.T) { vdrs := validators.NewSet() - err := vdrs.Add(ids.NodeID{1}, nil, ids.Empty, 1) - require.NoError(t, err) + require.NoError(t, vdrs.Add(ids.NodeID{1}, nil, ids.Empty, 1)) benchlist := benchlist.NewNoBenchlist() tm, err := timeout.NewManager( &timer.AdaptiveTimeoutConfig{ @@ -470,8 +469,7 @@ func TestReliableMessages(t *testing.T) { func TestReliableMessagesToMyself(t *testing.T) { benchlist := benchlist.NewNoBenchlist() vdrs := validators.NewSet() - err := vdrs.Add(ids.GenerateTestNodeID(), nil, ids.Empty, 1) - require.NoError(t, err) + require.NoError(t, vdrs.Add(ids.GenerateTestNodeID(), nil, ids.Empty, 1)) tm, err := timeout.NewManager( &timer.AdaptiveTimeoutConfig{ InitialTimeout: 10 * time.Millisecond, diff --git a/utils/sampler/weighted_test.go b/utils/sampler/weighted_test.go index 5bd29fff79f..aba782ea3f6 100644 --- a/utils/sampler/weighted_test.go +++ b/utils/sampler/weighted_test.go @@ -94,16 +94,14 @@ func WeightedInitializeOverflowTest(t *testing.T, s Weighted) { } func WeightedOutOfRangeTest(t *testing.T, s Weighted) { - err := s.Initialize([]uint64{1}) - require.NoError(t, err) + require.NoError(t, s.Initialize([]uint64{1})) - _, err = s.Sample(1) + _, err := s.Sample(1) 
require.ErrorIs(t, err, ErrOutOfRange) } func WeightedSingletonTest(t *testing.T, s Weighted) { - err := s.Initialize([]uint64{1}) - require.NoError(t, err) + require.NoError(t, s.Initialize([]uint64{1})) index, err := s.Sample(0) require.NoError(t, err) @@ -111,8 +109,7 @@ func WeightedSingletonTest(t *testing.T, s Weighted) { } func WeightedWithZeroTest(t *testing.T, s Weighted) { - err := s.Initialize([]uint64{0, 1}) - require.NoError(t, err) + require.NoError(t, s.Initialize([]uint64{0, 1})) index, err := s.Sample(0) require.NoError(t, err) @@ -120,8 +117,7 @@ func WeightedWithZeroTest(t *testing.T, s Weighted) { } func WeightedDistributionTest(t *testing.T, s Weighted) { - err := s.Initialize([]uint64{1, 1, 2, 3, 4}) - require.NoError(t, err) + require.NoError(t, s.Initialize([]uint64{1, 1, 2, 3, 4})) counts := make([]int, 5) for i := uint64(0); i < 11; i++ { diff --git a/utils/sampler/weighted_without_replacement_test.go b/utils/sampler/weighted_without_replacement_test.go index a73cac19071..3bba3087094 100644 --- a/utils/sampler/weighted_without_replacement_test.go +++ b/utils/sampler/weighted_without_replacement_test.go @@ -95,10 +95,9 @@ func WeightedWithoutReplacementOutOfRangeTest( t *testing.T, s WeightedWithoutReplacement, ) { - err := s.Initialize([]uint64{1}) - require.NoError(t, err) + require.NoError(t, s.Initialize([]uint64{1})) - _, err = s.Sample(2) + _, err := s.Sample(2) require.ErrorIs(t, err, ErrOutOfRange) } @@ -106,8 +105,7 @@ func WeightedWithoutReplacementEmptyWithoutWeightTest( t *testing.T, s WeightedWithoutReplacement, ) { - err := s.Initialize(nil) - require.NoError(t, err) + require.NoError(t, s.Initialize(nil)) indices, err := s.Sample(0) require.NoError(t, err) @@ -118,8 +116,7 @@ func WeightedWithoutReplacementEmptyTest( t *testing.T, s WeightedWithoutReplacement, ) { - err := s.Initialize([]uint64{1}) - require.NoError(t, err) + require.NoError(t, s.Initialize([]uint64{1})) indices, err := s.Sample(0) require.NoError(t, err) @@ -130,8 +127,7 @@ func WeightedWithoutReplacementSingletonTest( t *testing.T, s WeightedWithoutReplacement, ) { - err := s.Initialize([]uint64{1}) - require.NoError(t, err) + require.NoError(t, s.Initialize([]uint64{1})) indices, err := s.Sample(1) require.NoError(t, err) @@ -142,8 +138,7 @@ func WeightedWithoutReplacementWithZeroTest( t *testing.T, s WeightedWithoutReplacement, ) { - err := s.Initialize([]uint64{0, 1}) - require.NoError(t, err) + require.NoError(t, s.Initialize([]uint64{0, 1})) indices, err := s.Sample(1) require.NoError(t, err) @@ -159,8 +154,7 @@ func WeightedWithoutReplacementDistributionTest( t *testing.T, s WeightedWithoutReplacement, ) { - err := s.Initialize([]uint64{1, 1, 2}) - require.NoError(t, err) + require.NoError(t, s.Initialize([]uint64{1, 1, 2})) indices, err := s.Sample(4) require.NoError(t, err) diff --git a/vms/avm/index_test.go b/vms/avm/index_test.go index 5ce7af2ae83..22d4d13fc9f 100644 --- a/vms/avm/index_test.go +++ b/vms/avm/index_test.go @@ -114,8 +114,7 @@ func TestIndexTransaction_Ordered(t *testing.T) { } // index the transaction - err := vm.addressTxsIndexer.Accept(uniqueParsedTX.ID(), inputUTXOs, uniqueParsedTX.UTXOs()) - require.NoError(t, err) + require.NoError(t, vm.addressTxsIndexer.Accept(uniqueParsedTX.ID(), inputUTXOs, uniqueParsedTX.UTXOs())) } // ensure length is 5 @@ -204,8 +203,7 @@ func TestIndexTransaction_MultipleTransactions(t *testing.T) { } // index the transaction - err := vm.addressTxsIndexer.Accept(uniqueParsedTX.ID(), inputUTXOs, uniqueParsedTX.UTXOs()) - 
require.NoError(t, err) + require.NoError(t, vm.addressTxsIndexer.Accept(uniqueParsedTX.ID(), inputUTXOs, uniqueParsedTX.UTXOs())) } // ensure length is same as keys length @@ -274,9 +272,7 @@ func TestIndexTransaction_MultipleAddresses(t *testing.T) { } // index the transaction - err := vm.addressTxsIndexer.Accept(tx.ID(), inputUTXOs, tx.UTXOs()) - require.NoError(t, err) - require.NoError(t, err) + require.NoError(t, vm.addressTxsIndexer.Accept(tx.ID(), inputUTXOs, tx.UTXOs())) assertIndexedTX(t, vm.db, uint64(0), addr, txAssetID.ID, tx.ID()) assertLatestIdx(t, vm.db, addr, txAssetID.ID, 1) @@ -357,8 +353,7 @@ func TestIndexTransaction_UnorderedWrites(t *testing.T) { } // index the transaction, NOT calling Accept(ids.ID) method - err := vm.addressTxsIndexer.Accept(uniqueParsedTX.ID(), inputUTXOs, uniqueParsedTX.UTXOs()) - require.NoError(t, err) + require.NoError(t, vm.addressTxsIndexer.Accept(uniqueParsedTX.ID(), inputUTXOs, uniqueParsedTX.UTXOs())) } // ensure length is same as keys length @@ -608,17 +603,14 @@ func setupTestTxsInDB(t *testing.T, db *versiondb.Database, address ids.ShortID, binary.BigEndian.PutUint64(idxBytes, idx) for _, txID := range testTxs { txID := txID - err := assetPrefixDB.Put(idxBytes, txID[:]) - require.NoError(t, err) + require.NoError(t, assetPrefixDB.Put(idxBytes, txID[:])) idx++ binary.BigEndian.PutUint64(idxBytes, idx) } _, err := db.CommitBatch() require.NoError(t, err) - err = assetPrefixDB.Put([]byte("idx"), idxBytes) - require.NoError(t, err) - err = db.Commit() - require.NoError(t, err) + require.NoError(t, assetPrefixDB.Put([]byte("idx"), idxBytes)) + require.NoError(t, db.Commit()) return testTxs } diff --git a/vms/avm/service_test.go b/vms/avm/service_test.go index f2b95b9b056..d090c6e5f46 100644 --- a/vms/avm/service_test.go +++ b/vms/avm/service_test.go @@ -310,8 +310,7 @@ func TestServiceGetBalanceStrict(t *testing.T) { IncludePartial: true, } balanceReply := &GetBalanceReply{} - err = s.GetBalance(nil, balanceArgs, balanceReply) - require.NoError(t, err) + require.NoError(t, s.GetBalance(nil, balanceArgs, balanceReply)) // The balance should include the UTXO since it is partly owned by [addr] require.Equal(t, uint64(1337), uint64(balanceReply.Balance)) require.Len(t, balanceReply.UTXOIDs, 1, "should have only returned 1 utxoID") @@ -322,8 +321,7 @@ func TestServiceGetBalanceStrict(t *testing.T) { AssetID: assetID.String(), } balanceReply = &GetBalanceReply{} - err = s.GetBalance(nil, balanceArgs, balanceReply) - require.NoError(t, err) + require.NoError(t, s.GetBalance(nil, balanceArgs, balanceReply)) // The balance should not include the UTXO since it is only partly owned by [addr] require.Zero(t, balanceReply.Balance) require.Empty(t, balanceReply.UTXOIDs, "should have returned 0 utxoIDs") @@ -355,8 +353,7 @@ func TestServiceGetBalanceStrict(t *testing.T) { IncludePartial: true, } balanceReply = &GetBalanceReply{} - err = s.GetBalance(nil, balanceArgs, balanceReply) - require.NoError(t, err) + require.NoError(t, s.GetBalance(nil, balanceArgs, balanceReply)) // The balance should include the UTXO since it is partly owned by [addr] require.Equal(t, uint64(1337+1337), uint64(balanceReply.Balance)) require.Len(t, balanceReply.UTXOIDs, 2, "should have only returned 2 utxoIDs") @@ -367,8 +364,7 @@ func TestServiceGetBalanceStrict(t *testing.T) { AssetID: assetID.String(), } balanceReply = &GetBalanceReply{} - err = s.GetBalance(nil, balanceArgs, balanceReply) - require.NoError(t, err) + require.NoError(t, s.GetBalance(nil, balanceArgs, 
balanceReply)) // The balance should not include the UTXO since it is only partly owned by [addr] require.Zero(t, balanceReply.Balance) require.Empty(t, balanceReply.UTXOIDs, "should have returned 0 utxoIDs") @@ -402,8 +398,7 @@ func TestServiceGetBalanceStrict(t *testing.T) { IncludePartial: true, } balanceReply = &GetBalanceReply{} - err = s.GetBalance(nil, balanceArgs, balanceReply) - require.NoError(t, err) + require.NoError(t, s.GetBalance(nil, balanceArgs, balanceReply)) // The balance should include the UTXO since it is partly owned by [addr] require.Equal(t, uint64(1337*3), uint64(balanceReply.Balance)) require.Len(t, balanceReply.UTXOIDs, 3, "should have returned 3 utxoIDs") @@ -414,8 +409,7 @@ func TestServiceGetBalanceStrict(t *testing.T) { AssetID: assetID.String(), } balanceReply = &GetBalanceReply{} - err = s.GetBalance(nil, balanceArgs, balanceReply) - require.NoError(t, err) + require.NoError(t, s.GetBalance(nil, balanceArgs, balanceReply)) // The balance should not include the UTXO since it is only partly owned by [addr] require.Zero(t, balanceReply.Balance) require.Empty(t, balanceReply.UTXOIDs, "should have returned 0 utxoIDs") @@ -450,16 +444,14 @@ func TestServiceGetTxs(t *testing.T) { AssetID: assetID.String(), } getTxsReply := &GetAddressTxsReply{} - err = s.GetAddressTxs(nil, getTxsArgs, getTxsReply) - require.NoError(t, err) + require.NoError(t, s.GetAddressTxs(nil, getTxsArgs, getTxsReply)) require.Len(t, getTxsReply.TxIDs, 10) require.Equal(t, getTxsReply.TxIDs, testTxs[:10]) // get the second page getTxsArgs.Cursor = getTxsReply.Cursor getTxsReply = &GetAddressTxsReply{} - err = s.GetAddressTxs(nil, getTxsArgs, getTxsReply) - require.NoError(t, err) + require.NoError(t, s.GetAddressTxs(nil, getTxsArgs, getTxsReply)) require.Len(t, getTxsReply.TxIDs, 10) require.Equal(t, getTxsReply.TxIDs, testTxs[10:20]) } @@ -505,8 +497,7 @@ func TestServiceGetAllBalances(t *testing.T) { IncludePartial: true, } reply := &GetAllBalancesReply{} - err = s.GetAllBalances(nil, balanceArgs, reply) - require.NoError(t, err) + require.NoError(t, s.GetAllBalances(nil, balanceArgs, reply)) // The balance should include the UTXO since it is partly owned by [addr] require.Len(t, reply.Balances, 1) require.Equal(t, assetID.String(), reply.Balances[0].AssetID) @@ -517,8 +508,7 @@ func TestServiceGetAllBalances(t *testing.T) { JSONAddress: api.JSONAddress{Address: addrStr}, } reply = &GetAllBalancesReply{} - err = s.GetAllBalances(nil, balanceArgs, reply) - require.NoError(t, err) + require.NoError(t, s.GetAllBalances(nil, balanceArgs, reply)) require.Empty(t, reply.Balances) // A UTXO with a 1 out of 2 multisig @@ -547,8 +537,7 @@ func TestServiceGetAllBalances(t *testing.T) { IncludePartial: true, } reply = &GetAllBalancesReply{} - err = s.GetAllBalances(nil, balanceArgs, reply) - require.NoError(t, err) + require.NoError(t, s.GetAllBalances(nil, balanceArgs, reply)) // The balance should include the UTXO since it is partly owned by [addr] require.Len(t, reply.Balances, 1) require.Equal(t, assetID.String(), reply.Balances[0].AssetID) @@ -559,8 +548,7 @@ func TestServiceGetAllBalances(t *testing.T) { JSONAddress: api.JSONAddress{Address: addrStr}, } reply = &GetAllBalancesReply{} - err = s.GetAllBalances(nil, balanceArgs, reply) - require.NoError(t, err) + require.NoError(t, s.GetAllBalances(nil, balanceArgs, reply)) // The balance should not include the UTXO since it is only partly owned by [addr] require.Empty(t, reply.Balances) @@ -592,8 +580,7 @@ func TestServiceGetAllBalances(t 
*testing.T) { IncludePartial: true, } reply = &GetAllBalancesReply{} - err = s.GetAllBalances(nil, balanceArgs, reply) - require.NoError(t, err) + require.NoError(t, s.GetAllBalances(nil, balanceArgs, reply)) // The balance should include the UTXO since it is partly owned by [addr] require.Len(t, reply.Balances, 1) @@ -604,8 +591,7 @@ func TestServiceGetAllBalances(t *testing.T) { JSONAddress: api.JSONAddress{Address: addrStr}, } reply = &GetAllBalancesReply{} - err = s.GetAllBalances(nil, balanceArgs, reply) - require.NoError(t, err) + require.NoError(t, s.GetAllBalances(nil, balanceArgs, reply)) // The balance should not include the UTXO since it is only partly owned by [addr] require.Empty(t, reply.Balances) @@ -635,8 +621,7 @@ func TestServiceGetAllBalances(t *testing.T) { IncludePartial: true, } reply = &GetAllBalancesReply{} - err = s.GetAllBalances(nil, balanceArgs, reply) - require.NoError(t, err) + require.NoError(t, s.GetAllBalances(nil, balanceArgs, reply)) // The balance should include the UTXO since it is partly owned by [addr] require.Len(t, reply.Balances, 2) gotAssetIDs := []string{reply.Balances[0].AssetID, reply.Balances[1].AssetID} @@ -651,8 +636,7 @@ func TestServiceGetAllBalances(t *testing.T) { JSONAddress: api.JSONAddress{Address: addrStr}, } reply = &GetAllBalancesReply{} - err = s.GetAllBalances(nil, balanceArgs, reply) - require.NoError(t, err) + require.NoError(t, s.GetAllBalances(nil, balanceArgs, reply)) // The balance should not include the UTXO since it is only partly owned by [addr] require.Empty(t, reply.Balances) } @@ -709,11 +693,10 @@ func TestServiceGetTxJSON_BaseTx(t *testing.T) { require.NoError(txs[0].Accept(context.Background())) reply := api.GetTxReply{} - err = s.GetTx(nil, &api.GetTxArgs{ + require.NoError(s.GetTx(nil, &api.GetTxArgs{ TxID: txID, Encoding: formatting.JSON, - }, &reply) - require.NoError(err) + }, &reply)) require.Equal(reply.Encoding, formatting.JSON) jsonTxBytes, err := stdjson.Marshal(reply.Tx) @@ -751,11 +734,10 @@ func TestServiceGetTxJSON_ExportTx(t *testing.T) { require.NoError(txs[0].Accept(context.Background())) reply := api.GetTxReply{} - err = s.GetTx(nil, &api.GetTxArgs{ + require.NoError(s.GetTx(nil, &api.GetTxArgs{ TxID: txID, Encoding: formatting.JSON, - }, &reply) - require.NoError(err) + }, &reply)) require.Equal(reply.Encoding, formatting.JSON) jsonTxBytes, err := stdjson.Marshal(reply.Tx) @@ -783,7 +765,7 @@ func TestServiceGetTxJSON_CreateAssetTx(t *testing.T) { genesisBytes := BuildGenesisTest(t) issuer := make(chan common.Message, 1) - err := vm.Initialize( + require.NoError(vm.Initialize( context.Background(), ctx, baseDBManager, @@ -806,8 +788,7 @@ func TestServiceGetTxJSON_CreateAssetTx(t *testing.T) { }, }, &common.SenderTest{T: t}, - ) - require.NoError(err) + )) vm.batchTimeout = 0 require.NoError(vm.SetState(context.Background(), snow.Bootstrapping)) @@ -829,11 +810,10 @@ func TestServiceGetTxJSON_CreateAssetTx(t *testing.T) { reply := api.GetTxReply{} s := &Service{vm: vm} - err = s.GetTx(nil, &api.GetTxArgs{ + require.NoError(s.GetTx(nil, &api.GetTxArgs{ TxID: txID, Encoding: formatting.JSON, - }, &reply) - require.NoError(err) + }, &reply)) require.Equal(reply.Encoding, formatting.JSON) jsonTxBytes, err := stdjson.Marshal(reply.Tx) @@ -862,7 +842,7 @@ func TestServiceGetTxJSON_OperationTxWithNftxMintOp(t *testing.T) { genesisBytes := BuildGenesisTest(t) issuer := make(chan common.Message, 1) - err := vm.Initialize( + 
require.NoError(vm.Initialize( context.Background(), ctx, baseDBManager, @@ -885,8 +865,7 @@ func TestServiceGetTxJSON_OperationTxWithNftxMintOp(t *testing.T) { }, }, &common.SenderTest{T: t}, - ) - require.NoError(err) + )) vm.batchTimeout = 0 require.NoError(vm.SetState(context.Background(), snow.Bootstrapping)) @@ -894,7 +873,7 @@ func TestServiceGetTxJSON_OperationTxWithNftxMintOp(t *testing.T) { key := keys[0] createAssetTx := newAvaxCreateAssetTxWithOutputs(t, vm) - _, err = vm.IssueTx(createAssetTx.Bytes()) + _, err := vm.IssueTx(createAssetTx.Bytes()) require.NoError(err) mintNFTTx := buildOperationTxWithOp(buildNFTxMintOp(createAssetTx, key, 2, 1)) @@ -916,11 +895,10 @@ func TestServiceGetTxJSON_OperationTxWithNftxMintOp(t *testing.T) { reply := api.GetTxReply{} s := &Service{vm: vm} - err = s.GetTx(nil, &api.GetTxArgs{ + require.NoError(s.GetTx(nil, &api.GetTxArgs{ TxID: txID, Encoding: formatting.JSON, - }, &reply) - require.NoError(err) + }, &reply)) require.Equal(reply.Encoding, formatting.JSON) jsonTxBytes, err := stdjson.Marshal(reply.Tx) @@ -1009,11 +987,10 @@ func TestServiceGetTxJSON_OperationTxWithMultipleNftxMintOp(t *testing.T) { reply := api.GetTxReply{} s := &Service{vm: vm} - err = s.GetTx(nil, &api.GetTxArgs{ + require.NoError(s.GetTx(nil, &api.GetTxArgs{ TxID: txID, Encoding: formatting.JSON, - }, &reply) - require.NoError(err) + }, &reply)) require.Equal(reply.Encoding, formatting.JSON) jsonTxBytes, err := stdjson.Marshal(reply.Tx) @@ -1099,11 +1076,10 @@ func TestServiceGetTxJSON_OperationTxWithSecpMintOp(t *testing.T) { reply := api.GetTxReply{} s := &Service{vm: vm} - err = s.GetTx(nil, &api.GetTxArgs{ + require.NoError(s.GetTx(nil, &api.GetTxArgs{ TxID: txID, Encoding: formatting.JSON, - }, &reply) - require.NoError(err) + }, &reply)) require.Equal(reply.Encoding, formatting.JSON) jsonTxBytes, err := stdjson.Marshal(reply.Tx) @@ -1138,7 +1114,7 @@ func TestServiceGetTxJSON_OperationTxWithMultipleSecpMintOp(t *testing.T) { genesisBytes := BuildGenesisTest(t) issuer := make(chan common.Message, 1) - err := vm.Initialize( + require.NoError(vm.Initialize( context.Background(), ctx, baseDBManager, @@ -1161,8 +1137,7 @@ func TestServiceGetTxJSON_OperationTxWithMultipleSecpMintOp(t *testing.T) { }, }, &common.SenderTest{T: t}, - ) - require.NoError(err) + )) vm.batchTimeout = 0 require.NoError(vm.SetState(context.Background(), snow.Bootstrapping)) @@ -1170,7 +1145,7 @@ func TestServiceGetTxJSON_OperationTxWithMultipleSecpMintOp(t *testing.T) { key := keys[0] createAssetTx := newAvaxCreateAssetTxWithOutputs(t, vm) - _, err = vm.IssueTx(createAssetTx.Bytes()) + _, err := vm.IssueTx(createAssetTx.Bytes()) require.NoError(err) op1 := buildSecpMintOp(createAssetTx, key, 0) @@ -1195,11 +1170,10 @@ func TestServiceGetTxJSON_OperationTxWithMultipleSecpMintOp(t *testing.T) { reply := api.GetTxReply{} s := &Service{vm: vm} - err = s.GetTx(nil, &api.GetTxArgs{ + require.NoError(s.GetTx(nil, &api.GetTxArgs{ TxID: txID, Encoding: formatting.JSON, - }, &reply) - require.NoError(err) + }, &reply)) require.Equal(reply.Encoding, formatting.JSON) jsonTxBytes, err := stdjson.Marshal(reply.Tx) @@ -1286,11 +1260,10 @@ func TestServiceGetTxJSON_OperationTxWithPropertyFxMintOp(t *testing.T) { reply := api.GetTxReply{} s := &Service{vm: vm} - err = s.GetTx(nil, &api.GetTxArgs{ + require.NoError(s.GetTx(nil, &api.GetTxArgs{ TxID: txID, Encoding: formatting.JSON, - }, &reply) - require.NoError(err) + }, &reply)) require.Equal(reply.Encoding, formatting.JSON) jsonTxBytes, err := 
stdjson.Marshal(reply.Tx) @@ -1324,7 +1297,7 @@ func TestServiceGetTxJSON_OperationTxWithPropertyFxMintOpMultiple(t *testing.T) genesisBytes := BuildGenesisTest(t) issuer := make(chan common.Message, 1) - err := vm.Initialize( + require.NoError(vm.Initialize( context.Background(), ctx, baseDBManager, @@ -1347,8 +1320,7 @@ func TestServiceGetTxJSON_OperationTxWithPropertyFxMintOpMultiple(t *testing.T) }, }, &common.SenderTest{T: t}, - ) - require.NoError(err) + )) vm.batchTimeout = 0 require.NoError(vm.SetState(context.Background(), snow.Bootstrapping)) @@ -1356,7 +1328,7 @@ func TestServiceGetTxJSON_OperationTxWithPropertyFxMintOpMultiple(t *testing.T) key := keys[0] createAssetTx := newAvaxCreateAssetTxWithOutputs(t, vm) - _, err = vm.IssueTx(createAssetTx.Bytes()) + _, err := vm.IssueTx(createAssetTx.Bytes()) require.NoError(err) op1 := buildPropertyFxMintOp(createAssetTx, key, 4) @@ -1381,11 +1353,10 @@ func TestServiceGetTxJSON_OperationTxWithPropertyFxMintOpMultiple(t *testing.T) reply := api.GetTxReply{} s := &Service{vm: vm} - err = s.GetTx(nil, &api.GetTxArgs{ + require.NoError(s.GetTx(nil, &api.GetTxArgs{ TxID: txID, Encoding: formatting.JSON, - }, &reply) - require.NoError(err) + }, &reply)) require.Equal(reply.Encoding, formatting.JSON) jsonTxBytes, err := stdjson.Marshal(reply.Tx) diff --git a/vms/avm/txs/executor/semantic_verifier_test.go b/vms/avm/txs/executor/semantic_verifier_test.go index b5157fb97b2..07600560c02 100644 --- a/vms/avm/txs/executor/semantic_verifier_test.go +++ b/vms/avm/txs/executor/semantic_verifier_test.go @@ -309,11 +309,10 @@ func TestSemanticVerifierBaseTx(t *testing.T) { tx := &txs.Tx{ Unsigned: &baseTx, } - err := tx.SignSECP256K1Fx( + require.NoError(tx.SignSECP256K1Fx( codec, [][]*secp256k1.PrivateKey{}, - ) - require.NoError(err) + )) return tx }, err: errIncompatibleFx, @@ -687,11 +686,10 @@ func TestSemanticVerifierExportTx(t *testing.T) { tx := &txs.Tx{ Unsigned: &exportTx, } - err := tx.SignSECP256K1Fx( + require.NoError(tx.SignSECP256K1Fx( codec, [][]*secp256k1.PrivateKey{}, - ) - require.NoError(err) + )) return tx }, err: errIncompatibleFx, diff --git a/vms/avm/vm_test.go b/vms/avm/vm_test.go index f8ee1c4fc33..177bb1e7b7e 100644 --- a/vms/avm/vm_test.go +++ b/vms/avm/vm_test.go @@ -773,19 +773,18 @@ func TestIssueNFT(t *testing.T) { // Test issuing a transaction that creates an Property family func TestIssueProperty(t *testing.T) { + require := require.New(t) vm := &VM{} ctx := NewContext(t) ctx.Lock.Lock() defer func() { - if err := vm.Shutdown(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(vm.Shutdown(context.Background())) ctx.Lock.Unlock() }() genesisBytes := BuildGenesisTest(t) issuer := make(chan common.Message, 1) - err := vm.Initialize( + require.NoError(vm.Initialize( context.Background(), ctx, manager.NewMemDB(version.Semantic1_0_0), @@ -808,21 +807,11 @@ func TestIssueProperty(t *testing.T) { }, }, nil, - ) - if err != nil { - t.Fatal(err) - } + )) vm.batchTimeout = 0 - err = vm.SetState(context.Background(), snow.Bootstrapping) - if err != nil { - t.Fatal(err) - } - - err = vm.SetState(context.Background(), snow.NormalOp) - if err != nil { - t.Fatal(err) - } + require.NoError(vm.SetState(context.Background(), snow.Bootstrapping)) + require.NoError(vm.SetState(context.Background(), snow.NormalOp)) createAssetTx := &txs.Tx{Unsigned: &txs.CreateAssetTx{ BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ @@ -844,13 +833,10 @@ func TestIssueProperty(t *testing.T) { }, }}, }} - if err := 
vm.parser.InitializeTx(createAssetTx); err != nil { - t.Fatal(err) - } + require.NoError(vm.parser.InitializeTx(createAssetTx)) - if _, err := vm.IssueTx(createAssetTx.Bytes()); err != nil { - t.Fatal(err) - } + _, err := vm.IssueTx(createAssetTx.Bytes()) + require.NoError(err) mintPropertyTx := &txs.Tx{Unsigned: &txs.OperationTx{ BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ @@ -879,16 +865,12 @@ func TestIssueProperty(t *testing.T) { }} codec := vm.parser.Codec() - err = mintPropertyTx.SignPropertyFx(codec, [][]*secp256k1.PrivateKey{ + require.NoError(mintPropertyTx.SignPropertyFx(codec, [][]*secp256k1.PrivateKey{ {keys[0]}, - }) - if err != nil { - t.Fatal(err) - } + })) - if _, err := vm.IssueTx(mintPropertyTx.Bytes()); err != nil { - t.Fatal(err) - } + _, err = vm.IssueTx(mintPropertyTx.Bytes()) + require.NoError(err) burnPropertyTx := &txs.Tx{Unsigned: &txs.OperationTx{ BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ @@ -905,16 +887,12 @@ func TestIssueProperty(t *testing.T) { }}, }} - err = burnPropertyTx.SignPropertyFx(codec, [][]*secp256k1.PrivateKey{ + require.NoError(burnPropertyTx.SignPropertyFx(codec, [][]*secp256k1.PrivateKey{ {}, - }) - if err != nil { - t.Fatal(err) - } + })) - if _, err := vm.IssueTx(burnPropertyTx.Bytes()); err != nil { - t.Fatal(err) - } + _, err = vm.IssueTx(burnPropertyTx.Bytes()) + require.NoError(err) } func setupTxFeeAssets(t *testing.T) ([]byte, chan common.Message, *VM, *atomic.Memory) { @@ -978,8 +956,7 @@ func TestIssueTxWithFeeAsset(t *testing.T) { genesisBytes, issuer, vm, _ := setupTxFeeAssets(t) ctx := vm.ctx defer func() { - err := vm.Shutdown(context.Background()) - require.NoError(t, err) + require.NoError(t, vm.Shutdown(context.Background())) ctx.Lock.Unlock() }() // send first asset @@ -1004,8 +981,7 @@ func TestIssueTxWithAnotherAsset(t *testing.T) { genesisBytes, issuer, vm, _ := setupTxFeeAssets(t) ctx := vm.ctx defer func() { - err := vm.Shutdown(context.Background()) - require.NoError(t, err) + require.NoError(t, vm.Shutdown(context.Background())) ctx.Lock.Unlock() }() @@ -1763,6 +1739,7 @@ func TestImportTxNotState(t *testing.T) { // Test issuing an import transaction. 
func TestIssueExportTx(t *testing.T) { + require := require.New(t) genesisBytes := BuildGenesisTest(t) issuer := make(chan common.Message, 1) @@ -1779,7 +1756,7 @@ func TestIssueExportTx(t *testing.T) { ctx.Lock.Lock() vm := &VM{} - if err := vm.Initialize( + require.NoError(vm.Initialize( context.Background(), ctx, baseDBManager.NewPrefixDBManager([]byte{1}), @@ -1791,18 +1768,11 @@ func TestIssueExportTx(t *testing.T) { Fx: &secp256k1fx.Fx{}, }}, nil, - ); err != nil { - t.Fatal(err) - } + )) vm.batchTimeout = 0 - if err := vm.SetState(context.Background(), snow.Bootstrapping); err != nil { - t.Fatal(err) - } - - if err := vm.SetState(context.Background(), snow.NormalOp); err != nil { - t.Fatal(err) - } + require.NoError(vm.SetState(context.Background(), snow.Bootstrapping)) + require.NoError(vm.SetState(context.Background(), snow.NormalOp)) key := keys[0] @@ -1834,40 +1804,27 @@ func TestIssueExportTx(t *testing.T) { }, }}, }} - if err := tx.SignSECP256K1Fx(vm.parser.Codec(), [][]*secp256k1.PrivateKey{{key}}); err != nil { - t.Fatal(err) - } + require.NoError(tx.SignSECP256K1Fx(vm.parser.Codec(), [][]*secp256k1.PrivateKey{{key}})) - if _, err := vm.IssueTx(tx.Bytes()); err != nil { - t.Fatal(err) - } + _, err := vm.IssueTx(tx.Bytes()) + require.NoError(err) ctx.Lock.Unlock() - msg := <-issuer - if msg != common.PendingTxs { - t.Fatalf("Wrong message") - } + require.Equal(common.PendingTxs, <-issuer) ctx.Lock.Lock() defer func() { - if err := vm.Shutdown(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(vm.Shutdown(context.Background())) ctx.Lock.Unlock() }() txs := vm.PendingTxs(context.Background()) - if len(txs) != 1 { - t.Fatalf("Should have returned %d tx(s)", 1) - } + require.Len(txs, 1) parsedTx := txs[0] - if err := parsedTx.Verify(context.Background()); err != nil { - t.Fatal(err) - } else if err := parsedTx.Accept(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(parsedTx.Verify(context.Background())) + require.NoError(parsedTx.Accept(context.Background())) peerSharedMemory := m.NewSharedMemory(constants.PlatformChainID) utxoBytes, _, _, err := peerSharedMemory.Indexed( @@ -1879,12 +1836,8 @@ func TestIssueExportTx(t *testing.T) { nil, math.MaxInt32, ) - if err != nil { - t.Fatal(err) - } - if len(utxoBytes) != 1 { - t.Fatalf("wrong number of utxos %d", len(utxoBytes)) - } + require.NoError(err) + require.Len(utxoBytes, 1) } func TestClearForceAcceptedExportTx(t *testing.T) { diff --git a/vms/components/chain/state_test.go b/vms/components/chain/state_test.go index 9fdad9d8f84..28b83f1f822 100644 --- a/vms/components/chain/state_test.go +++ b/vms/components/chain/state_test.go @@ -920,15 +920,13 @@ func TestIsProcessing(t *testing.T) { require.False(t, chainState.IsProcessing(parsedBlk1.ID())) // Verify blk1 - err = parsedBlk1.Verify(context.Background()) - require.NoError(t, err) + require.NoError(t, parsedBlk1.Verify(context.Background())) // Check that it is processing in consensus require.True(t, chainState.IsProcessing(parsedBlk1.ID())) // Accept blk1 - err = parsedBlk1.Accept(context.Background()) - require.NoError(t, err) + require.NoError(t, parsedBlk1.Accept(context.Background())) // Check that it is no longer processing in consensus require.False(t, chainState.IsProcessing(parsedBlk1.ID())) diff --git a/vms/components/message/handler_test.go b/vms/components/message/handler_test.go index 6af6bbed867..489c973b68e 100644 --- a/vms/components/message/handler_test.go +++ b/vms/components/message/handler_test.go @@ -36,6 +36,5 
@@ func TestNoopHandler(t *testing.T) { Log: logging.NoLog{}, } - err := handler.HandleTx(ids.EmptyNodeID, 0, nil) - require.NoError(t, err) + require.NoError(t, handler.HandleTx(ids.EmptyNodeID, 0, nil)) } diff --git a/vms/platformvm/service_test.go b/vms/platformvm/service_test.go index 158035ebfe8..94b34479d7e 100644 --- a/vms/platformvm/service_test.go +++ b/vms/platformvm/service_test.go @@ -72,8 +72,7 @@ func defaultService(t *testing.T) (*Service, *mutableSharedMemory) { vm.ctx.Lock.Lock() defer vm.ctx.Lock.Unlock() ks := keystore.New(logging.NoLog{}, manager.NewMemDB(version.Semantic1_0_0)) - err := ks.CreateUser(testUsername, testPassword) - require.NoError(t, err) + require.NoError(t, ks.CreateUser(testUsername, testPassword)) vm.ctx.Keystore = ks.NewBlockchainKeyStore(vm.ctx.ChainID) return &Service{ @@ -95,8 +94,7 @@ func defaultAddress(t *testing.T, service *Service) { pk, err := testKeyFactory.ToPrivateKey(testPrivateKey) require.NoError(t, err) - err = user.PutKeys(pk, keys[0]) - require.NoError(t, err) + require.NoError(t, user.PutKeys(pk, keys[0])) } func TestAddValidator(t *testing.T) { @@ -110,10 +108,9 @@ func TestAddValidator(t *testing.T) { func TestCreateBlockchainArgsParsing(t *testing.T) { jsonString := `{"vmID":"lol","fxIDs":["secp256k1"], "name":"awesome", "username":"bob loblaw", "password":"yeet", "genesisData":"SkB92YpWm4Q2iPnLGCuDPZPgUQMxajqQQuz91oi3xD984f8r"}` args := CreateBlockchainArgs{} - err := stdjson.Unmarshal([]byte(jsonString), &args) - require.NoError(t, err) + require.NoError(t, stdjson.Unmarshal([]byte(jsonString), &args)) - _, err = stdjson.Marshal(args.GenesisData) + _, err := stdjson.Marshal(args.GenesisData) require.NoError(t, err) } diff --git a/vms/platformvm/txs/executor/proposal_tx_executor_test.go b/vms/platformvm/txs/executor/proposal_tx_executor_test.go index 517251e0206..f9f1fc333fc 100644 --- a/vms/platformvm/txs/executor/proposal_tx_executor_test.go +++ b/vms/platformvm/txs/executor/proposal_tx_executor_test.go @@ -56,8 +56,7 @@ func TestProposalTxExecuteAddDelegator(t *testing.T) { target.state.PutCurrentValidator(staker) target.state.AddTx(tx, status.Committed) target.state.SetHeight(dummyHeight) - err = target.state.Commit() - require.NoError(t, err) + require.NoError(t, target.state.Commit()) } // [addMaxStakeValidator] adds a new validator to the primary network's @@ -85,8 +84,7 @@ func TestProposalTxExecuteAddDelegator(t *testing.T) { target.state.PutCurrentValidator(staker) target.state.AddTx(tx, status.Committed) target.state.SetHeight(dummyHeight) - err = target.state.Commit() - require.NoError(t, err) + require.NoError(t, target.state.Commit()) } dummyH := newEnvironment(false /*=postBanff*/, false /*=postCortina*/) @@ -209,8 +207,7 @@ func TestProposalTxExecuteAddDelegator(t *testing.T) { target.state.DeleteUTXO(utxoID) } target.state.SetHeight(dummyHeight) - err = target.state.Commit() - require.NoError(t, err) + require.NoError(t, target.state.Commit()) }, AP3Time: defaultGenesisTime, expectedErr: ErrFlowCheckFailed, diff --git a/vms/platformvm/txs/executor/standard_tx_executor_test.go b/vms/platformvm/txs/executor/standard_tx_executor_test.go index 06df97f05f4..4cfb1dbce12 100644 --- a/vms/platformvm/txs/executor/standard_tx_executor_test.go +++ b/vms/platformvm/txs/executor/standard_tx_executor_test.go @@ -130,8 +130,7 @@ func TestStandardTxExecutorAddDelegator(t *testing.T) { target.state.PutCurrentValidator(staker) target.state.AddTx(tx, status.Committed) target.state.SetHeight(dummyHeight) - err = 
target.state.Commit() - require.NoError(t, err) + require.NoError(t, target.state.Commit()) } // [addMaxStakeValidator] adds a new validator to the primary network's @@ -159,8 +158,7 @@ func TestStandardTxExecutorAddDelegator(t *testing.T) { target.state.PutCurrentValidator(staker) target.state.AddTx(tx, status.Committed) target.state.SetHeight(dummyHeight) - err = target.state.Commit() - require.NoError(t, err) + require.NoError(t, target.state.Commit()) } dummyH := newEnvironment(false /*=postBanff*/, false /*=postCortina*/) @@ -291,8 +289,7 @@ func TestStandardTxExecutorAddDelegator(t *testing.T) { target.state.DeleteUTXO(utxoID) } target.state.SetHeight(dummyHeight) - err = target.state.Commit() - require.NoError(t, err) + require.NoError(t, target.state.Commit()) }, AP3Time: defaultGenesisTime, expectedExecutionErr: ErrFlowCheckFailed, diff --git a/vms/platformvm/vm_regression_test.go b/vms/platformvm/vm_regression_test.go index 2a4aeac748d..e1e2f33028e 100644 --- a/vms/platformvm/vm_regression_test.go +++ b/vms/platformvm/vm_regression_test.go @@ -360,7 +360,7 @@ func TestUnverifiedParentPanicRegression(t *testing.T) { }() msgChan := make(chan common.Message, 1) - err := vm.Initialize( + require.NoError(vm.Initialize( context.Background(), ctx, baseDBManager, @@ -370,8 +370,7 @@ func TestUnverifiedParentPanicRegression(t *testing.T) { msgChan, nil, nil, - ) - require.NoError(err) + )) m := atomic.NewMemory(atomicDB) vm.ctx.SharedMemory = m.NewSharedMemory(ctx.ChainID) @@ -551,10 +550,9 @@ func TestRejectedStateRegressionInvalidValidatorTimestamp(t *testing.T) { }, } signedImportTx := &txs.Tx{Unsigned: unsignedImportTx} - err = signedImportTx.Sign(txs.Codec, [][]*secp256k1.PrivateKey{ + require.NoError(signedImportTx.Sign(txs.Codec, [][]*secp256k1.PrivateKey{ {}, // There is one input, with no required signers - }) - require.NoError(err) + })) // Create the standard block that will fail verification, and then be // re-verified. @@ -802,10 +800,9 @@ func TestRejectedStateRegressionInvalidValidatorReward(t *testing.T) { }, } signedImportTx := &txs.Tx{Unsigned: unsignedImportTx} - err = signedImportTx.Sign(txs.Codec, [][]*secp256k1.PrivateKey{ + require.NoError(signedImportTx.Sign(txs.Codec, [][]*secp256k1.PrivateKey{ {}, // There is one input, with no required signers - }) - require.NoError(err) + })) // Create the standard block that will fail verification, and then be // re-verified. 
diff --git a/vms/platformvm/vm_test.go b/vms/platformvm/vm_test.go index 73d62845458..109309fe968 100644 --- a/vms/platformvm/vm_test.go +++ b/vms/platformvm/vm_test.go @@ -412,7 +412,10 @@ func defaultVM() (*VM, database.Database, *mutableSharedMemory) { func GenesisVMWithArgs(t *testing.T, args *api.BuildGenesisArgs) ([]byte, chan common.Message, *VM, *atomic.Memory) { require := require.New(t) - var genesisBytes []byte + var ( + genesisBytes []byte + err error + ) if args != nil { _, genesisBytes = BuildGenesisTestWithArgs(t, args) @@ -456,7 +459,7 @@ func GenesisVMWithArgs(t *testing.T, args *api.BuildGenesisArgs) ([]byte, chan c appSender.SendAppGossipF = func(context.Context, []byte) error { return nil } - err := vm.Initialize( + require.NoError(vm.Initialize( context.Background(), ctx, chainDBManager, @@ -466,8 +469,7 @@ func GenesisVMWithArgs(t *testing.T, args *api.BuildGenesisArgs) ([]byte, chan c msgChan, nil, appSender, - ) - require.NoError(err) + )) require.NoError(vm.SetState(context.Background(), snow.NormalOp)) @@ -1922,7 +1924,7 @@ func TestUnverifiedParent(t *testing.T) { }() msgChan := make(chan common.Message, 1) - err := vm.Initialize( + require.NoError(vm.Initialize( context.Background(), ctx, dbManager, @@ -1932,8 +1934,7 @@ func TestUnverifiedParent(t *testing.T) { msgChan, nil, nil, - ) - require.NoError(err) + )) // include a tx1 to make the block be accepted tx1 := &txs.Tx{Unsigned: &txs.ImportTx{ @@ -2253,7 +2254,7 @@ func TestUptimeDisallowedAfterNeverConnecting(t *testing.T) { msgChan := make(chan common.Message, 1) appSender := &common.SenderTest{T: t} - err := vm.Initialize( + require.NoError(vm.Initialize( context.Background(), ctx, db, @@ -2263,8 +2264,7 @@ func TestUptimeDisallowedAfterNeverConnecting(t *testing.T) { msgChan, nil, appSender, - ) - require.NoError(err) + )) defer func() { require.NoError(vm.Shutdown(context.Background())) @@ -2363,8 +2363,7 @@ func TestVM_GetValidatorSet(t *testing.T) { msgChan := make(chan common.Message, 1) appSender := &common.SenderTest{T: t} - err := vm.Initialize(context.Background(), ctx, db, genesisBytes, nil, nil, msgChan, nil, appSender) - require.NoError(t, err) + require.NoError(t, vm.Initialize(context.Background(), ctx, db, genesisBytes, nil, nil, msgChan, nil, appSender)) defer func() { require.NoError(t, vm.Shutdown(context.Background())) ctx.Lock.Unlock() diff --git a/vms/proposervm/state_syncable_vm_test.go b/vms/proposervm/state_syncable_vm_test.go index f8f7e0401ae..5e5572edb7a 100644 --- a/vms/proposervm/state_syncable_vm_test.go +++ b/vms/proposervm/state_syncable_vm_test.go @@ -86,7 +86,7 @@ func helperBuildStateSyncTestObjects(t *testing.T) (*fullVM, *VM) { ctx := snow.DefaultContextTest() ctx.NodeID = ids.NodeIDFromCert(pTestCert.Leaf) - err := vm.Initialize( + require.NoError(vm.Initialize( context.Background(), ctx, dbManager, @@ -96,8 +96,7 @@ func helperBuildStateSyncTestObjects(t *testing.T) (*fullVM, *VM) { nil, nil, nil, - ) - require.NoError(err) + )) return innerVM, vm } diff --git a/vms/proposervm/vm_test.go b/vms/proposervm/vm_test.go index 423cb7439a9..01b03c3209e 100644 --- a/vms/proposervm/vm_test.go +++ b/vms/proposervm/vm_test.go @@ -2530,7 +2530,7 @@ func TestVM_VerifyBlockWithContext(t *testing.T) { snowCtx := snow.DefaultContextTest() snowCtx.NodeID = ids.NodeIDFromCert(pTestCert.Leaf) - err := vm.Initialize( + require.NoError(vm.Initialize( context.Background(), snowCtx, dummyDBManager, @@ -2540,8 +2540,7 @@ func TestVM_VerifyBlockWithContext(t *testing.T) { nil, nil, nil, - ) 
- require.NoError(err) + )) { pChainHeight := uint64(0) @@ -2563,14 +2562,13 @@ func TestVM_VerifyBlockWithContext(t *testing.T) { blkID := ids.GenerateTestID() blk.EXPECT().ID().Return(blkID).AnyTimes() - err = vm.verifyAndRecordInnerBlk( + require.NoError(vm.verifyAndRecordInnerBlk( context.Background(), &block.Context{ PChainHeight: pChainHeight, }, blk, - ) - require.NoError(err) + )) // Call VerifyWithContext again but with a different P-Chain height blk.EXPECT().setInnerBlk(innerBlk).AnyTimes() @@ -2581,14 +2579,13 @@ func TestVM_VerifyBlockWithContext(t *testing.T) { }, ).Return(nil) - err = vm.verifyAndRecordInnerBlk( + require.NoError(vm.verifyAndRecordInnerBlk( context.Background(), &block.Context{ PChainHeight: pChainHeight, }, blk, - ) - require.NoError(err) + )) } { @@ -2606,14 +2603,13 @@ func TestVM_VerifyBlockWithContext(t *testing.T) { blk.EXPECT().getInnerBlk().Return(innerBlk).AnyTimes() blkID := ids.GenerateTestID() blk.EXPECT().ID().Return(blkID).AnyTimes() - err = vm.verifyAndRecordInnerBlk( + require.NoError(vm.verifyAndRecordInnerBlk( context.Background(), &block.Context{ PChainHeight: 1, }, blk, - ) - require.NoError(err) + )) } { diff --git a/x/merkledb/db_test.go b/x/merkledb/db_test.go index 9465e65d161..dcb56ce0514 100644 --- a/x/merkledb/db_test.go +++ b/x/merkledb/db_test.go @@ -177,12 +177,9 @@ func Test_MerkleDB_Failed_Batch_Commit(t *testing.T) { _ = memDB.Close() batch := db.NewBatch() - err = batch.Put([]byte("key1"), []byte("1")) - require.NoError(t, err) - err = batch.Put([]byte("key2"), []byte("2")) - require.NoError(t, err) - err = batch.Put([]byte("key3"), []byte("3")) - require.NoError(t, err) + require.NoError(t, batch.Put([]byte("key1"), []byte("1"))) + require.NoError(t, batch.Put([]byte("key2"), []byte("2"))) + require.NoError(t, batch.Put([]byte("key3"), []byte("3"))) err = batch.Write() // batch fails require.ErrorIs(t, err, database.ErrClosed) @@ -202,22 +199,16 @@ func Test_MerkleDB_Value_Cache(t *testing.T) { require.NoError(t, err) batch := db.NewBatch() - err = batch.Put([]byte("key1"), []byte("1")) - require.NoError(t, err) + require.NoError(t, batch.Put([]byte("key1"), []byte("1"))) - err = batch.Put([]byte("key2"), []byte("2")) - require.NoError(t, err) + require.NoError(t, batch.Put([]byte("key2"), []byte("2"))) - require.NoError(t, err) - err = batch.Write() - require.NoError(t, err) + require.NoError(t, batch.Write()) batch = db.NewBatch() // force key2 to be inserted into the cache as not found - err = batch.Delete([]byte("key2")) - require.NoError(t, err) - err = batch.Write() - require.NoError(t, err) + require.NoError(t, batch.Delete([]byte("key2"))) + require.NoError(t, batch.Write()) _ = memDB.Close() @@ -259,14 +251,10 @@ func Test_MerkleDB_Commit_Proof_To_Empty_Trie(t *testing.T) { db, err := getBasicDB() require.NoError(t, err) batch := db.NewBatch() - err = batch.Put([]byte("key1"), []byte("1")) - require.NoError(t, err) - err = batch.Put([]byte("key2"), []byte("2")) - require.NoError(t, err) - err = batch.Put([]byte("key3"), []byte("3")) - require.NoError(t, err) - err = batch.Write() - require.NoError(t, err) + require.NoError(t, batch.Put([]byte("key1"), []byte("1"))) + require.NoError(t, batch.Put([]byte("key2"), []byte("2"))) + require.NoError(t, batch.Put([]byte("key3"), []byte("3"))) + require.NoError(t, batch.Write()) proof, err := db.GetRangeProof(context.Background(), []byte("key1"), []byte("key3"), 10) require.NoError(t, err) @@ -274,8 +262,7 @@ 
freshDB, err := getBasicDB() require.NoError(t, err) - err = freshDB.CommitRangeProof(context.Background(), []byte("key1"), proof) - require.NoError(t, err) + require.NoError(t, freshDB.CommitRangeProof(context.Background(), []byte("key1"), proof)) value, err := freshDB.Get([]byte("key2")) require.NoError(t, err) @@ -292,14 +279,10 @@ func Test_MerkleDB_Commit_Proof_To_Filled_Trie(t *testing.T) { db, err := getBasicDB() require.NoError(t, err) batch := db.NewBatch() - err = batch.Put([]byte("key1"), []byte("1")) - require.NoError(t, err) - err = batch.Put([]byte("key2"), []byte("2")) - require.NoError(t, err) - err = batch.Put([]byte("key3"), []byte("3")) - require.NoError(t, err) - err = batch.Write() - require.NoError(t, err) + require.NoError(t, batch.Put([]byte("key1"), []byte("1"))) + require.NoError(t, batch.Put([]byte("key2"), []byte("2"))) + require.NoError(t, batch.Put([]byte("key3"), []byte("3"))) + require.NoError(t, batch.Write()) proof, err := db.GetRangeProof(context.Background(), []byte("key1"), []byte("key3"), 10) require.NoError(t, err) @@ -307,19 +290,13 @@ func Test_MerkleDB_Commit_Proof_To_Filled_Trie(t *testing.T) { freshDB, err := getBasicDB() require.NoError(t, err) batch = freshDB.NewBatch() - err = batch.Put([]byte("key1"), []byte("3")) - require.NoError(t, err) - err = batch.Put([]byte("key2"), []byte("4")) - require.NoError(t, err) - err = batch.Put([]byte("key3"), []byte("5")) - require.NoError(t, err) - err = batch.Put([]byte("key25"), []byte("5")) - require.NoError(t, err) - err = batch.Write() - require.NoError(t, err) + require.NoError(t, batch.Put([]byte("key1"), []byte("3"))) + require.NoError(t, batch.Put([]byte("key2"), []byte("4"))) + require.NoError(t, batch.Put([]byte("key3"), []byte("5"))) + require.NoError(t, batch.Put([]byte("key25"), []byte("5"))) + require.NoError(t, batch.Write()) - err = freshDB.CommitRangeProof(context.Background(), []byte("key1"), proof) - require.NoError(t, err) + require.NoError(t, freshDB.CommitRangeProof(context.Background(), []byte("key1"), proof)) value, err := freshDB.Get([]byte("key2")) require.NoError(t, err) @@ -359,10 +336,8 @@ func Test_MerkleDB_InsertNil(t *testing.T) { db, err := getBasicDB() require.NoError(t, err) batch := db.NewBatch() - err = batch.Put([]byte("key0"), nil) - require.NoError(t, err) - err = batch.Write() - require.NoError(t, err) + require.NoError(t, batch.Put([]byte("key0"), nil)) + require.NoError(t, batch.Write()) value, err := db.Get([]byte("key0")) require.NoError(t, err) @@ -403,15 +378,13 @@ func Test_MerkleDB_Overwrite(t *testing.T) { db, err := getBasicDB() require.NoError(t, err) - err = db.Put([]byte("key"), []byte("value0")) - require.NoError(t, err) + require.NoError(t, db.Put([]byte("key"), []byte("value0"))) value, err := db.Get([]byte("key")) require.NoError(t, err) require.Equal(t, []byte("value0"), value) - err = db.Put([]byte("key"), []byte("value1")) - require.NoError(t, err) + require.NoError(t, db.Put([]byte("key"), []byte("value1"))) value, err = db.Get([]byte("key")) require.NoError(t, err) @@ -422,15 +395,13 @@ func Test_MerkleDB_Delete(t *testing.T) { db, err := getBasicDB() require.NoError(t, err) - err = db.Put([]byte("key"), []byte("value0")) - require.NoError(t, err) + require.NoError(t, db.Put([]byte("key"), []byte("value0"))) value, err := db.Get([]byte("key")) require.NoError(t, err) require.Equal(t, []byte("value0"), value) - err = db.Delete([]byte("key")) - require.NoError(t, err) + require.NoError(t, db.Delete([]byte("key"))) value, err = 
db.Get([]byte("key")) require.ErrorIs(t, err, database.ErrNotFound) @@ -441,8 +412,7 @@ func Test_MerkleDB_DeleteMissingKey(t *testing.T) { db, err := getBasicDB() require.NoError(t, err) - err = db.Delete([]byte("key")) - require.NoError(t, err) + require.NoError(t, db.Delete([]byte("key"))) } // Test that untracked views aren't persisted to [db.childViews]. diff --git a/x/merkledb/metrics_test.go b/x/merkledb/metrics_test.go index be08d7d8fe8..d66fb6fd315 100644 --- a/x/merkledb/metrics_test.go +++ b/x/merkledb/metrics_test.go @@ -27,15 +27,13 @@ func Test_Metrics_Basic_Usage(t *testing.T) { ) require.NoError(t, err) - err = db.Put([]byte("key"), []byte("value")) - require.NoError(t, err) + require.NoError(t, db.Put([]byte("key"), []byte("value"))) require.Equal(t, int64(1), db.metrics.(*mockMetrics).keyReadCount) require.Equal(t, int64(1), db.metrics.(*mockMetrics).keyWriteCount) require.Equal(t, int64(3), db.metrics.(*mockMetrics).hashCount) - err = db.Delete([]byte("key")) - require.NoError(t, err) + require.NoError(t, db.Delete([]byte("key"))) require.Equal(t, int64(1), db.metrics.(*mockMetrics).keyReadCount) require.Equal(t, int64(2), db.metrics.(*mockMetrics).keyWriteCount) @@ -62,13 +60,11 @@ func Test_Metrics_Initialize(t *testing.T) { ) require.NoError(t, err) - err = db.Put([]byte("key"), []byte("value")) - require.NoError(t, err) + require.NoError(t, db.Put([]byte("key"), []byte("value"))) val, err := db.Get([]byte("key")) require.NoError(t, err) require.Equal(t, []byte("value"), val) - err = db.Delete([]byte("key")) - require.NoError(t, err) + require.NoError(t, db.Delete([]byte("key"))) } diff --git a/x/merkledb/node_test.go b/x/merkledb/node_test.go index a4282df12f9..6ace63b3671 100644 --- a/x/merkledb/node_test.go +++ b/x/merkledb/node_test.go @@ -19,8 +19,7 @@ func Test_Node_Marshal(t *testing.T) { childNode.setValue(Some([]byte("value"))) require.NotNil(t, childNode) - err := childNode.calculateID(&mockMetrics{}) - require.NoError(t, err) + require.NoError(t, childNode.calculateID(&mockMetrics{})) root.addChild(childNode) data, err := root.marshal() @@ -45,8 +44,7 @@ func Test_Node_Marshal_Errors(t *testing.T) { childNode1.setValue(Some([]byte("value1"))) require.NotNil(t, childNode1) - err := childNode1.calculateID(&mockMetrics{}) - require.NoError(t, err) + require.NoError(t, childNode1.calculateID(&mockMetrics{})) root.addChild(childNode1) fullpath = newPath([]byte{237}) @@ -54,8 +52,7 @@ func Test_Node_Marshal_Errors(t *testing.T) { childNode2.setValue(Some([]byte("value2"))) require.NotNil(t, childNode2) - err = childNode2.calculateID(&mockMetrics{}) - require.NoError(t, err) + require.NoError(t, childNode2.calculateID(&mockMetrics{})) root.addChild(childNode2) data, err := root.marshal() diff --git a/x/merkledb/proof_test.go b/x/merkledb/proof_test.go index 87014af854a..cb5f03cf465 100644 --- a/x/merkledb/proof_test.go +++ b/x/merkledb/proof_test.go @@ -328,16 +328,11 @@ func Test_Proof(t *testing.T) { trie, err := dbTrie.NewView() require.NoError(t, err) - err = trie.Insert(context.Background(), []byte("key0"), []byte("value0")) - require.NoError(t, err) - err = trie.Insert(context.Background(), []byte("key1"), []byte("value1")) - require.NoError(t, err) - err = trie.Insert(context.Background(), []byte("key2"), []byte("value2")) - require.NoError(t, err) - err = trie.Insert(context.Background(), []byte("key3"), []byte("value3")) - require.NoError(t, err) - err = trie.Insert(context.Background(), []byte("key4"), []byte("value4")) - require.NoError(t, err) + 
require.NoError(t, trie.Insert(context.Background(), []byte("key0"), []byte("value0"))) + require.NoError(t, trie.Insert(context.Background(), []byte("key1"), []byte("value1"))) + require.NoError(t, trie.Insert(context.Background(), []byte("key2"), []byte("value2"))) + require.NoError(t, trie.Insert(context.Background(), []byte("key3"), []byte("value3"))) + require.NoError(t, trie.Insert(context.Background(), []byte("key4"), []byte("value4"))) _, err = trie.GetMerkleRoot(context.Background()) require.NoError(t, err) @@ -355,8 +350,7 @@ func Test_Proof(t *testing.T) { expectedRootID, err := trie.GetMerkleRoot(context.Background()) require.NoError(t, err) - err = proof.Verify(context.Background(), expectedRootID) - require.NoError(t, err) + require.NoError(t, proof.Verify(context.Background(), expectedRootID)) proof.Path[0].ValueOrHash = Some([]byte("value2")) @@ -585,16 +579,11 @@ func Test_RangeProof_NilStart(t *testing.T) { db, err := getBasicDB() require.NoError(t, err) batch := db.NewBatch() - err = batch.Put([]byte("key1"), []byte("value1")) - require.NoError(t, err) - err = batch.Put([]byte("key2"), []byte("value2")) - require.NoError(t, err) - err = batch.Put([]byte("key3"), []byte("value3")) - require.NoError(t, err) - err = batch.Put([]byte("key4"), []byte("value4")) - require.NoError(t, err) - err = batch.Write() - require.NoError(t, err) + require.NoError(t, batch.Put([]byte("key1"), []byte("value1"))) + require.NoError(t, batch.Put([]byte("key2"), []byte("value2"))) + require.NoError(t, batch.Put([]byte("key3"), []byte("value3"))) + require.NoError(t, batch.Put([]byte("key4"), []byte("value4"))) + require.NoError(t, batch.Write()) val, err := db.Get([]byte("key1")) require.NoError(t, err) @@ -662,14 +651,10 @@ func Test_RangeProof_EmptyValues(t *testing.T) { db, err := getBasicDB() require.NoError(t, err) batch := db.NewBatch() - err = batch.Put([]byte("key1"), nil) - require.NoError(t, err) - err = batch.Put([]byte("key12"), []byte("value1")) - require.NoError(t, err) - err = batch.Put([]byte("key2"), []byte{}) - require.NoError(t, err) - err = batch.Write() - require.NoError(t, err) + require.NoError(t, batch.Put([]byte("key1"), nil)) + require.NoError(t, batch.Put([]byte("key12"), []byte("value1"))) + require.NoError(t, batch.Put([]byte("key2"), []byte{})) + require.NoError(t, batch.Write()) val, err := db.Get([]byte("key12")) require.NoError(t, err) @@ -786,48 +771,30 @@ func Test_ChangeProof_Marshal(t *testing.T) { db, err := getBasicDB() require.NoError(t, err) batch := db.NewBatch() - err = batch.Put([]byte("key0"), []byte("value0")) - require.NoError(t, err) - err = batch.Put([]byte("key1"), []byte("value1")) - require.NoError(t, err) - err = batch.Put([]byte("key2"), []byte("value2")) - require.NoError(t, err) - err = batch.Put([]byte("key3"), []byte("value3")) - require.NoError(t, err) - err = batch.Put([]byte("key4"), []byte("value4")) - require.NoError(t, err) - err = batch.Write() - require.NoError(t, err) + require.NoError(t, batch.Put([]byte("key0"), []byte("value0"))) + require.NoError(t, batch.Put([]byte("key1"), []byte("value1"))) + require.NoError(t, batch.Put([]byte("key2"), []byte("value2"))) + require.NoError(t, batch.Put([]byte("key3"), []byte("value3"))) + require.NoError(t, batch.Put([]byte("key4"), []byte("value4"))) + require.NoError(t, batch.Write()) startRoot, err := db.GetMerkleRoot(context.Background()) require.NoError(t, err) batch = db.NewBatch() - err = batch.Put([]byte("key4"), []byte("value0")) - require.NoError(t, err) - err = 
batch.Put([]byte("key5"), []byte("value1")) - require.NoError(t, err) - err = batch.Put([]byte("key6"), []byte("value2")) - require.NoError(t, err) - err = batch.Put([]byte("key7"), []byte("value3")) - require.NoError(t, err) - err = batch.Put([]byte("key8"), []byte("value4")) - require.NoError(t, err) - err = batch.Write() - require.NoError(t, err) + require.NoError(t, batch.Put([]byte("key4"), []byte("value0"))) + require.NoError(t, batch.Put([]byte("key5"), []byte("value1"))) + require.NoError(t, batch.Put([]byte("key6"), []byte("value2"))) + require.NoError(t, batch.Put([]byte("key7"), []byte("value3"))) + require.NoError(t, batch.Put([]byte("key8"), []byte("value4"))) + require.NoError(t, batch.Write()) batch = db.NewBatch() - err = batch.Put([]byte("key9"), []byte("value0")) - require.NoError(t, err) - err = batch.Put([]byte("key10"), []byte("value1")) - require.NoError(t, err) - err = batch.Put([]byte("key11"), []byte("value2")) - require.NoError(t, err) - err = batch.Put([]byte("key12"), []byte("value3")) - require.NoError(t, err) - err = batch.Put([]byte("key13"), []byte("value4")) - require.NoError(t, err) - err = batch.Write() - require.NoError(t, err) + require.NoError(t, batch.Put([]byte("key9"), []byte("value0"))) + require.NoError(t, batch.Put([]byte("key10"), []byte("value1"))) + require.NoError(t, batch.Put([]byte("key11"), []byte("value2"))) + require.NoError(t, batch.Put([]byte("key12"), []byte("value3"))) + require.NoError(t, batch.Put([]byte("key13"), []byte("value4"))) + require.NoError(t, batch.Write()) endroot, err := db.GetMerkleRoot(context.Background()) require.NoError(t, err) @@ -930,18 +897,12 @@ func Test_ChangeProof_Verify(t *testing.T) { db, err := getBasicDB() require.NoError(t, err) batch := db.NewBatch() - err = batch.Put([]byte("key20"), []byte("value0")) - require.NoError(t, err) - err = batch.Put([]byte("key21"), []byte("value1")) - require.NoError(t, err) - err = batch.Put([]byte("key22"), []byte("value2")) - require.NoError(t, err) - err = batch.Put([]byte("key23"), []byte("value3")) - require.NoError(t, err) - err = batch.Put([]byte("key24"), []byte("value4")) - require.NoError(t, err) - err = batch.Write() - require.NoError(t, err) + require.NoError(t, batch.Put([]byte("key20"), []byte("value0"))) + require.NoError(t, batch.Put([]byte("key21"), []byte("value1"))) + require.NoError(t, batch.Put([]byte("key22"), []byte("value2"))) + require.NoError(t, batch.Put([]byte("key23"), []byte("value3"))) + require.NoError(t, batch.Put([]byte("key24"), []byte("value4"))) + require.NoError(t, batch.Write()) startRoot, err := db.GetMerkleRoot(context.Background()) require.NoError(t, err) @@ -949,53 +910,33 @@ func Test_ChangeProof_Verify(t *testing.T) { dbClone, err := getBasicDB() require.NoError(t, err) batch = dbClone.NewBatch() - err = batch.Put([]byte("key20"), []byte("value0")) - require.NoError(t, err) - err = batch.Put([]byte("key21"), []byte("value1")) - require.NoError(t, err) - err = batch.Put([]byte("key22"), []byte("value2")) - require.NoError(t, err) - err = batch.Put([]byte("key23"), []byte("value3")) - require.NoError(t, err) - err = batch.Put([]byte("key24"), []byte("value4")) - require.NoError(t, err) - err = batch.Write() - require.NoError(t, err) + require.NoError(t, batch.Put([]byte("key20"), []byte("value0"))) + require.NoError(t, batch.Put([]byte("key21"), []byte("value1"))) + require.NoError(t, batch.Put([]byte("key22"), []byte("value2"))) + require.NoError(t, batch.Put([]byte("key23"), []byte("value3"))) + require.NoError(t, 
batch.Put([]byte("key24"), []byte("value4"))) + require.NoError(t, batch.Write()) // the second db has started to sync some of the range outside of the range proof batch = dbClone.NewBatch() - err = batch.Put([]byte("key31"), []byte("value1")) - require.NoError(t, err) - err = batch.Write() - require.NoError(t, err) + require.NoError(t, batch.Put([]byte("key31"), []byte("value1"))) + require.NoError(t, batch.Write()) batch = db.NewBatch() - err = batch.Put([]byte("key25"), []byte("value0")) - require.NoError(t, err) - err = batch.Put([]byte("key26"), []byte("value1")) - require.NoError(t, err) - err = batch.Put([]byte("key27"), []byte("value2")) - require.NoError(t, err) - err = batch.Put([]byte("key28"), []byte("value3")) - require.NoError(t, err) - err = batch.Put([]byte("key29"), []byte("value4")) - require.NoError(t, err) - err = batch.Write() - require.NoError(t, err) + require.NoError(t, batch.Put([]byte("key25"), []byte("value0"))) + require.NoError(t, batch.Put([]byte("key26"), []byte("value1"))) + require.NoError(t, batch.Put([]byte("key27"), []byte("value2"))) + require.NoError(t, batch.Put([]byte("key28"), []byte("value3"))) + require.NoError(t, batch.Put([]byte("key29"), []byte("value4"))) + require.NoError(t, batch.Write()) batch = db.NewBatch() - err = batch.Put([]byte("key30"), []byte("value0")) - require.NoError(t, err) - err = batch.Put([]byte("key31"), []byte("value1")) - require.NoError(t, err) - err = batch.Put([]byte("key32"), []byte("value2")) - require.NoError(t, err) - err = batch.Delete([]byte("key21")) - require.NoError(t, err) - err = batch.Delete([]byte("key22")) - require.NoError(t, err) - err = batch.Write() - require.NoError(t, err) + require.NoError(t, batch.Put([]byte("key30"), []byte("value0"))) + require.NoError(t, batch.Put([]byte("key31"), []byte("value1"))) + require.NoError(t, batch.Put([]byte("key32"), []byte("value2"))) + require.NoError(t, batch.Delete([]byte("key21"))) + require.NoError(t, batch.Delete([]byte("key22"))) + require.NoError(t, batch.Write()) endRoot, err := db.GetMerkleRoot(context.Background()) require.NoError(t, err) @@ -1005,27 +946,23 @@ func Test_ChangeProof_Verify(t *testing.T) { require.NoError(t, err) require.NotNil(t, proof) - err = proof.Verify(context.Background(), dbClone, []byte("key21"), []byte("key30"), db.getMerkleRoot()) - require.NoError(t, err) + require.NoError(t, proof.Verify(context.Background(), dbClone, []byte("key21"), []byte("key30"), db.getMerkleRoot())) // low maxLength proof, err = db.GetChangeProof(context.Background(), startRoot, endRoot, nil, nil, 5) require.NoError(t, err) require.NotNil(t, proof) - err = proof.Verify(context.Background(), dbClone, nil, nil, db.getMerkleRoot()) - require.NoError(t, err) + require.NoError(t, proof.Verify(context.Background(), dbClone, nil, nil, db.getMerkleRoot())) // nil start/end proof, err = db.GetChangeProof(context.Background(), startRoot, endRoot, nil, nil, 50) require.NoError(t, err) require.NotNil(t, proof) - err = proof.Verify(context.Background(), dbClone, nil, nil, endRoot) - require.NoError(t, err) + require.NoError(t, proof.Verify(context.Background(), dbClone, nil, nil, endRoot)) - err = dbClone.CommitChangeProof(context.Background(), proof) - require.NoError(t, err) + require.NoError(t, dbClone.CommitChangeProof(context.Background(), proof)) newRoot, err := dbClone.GetMerkleRoot(context.Background()) require.NoError(t, err) @@ -1035,8 +972,7 @@ func Test_ChangeProof_Verify(t *testing.T) { require.NoError(t, err) require.NotNil(t, proof) - err = 
proof.Verify(context.Background(), dbClone, []byte("key20"), []byte("key30"), db.getMerkleRoot()) - require.NoError(t, err) + require.NoError(t, proof.Verify(context.Background(), dbClone, []byte("key20"), []byte("key30"), db.getMerkleRoot())) } func Test_ChangeProof_Verify_Bad_Data(t *testing.T) { diff --git a/x/merkledb/trie_test.go b/x/merkledb/trie_test.go index 3163f8db082..889798cd716 100644 --- a/x/merkledb/trie_test.go +++ b/x/merkledb/trie_test.go @@ -187,16 +187,14 @@ func Test_Trie_ViewOnCommitedView(t *testing.T) { committedTrie, err := dbTrie.NewView() require.NoError(t, err) - err = committedTrie.Insert(context.Background(), []byte{0}, []byte{0}) - require.NoError(t, err) + require.NoError(t, committedTrie.Insert(context.Background(), []byte{0}, []byte{0})) require.NoError(t, committedTrie.CommitToDB(context.Background())) newView, err := committedTrie.NewView() require.NoError(t, err) - err = newView.Insert(context.Background(), []byte{1}, []byte{1}) - require.NoError(t, err) + require.NoError(t, newView.Insert(context.Background(), []byte{1}, []byte{1})) require.NoError(t, newView.CommitToDB(context.Background())) val0, err := dbTrie.GetValue(context.Background(), []byte{0}) @@ -214,26 +212,21 @@ func Test_Trie_Partial_Commit_Leaves_Valid_Tries(t *testing.T) { trie2, err := dbTrie.NewView() require.NoError(t, err) - err = trie2.Insert(context.Background(), []byte("key"), []byte("value")) - require.NoError(t, err) + require.NoError(t, trie2.Insert(context.Background(), []byte("key"), []byte("value"))) trie3, err := trie2.NewView() require.NoError(t, err) - err = trie3.Insert(context.Background(), []byte("key1"), []byte("value1")) - require.NoError(t, err) + require.NoError(t, trie3.Insert(context.Background(), []byte("key1"), []byte("value1"))) trie4, err := trie3.NewView() require.NoError(t, err) - err = trie4.Insert(context.Background(), []byte("key2"), []byte("value2")) - require.NoError(t, err) + require.NoError(t, trie4.Insert(context.Background(), []byte("key2"), []byte("value2"))) trie5, err := trie4.NewView() require.NoError(t, err) - err = trie5.Insert(context.Background(), []byte("key3"), []byte("value3")) - require.NoError(t, err) + require.NoError(t, trie5.Insert(context.Background(), []byte("key3"), []byte("value3"))) - err = trie3.CommitToDB(context.Background()) - require.NoError(t, err) + require.NoError(t, trie3.CommitToDB(context.Background())) root, err := trie3.GetMerkleRoot(context.Background()) require.NoError(t, err) @@ -303,15 +296,13 @@ func Test_Trie_Overwrite(t *testing.T) { require.NotNil(t, dbTrie) trie := Trie(dbTrie) - err = trie.Insert(context.Background(), []byte("key"), []byte("value0")) - require.NoError(t, err) + require.NoError(t, trie.Insert(context.Background(), []byte("key"), []byte("value0"))) value, err := getNodeValue(trie, "key") require.NoError(t, err) require.Equal(t, []byte("value0"), value) - err = trie.Insert(context.Background(), []byte("key"), []byte("value1")) - require.NoError(t, err) + require.NoError(t, trie.Insert(context.Background(), []byte("key"), []byte("value1"))) value, err = getNodeValue(trie, "key") require.NoError(t, err) @@ -324,15 +315,13 @@ func Test_Trie_Delete(t *testing.T) { require.NotNil(t, dbTrie) trie := Trie(dbTrie) - err = trie.Insert(context.Background(), []byte("key"), []byte("value0")) - require.NoError(t, err) + require.NoError(t, trie.Insert(context.Background(), []byte("key"), []byte("value0"))) value, err := getNodeValue(trie, "key") require.NoError(t, err) require.Equal(t, []byte("value0"), 
value) - err = trie.Remove(context.Background(), []byte("key")) - require.NoError(t, err) + require.NoError(t, trie.Remove(context.Background(), []byte("key"))) value, err = getNodeValue(trie, "key") require.ErrorIs(t, err, database.ErrNotFound) @@ -344,8 +333,7 @@ func Test_Trie_DeleteMissingKey(t *testing.T) { require.NoError(t, err) require.NotNil(t, trie) - err = trie.Remove(context.Background(), []byte("key")) - require.NoError(t, err) + require.NoError(t, trie.Remove(context.Background(), []byte("key"))) } func Test_Trie_ExpandOnKeyPath(t *testing.T) { @@ -354,15 +342,13 @@ func Test_Trie_ExpandOnKeyPath(t *testing.T) { require.NotNil(t, dbTrie) trie := Trie(dbTrie) - err = trie.Insert(context.Background(), []byte("key"), []byte("value0")) - require.NoError(t, err) + require.NoError(t, trie.Insert(context.Background(), []byte("key"), []byte("value0"))) value, err := getNodeValue(trie, "key") require.NoError(t, err) require.Equal(t, []byte("value0"), value) - err = trie.Insert(context.Background(), []byte("key1"), []byte("value1")) - require.NoError(t, err) + require.NoError(t, trie.Insert(context.Background(), []byte("key1"), []byte("value1"))) value, err = getNodeValue(trie, "key") require.NoError(t, err) @@ -372,8 +358,7 @@ func Test_Trie_ExpandOnKeyPath(t *testing.T) { require.NoError(t, err) require.Equal(t, []byte("value1"), value) - err = trie.Insert(context.Background(), []byte("key12"), []byte("value12")) - require.NoError(t, err) + require.NoError(t, trie.Insert(context.Background(), []byte("key12"), []byte("value12"))) value, err = getNodeValue(trie, "key") require.NoError(t, err) @@ -394,15 +379,13 @@ func Test_Trie_CompressedPaths(t *testing.T) { require.NotNil(t, dbTrie) trie := Trie(dbTrie) - err = trie.Insert(context.Background(), []byte("key12"), []byte("value12")) - require.NoError(t, err) + require.NoError(t, trie.Insert(context.Background(), []byte("key12"), []byte("value12"))) value, err := getNodeValue(trie, "key12") require.NoError(t, err) require.Equal(t, []byte("value12"), value) - err = trie.Insert(context.Background(), []byte("key1"), []byte("value1")) - require.NoError(t, err) + require.NoError(t, trie.Insert(context.Background(), []byte("key1"), []byte("value1"))) value, err = getNodeValue(trie, "key12") require.NoError(t, err) @@ -412,8 +395,7 @@ func Test_Trie_CompressedPaths(t *testing.T) { require.NoError(t, err) require.Equal(t, []byte("value1"), value) - err = trie.Insert(context.Background(), []byte("key"), []byte("value")) - require.NoError(t, err) + require.NoError(t, trie.Insert(context.Background(), []byte("key"), []byte("value"))) value, err = getNodeValue(trie, "key12") require.NoError(t, err) @@ -435,10 +417,8 @@ func Test_Trie_SplitBranch(t *testing.T) { trie := Trie(dbTrie) // force a new node to generate with common prefix "key1" and have these two nodes as children - err = trie.Insert(context.Background(), []byte("key12"), []byte("value12")) - require.NoError(t, err) - err = trie.Insert(context.Background(), []byte("key134"), []byte("value134")) - require.NoError(t, err) + require.NoError(t, trie.Insert(context.Background(), []byte("key12"), []byte("value12"))) + require.NoError(t, trie.Insert(context.Background(), []byte("key134"), []byte("value134"))) value, err := getNodeValue(trie, "key12") require.NoError(t, err) @@ -456,11 +436,9 @@ func Test_Trie_HashCountOnBranch(t *testing.T) { trie := Trie(dbTrie) // force a new node to generate with common prefix "key1" and have these two nodes as children - err = 
trie.Insert(context.Background(), []byte("key12"), []byte("value12")) - require.NoError(t, err) + require.NoError(t, trie.Insert(context.Background(), []byte("key12"), []byte("value12"))) oldCount := dbTrie.metrics.(*mockMetrics).hashCount - err = trie.Insert(context.Background(), []byte("key134"), []byte("value134")) - require.NoError(t, err) + require.NoError(t, trie.Insert(context.Background(), []byte("key134"), []byte("value134"))) // only hashes the new branch node, the new child node, and root // shouldn't hash the existing node require.Equal(t, oldCount+3, dbTrie.metrics.(*mockMetrics).hashCount) @@ -471,30 +449,21 @@ func Test_Trie_HashCountOnDelete(t *testing.T) { require.NoError(t, err) require.NotNil(t, trie) - err = trie.Insert(context.Background(), []byte("k"), []byte("value0")) - require.NoError(t, err) - err = trie.Insert(context.Background(), []byte("ke"), []byte("value1")) - require.NoError(t, err) - err = trie.Insert(context.Background(), []byte("key"), []byte("value2")) - require.NoError(t, err) - err = trie.Insert(context.Background(), []byte("key1"), []byte("value3")) - require.NoError(t, err) - err = trie.Insert(context.Background(), []byte("key2"), []byte("value4")) - require.NoError(t, err) + require.NoError(t, trie.Insert(context.Background(), []byte("k"), []byte("value0"))) + require.NoError(t, trie.Insert(context.Background(), []byte("ke"), []byte("value1"))) + require.NoError(t, trie.Insert(context.Background(), []byte("key"), []byte("value2"))) + require.NoError(t, trie.Insert(context.Background(), []byte("key1"), []byte("value3"))) + require.NoError(t, trie.Insert(context.Background(), []byte("key2"), []byte("value4"))) oldCount := trie.metrics.(*mockMetrics).hashCount // delete the middle values view, err := trie.NewView() require.NoError(t, err) - err = view.Remove(context.Background(), []byte("k")) - require.NoError(t, err) - err = view.Remove(context.Background(), []byte("ke")) - require.NoError(t, err) - err = view.Remove(context.Background(), []byte("key")) - require.NoError(t, err) - err = view.CommitToDB(context.Background()) - require.NoError(t, err) + require.NoError(t, view.Remove(context.Background(), []byte("k"))) + require.NoError(t, view.Remove(context.Background(), []byte("ke"))) + require.NoError(t, view.Remove(context.Background(), []byte("key"))) + require.NoError(t, view.CommitToDB(context.Background())) // the root is the only updated node so only one new hash require.Equal(t, oldCount+1, trie.metrics.(*mockMetrics).hashCount) @@ -506,14 +475,10 @@ func Test_Trie_NoExistingResidual(t *testing.T) { require.NotNil(t, dbTrie) trie := Trie(dbTrie) - err = trie.Insert(context.Background(), []byte("k"), []byte("1")) - require.NoError(t, err) - err = trie.Insert(context.Background(), []byte("ke"), []byte("2")) - require.NoError(t, err) - err = trie.Insert(context.Background(), []byte("key1"), []byte("3")) - require.NoError(t, err) - err = trie.Insert(context.Background(), []byte("key123"), []byte("4")) - require.NoError(t, err) + require.NoError(t, trie.Insert(context.Background(), []byte("k"), []byte("1"))) + require.NoError(t, trie.Insert(context.Background(), []byte("ke"), []byte("2"))) + require.NoError(t, trie.Insert(context.Background(), []byte("key1"), []byte("3"))) + require.NoError(t, trie.Insert(context.Background(), []byte("key123"), []byte("4"))) value, err := getNodeValue(trie, "k") require.NoError(t, err) @@ -662,30 +627,20 @@ func Test_Trie_ChainDeletion(t *testing.T) { newTrie, err := trie.NewView() require.NoError(t, err) - err = 
newTrie.Insert(context.Background(), []byte("k"), []byte("value0")) - require.NoError(t, err) - err = newTrie.Insert(context.Background(), []byte("ke"), []byte("value1")) - require.NoError(t, err) - err = newTrie.Insert(context.Background(), []byte("key"), []byte("value2")) - require.NoError(t, err) - err = newTrie.Insert(context.Background(), []byte("key1"), []byte("value3")) - require.NoError(t, err) - err = newTrie.(*trieView).calculateNodeIDs(context.Background()) - require.NoError(t, err) + require.NoError(t, newTrie.Insert(context.Background(), []byte("k"), []byte("value0"))) + require.NoError(t, newTrie.Insert(context.Background(), []byte("ke"), []byte("value1"))) + require.NoError(t, newTrie.Insert(context.Background(), []byte("key"), []byte("value2"))) + require.NoError(t, newTrie.Insert(context.Background(), []byte("key1"), []byte("value3"))) + require.NoError(t, newTrie.(*trieView).calculateNodeIDs(context.Background())) root, err := newTrie.getEditableNode(EmptyPath) require.NoError(t, err) require.Len(t, root.children, 1) - err = newTrie.Remove(context.Background(), []byte("k")) - require.NoError(t, err) - err = newTrie.Remove(context.Background(), []byte("ke")) - require.NoError(t, err) - err = newTrie.Remove(context.Background(), []byte("key")) - require.NoError(t, err) - err = newTrie.Remove(context.Background(), []byte("key1")) - require.NoError(t, err) - err = newTrie.(*trieView).calculateNodeIDs(context.Background()) - require.NoError(t, err) + require.NoError(t, newTrie.Remove(context.Background(), []byte("k"))) + require.NoError(t, newTrie.Remove(context.Background(), []byte("ke"))) + require.NoError(t, newTrie.Remove(context.Background(), []byte("key"))) + require.NoError(t, newTrie.Remove(context.Background(), []byte("key1"))) + require.NoError(t, newTrie.(*trieView).calculateNodeIDs(context.Background())) root, err = newTrie.getEditableNode(EmptyPath) require.NoError(t, err) // since all values have been deleted, the nodes should have been cleaned up @@ -711,8 +666,7 @@ func Test_Trie_Invalidate_Children_On_Edits(t *testing.T) { require.False(t, childTrie2.(*trieView).isInvalid()) require.False(t, childTrie3.(*trieView).isInvalid()) - err = trie.Insert(context.Background(), []byte{0}, []byte{0}) - require.NoError(t, err) + require.NoError(t, trie.Insert(context.Background(), []byte{0}, []byte{0})) require.True(t, childTrie1.(*trieView).isInvalid()) require.True(t, childTrie2.(*trieView).isInvalid()) @@ -753,19 +707,13 @@ func Test_Trie_NodeCollapse(t *testing.T) { trie, err := dbTrie.NewView() require.NoError(t, err) - err = trie.Insert(context.Background(), []byte("k"), []byte("value0")) - require.NoError(t, err) - err = trie.Insert(context.Background(), []byte("ke"), []byte("value1")) - require.NoError(t, err) - err = trie.Insert(context.Background(), []byte("key"), []byte("value2")) - require.NoError(t, err) - err = trie.Insert(context.Background(), []byte("key1"), []byte("value3")) - require.NoError(t, err) - err = trie.Insert(context.Background(), []byte("key2"), []byte("value4")) - require.NoError(t, err) + require.NoError(t, trie.Insert(context.Background(), []byte("k"), []byte("value0"))) + require.NoError(t, trie.Insert(context.Background(), []byte("ke"), []byte("value1"))) + require.NoError(t, trie.Insert(context.Background(), []byte("key"), []byte("value2"))) + require.NoError(t, trie.Insert(context.Background(), []byte("key1"), []byte("value3"))) + require.NoError(t, trie.Insert(context.Background(), []byte("key2"), []byte("value4"))) - err = 
trie.(*trieView).calculateNodeIDs(context.Background()) - require.NoError(t, err) + require.NoError(t, trie.(*trieView).calculateNodeIDs(context.Background())) root, err := trie.getEditableNode(EmptyPath) require.NoError(t, err) require.Len(t, root.children, 1) @@ -779,15 +727,11 @@ func Test_Trie_NodeCollapse(t *testing.T) { require.Len(t, firstNode.children, 1) // delete the middle values - err = trie.Remove(context.Background(), []byte("k")) - require.NoError(t, err) - err = trie.Remove(context.Background(), []byte("ke")) - require.NoError(t, err) - err = trie.Remove(context.Background(), []byte("key")) - require.NoError(t, err) + require.NoError(t, trie.Remove(context.Background(), []byte("k"))) + require.NoError(t, trie.Remove(context.Background(), []byte("ke"))) + require.NoError(t, trie.Remove(context.Background(), []byte("key"))) - err = trie.(*trieView).calculateNodeIDs(context.Background()) - require.NoError(t, err) + require.NoError(t, trie.(*trieView).calculateNodeIDs(context.Background())) root, err = trie.getEditableNode(EmptyPath) require.NoError(t, err) @@ -1177,18 +1121,15 @@ func Test_Trie_CommitToParentView_Concurrent(t *testing.T) { parentView, err := baseView.NewView() require.NoError(t, err) - err = parentView.Insert(context.Background(), []byte{0}, []byte{0}) - require.NoError(t, err) + require.NoError(t, parentView.Insert(context.Background(), []byte{0}, []byte{0})) childView1, err := parentView.NewView() require.NoError(t, err) - err = childView1.Insert(context.Background(), []byte{1}, []byte{1}) - require.NoError(t, err) + require.NoError(t, childView1.Insert(context.Background(), []byte{1}, []byte{1})) childView2, err := childView1.NewView() require.NoError(t, err) - err = childView2.Insert(context.Background(), []byte{2}, []byte{2}) - require.NoError(t, err) + require.NoError(t, childView2.Insert(context.Background(), []byte{2}, []byte{2})) var wg sync.WaitGroup wg.Add(3) @@ -1229,18 +1170,15 @@ func Test_Trie_CommitToParentDB_Concurrent(t *testing.T) { parentView, err := dbTrie.NewView() require.NoError(t, err) - err = parentView.Insert(context.Background(), []byte{0}, []byte{0}) - require.NoError(t, err) + require.NoError(t, parentView.Insert(context.Background(), []byte{0}, []byte{0})) childView1, err := parentView.NewView() require.NoError(t, err) - err = childView1.Insert(context.Background(), []byte{1}, []byte{1}) - require.NoError(t, err) + require.NoError(t, childView1.Insert(context.Background(), []byte{1}, []byte{1})) childView2, err := childView1.NewView() require.NoError(t, err) - err = childView2.Insert(context.Background(), []byte{2}, []byte{2}) - require.NoError(t, err) + require.NoError(t, childView2.Insert(context.Background(), []byte{2}, []byte{2})) var wg sync.WaitGroup wg.Add(3) diff --git a/x/sync/client_test.go b/x/sync/client_test.go index 55b5f049e8b..db6e2f590b6 100644 --- a/x/sync/client_test.go +++ b/x/sync/client_test.go @@ -401,8 +401,7 @@ func TestGetChangeProof(t *testing.T) { _, err = r.Read(val) require.NoError(t, err) - err = view.Insert(context.Background(), key, val) - require.NoError(t, err) + require.NoError(t, view.Insert(context.Background(), key, val)) } // delete a key @@ -412,8 +411,7 @@ func TestGetChangeProof(t *testing.T) { it := trieDB.NewIteratorWithStart(deleteKeyStart) if it.Next() { - err = view.Remove(context.Background(), it.Key()) - require.NoError(t, err) + require.NoError(t, view.Remove(context.Background(), it.Key())) } require.NoError(t, it.Error()) it.Release() diff --git a/x/sync/network_server_test.go 
b/x/sync/network_server_test.go index 4e686cf6af3..085e8ed1d35 100644 --- a/x/sync/network_server_test.go +++ b/x/sync/network_server_test.go @@ -160,8 +160,7 @@ func Test_Server_GetChangeProof(t *testing.T) { _, err = r.Read(val) require.NoError(t, err) - err = view.Insert(context.Background(), key, val) - require.NoError(t, err) + require.NoError(t, view.Insert(context.Background(), key, val)) deleteKeyStart := make([]byte, r.Intn(10)) _, err = r.Read(deleteKeyStart) @@ -169,8 +168,7 @@ func Test_Server_GetChangeProof(t *testing.T) { it := trieDB.NewIteratorWithStart(deleteKeyStart) if it.Next() { - err = view.Remove(context.Background(), it.Key()) - require.NoError(t, err) + require.NoError(t, view.Remove(context.Background(), it.Key())) } require.NoError(t, it.Error()) it.Release() diff --git a/x/sync/sync_test.go b/x/sync/sync_test.go index de31e57d682..744c6bc16d1 100644 --- a/x/sync/sync_test.go +++ b/x/sync/sync_test.go @@ -115,10 +115,8 @@ func Test_Completion(t *testing.T) { }) require.NoError(t, err) require.NotNil(t, syncer) - err = syncer.StartSyncing(context.Background()) - require.NoError(t, err) - err = syncer.Wait(context.Background()) - require.NoError(t, err) + require.NoError(t, syncer.StartSyncing(context.Background())) + require.NoError(t, syncer.Wait(context.Background())) syncer.workLock.Lock() require.Zero(t, syncer.unprocessedWork.Len()) require.Equal(t, 1, syncer.processedWork.Len()) @@ -217,10 +215,8 @@ func Test_Sync_FindNextKey_InSync(t *testing.T) { require.NoError(t, err) require.NotNil(t, syncer) - err = syncer.StartSyncing(context.Background()) - require.NoError(t, err) - err = syncer.Wait(context.Background()) - require.NoError(t, err) + require.NoError(t, syncer.StartSyncing(context.Background())) + require.NoError(t, syncer.Wait(context.Background())) proof, err := dbToSync.GetRangeProof(context.Background(), nil, nil, 500) require.NoError(t, err) @@ -233,8 +229,7 @@ func Test_Sync_FindNextKey_InSync(t *testing.T) { // add an extra value to sync db past the last key returned newKey := midPoint(lastKey, nil) - err = db.Put(newKey, []byte{1}) - require.NoError(t, err) + require.NoError(t, db.Put(newKey, []byte{1})) // create a range endpoint that is before the newly added key, but after the last key endPointBeforeNewKey := make([]byte, 0, 2) @@ -411,10 +406,8 @@ func Test_Sync_FindNextKey_ExtraValues(t *testing.T) { require.NoError(t, err) require.NotNil(t, syncer) - err = syncer.StartSyncing(context.Background()) - require.NoError(t, err) - err = syncer.Wait(context.Background()) - require.NoError(t, err) + require.NoError(t, syncer.StartSyncing(context.Background())) + require.NoError(t, syncer.Wait(context.Background())) proof, err := dbToSync.GetRangeProof(context.Background(), nil, nil, 500) require.NoError(t, err) @@ -423,8 +416,7 @@ func Test_Sync_FindNextKey_ExtraValues(t *testing.T) { lastKey := proof.KeyValues[len(proof.KeyValues)-1].Key midpoint := midPoint(lastKey, nil) - err = db.Put(midpoint, []byte{1}) - require.NoError(t, err) + require.NoError(t, db.Put(midpoint, []byte{1})) // next key at prefix of newly added point nextKey, err := syncer.findNextKey(context.Background(), lastKey, nil, proof.EndProof) @@ -433,11 +425,9 @@ func Test_Sync_FindNextKey_ExtraValues(t *testing.T) { require.True(t, isPrefix(midpoint, nextKey)) - err = db.Delete(midpoint) - require.NoError(t, err) + require.NoError(t, db.Delete(midpoint)) - err = dbToSync.Put(midpoint, []byte{1}) - require.NoError(t, err) + require.NoError(t, dbToSync.Put(midpoint, []byte{1})) 
proof, err = dbToSync.GetRangeProof(context.Background(), nil, lastKey, 500) require.NoError(t, err) @@ -492,10 +482,8 @@ func Test_Sync_FindNextKey_DifferentChild(t *testing.T) { }) require.NoError(t, err) require.NotNil(t, syncer) - err = syncer.StartSyncing(context.Background()) - require.NoError(t, err) - err = syncer.Wait(context.Background()) - require.NoError(t, err) + require.NoError(t, syncer.StartSyncing(context.Background())) + require.NoError(t, syncer.Wait(context.Background())) proof, err := dbToSync.GetRangeProof(context.Background(), nil, nil, 100) require.NoError(t, err) @@ -503,11 +491,9 @@ func Test_Sync_FindNextKey_DifferentChild(t *testing.T) { // local db has a different child than remote db lastKey = append(lastKey, 16) - err = db.Put(lastKey, []byte{1}) - require.NoError(t, err) + require.NoError(t, db.Put(lastKey, []byte{1})) - err = dbToSync.Put(lastKey, []byte{2}) - require.NoError(t, err) + require.NoError(t, dbToSync.Put(lastKey, []byte{2})) proof, err = dbToSync.GetRangeProof(context.Background(), nil, proof.KeyValues[len(proof.KeyValues)-1].Key, 100) require.NoError(t, err) @@ -760,11 +746,9 @@ func Test_Sync_Result_Correct_Root(t *testing.T) { }) require.NoError(t, err) require.NotNil(t, syncer) - err = syncer.StartSyncing(context.Background()) - require.NoError(t, err) + require.NoError(t, syncer.StartSyncing(context.Background())) - err = syncer.Wait(context.Background()) - require.NoError(t, err) + require.NoError(t, syncer.Wait(context.Background())) require.NoError(t, syncer.Error()) // new db has fully sync'ed and should be at the same root as the original db @@ -781,20 +765,16 @@ func Test_Sync_Result_Correct_Root(t *testing.T) { _, err = r.Read(val) require.NoError(t, err) - err = db.Put(addkey, val) - require.NoError(t, err) + require.NoError(t, db.Put(addkey, val)) - err = dbToSync.Put(addkey, val) - require.NoError(t, err) + require.NoError(t, dbToSync.Put(addkey, val)) addNilkey := make([]byte, r.Intn(50)) _, err = r.Read(addNilkey) require.NoError(t, err) - err = db.Put(addNilkey, nil) - require.NoError(t, err) + require.NoError(t, db.Put(addNilkey, nil)) - err = dbToSync.Put(addNilkey, nil) - require.NoError(t, err) + require.NoError(t, dbToSync.Put(addNilkey, nil)) deleteKeyStart := make([]byte, r.Intn(50)) _, err = r.Read(deleteKeyStart) @@ -802,10 +782,8 @@ func Test_Sync_Result_Correct_Root(t *testing.T) { it := dbToSync.NewIteratorWithStart(deleteKeyStart) if it.Next() { - err = dbToSync.Delete(it.Key()) - require.NoError(t, err) - err = db.Delete(it.Key()) - require.NoError(t, err) + require.NoError(t, dbToSync.Delete(it.Key())) + require.NoError(t, db.Delete(it.Key())) } require.NoError(t, it.Error()) it.Release() @@ -848,8 +826,7 @@ func Test_Sync_Result_Correct_Root_With_Sync_Restart(t *testing.T) { }) require.NoError(t, err) require.NotNil(t, syncer) - err = syncer.StartSyncing(context.Background()) - require.NoError(t, err) + require.NoError(t, syncer.StartSyncing(context.Background())) // Wait until we've processed some work // before updating the sync target. 
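Worth spelling out once, since the same rewrite repeats across these hunks: the change is purely syntactic and only applies when the checked call returns nothing but an error. A minimal before/after sketch, using syncer.Wait from the surrounding test (any single-error-return call takes the same shape):

	// Before: two statements, and a stale err left in scope for later code to reuse.
	err = syncer.Wait(context.Background())
	require.NoError(t, err)

	// After: one statement; no err variable survives the check.
	require.NoError(t, syncer.Wait(context.Background()))
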
@@ -875,11 +852,9 @@ func Test_Sync_Result_Correct_Root_With_Sync_Restart(t *testing.T) { }) require.NoError(t, err) require.NotNil(t, newSyncer) - err = newSyncer.StartSyncing(context.Background()) - require.NoError(t, err) + require.NoError(t, newSyncer.StartSyncing(context.Background())) require.NoError(t, newSyncer.Error()) - err = newSyncer.Wait(context.Background()) - require.NoError(t, err) + require.NoError(t, newSyncer.Wait(context.Background())) newRoot, err := db.GetMerkleRoot(context.Background()) require.NoError(t, err) require.Equal(t, syncRoot, newRoot) From 720bfb0beba049a1c46430bc524b2c240a422f6e Mon Sep 17 00:00:00 2001 From: dhrubabasu <7675102+dhrubabasu@users.noreply.github.com> Date: Sat, 13 May 2023 16:36:21 -0400 Subject: [PATCH 30/79] nit --- x/merkledb/history_test.go | 1 - 1 file changed, 1 deletion(-) diff --git a/x/merkledb/history_test.go b/x/merkledb/history_test.go index becdf85f6df..f3c7abfc83b 100644 --- a/x/merkledb/history_test.go +++ b/x/merkledb/history_test.go @@ -60,7 +60,6 @@ func Test_History_Simple(t *testing.T) { batch = db.NewBatch() require.NoError(batch.Put([]byte("k"), []byte("v"))) - require.NoError(err) require.NoError(batch.Write()) newProof, err = db.GetRangeProofAtRoot(context.Background(), origRootID, []byte("k"), []byte("key3"), 10) require.NoError(err) From b71ff1c14234f35a61a83b46c3e1a35dbd07f079 Mon Sep 17 00:00:00 2001 From: dhrubabasu <7675102+dhrubabasu@users.noreply.github.com> Date: Sat, 13 May 2023 16:45:20 -0400 Subject: [PATCH 31/79] include multi-line functions --- scripts/lint.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/lint.sh b/scripts/lint.sh index 95f1a12bc54..1f74c2cedc7 100755 --- a/scripts/lint.sh +++ b/scripts/lint.sh @@ -117,7 +117,7 @@ function test_require_nil { } function test_require_no_error_inline_func { - if grep -R -zo -P '\t+err :?=.*\n\t+require\.NoError\((t, )?err\)' .; then + if grep -R -zo -P '\t+err :?= (.|\n)*?(?:require\.NoError\((t, )?err\))' .; then echo "" echo "Checking that a function with a single error return doesn't error should be done in-line." echo "" From 31e4a37905820ff96bf89f1b66d80f3b87149ac1 Mon Sep 17 00:00:00 2001 From: dhrubabasu <7675102+dhrubabasu@users.noreply.github.com> Date: Sat, 13 May 2023 16:53:10 -0400 Subject: [PATCH 32/79] should be good now --- scripts/lint.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/lint.sh b/scripts/lint.sh index 1f74c2cedc7..5f87309bd1a 100755 --- a/scripts/lint.sh +++ b/scripts/lint.sh @@ -117,7 +117,7 @@ function test_require_nil { } function test_require_no_error_inline_func { - if grep -R -zo -P '\t+err :?= (.|\n)*?(?:require\.NoError\((t, )?err\))' .; then + if grep -R -zo -P '\t+err :?= ((?!require).|\n)*require\.NoError\((t, )?err\)' .; then echo "" echo "Checking that a function with a single error return doesn't error should be done in-line." 
echo "" From c7226cfa90493bd34006cd65130df893d491e64c Mon Sep 17 00:00:00 2001 From: dhrubabasu <7675102+dhrubabasu@users.noreply.github.com> Date: Sat, 13 May 2023 17:09:30 -0400 Subject: [PATCH 33/79] moar --- api/health/health_test.go | 3 +- api/keystore/service_test.go | 5 +- snow/networking/router/chain_router_test.go | 46 ++++------ snow/networking/sender/sender_test.go | 15 ++-- vms/avm/service_test.go | 33 +++---- vms/avm/txs/base_tx_test.go | 5 +- .../txs/executor/semantic_verifier_test.go | 85 ++++++++----------- vms/avm/txs/export_tx_test.go | 5 +- vms/avm/txs/import_tx_test.go | 5 +- vms/avm/vm_regression_test.go | 7 +- vms/platformvm/blocks/builder/network_test.go | 3 +- .../blocks/builder/standard_block_test.go | 5 +- vms/platformvm/service_test.go | 5 +- vms/platformvm/txs/executor/import_test.go | 6 +- vms/platformvm/vm_regression_test.go | 10 +-- vms/platformvm/vm_test.go | 35 +++----- vms/proposervm/vm_test.go | 10 +-- x/merkledb/db_test.go | 10 +-- x/merkledb/history_test.go | 10 +-- x/merkledb/proof_test.go | 25 +++--- x/sync/sync_test.go | 5 +- 21 files changed, 131 insertions(+), 202 deletions(-) diff --git a/api/health/health_test.go b/api/health/health_test.go index 792030fc107..923f1f8558f 100644 --- a/api/health/health_test.go +++ b/api/health/health_test.go @@ -247,9 +247,8 @@ func TestDeadlockRegression(t *testing.T) { for i := 0; i < 1000; i++ { lock.Lock() - err = h.RegisterHealthCheck(fmt.Sprintf("check-%d", i), check) + require.NoError(h.RegisterHealthCheck(fmt.Sprintf("check-%d", i), check)) lock.Unlock() - require.NoError(err) } awaitHealthy(t, h, true) diff --git a/api/keystore/service_test.go b/api/keystore/service_test.go index 5f0592a75ce..a32fd6a716a 100644 --- a/api/keystore/service_test.go +++ b/api/keystore/service_test.go @@ -239,15 +239,14 @@ func TestServiceExportImport(t *testing.T) { } { - err := newS.ImportUser(nil, &ImportUserArgs{ + require.NoError(newS.ImportUser(nil, &ImportUserArgs{ UserPass: api.UserPass{ Username: "bob", Password: strongPassword, }, User: exportReply.User, Encoding: encoding, - }, &api.EmptyReply{}) - require.NoError(err) + }, &api.EmptyReply{})) } { diff --git a/snow/networking/router/chain_router_test.go b/snow/networking/router/chain_router_test.go index 42ac9ab1af3..8d146efdde7 100644 --- a/snow/networking/router/chain_router_test.go +++ b/snow/networking/router/chain_router_test.go @@ -61,7 +61,7 @@ func TestShutdown(t *testing.T) { go tm.Dispatch() chainRouter := ChainRouter{} - err = chainRouter.Initialize( + require.NoError(t, chainRouter.Initialize( ids.EmptyNodeID, logging.NoLog{}, tm, @@ -73,8 +73,7 @@ func TestShutdown(t *testing.T) { HealthConfig{}, "", prometheus.NewRegistry(), - ) - require.NoError(t, err) + )) shutdownCalled := make(chan struct{}, 1) @@ -198,7 +197,7 @@ func TestShutdownTimesOut(t *testing.T) { chainRouter := ChainRouter{} - err = chainRouter.Initialize( + require.NoError(t, chainRouter.Initialize( ids.EmptyNodeID, logging.NoLog{}, tm, @@ -210,8 +209,7 @@ func TestShutdownTimesOut(t *testing.T) { HealthConfig{}, "", metrics, - ) - require.NoError(t, err) + )) ctx := snow.DefaultConsensusContextTest() resourceTracker, err := tracker.NewResourceTracker( @@ -337,8 +335,7 @@ func TestRouterTimeout(t *testing.T) { // Create a router chainRouter := ChainRouter{} - - err = chainRouter.Initialize( + require.NoError(chainRouter.Initialize( ids.EmptyNodeID, logging.NoLog{}, tm, @@ -350,8 +347,7 @@ func TestRouterTimeout(t *testing.T) { HealthConfig{}, "", prometheus.NewRegistry(), - ) - 
require.NoError(err) + )) // Create bootstrapper, engine and handler var ( @@ -694,7 +690,7 @@ func TestRouterHonorsRequestedEngine(t *testing.T) { // Create a router chainRouter := ChainRouter{} - err = chainRouter.Initialize( + require.NoError(chainRouter.Initialize( ids.EmptyNodeID, logging.NoLog{}, tm, @@ -706,8 +702,7 @@ func TestRouterHonorsRequestedEngine(t *testing.T) { HealthConfig{}, "", prometheus.NewRegistry(), - ) - require.NoError(err) + )) h := handler.NewMockHandler(ctrl) @@ -820,7 +815,7 @@ func TestRouterClearTimeouts(t *testing.T) { // Create a router chainRouter := ChainRouter{} - err = chainRouter.Initialize( + require.NoError(t, chainRouter.Initialize( ids.EmptyNodeID, logging.NoLog{}, tm, @@ -832,8 +827,7 @@ func TestRouterClearTimeouts(t *testing.T) { HealthConfig{}, "", prometheus.NewRegistry(), - ) - require.NoError(t, err) + )) // Create bootstrapper, engine and handler ctx := snow.DefaultConsensusContextTest() @@ -1106,7 +1100,7 @@ func TestValidatorOnlyMessageDrops(t *testing.T) { // Create a router chainRouter := ChainRouter{} - err = chainRouter.Initialize( + require.NoError(t, chainRouter.Initialize( ids.EmptyNodeID, logging.NoLog{}, tm, @@ -1118,8 +1112,7 @@ func TestValidatorOnlyMessageDrops(t *testing.T) { HealthConfig{}, "", prometheus.NewRegistry(), - ) - require.NoError(t, err) + )) // Create bootstrapper, engine and handler calledF := false @@ -1253,7 +1246,7 @@ func TestRouterCrossChainMessages(t *testing.T) { // Create chain router nodeID := ids.GenerateTestNodeID() chainRouter := ChainRouter{} - err = chainRouter.Initialize( + require.NoError(t, chainRouter.Initialize( nodeID, logging.NoLog{}, tm, @@ -1265,8 +1258,7 @@ func TestRouterCrossChainMessages(t *testing.T) { HealthConfig{}, "", prometheus.NewRegistry(), - ) - require.NoError(t, err) + )) // Set up validators vdrs := validators.NewSet() @@ -1404,7 +1396,7 @@ func TestConnectedSubnet(t *testing.T) { trackedSubnets := set.Set[ids.ID]{} trackedSubnets.Add(subnetID0, subnetID1) chainRouter := ChainRouter{} - err = chainRouter.Initialize( + require.NoError(t, chainRouter.Initialize( myNodeID, logging.NoLog{}, tm, @@ -1416,8 +1408,7 @@ func TestConnectedSubnet(t *testing.T) { HealthConfig{}, "", prometheus.NewRegistry(), - ) - require.NoError(t, err) + )) // Create bootstrapper, engine and handler platform := snow.DefaultConsensusContextTest() @@ -1516,7 +1507,7 @@ func TestValidatorOnlyAllowedNodeMessageDrops(t *testing.T) { // Create a router chainRouter := ChainRouter{} - err = chainRouter.Initialize( + require.NoError(t, chainRouter.Initialize( ids.EmptyNodeID, logging.NoLog{}, tm, @@ -1528,8 +1519,7 @@ func TestValidatorOnlyAllowedNodeMessageDrops(t *testing.T) { HealthConfig{}, "", prometheus.NewRegistry(), - ) - require.NoError(t, err) + )) // Create bootstrapper, engine and handler calledF := false diff --git a/snow/networking/sender/sender_test.go b/snow/networking/sender/sender_test.go index ea80a621189..176ddba3ad4 100644 --- a/snow/networking/sender/sender_test.go +++ b/snow/networking/sender/sender_test.go @@ -80,7 +80,7 @@ func TestTimeout(t *testing.T) { ) require.NoError(err) - err = chainRouter.Initialize( + require.NoError(chainRouter.Initialize( ids.EmptyNodeID, logging.NoLog{}, tm, @@ -92,8 +92,7 @@ func TestTimeout(t *testing.T) { router.HealthConfig{}, "", prometheus.NewRegistry(), - ) - require.NoError(err) + )) ctx := snow.DefaultConsensusContextTest() externalSender := &ExternalSenderTest{TB: t} @@ -350,7 +349,7 @@ func TestReliableMessages(t *testing.T) { ) 
require.NoError(t, err) - err = chainRouter.Initialize( + require.NoError(t, chainRouter.Initialize( ids.EmptyNodeID, logging.NoLog{}, tm, @@ -362,8 +361,7 @@ func TestReliableMessages(t *testing.T) { router.HealthConfig{}, "", prometheus.NewRegistry(), - ) - require.NoError(t, err) + )) ctx := snow.DefaultConsensusContextTest() @@ -498,7 +496,7 @@ func TestReliableMessagesToMyself(t *testing.T) { ) require.NoError(t, err) - err = chainRouter.Initialize( + require.NoError(t, chainRouter.Initialize( ids.EmptyNodeID, logging.NoLog{}, tm, @@ -510,8 +508,7 @@ func TestReliableMessagesToMyself(t *testing.T) { router.HealthConfig{}, "", prometheus.NewRegistry(), - ) - require.NoError(t, err) + )) ctx := snow.DefaultConsensusContextTest() diff --git a/vms/avm/service_test.go b/vms/avm/service_test.go index d090c6e5f46..2c9e54bd3b5 100644 --- a/vms/avm/service_test.go +++ b/vms/avm/service_test.go @@ -642,29 +642,22 @@ func TestServiceGetAllBalances(t *testing.T) { } func TestServiceGetTx(t *testing.T) { + require := require.New(t) _, vm, s, _, genesisTx := setup(t, true) defer func() { - if err := vm.Shutdown(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(vm.Shutdown(context.Background())) vm.ctx.Lock.Unlock() }() txID := genesisTx.ID() reply := api.GetTxReply{} - err := s.GetTx(nil, &api.GetTxArgs{ + require.NoError(s.GetTx(nil, &api.GetTxArgs{ TxID: txID, - }, &reply) - require.NoError(t, err) - if err != nil { - t.Fatal(err) - } + }, &reply)) txBytes, err := formatting.Decode(reply.Encoding, reply.Tx.(string)) - if err != nil { - t.Fatal(err) - } - require.Equal(t, genesisTx.Bytes(), txBytes, "Wrong tx returned from service.GetTx") + require.NoError(err) + require.Equal(genesisTx.Bytes(), txBytes) } func TestServiceGetTxJSON_BaseTx(t *testing.T) { @@ -1022,7 +1015,7 @@ func TestServiceGetTxJSON_OperationTxWithSecpMintOp(t *testing.T) { genesisBytes := BuildGenesisTest(t) issuer := make(chan common.Message, 1) - err := vm.Initialize( + require.NoError(vm.Initialize( context.Background(), ctx, baseDBManager, @@ -1045,8 +1038,7 @@ func TestServiceGetTxJSON_OperationTxWithSecpMintOp(t *testing.T) { }, }, &common.SenderTest{T: t}, - ) - require.NoError(err) + )) vm.batchTimeout = 0 require.NoError(vm.SetState(context.Background(), snow.Bootstrapping)) @@ -1054,7 +1046,7 @@ func TestServiceGetTxJSON_OperationTxWithSecpMintOp(t *testing.T) { key := keys[0] createAssetTx := newAvaxCreateAssetTxWithOutputs(t, vm) - _, err = vm.IssueTx(createAssetTx.Bytes()) + _, err := vm.IssueTx(createAssetTx.Bytes()) require.NoError(err) mintSecpOpTx := buildOperationTxWithOp(buildSecpMintOp(createAssetTx, key, 0)) @@ -1206,7 +1198,7 @@ func TestServiceGetTxJSON_OperationTxWithPropertyFxMintOp(t *testing.T) { genesisBytes := BuildGenesisTest(t) issuer := make(chan common.Message, 1) - err := vm.Initialize( + require.NoError(vm.Initialize( context.Background(), ctx, baseDBManager, @@ -1229,8 +1221,7 @@ func TestServiceGetTxJSON_OperationTxWithPropertyFxMintOp(t *testing.T) { }, }, &common.SenderTest{T: t}, - ) - require.NoError(err) + )) vm.batchTimeout = 0 require.NoError(vm.SetState(context.Background(), snow.Bootstrapping)) @@ -1238,7 +1229,7 @@ func TestServiceGetTxJSON_OperationTxWithPropertyFxMintOp(t *testing.T) { key := keys[0] createAssetTx := newAvaxCreateAssetTxWithOutputs(t, vm) - _, err = vm.IssueTx(createAssetTx.Bytes()) + _, err := vm.IssueTx(createAssetTx.Bytes()) require.NoError(err) mintPropertyFxOpTx := buildOperationTxWithOp(buildPropertyFxMintOp(createAssetTx, key, 4)) 
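One scope caveat before the next file: the inline form only fits calls whose sole return value is the error. Calls with additional return values, such as vm.IssueTx in the hunks above (its first return value is discarded), keep the two-statement shape; those hunks merely switch = to := once the earlier err assignment is gone. A short sketch of the two cases, both taken from the surrounding tests:

	// Single error return: fold the check into the assertion.
	require.NoError(vm.SetState(context.Background(), snow.Bootstrapping))

	// Multiple return values: the error still has to be captured first.
	_, err := vm.IssueTx(createAssetTx.Bytes())
	require.NoError(err)
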
diff --git a/vms/avm/txs/base_tx_test.go b/vms/avm/txs/base_tx_test.go index 0259c3eb525..f61f35ee60e 100644 --- a/vms/avm/txs/base_tx_test.go +++ b/vms/avm/txs/base_tx_test.go @@ -186,14 +186,13 @@ func TestBaseTxSerialization(t *testing.T) { 0xc8, 0x06, 0xd7, 0x43, 0x00, } - err = tx.SignSECP256K1Fx( + require.NoError(tx.SignSECP256K1Fx( parser.Codec(), [][]*secp256k1.PrivateKey{ {keys[0], keys[0]}, {keys[0], keys[0]}, }, - ) - require.NoError(err) + )) require.Equal(tx.ID().String(), "QnTUuie2qe6BKyYrC2jqd73bJ828QNhYnZbdA2HWsnVRPjBfV") // there are two credentials diff --git a/vms/avm/txs/executor/semantic_verifier_test.go b/vms/avm/txs/executor/semantic_verifier_test.go index 07600560c02..1eb3d2e3501 100644 --- a/vms/avm/txs/executor/semantic_verifier_test.go +++ b/vms/avm/txs/executor/semantic_verifier_test.go @@ -131,13 +131,12 @@ func TestSemanticVerifierBaseTx(t *testing.T) { tx := &txs.Tx{ Unsigned: &baseTx, } - err := tx.SignSECP256K1Fx( + require.NoError(tx.SignSECP256K1Fx( codec, [][]*secp256k1.PrivateKey{ {keys[0]}, }, - ) - require.NoError(err) + )) return tx }, err: nil, @@ -158,13 +157,12 @@ func TestSemanticVerifierBaseTx(t *testing.T) { tx := &txs.Tx{ Unsigned: &baseTx, } - err := tx.SignSECP256K1Fx( + require.NoError(tx.SignSECP256K1Fx( codec, [][]*secp256k1.PrivateKey{ {keys[0]}, }, - ) - require.NoError(err) + )) return tx }, err: errAssetIDMismatch, @@ -190,13 +188,12 @@ func TestSemanticVerifierBaseTx(t *testing.T) { tx := &txs.Tx{ Unsigned: &baseTx, } - err := tx.SignSECP256K1Fx( + require.NoError(tx.SignSECP256K1Fx( codec, [][]*secp256k1.PrivateKey{ {keys[0]}, }, - ) - require.NoError(err) + )) return tx }, err: errIncompatibleFx, @@ -215,13 +212,12 @@ func TestSemanticVerifierBaseTx(t *testing.T) { tx := &txs.Tx{ Unsigned: &baseTx, } - err := tx.SignSECP256K1Fx( + require.NoError(tx.SignSECP256K1Fx( codec, [][]*secp256k1.PrivateKey{ {keys[1]}, }, - ) - require.NoError(err) + )) return tx }, err: secp256k1fx.ErrWrongSig, @@ -239,13 +235,12 @@ func TestSemanticVerifierBaseTx(t *testing.T) { tx := &txs.Tx{ Unsigned: &baseTx, } - err := tx.SignSECP256K1Fx( + require.NoError(tx.SignSECP256K1Fx( codec, [][]*secp256k1.PrivateKey{ {keys[0]}, }, - ) - require.NoError(err) + )) return tx }, err: database.ErrNotFound, @@ -270,13 +265,12 @@ func TestSemanticVerifierBaseTx(t *testing.T) { tx := &txs.Tx{ Unsigned: &baseTx, } - err := tx.SignSECP256K1Fx( + require.NoError(tx.SignSECP256K1Fx( codec, [][]*secp256k1.PrivateKey{ {keys[0]}, }, - ) - require.NoError(err) + )) return tx }, err: secp256k1fx.ErrMismatchedAmounts, @@ -331,13 +325,12 @@ func TestSemanticVerifierBaseTx(t *testing.T) { tx := &txs.Tx{ Unsigned: &baseTx, } - err := tx.SignSECP256K1Fx( + require.NoError(tx.SignSECP256K1Fx( codec, [][]*secp256k1.PrivateKey{ {keys[0]}, }, - ) - require.NoError(err) + )) return tx }, err: database.ErrNotFound, @@ -360,13 +353,12 @@ func TestSemanticVerifierBaseTx(t *testing.T) { tx := &txs.Tx{ Unsigned: &baseTx, } - err := tx.SignSECP256K1Fx( + require.NoError(tx.SignSECP256K1Fx( codec, [][]*secp256k1.PrivateKey{ {keys[0]}, }, - ) - require.NoError(err) + )) return tx }, err: errNotAnAsset, @@ -508,13 +500,12 @@ func TestSemanticVerifierExportTx(t *testing.T) { tx := &txs.Tx{ Unsigned: &exportTx, } - err := tx.SignSECP256K1Fx( + require.NoError(tx.SignSECP256K1Fx( codec, [][]*secp256k1.PrivateKey{ {keys[0]}, }, - ) - require.NoError(err) + )) return tx }, err: nil, @@ -535,13 +526,12 @@ func TestSemanticVerifierExportTx(t *testing.T) { tx := &txs.Tx{ Unsigned: &exportTx, } - err 
:= tx.SignSECP256K1Fx( + require.NoError(tx.SignSECP256K1Fx( codec, [][]*secp256k1.PrivateKey{ {keys[0]}, }, - ) - require.NoError(err) + )) return tx }, err: errAssetIDMismatch, @@ -567,13 +557,12 @@ func TestSemanticVerifierExportTx(t *testing.T) { tx := &txs.Tx{ Unsigned: &exportTx, } - err := tx.SignSECP256K1Fx( + require.NoError(tx.SignSECP256K1Fx( codec, [][]*secp256k1.PrivateKey{ {keys[0]}, }, - ) - require.NoError(err) + )) return tx }, err: errIncompatibleFx, @@ -592,13 +581,12 @@ func TestSemanticVerifierExportTx(t *testing.T) { tx := &txs.Tx{ Unsigned: &exportTx, } - err := tx.SignSECP256K1Fx( + require.NoError(tx.SignSECP256K1Fx( codec, [][]*secp256k1.PrivateKey{ {keys[1]}, }, - ) - require.NoError(err) + )) return tx }, err: secp256k1fx.ErrWrongSig, @@ -616,13 +604,12 @@ func TestSemanticVerifierExportTx(t *testing.T) { tx := &txs.Tx{ Unsigned: &exportTx, } - err := tx.SignSECP256K1Fx( + require.NoError(tx.SignSECP256K1Fx( codec, [][]*secp256k1.PrivateKey{ {keys[0]}, }, - ) - require.NoError(err) + )) return tx }, err: database.ErrNotFound, @@ -647,13 +634,12 @@ func TestSemanticVerifierExportTx(t *testing.T) { tx := &txs.Tx{ Unsigned: &exportTx, } - err := tx.SignSECP256K1Fx( + require.NoError(tx.SignSECP256K1Fx( codec, [][]*secp256k1.PrivateKey{ {keys[0]}, }, - ) - require.NoError(err) + )) return tx }, err: secp256k1fx.ErrMismatchedAmounts, @@ -708,13 +694,12 @@ func TestSemanticVerifierExportTx(t *testing.T) { tx := &txs.Tx{ Unsigned: &exportTx, } - err := tx.SignSECP256K1Fx( + require.NoError(tx.SignSECP256K1Fx( codec, [][]*secp256k1.PrivateKey{ {keys[0]}, }, - ) - require.NoError(err) + )) return tx }, err: database.ErrNotFound, @@ -737,13 +722,12 @@ func TestSemanticVerifierExportTx(t *testing.T) { tx := &txs.Tx{ Unsigned: &exportTx, } - err := tx.SignSECP256K1Fx( + require.NoError(tx.SignSECP256K1Fx( codec, [][]*secp256k1.PrivateKey{ {keys[0]}, }, - ) - require.NoError(err) + )) return tx }, err: errNotAnAsset, @@ -875,13 +859,12 @@ func TestSemanticVerifierExportTxDifferentSubnet(t *testing.T) { tx := &txs.Tx{ Unsigned: &exportTx, } - err = tx.SignSECP256K1Fx( + require.NoError(tx.SignSECP256K1Fx( codec, [][]*secp256k1.PrivateKey{ {keys[0]}, }, - ) - require.NoError(err) + )) err = tx.Unsigned.Visit(&SemanticVerifier{ Backend: backend, diff --git a/vms/avm/txs/export_tx_test.go b/vms/avm/txs/export_tx_test.go index c02504e9e07..a7c1ed16196 100644 --- a/vms/avm/txs/export_tx_test.go +++ b/vms/avm/txs/export_tx_test.go @@ -168,14 +168,13 @@ func TestExportTxSerialization(t *testing.T) { 0x8f, 0xe0, 0x2a, 0xf3, 0xcc, 0x31, 0x32, 0xef, 0xfe, 0x7d, 0x3d, 0x9f, 0x14, 0x94, 0x01, } - err = tx.SignSECP256K1Fx( + require.NoError(tx.SignSECP256K1Fx( parser.Codec(), [][]*secp256k1.PrivateKey{ {keys[0], keys[0]}, {keys[0], keys[0]}, }, - ) - require.NoError(err) + )) require.Equal(tx.ID().String(), "2oG52e7Cb7XF1yUzv3pRFndAypgbpswWRcSAKD5SH5VgaiTm5D") // there are two credentials diff --git a/vms/avm/txs/import_tx_test.go b/vms/avm/txs/import_tx_test.go index 47c1eb01201..4172a404779 100644 --- a/vms/avm/txs/import_tx_test.go +++ b/vms/avm/txs/import_tx_test.go @@ -168,14 +168,13 @@ func TestImportTxSerialization(t *testing.T) { 0x1f, 0x49, 0x9b, 0x0a, 0x4f, 0xbf, 0x95, 0xfc, 0x31, 0x39, 0x46, 0x4e, 0xa1, 0xaf, 0x00, } - err = tx.SignSECP256K1Fx( + require.NoError(tx.SignSECP256K1Fx( parser.Codec(), [][]*secp256k1.PrivateKey{ {keys[0], keys[0]}, {keys[0], keys[0]}, }, - ) - require.NoError(err) + )) require.Equal(tx.ID().String(), 
"pCW7sVBytzdZ1WrqzGY1DvA2S9UaMr72xpUMxVyx1QHBARNYx") // there are two credentials diff --git a/vms/avm/vm_regression_test.go b/vms/avm/vm_regression_test.go index de148c05754..1e9f0f6dd67 100644 --- a/vms/avm/vm_regression_test.go +++ b/vms/avm/vm_regression_test.go @@ -35,7 +35,7 @@ func TestVerifyFxUsage(t *testing.T) { genesisBytes := BuildGenesisTest(t) issuer := make(chan common.Message, 1) - err := vm.Initialize( + require.NoError(vm.Initialize( context.Background(), ctx, manager.NewMemDB(version.Semantic1_0_0), @@ -54,8 +54,7 @@ func TestVerifyFxUsage(t *testing.T) { }, }, nil, - ) - require.NoError(err) + )) vm.batchTimeout = 0 require.NoError(vm.SetState(context.Background(), snow.Bootstrapping)) @@ -98,7 +97,7 @@ func TestVerifyFxUsage(t *testing.T) { }} require.NoError(vm.parser.InitializeTx(createAssetTx)) - _, err = vm.IssueTx(createAssetTx.Bytes()) + _, err := vm.IssueTx(createAssetTx.Bytes()) require.NoError(err) mintNFTTx := &txs.Tx{Unsigned: &txs.OperationTx{ diff --git a/vms/platformvm/blocks/builder/network_test.go b/vms/platformvm/blocks/builder/network_test.go index 29793b4b544..365fc130553 100644 --- a/vms/platformvm/blocks/builder/network_test.go +++ b/vms/platformvm/blocks/builder/network_test.go @@ -100,9 +100,8 @@ func TestMempoolInvalidGossipedTxIsNotAddedToMempool(t *testing.T) { msgBytes, err := message.Build(&msg) require.NoError(err) env.ctx.Lock.Unlock() - err = env.AppGossip(context.Background(), nodeID, msgBytes) + require.NoError(env.AppGossip(context.Background(), nodeID, msgBytes)) env.ctx.Lock.Lock() - require.NoError(err) require.False(env.Builder.Has(txID)) } diff --git a/vms/platformvm/blocks/builder/standard_block_test.go b/vms/platformvm/blocks/builder/standard_block_test.go index 8fa9c71682c..827d7357728 100644 --- a/vms/platformvm/blocks/builder/standard_block_test.go +++ b/vms/platformvm/blocks/builder/standard_block_test.go @@ -54,7 +54,7 @@ func TestAtomicTxImports(t *testing.T) { require.NoError(err) inputID := utxo.InputID() - err = peerSharedMemory.Apply(map[ids.ID]*atomic.Requests{ + require.NoError(peerSharedMemory.Apply(map[ids.ID]*atomic.Requests{ env.ctx.ChainID: {PutRequests: []*atomic.Element{{ Key: inputID[:], Value: utxoBytes, @@ -62,8 +62,7 @@ func TestAtomicTxImports(t *testing.T) { recipientKey.PublicKey().Address().Bytes(), }, }}}, - }) - require.NoError(err) + })) tx, err := env.txBuilder.NewImportTx( env.ctx.XChainID, diff --git a/vms/platformvm/service_test.go b/vms/platformvm/service_test.go index 94b34479d7e..bed97713562 100644 --- a/vms/platformvm/service_test.go +++ b/vms/platformvm/service_test.go @@ -192,7 +192,7 @@ func TestGetTxStatus(t *testing.T) { require.NoError(err) inputID := utxo.InputID() - err = peerSharedMemory.Apply(map[ids.ID]*atomic.Requests{ + require.NoError(peerSharedMemory.Apply(map[ids.ID]*atomic.Requests{ service.vm.ctx.ChainID: { PutRequests: []*atomic.Element{ { @@ -204,8 +204,7 @@ func TestGetTxStatus(t *testing.T) { }, }, }, - }) - require.NoError(err) + })) oldSharedMemory := mutableSharedMemory.SharedMemory mutableSharedMemory.SharedMemory = sm diff --git a/vms/platformvm/txs/executor/import_test.go b/vms/platformvm/txs/executor/import_test.go index 3300751b6fa..c4aac439c10 100644 --- a/vms/platformvm/txs/executor/import_test.go +++ b/vms/platformvm/txs/executor/import_test.go @@ -72,7 +72,7 @@ func TestNewImportTx(t *testing.T) { require.NoError(t, err) inputID := utxo.InputID() - err = peerSharedMemory.Apply(map[ids.ID]*atomic.Requests{ + require.NoError(t, 
peerSharedMemory.Apply(map[ids.ID]*atomic.Requests{ env.ctx.ChainID: { PutRequests: []*atomic.Element{ { @@ -84,9 +84,7 @@ func TestNewImportTx(t *testing.T) { }, }, }, - }, - ) - require.NoError(t, err) + })) } return sm diff --git a/vms/platformvm/vm_regression_test.go b/vms/platformvm/vm_regression_test.go index e1e2f33028e..6a81de2d634 100644 --- a/vms/platformvm/vm_regression_test.go +++ b/vms/platformvm/vm_regression_test.go @@ -590,7 +590,7 @@ func TestRejectedStateRegressionInvalidValidatorTimestamp(t *testing.T) { require.NoError(err) inputID := utxo.InputID() - err = peerSharedMemory.Apply( + require.NoError(peerSharedMemory.Apply( map[ids.ID]*atomic.Requests{ vm.ctx.ChainID: { PutRequests: []*atomic.Element{ @@ -601,8 +601,7 @@ func TestRejectedStateRegressionInvalidValidatorTimestamp(t *testing.T) { }, }, }, - ) - require.NoError(err) + )) // Because the shared memory UTXO has now been populated, the block should // pass verification. @@ -839,7 +838,7 @@ func TestRejectedStateRegressionInvalidValidatorReward(t *testing.T) { require.NoError(err) inputID := utxo.InputID() - err = peerSharedMemory.Apply( + require.NoError(peerSharedMemory.Apply( map[ids.ID]*atomic.Requests{ vm.ctx.ChainID: { PutRequests: []*atomic.Element{ @@ -850,8 +849,7 @@ func TestRejectedStateRegressionInvalidValidatorReward(t *testing.T) { }, }, }, - ) - require.NoError(err) + )) // Because the shared memory UTXO has now been populated, the block should // pass verification. diff --git a/vms/platformvm/vm_test.go b/vms/platformvm/vm_test.go index 109309fe968..5e10d73857c 100644 --- a/vms/platformvm/vm_test.go +++ b/vms/platformvm/vm_test.go @@ -1326,7 +1326,7 @@ func TestAtomicImport(t *testing.T) { require.NoError(err) inputID := utxo.InputID() - err = peerSharedMemory.Apply(map[ids.ID]*atomic.Requests{ + require.NoError(peerSharedMemory.Apply(map[ids.ID]*atomic.Requests{ vm.ctx.ChainID: { PutRequests: []*atomic.Element{ { @@ -1339,8 +1339,7 @@ func TestAtomicImport(t *testing.T) { }, }, }, - ) - require.NoError(err) + )) tx, err := vm.txBuilder.NewImportTx( vm.ctx.XChainID, @@ -1464,7 +1463,7 @@ func TestRestartFullyAccepted(t *testing.T) { firstCtx.Lock.Lock() firstMsgChan := make(chan common.Message, 1) - err := firstVM.Initialize( + require.NoError(firstVM.Initialize( context.Background(), firstCtx, firstDB, @@ -1474,8 +1473,7 @@ func TestRestartFullyAccepted(t *testing.T) { firstMsgChan, nil, nil, - ) - require.NoError(err) + )) genesisID, err := firstVM.LastAccepted(context.Background()) require.NoError(err) @@ -1549,7 +1547,7 @@ func TestRestartFullyAccepted(t *testing.T) { secondDB := db.NewPrefixDBManager([]byte{}) secondMsgChan := make(chan common.Message, 1) - err = secondVM.Initialize( + require.NoError(secondVM.Initialize( context.Background(), secondCtx, secondDB, @@ -1559,8 +1557,7 @@ func TestRestartFullyAccepted(t *testing.T) { secondMsgChan, nil, nil, - ) - require.NoError(err) + )) lastAccepted, err := secondVM.LastAccepted(context.Background()) require.NoError(err) @@ -1609,7 +1606,7 @@ func TestBootstrapPartiallyAccepted(t *testing.T) { ctx.Lock.Lock() msgChan := make(chan common.Message, 1) - err = vm.Initialize( + require.NoError(vm.Initialize( context.Background(), ctx, vmDBManager, @@ -1619,8 +1616,7 @@ func TestBootstrapPartiallyAccepted(t *testing.T) { msgChan, nil, nil, - ) - require.NoError(err) + )) preferred, err := vm.Builder.Preferred() require.NoError(err) @@ -1689,7 +1685,7 @@ func TestBootstrapPartiallyAccepted(t *testing.T) { mc, err := 
message.NewCreator(logging.NoLog{}, metrics, "dummyNamespace", constants.DefaultNetworkCompressionType, 10*time.Second) require.NoError(err) - err = chainRouter.Initialize( + require.NoError(chainRouter.Initialize( ids.EmptyNodeID, logging.NoLog{}, timeoutManager, @@ -1701,8 +1697,7 @@ func TestBootstrapPartiallyAccepted(t *testing.T) { router.HealthConfig{}, "", prometheus.NewRegistry(), - ) - require.NoError(err) + )) externalSender := &sender.ExternalSenderTest{TB: t} externalSender.Default(true) @@ -2078,7 +2073,7 @@ func TestUptimeDisallowedWithRestart(t *testing.T) { firstCtx.Lock.Lock() firstMsgChan := make(chan common.Message, 1) - err := firstVM.Initialize( + require.NoError(firstVM.Initialize( context.Background(), firstCtx, firstDB, @@ -2088,8 +2083,7 @@ func TestUptimeDisallowedWithRestart(t *testing.T) { firstMsgChan, nil, nil, - ) - require.NoError(err) + )) initialClkTime := banffForkTime.Add(time.Second) firstVM.clock.Set(initialClkTime) @@ -2124,7 +2118,7 @@ func TestUptimeDisallowedWithRestart(t *testing.T) { }() secondMsgChan := make(chan common.Message, 1) - err = secondVM.Initialize( + require.NoError(secondVM.Initialize( context.Background(), secondCtx, secondDB, @@ -2134,8 +2128,7 @@ func TestUptimeDisallowedWithRestart(t *testing.T) { secondMsgChan, nil, nil, - ) - require.NoError(err) + )) secondVM.clock.Set(defaultValidateStartTime.Add(2 * defaultMinStakingDuration)) secondVM.uptimeManager.(uptime.TestManager).SetTime(defaultValidateStartTime.Add(2 * defaultMinStakingDuration)) diff --git a/vms/proposervm/vm_test.go b/vms/proposervm/vm_test.go index 01b03c3209e..1c9f339a37f 100644 --- a/vms/proposervm/vm_test.go +++ b/vms/proposervm/vm_test.go @@ -2006,7 +2006,7 @@ func TestRejectedHeightNotIndexed(t *testing.T) { dummyDBManager := manager.NewMemDB(version.Semantic1_0_0) // make sure that DBs are compressed correctly dummyDBManager = dummyDBManager.NewPrefixDBManager([]byte{}) - err := proVM.Initialize( + require.NoError(proVM.Initialize( context.Background(), ctx, dummyDBManager, @@ -2016,8 +2016,7 @@ func TestRejectedHeightNotIndexed(t *testing.T) { nil, nil, nil, - ) - require.NoError(err) + )) // Initialize shouldn't be called again coreVM.InitializeF = nil @@ -2215,7 +2214,7 @@ func TestRejectedOptionHeightNotIndexed(t *testing.T) { dummyDBManager := manager.NewMemDB(version.Semantic1_0_0) // make sure that DBs are compressed correctly dummyDBManager = dummyDBManager.NewPrefixDBManager([]byte{}) - err := proVM.Initialize( + require.NoError(proVM.Initialize( context.Background(), ctx, dummyDBManager, @@ -2225,8 +2224,7 @@ func TestRejectedOptionHeightNotIndexed(t *testing.T) { nil, nil, nil, - ) - require.NoError(err) + )) // Initialize shouldn't be called again coreVM.InitializeF = nil diff --git a/x/merkledb/db_test.go b/x/merkledb/db_test.go index dcb56ce0514..6aa627ed330 100644 --- a/x/merkledb/db_test.go +++ b/x/merkledb/db_test.go @@ -747,13 +747,12 @@ func runRandDBTest(require *require.Assertions, r *rand.Rand, rt randTest) { } rangeProof, err := db.GetRangeProofAtRoot(context.Background(), root, step.key, step.value, 100) require.NoError(err) - err = rangeProof.Verify( + require.NoError(rangeProof.Verify( context.Background(), step.key, step.value, root, - ) - require.NoError(err) + )) require.LessOrEqual(len(rangeProof.KeyValues), 100) case opGenerateChangeProof: root, err := db.GetMerkleRoot(context.Background()) @@ -769,14 +768,13 @@ func runRandDBTest(require *require.Assertions, r *rand.Rand, rt randTest) { require.NoError(err) changeProofDB, err 
:= getBasicDB() require.NoError(err) - err = changeProof.Verify( + require.NoError(changeProof.Verify( context.Background(), changeProofDB, step.key, step.value, root, - ) - require.NoError(err) + )) require.LessOrEqual(len(changeProof.KeyChanges), 100) case opWriteBatch: oldRoot, err := db.GetMerkleRoot(context.Background()) diff --git a/x/merkledb/history_test.go b/x/merkledb/history_test.go index f3c7abfc83b..951efb3c76c 100644 --- a/x/merkledb/history_test.go +++ b/x/merkledb/history_test.go @@ -225,13 +225,12 @@ func Test_History_Trigger_History_Queue_Looping(t *testing.T) { origProof, err := db.GetRangeProof(context.Background(), []byte("k"), []byte("key3"), 10) require.NoError(err) require.NotNil(origProof) - err = origProof.Verify( + require.NoError(origProof.Verify( context.Background(), []byte("k"), []byte("key3"), origRootID, - ) - require.NoError(err) + )) // write a new value into the db, now there should be 2 roots in the history batch = db.NewBatch() @@ -242,13 +241,12 @@ func Test_History_Trigger_History_Queue_Looping(t *testing.T) { newProof, err := db.GetRangeProofAtRoot(context.Background(), origRootID, []byte("k"), []byte("key3"), 10) require.NoError(err) require.NotNil(newProof) - err = newProof.Verify( + require.NoError(newProof.Verify( context.Background(), []byte("k"), []byte("key3"), origRootID, - ) - require.NoError(err) + )) // trigger a new root to be added to the history, which should cause rollover since there can only be 2 batch = db.NewBatch() diff --git a/x/merkledb/proof_test.go b/x/merkledb/proof_test.go index cb5f03cf465..0f1a41582dd 100644 --- a/x/merkledb/proof_test.go +++ b/x/merkledb/proof_test.go @@ -227,13 +227,12 @@ func Test_RangeProof_Extra_Value(t *testing.T) { require.NoError(t, err) require.NotNil(t, proof) - err = proof.Verify( + require.NoError(t, proof.Verify( context.Background(), []byte{1}, []byte{5, 5}, db.root.id, - ) - require.NoError(t, err) + )) proof.KeyValues = append(proof.KeyValues, KeyValue{Key: []byte{5}, Value: []byte{5}}) @@ -556,13 +555,12 @@ func Test_RangeProof(t *testing.T) { // only a single node here since others are duplicates in endproof require.Equal([]byte{1}, proof.StartProof[0].KeyPath.Value) - err = proof.Verify( + require.NoError(proof.Verify( context.Background(), []byte{1}, []byte{3, 5}, db.root.id, - ) - require.NoError(err) + )) } func Test_RangeProof_BadBounds(t *testing.T) { @@ -605,13 +603,12 @@ func Test_RangeProof_NilStart(t *testing.T) { require.Equal(t, SerializedPath{Value: []uint8{0x6b, 0x65, 0x79, 0x30}, NibbleLength: 7}, proof.EndProof[1].KeyPath) require.Equal(t, newPath([]byte("")).Serialize(), proof.EndProof[0].KeyPath) - err = proof.Verify( + require.NoError(t, proof.Verify( context.Background(), nil, []byte("key35"), db.root.id, - ) - require.NoError(t, err) + )) } func Test_RangeProof_NilEnd(t *testing.T) { @@ -638,13 +635,12 @@ func Test_RangeProof_NilEnd(t *testing.T) { require.Equal(t, []byte{0}, proof.EndProof[1].KeyPath.Value) require.Equal(t, []byte{2}, proof.EndProof[2].KeyPath.Value) - err = proof.Verify( + require.NoError(t, proof.Verify( context.Background(), []byte{1}, nil, db.root.id, - ) - require.NoError(t, err) + )) } func Test_RangeProof_EmptyValues(t *testing.T) { @@ -679,13 +675,12 @@ func Test_RangeProof_EmptyValues(t *testing.T) { require.Equal(t, newPath([]byte("key2")).Serialize(), proof.EndProof[2].KeyPath) require.Equal(t, newPath([]byte{}).Serialize(), proof.EndProof[0].KeyPath) - err = proof.Verify( + require.NoError(t, proof.Verify( context.Background(), 
[]byte("key1"), []byte("key2"), db.root.id, - ) - require.NoError(t, err) + )) } func Test_RangeProof_Marshal_Nil(t *testing.T) { diff --git a/x/sync/sync_test.go b/x/sync/sync_test.go index 744c6bc16d1..71b6a1e3b40 100644 --- a/x/sync/sync_test.go +++ b/x/sync/sync_test.go @@ -583,12 +583,11 @@ func TestFindNextKeyRandom(t *testing.T) { // Commit the proof to the local database as we do // in the actual syncer. - err = localDB.CommitRangeProof( + require.NoError(localDB.CommitRangeProof( context.Background(), rangeStart, remoteProof, - ) - require.NoError(err) + )) localProof, err := localDB.GetProof( context.Background(), From e48fbb53c05f6b2f05eaf8f4206da43e502ef92d Mon Sep 17 00:00:00 2001 From: dhrubabasu <7675102+dhrubabasu@users.noreply.github.com> Date: Sat, 13 May 2023 17:10:11 -0400 Subject: [PATCH 34/79] nit --- scripts/lint.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/lint.sh b/scripts/lint.sh index 5f87309bd1a..0281efd1bc4 100755 --- a/scripts/lint.sh +++ b/scripts/lint.sh @@ -117,7 +117,7 @@ function test_require_nil { } function test_require_no_error_inline_func { - if grep -R -zo -P '\t+err :?= ((?!require).|\n)*require\.NoError\((t, )?err\)' .; then + if ggrep -R -zo -P '\t+err :?= ((?!require|panic).|\n)*require\.NoError\((t, )?err\)' .; then echo "" echo "Checking that a function with a single error return doesn't error should be done in-line." echo "" From 9de44644e8cfe31a6d2ad8fa011849b71a775aca Mon Sep 17 00:00:00 2001 From: dhrubabasu <7675102+dhrubabasu@users.noreply.github.com> Date: Sat, 13 May 2023 17:17:59 -0400 Subject: [PATCH 35/79] add linter --- scripts/lint.sh | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/scripts/lint.sh b/scripts/lint.sh index 0281efd1bc4..7f8cca062c1 100755 --- a/scripts/lint.sh +++ b/scripts/lint.sh @@ -21,7 +21,7 @@ fi # by default, "./scripts/lint.sh" runs all lint tests # to run only "license_header" test # TESTS='license_header' ./scripts/lint.sh -TESTS=${TESTS:-"golangci_lint license_header require_error_is_no_funcs_as_params single_import interface_compliance_nil require_equal_zero require_len_zero require_equal_len require_nil require_no_error_inline_func"} +TESTS=${TESTS:-"golangci_lint license_header require_error_is_no_funcs_as_params single_import interface_compliance_nil require_equal_zero require_len_zero require_equal_len require_nil require_no_error_inline_func require_equal_error"} function test_golangci_lint { go install -v github.com/golangci/golangci-lint/cmd/golangci-lint@v1.51.2 @@ -125,6 +125,15 @@ function test_require_no_error_inline_func { fi } +function test_require_equal_error { + if grep -R -o -P 'require.Equal.+?err(\)|,)' .; then + echo "" + echo "Use require.ErrorIs instead of require.Equal when testing for error." + echo "" + return 1 + fi +} + # Ref: https://go.dev/doc/effective_go#blank_implements function test_interface_compliance_nil { if grep -R -o -P '_ .+? 
= &.+?\{\}' .; then From a81f769f15e9aa4027a2249f10ef58b01399d1bc Mon Sep 17 00:00:00 2001 From: dhrubabasu <7675102+dhrubabasu@users.noreply.github.com> Date: Sat, 13 May 2023 17:21:29 -0400 Subject: [PATCH 36/79] fix --- database/linkeddb/linkeddb_test.go | 8 ++++---- database/test_database.go | 18 +++++++++--------- scripts/lint.sh | 2 +- vms/components/avax/utxo_state_test.go | 6 +++--- vms/platformvm/blocks/executor/backend_test.go | 2 +- .../stakeable/stakeable_lock_test.go | 4 ++-- vms/platformvm/state/diff_test.go | 8 ++++---- vms/rpcchainvm/state_syncable_vm_test.go | 8 ++++---- 8 files changed, 28 insertions(+), 28 deletions(-) diff --git a/database/linkeddb/linkeddb_test.go b/database/linkeddb/linkeddb_test.go index c7b24693eaa..0c3d1861a26 100644 --- a/database/linkeddb/linkeddb_test.go +++ b/database/linkeddb/linkeddb_test.go @@ -26,7 +26,7 @@ func TestLinkedDB(t *testing.T) { require.False(has, "db unexpectedly had key %s", key) _, err = ldb.Get(key) - require.Equal(database.ErrNotFound, err, "Expected db.Get to return a Not Found error.") + require.ErrorIs(err, database.ErrNotFound, "Expected db.Get to return a Not Found error.") require.NoError(ldb.Delete(key)) @@ -47,7 +47,7 @@ func TestLinkedDB(t *testing.T) { require.False(has, "db unexpectedly had key %s", key) _, err = ldb.Get(key) - require.Equal(database.ErrNotFound, err, "Expected db.Get to return a Not Found error.") + require.ErrorIs(err, database.ErrNotFound, "Expected db.Get to return a Not Found error.") iterator := db.NewIterator() next := iterator.Next() @@ -401,7 +401,7 @@ func TestLinkedDBHeadKey(t *testing.T) { ldb := NewDefault(db) _, err := ldb.HeadKey() - require.Equal(database.ErrNotFound, err) + require.ErrorIs(err, database.ErrNotFound) key0 := []byte("hello0") value0 := []byte("world0") @@ -434,7 +434,7 @@ func TestLinkedDBHead(t *testing.T) { ldb := NewDefault(db) _, _, err := ldb.Head() - require.Equal(database.ErrNotFound, err) + require.ErrorIs(err, database.ErrNotFound) key0 := []byte("hello0") value0 := []byte("world0") diff --git a/database/test_database.go b/database/test_database.go index 1fcaeb49da6..e3c05892749 100644 --- a/database/test_database.go +++ b/database/test_database.go @@ -75,7 +75,7 @@ func TestSimpleKeyValue(t *testing.T, db Database) { require.False(has) _, err = db.Get(key) - require.Equal(ErrNotFound, err) + require.ErrorIs(err, ErrNotFound) require.NoError(db.Delete(key)) require.NoError(db.Put(key, value)) @@ -95,7 +95,7 @@ func TestSimpleKeyValue(t *testing.T, db Database) { require.False(has) _, err = db.Get(key) - require.Equal(ErrNotFound, err) + require.ErrorIs(err, ErrNotFound) require.NoError(db.Delete(key)) } @@ -107,7 +107,7 @@ func TestKeyEmptyValue(t *testing.T, db Database) { val := []byte(nil) _, err := db.Get(key) - require.Equal(ErrNotFound, err) + require.ErrorIs(err, ErrNotFound) require.NoError(db.Put(key, val)) @@ -128,7 +128,7 @@ func TestEmptyKey(t *testing.T, db Database) { // Test that nil key can be retrieved by empty key _, err := db.Get(nilKey) - require.Equal(ErrNotFound, err) + require.ErrorIs(err, ErrNotFound) require.NoError(db.Put(nilKey, val1)) @@ -157,7 +157,7 @@ func TestSimpleKeyValueClosed(t *testing.T, db Database) { require.False(has) _, err = db.Get(key) - require.Equal(ErrNotFound, err) + require.ErrorIs(err, ErrNotFound) require.NoError(db.Delete(key)) require.NoError(db.Put(key, value)) @@ -173,10 +173,10 @@ func TestSimpleKeyValueClosed(t *testing.T, db Database) { require.NoError(db.Close()) _, err = db.Has(key) - 
require.Equal(ErrClosed, err) + require.ErrorIs(err, ErrClosed) _, err = db.Get(key) - require.Equal(ErrClosed, err) + require.ErrorIs(err, ErrClosed) require.Equal(ErrClosed, db.Put(key, value)) require.Equal(ErrClosed, db.Delete(key)) @@ -287,7 +287,7 @@ func TestBatchDelete(t *testing.T, db Database) { require.False(has) _, err = db.Get(key) - require.Equal(ErrNotFound, err) + require.ErrorIs(err, ErrNotFound) require.NoError(db.Delete(key)) } @@ -1160,6 +1160,6 @@ func FuzzKeyValue(f *testing.F, db Database) { require.False(exists) _, err = db.Get(key) - require.Equal(ErrNotFound, err) + require.ErrorIs(err, ErrNotFound) }) } diff --git a/scripts/lint.sh b/scripts/lint.sh index 7f8cca062c1..dbe069799cf 100755 --- a/scripts/lint.sh +++ b/scripts/lint.sh @@ -126,7 +126,7 @@ function test_require_no_error_inline_func { } function test_require_equal_error { - if grep -R -o -P 'require.Equal.+?err(\)|,)' .; then + if ggrep -R -o -P 'require.Equal.+?err(\)|,)' .; then echo "" echo "Use require.ErrorIs instead of require.Equal when testing for error." echo "" diff --git a/vms/components/avax/utxo_state_test.go b/vms/components/avax/utxo_state_test.go index 864d31f3565..0444285607f 100644 --- a/vms/components/avax/utxo_state_test.go +++ b/vms/components/avax/utxo_state_test.go @@ -58,10 +58,10 @@ func TestUTXOState(t *testing.T) { s := NewUTXOState(db, manager) _, err := s.GetUTXO(utxoID) - require.Equal(database.ErrNotFound, err) + require.ErrorIs(err, database.ErrNotFound) _, err = s.GetUTXO(utxoID) - require.Equal(database.ErrNotFound, err) + require.ErrorIs(err, database.ErrNotFound) require.NoError(s.DeleteUTXO(utxoID)) @@ -78,7 +78,7 @@ func TestUTXOState(t *testing.T) { require.NoError(s.DeleteUTXO(utxoID)) _, err = s.GetUTXO(utxoID) - require.Equal(database.ErrNotFound, err) + require.ErrorIs(err, database.ErrNotFound) require.NoError(s.PutUTXO(utxo)) diff --git a/vms/platformvm/blocks/executor/backend_test.go b/vms/platformvm/blocks/executor/backend_test.go index 63d30873e85..c48e28774f2 100644 --- a/vms/platformvm/blocks/executor/backend_test.go +++ b/vms/platformvm/blocks/executor/backend_test.go @@ -100,7 +100,7 @@ func TestBackendGetBlock(t *testing.T) { blkID := ids.GenerateTestID() state.EXPECT().GetStatelessBlock(blkID).Return(nil, choices.Unknown, database.ErrNotFound) _, err := b.GetBlock(blkID) - require.Equal(database.ErrNotFound, err) + require.ErrorIs(err, database.ErrNotFound) } { diff --git a/vms/platformvm/stakeable/stakeable_lock_test.go b/vms/platformvm/stakeable/stakeable_lock_test.go index 5a6cfce5d8a..f1e5ec78183 100644 --- a/vms/platformvm/stakeable/stakeable_lock_test.go +++ b/vms/platformvm/stakeable/stakeable_lock_test.go @@ -70,7 +70,7 @@ func TestLockOutVerify(t *testing.T) { Locktime: tt.locktime, TransferableOut: tt.transferableOutF(ctrl), } - require.Equal(t, tt.expectedErr, lockOut.Verify()) + require.ErrorIs(t, lockOut.Verify(), tt.expectedErr) }) } } @@ -129,7 +129,7 @@ func TestLockInVerify(t *testing.T) { Locktime: tt.locktime, TransferableIn: tt.transferableInF(ctrl), } - require.Equal(t, tt.expectedErr, lockOut.Verify()) + require.ErrorIs(t, lockOut.Verify(), tt.expectedErr) }) } } diff --git a/vms/platformvm/state/diff_test.go b/vms/platformvm/state/diff_test.go index a91ee816d0a..c6668ffa92c 100644 --- a/vms/platformvm/state/diff_test.go +++ b/vms/platformvm/state/diff_test.go @@ -476,14 +476,14 @@ func assertChainsEqual(t *testing.T, expected, actual Chain) { expectedCurrentStakerIterator, expectedErr := expected.GetCurrentStakerIterator() 
actualCurrentStakerIterator, actualErr := actual.GetCurrentStakerIterator() - require.Equal(t, expectedErr, actualErr) + require.ErrorIs(t, actualErr, expectedErr) if expectedErr == nil { assertIteratorsEqual(t, expectedCurrentStakerIterator, actualCurrentStakerIterator) } expectedPendingStakerIterator, expectedErr := expected.GetPendingStakerIterator() actualPendingStakerIterator, actualErr := actual.GetPendingStakerIterator() - require.Equal(t, expectedErr, actualErr) + require.ErrorIs(t, actualErr, expectedErr) if expectedErr == nil { assertIteratorsEqual(t, expectedPendingStakerIterator, actualPendingStakerIterator) } @@ -500,7 +500,7 @@ func assertChainsEqual(t *testing.T, expected, actual Chain) { expectedSubnets, expectedErr := expected.GetSubnets() actualSubnets, actualErr := actual.GetSubnets() - require.Equal(t, expectedErr, actualErr) + require.ErrorIs(t, actualErr, expectedErr) if expectedErr == nil { require.Equal(t, expectedSubnets, actualSubnets) @@ -509,7 +509,7 @@ func assertChainsEqual(t *testing.T, expected, actual Chain) { expectedChains, expectedErr := expected.GetChains(subnetID) actualChains, actualErr := actual.GetChains(subnetID) - require.Equal(t, expectedErr, actualErr) + require.ErrorIs(t, actualErr, expectedErr) if expectedErr == nil { require.Equal(t, expectedChains, actualChains) } diff --git a/vms/rpcchainvm/state_syncable_vm_test.go b/vms/rpcchainvm/state_syncable_vm_test.go index 4be3941815e..4af9ed7727c 100644 --- a/vms/rpcchainvm/state_syncable_vm_test.go +++ b/vms/rpcchainvm/state_syncable_vm_test.go @@ -339,7 +339,7 @@ func TestGetOngoingSyncStateSummary(t *testing.T) { // test unimplemented case; this is just a guard _, err := vm.GetOngoingSyncStateSummary(context.Background()) - require.Equal(block.ErrStateSyncableVMNotImplemented, err) + require.ErrorIs(err, block.ErrStateSyncableVMNotImplemented) // test successful retrieval summary, err := vm.GetOngoingSyncStateSummary(context.Background()) @@ -364,7 +364,7 @@ func TestGetLastStateSummary(t *testing.T) { // test unimplemented case; this is just a guard _, err := vm.GetLastStateSummary(context.Background()) - require.Equal(block.ErrStateSyncableVMNotImplemented, err) + require.ErrorIs(err, block.ErrStateSyncableVMNotImplemented) // test successful retrieval summary, err := vm.GetLastStateSummary(context.Background()) @@ -389,7 +389,7 @@ func TestParseStateSummary(t *testing.T) { // test unimplemented case; this is just a guard _, err := vm.ParseStateSummary(context.Background(), mockedSummary.Bytes()) - require.Equal(block.ErrStateSyncableVMNotImplemented, err) + require.ErrorIs(err, block.ErrStateSyncableVMNotImplemented) // test successful parsing summary, err := vm.ParseStateSummary(context.Background(), mockedSummary.Bytes()) @@ -418,7 +418,7 @@ func TestGetStateSummary(t *testing.T) { // test unimplemented case; this is just a guard _, err := vm.GetStateSummary(context.Background(), mockedSummary.Height()) - require.Equal(block.ErrStateSyncableVMNotImplemented, err) + require.ErrorIs(err, block.ErrStateSyncableVMNotImplemented) // test successful retrieval summary, err := vm.GetStateSummary(context.Background(), mockedSummary.Height()) From 488d1a89899a59707b6d07dcb6607cdb7ba4102d Mon Sep 17 00:00:00 2001 From: dhrubabasu <7675102+dhrubabasu@users.noreply.github.com> Date: Sat, 13 May 2023 17:23:14 -0400 Subject: [PATCH 37/79] nit --- scripts/lint.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/lint.sh b/scripts/lint.sh index 0281efd1bc4..eee9b3b79cd 100755 --- 
a/scripts/lint.sh +++ b/scripts/lint.sh @@ -117,7 +117,7 @@ function test_require_nil { } function test_require_no_error_inline_func { - if ggrep -R -zo -P '\t+err :?= ((?!require|panic).|\n)*require\.NoError\((t, )?err\)' .; then + if grep -R -zo -P '\t+err :?= ((?!require|panic).|\n)*require\.NoError\((t, )?err\)' .; then echo "" echo "Checking that a function with a single error return doesn't error should be done in-line." echo "" From f17e6743b28bd2872c847bfb8d3fe6564637b44c Mon Sep 17 00:00:00 2001 From: dhrubabasu <7675102+dhrubabasu@users.noreply.github.com> Date: Sat, 13 May 2023 17:24:31 -0400 Subject: [PATCH 38/79] fix --- network/throttling/dial_throttler_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/network/throttling/dial_throttler_test.go b/network/throttling/dial_throttler_test.go index 2eaa32206ab..f8e33846e0a 100644 --- a/network/throttling/dial_throttler_test.go +++ b/network/throttling/dial_throttler_test.go @@ -102,7 +102,7 @@ func TestNoDialThrottler(t *testing.T) { throttler := NewNoDialThrottler() for i := 0; i < 250; i++ { startTime := time.Now() - require.NoError(t, throttler.Acquire(context.Background()) // Should always immediately return) + require.NoError(t, throttler.Acquire(context.Background())) // Should always immediately return require.WithinDuration(t, time.Now(), startTime, 25*time.Millisecond) } } From 52f623e122f108688689971b666f64333a91d7b1 Mon Sep 17 00:00:00 2001 From: Dhruba Basu <7675102+dhrubabasu@users.noreply.github.com> Date: Sat, 13 May 2023 17:26:18 -0400 Subject: [PATCH 39/79] Update scripts/lint.sh Signed-off-by: Dhruba Basu <7675102+dhrubabasu@users.noreply.github.com> --- scripts/lint.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/lint.sh b/scripts/lint.sh index 002393f9162..4a575b74858 100755 --- a/scripts/lint.sh +++ b/scripts/lint.sh @@ -126,7 +126,7 @@ function test_require_no_error_inline_func { } function test_require_equal_error { - if ggrep -R -o -P 'require.Equal.+?err(\)|,)' .; then + if grep -R -o -P 'require.Equal.+?err(\)|,)' .; then echo "" echo "Use require.ErrorIs instead of require.Equal when testing for error." 
echo "" From b30352c39434ddb9c929011f8d5242600252ce65 Mon Sep 17 00:00:00 2001 From: dhrubabasu <7675102+dhrubabasu@users.noreply.github.com> Date: Sat, 13 May 2023 17:33:25 -0400 Subject: [PATCH 40/79] nit --- vms/platformvm/stakeable/stakeable_lock_test.go | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/vms/platformvm/stakeable/stakeable_lock_test.go b/vms/platformvm/stakeable/stakeable_lock_test.go index f1e5ec78183..cb71b4e9ebd 100644 --- a/vms/platformvm/stakeable/stakeable_lock_test.go +++ b/vms/platformvm/stakeable/stakeable_lock_test.go @@ -70,7 +70,8 @@ func TestLockOutVerify(t *testing.T) { Locktime: tt.locktime, TransferableOut: tt.transferableOutF(ctrl), } - require.ErrorIs(t, lockOut.Verify(), tt.expectedErr) + err := lockOut.Verify() + require.ErrorIs(t, err, tt.expectedErr) }) } } @@ -129,7 +130,8 @@ func TestLockInVerify(t *testing.T) { Locktime: tt.locktime, TransferableIn: tt.transferableInF(ctrl), } - require.ErrorIs(t, lockOut.Verify(), tt.expectedErr) + err := lockOut.Verify() + require.ErrorIs(t, err, tt.expectedErr) }) } } From a96a2d1e8d2b4d8c2a86946d03839e96c603aeb9 Mon Sep 17 00:00:00 2001 From: dhrubabasu <7675102+dhrubabasu@users.noreply.github.com> Date: Sat, 13 May 2023 18:37:10 -0400 Subject: [PATCH 41/79] fix regex --- scripts/lint.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/lint.sh b/scripts/lint.sh index eee9b3b79cd..0bc73518947 100755 --- a/scripts/lint.sh +++ b/scripts/lint.sh @@ -117,7 +117,7 @@ function test_require_nil { } function test_require_no_error_inline_func { - if grep -R -zo -P '\t+err :?= ((?!require|panic).|\n)*require\.NoError\((t, )?err\)' .; then + if grep -R -zo -P '\t+err :?= ((?!require|if).|\n)*require\.NoError\((t, )?err\)' .; then echo "" echo "Checking that a function with a single error return doesn't error should be done in-line." 
echo "" From c7d6c759eec948a3f7a3ea6aef2547eea7613cf5 Mon Sep 17 00:00:00 2001 From: dhrubabasu <7675102+dhrubabasu@users.noreply.github.com> Date: Sat, 13 May 2023 19:20:17 -0400 Subject: [PATCH 42/79] Improve `snow/` tests with `require` --- .../snowball/binary_snowball_test.go | 155 +- .../snowball/binary_snowflake_test.go | 43 +- .../snowball/consensus_performance_test.go | 17 +- .../snowball/consensus_reversibility_test.go | 12 +- .../consensus/snowball/nnary_snowball_test.go | 126 +- .../snowball/nnary_snowflake_test.go | 113 +- snow/consensus/snowball/parameters_test.go | 68 +- .../consensus/snowball/unary_snowball_test.go | 49 +- .../snowball/unary_snowflake_test.go | 20 +- snow/consensus/snowman/consensus_test.go | 716 +++------ .../poll/early_term_no_traversal_test.go | 110 +- .../snowman/poll/no_early_term_test.go | 45 +- snow/consensus/snowman/poll/set_test.go | 213 ++- .../avalanche/bootstrap/bootstrapper_test.go | 32 +- snow/engine/avalanche/getter/getter_test.go | 48 +- snow/engine/avalanche/state/unique_vertex.go | 11 +- .../avalanche/state/unique_vertex_test.go | 201 +-- snow/engine/avalanche/vertex/heap_test.go | 68 +- snow/engine/avalanche/vertex/parser_test.go | 12 +- .../avalanche/vertex/stateless_vertex.go | 6 +- .../avalanche/vertex/stateless_vertex_test.go | 38 +- snow/engine/avalanche/vertex/test_builder.go | 5 +- snow/engine/avalanche/vertex/test_parser.go | 5 +- snow/engine/avalanche/vertex/test_storage.go | 11 +- snow/engine/avalanche/vertex/test_vm.go | 14 +- snow/engine/common/queue/jobs_test.go | 82 +- snow/engine/common/queue/test_job.go | 17 +- snow/engine/common/queue/test_parser.go | 5 +- snow/engine/common/requests_test.go | 70 +- snow/engine/common/test_bootstrap_tracker.go | 11 +- snow/engine/common/test_bootstrapable.go | 22 +- snow/engine/common/test_engine.go | 134 +- snow/engine/common/test_sender.go | 71 +- snow/engine/common/test_timer.go | 5 +- snow/engine/common/test_vm.go | 53 +- snow/engine/snowman/block/batched_vm_test.go | 16 +- snow/engine/snowman/block/test_batched_vm.go | 8 +- .../snowman/block/test_height_indexed_vm.go | 8 +- .../snowman/block/test_state_summary.go | 5 +- .../snowman/block/test_state_syncable_vm.go | 17 +- snow/engine/snowman/block/test_vm.go | 17 +- .../snowman/bootstrap/bootstrapper_test.go | 491 ++---- snow/engine/snowman/getter/getter_test.go | 60 +- .../snowman/syncer/state_syncer_test.go | 60 +- snow/engine/snowman/syncer/utils_test.go | 14 +- snow/engine/snowman/test_engine.go | 5 +- snow/engine/snowman/transitive_test.go | 1311 +++++------------ snow/events/blocker_test.go | 44 +- snow/networking/benchlist/benchlist_test.go | 200 ++- snow/networking/benchlist/test_benchable.go | 8 +- snow/networking/handler/engine_test.go | 4 +- snow/networking/handler/handler_test.go | 58 +- snow/networking/router/chain_router_test.go | 106 +- snow/networking/sender/sender_test.go | 33 +- snow/networking/timeout/manager_test.go | 8 +- .../tracker/resource_tracker_test.go | 36 +- snow/networking/tracker/targeter_test.go | 12 +- snow/uptime/locked_calculator_test.go | 2 +- .../gvalidators/validator_state_test.go | 10 +- snow/validators/manager_test.go | 15 +- snow/validators/set_test.go | 19 +- snow/validators/test_state.go | 14 +- 62 files changed, 2009 insertions(+), 3180 deletions(-) diff --git a/snow/consensus/snowball/binary_snowball_test.go b/snow/consensus/snowball/binary_snowball_test.go index 274138c1e1b..69f4e9f44f3 100644 --- a/snow/consensus/snowball/binary_snowball_test.go +++ 
b/snow/consensus/snowball/binary_snowball_test.go @@ -3,9 +3,15 @@ package snowball -import "testing" +import ( + "testing" + + "github.com/stretchr/testify/require" +) func TestBinarySnowball(t *testing.T) { + require := require.New(t) + red := 0 blue := 1 @@ -13,47 +19,29 @@ func TestBinarySnowball(t *testing.T) { sb := binarySnowball{} sb.Initialize(beta, red) - - if pref := sb.Preference(); pref != red { - t.Fatalf("Wrong preference. Expected %d got %d", red, pref) - } else if sb.Finalized() { - t.Fatalf("Finalized too early") - } + require.Equal(red, sb.Preference()) + require.False(sb.Finalized()) sb.RecordSuccessfulPoll(blue) - - if pref := sb.Preference(); pref != blue { - t.Fatalf("Wrong preference. Expected %d got %d", blue, pref) - } else if sb.Finalized() { - t.Fatalf("Finalized too early") - } + require.Equal(blue, sb.Preference()) + require.False(sb.Finalized()) sb.RecordSuccessfulPoll(red) - - if pref := sb.Preference(); pref != blue { - t.Fatalf("Wrong preference. Expected %d got %d", blue, pref) - } else if sb.Finalized() { - t.Fatalf("Finalized too early") - } + require.Equal(blue, sb.Preference()) + require.False(sb.Finalized()) sb.RecordSuccessfulPoll(blue) - - if pref := sb.Preference(); pref != blue { - t.Fatalf("Wrong preference. Expected %d got %d", blue, pref) - } else if sb.Finalized() { - t.Fatalf("Finalized too early") - } + require.Equal(blue, sb.Preference()) + require.False(sb.Finalized()) sb.RecordSuccessfulPoll(blue) - - if pref := sb.Preference(); pref != blue { - t.Fatalf("Wrong preference. Expected %d got %d", blue, pref) - } else if !sb.Finalized() { - t.Fatalf("Didn't finalized correctly") - } + require.Equal(blue, sb.Preference()) + require.True(sb.Finalized()) } func TestBinarySnowballRecordUnsuccessfulPoll(t *testing.T) { + require := require.New(t) + red := 0 blue := 1 @@ -61,46 +49,30 @@ func TestBinarySnowballRecordUnsuccessfulPoll(t *testing.T) { sb := binarySnowball{} sb.Initialize(beta, red) - - if pref := sb.Preference(); pref != red { - t.Fatalf("Wrong preference. Expected %d got %d", red, pref) - } else if sb.Finalized() { - t.Fatalf("Finalized too early") - } + require.Equal(red, sb.Preference()) + require.False(sb.Finalized()) sb.RecordSuccessfulPoll(blue) - - if pref := sb.Preference(); pref != blue { - t.Fatalf("Wrong preference. Expected %d got %d", blue, pref) - } else if sb.Finalized() { - t.Fatalf("Finalized too early") - } + require.Equal(blue, sb.Preference()) + require.False(sb.Finalized()) sb.RecordUnsuccessfulPoll() sb.RecordSuccessfulPoll(blue) - - if pref := sb.Preference(); pref != blue { - t.Fatalf("Wrong preference. Expected %d got %d", blue, pref) - } else if sb.Finalized() { - t.Fatalf("Finalized too early") - } + require.Equal(blue, sb.Preference()) + require.False(sb.Finalized()) sb.RecordSuccessfulPoll(blue) - - if pref := sb.Preference(); pref != blue { - t.Fatalf("Wrong preference. Expected %d got %d", blue, pref) - } else if !sb.Finalized() { - t.Fatalf("Finalized too late") - } + require.Equal(blue, sb.Preference()) + require.True(sb.Finalized()) expected := "SB(Preference = 1, NumSuccessfulPolls[0] = 0, NumSuccessfulPolls[1] = 3, SF(Confidence = 2, Finalized = true, SL(Preference = 1)))" - if str := sb.String(); str != expected { - t.Fatalf("Wrong state. 
Expected:\n%s\nGot:\n%s", expected, str) - } + require.Equal(expected, sb.String()) } func TestBinarySnowballAcceptWeirdColor(t *testing.T) { + require := require.New(t) + blue := 0 red := 1 @@ -109,53 +81,39 @@ func TestBinarySnowballAcceptWeirdColor(t *testing.T) { sb := binarySnowball{} sb.Initialize(beta, red) - if pref := sb.Preference(); pref != red { - t.Fatalf("Wrong preference. Expected %d got %d", red, pref) - } else if sb.Finalized() { - t.Fatalf("Finalized too early") - } + require.Equal(red, sb.Preference()) + require.False(sb.Finalized()) sb.RecordSuccessfulPoll(red) sb.RecordUnsuccessfulPoll() - if pref := sb.Preference(); pref != red { - t.Fatalf("Wrong preference. Expected %d got %d", red, pref) - } else if sb.Finalized() { - t.Fatalf("Finalized too early") - } + require.Equal(red, sb.Preference()) + require.False(sb.Finalized()) sb.RecordSuccessfulPoll(red) + sb.RecordUnsuccessfulPoll() - if pref := sb.Preference(); pref != red { - t.Fatalf("Wrong preference. Expected %d got %d", red, pref) - } else if sb.Finalized() { - t.Fatalf("Finalized too early") - } + require.Equal(red, sb.Preference()) + require.False(sb.Finalized()) sb.RecordSuccessfulPoll(blue) - if pref := sb.Preference(); pref != red { - t.Fatalf("Wrong preference. Expected %d got %d", red, pref) - } else if sb.Finalized() { - t.Fatalf("Finalized too early") - } + require.Equal(red, sb.Preference()) + require.False(sb.Finalized()) sb.RecordSuccessfulPoll(blue) - if pref := sb.Preference(); pref != blue { - t.Fatalf("Wrong preference. Expected %d got %d", blue, pref) - } else if !sb.Finalized() { - t.Fatalf("Finalized too late") - } + require.Equal(blue, sb.Preference()) + require.True(sb.Finalized()) expected := "SB(Preference = 1, NumSuccessfulPolls[0] = 2, NumSuccessfulPolls[1] = 2, SF(Confidence = 2, Finalized = true, SL(Preference = 0)))" - if str := sb.String(); str != expected { - t.Fatalf("Wrong state. Expected:\n%s\nGot:\n%s", expected, str) - } + require.Equal(expected, sb.String()) } func TestBinarySnowballLockColor(t *testing.T) { + require := require.New(t) + red := 0 blue := 1 @@ -166,30 +124,19 @@ func TestBinarySnowballLockColor(t *testing.T) { sb.RecordSuccessfulPoll(red) - if pref := sb.Preference(); pref != red { - t.Fatalf("Wrong preference. Expected %d got %d", red, pref) - } else if !sb.Finalized() { - t.Fatalf("Finalized too late") - } + require.Equal(red, sb.Preference()) + require.True(sb.Finalized()) sb.RecordSuccessfulPoll(blue) - if pref := sb.Preference(); pref != red { - t.Fatalf("Wrong preference. Expected %d got %d", red, pref) - } else if !sb.Finalized() { - t.Fatalf("Finalized too late") - } + require.Equal(red, sb.Preference()) + require.True(sb.Finalized()) sb.RecordSuccessfulPoll(blue) - if pref := sb.Preference(); pref != red { - t.Fatalf("Wrong preference. Expected %d got %d", red, pref) - } else if !sb.Finalized() { - t.Fatalf("Finalized too late") - } + require.Equal(red, sb.Preference()) + require.True(sb.Finalized()) expected := "SB(Preference = 1, NumSuccessfulPolls[0] = 1, NumSuccessfulPolls[1] = 2, SF(Confidence = 1, Finalized = true, SL(Preference = 0)))" - if str := sb.String(); str != expected { - t.Fatalf("Wrong state. 
Expected:\n%s\nGot:\n%s", expected, str) - } + require.Equal(expected, sb.String()) } diff --git a/snow/consensus/snowball/binary_snowflake_test.go b/snow/consensus/snowball/binary_snowflake_test.go index d93e81cacc1..fd6dea5c176 100644 --- a/snow/consensus/snowball/binary_snowflake_test.go +++ b/snow/consensus/snowball/binary_snowflake_test.go @@ -3,9 +3,15 @@ package snowball -import "testing" +import ( + "testing" + + "github.com/stretchr/testify/require" +) func TestBinarySnowflake(t *testing.T) { + require := require.New(t) + blue := 0 red := 1 @@ -14,41 +20,26 @@ func TestBinarySnowflake(t *testing.T) { sf := binarySnowflake{} sf.Initialize(beta, red) - if pref := sf.Preference(); pref != red { - t.Fatalf("Wrong preference. Expected %d got %d", red, pref) - } else if sf.Finalized() { - t.Fatalf("Finalized too early") - } + require.Equal(red, sf.Preference()) + require.False(sf.Finalized()) sf.RecordSuccessfulPoll(blue) - if pref := sf.Preference(); pref != blue { - t.Fatalf("Wrong preference. Expected %d got %d", blue, pref) - } else if sf.Finalized() { - t.Fatalf("Finalized too early") - } + require.Equal(blue, sf.Preference()) + require.False(sf.Finalized()) sf.RecordSuccessfulPoll(red) - if pref := sf.Preference(); pref != red { - t.Fatalf("Wrong preference. Expected %d got %d", red, pref) - } else if sf.Finalized() { - t.Fatalf("Finalized too early") - } + require.Equal(red, sf.Preference()) + require.False(sf.Finalized()) sf.RecordSuccessfulPoll(blue) - if pref := sf.Preference(); pref != blue { - t.Fatalf("Wrong preference. Expected %d got %d", blue, pref) - } else if sf.Finalized() { - t.Fatalf("Finalized too early") - } + require.Equal(blue, sf.Preference()) + require.False(sf.Finalized()) sf.RecordSuccessfulPoll(blue) - if pref := sf.Preference(); pref != blue { - t.Fatalf("Wrong preference. 
Expected %d got %d", blue, pref) - } else if !sf.Finalized() { - t.Fatalf("Didn't finalized correctly") - } + require.Equal(blue, sf.Preference()) + require.True(sf.Finalized()) } diff --git a/snow/consensus/snowball/consensus_performance_test.go b/snow/consensus/snowball/consensus_performance_test.go index 16d1a924623..59ae6de287f 100644 --- a/snow/consensus/snowball/consensus_performance_test.go +++ b/snow/consensus/snowball/consensus_performance_test.go @@ -6,10 +6,14 @@ package snowball import ( "testing" + "github.com/stretchr/testify/require" + "github.com/ava-labs/avalanchego/utils/sampler" ) func TestSnowballOptimized(t *testing.T) { + require := require.New(t) + numColors := 10 numNodes := 100 params := Parameters{ @@ -42,16 +46,11 @@ func TestSnowballOptimized(t *testing.T) { numRounds++ } - if nBitwise.Disagreement() || nNaive.Disagreement() { - t.Fatalf("Network agreed on inconsistent values") - } + require.False(nBitwise.Disagreement()) + require.False(nNaive.Disagreement()) // Although this can theoretically fail with a correct implementation, it // shouldn't in practice - if !nBitwise.Finalized() { - t.Fatalf("Network agreed on values faster with naive implementation") - } - if !nBitwise.Agreement() { - t.Fatalf("Network agreed on inconsistent values") - } + require.True(nBitwise.Finalized()) + require.True(nBitwise.Agreement()) } diff --git a/snow/consensus/snowball/consensus_reversibility_test.go b/snow/consensus/snowball/consensus_reversibility_test.go index 7ce053c2d66..96c4315c5d9 100644 --- a/snow/consensus/snowball/consensus_reversibility_test.go +++ b/snow/consensus/snowball/consensus_reversibility_test.go @@ -6,10 +6,14 @@ package snowball import ( "testing" + "github.com/stretchr/testify/require" + "github.com/ava-labs/avalanchego/utils/sampler" ) func TestSnowballGovernance(t *testing.T) { + require := require.New(t) + numColors := 2 numNodes := 100 numByzantine := 10 @@ -28,9 +32,7 @@ func TestSnowballGovernance(t *testing.T) { } for _, node := range nBitwise.nodes { - if node.Preference() != nBitwise.colors[0] { - t.Fatalf("Wrong preferences") - } + require.Equal(nBitwise.colors[0], node.Preference()) } for i := 0; i < numNodes-numByzantine-numRed; i++ { @@ -49,8 +51,6 @@ func TestSnowballGovernance(t *testing.T) { if _, ok := node.(*Byzantine); ok { continue } - if node.Preference() != nBitwise.colors[0] { - t.Fatalf("Wrong preferences") - } + require.Equal(nBitwise.colors[0], node.Preference()) } } diff --git a/snow/consensus/snowball/nnary_snowball_test.go b/snow/consensus/snowball/nnary_snowball_test.go index af708ec79de..c8864fee57a 100644 --- a/snow/consensus/snowball/nnary_snowball_test.go +++ b/snow/consensus/snowball/nnary_snowball_test.go @@ -3,9 +3,15 @@ package snowball -import "testing" +import ( + "testing" + + "github.com/stretchr/testify/require" +) func TestNnarySnowball(t *testing.T) { + require := require.New(t) + betaVirtuous := 2 betaRogue := 2 @@ -14,68 +20,46 @@ func TestNnarySnowball(t *testing.T) { sb.Add(Blue) sb.Add(Green) - if pref := sb.Preference(); Red != pref { - t.Fatalf("Wrong preference. Expected %s got %s", Red, pref) - } else if sb.Finalized() { - t.Fatalf("Finalized too early") - } + require.Equal(Red, sb.Preference()) + require.False(sb.Finalized()) sb.RecordSuccessfulPoll(Blue) - - if pref := sb.Preference(); Blue != pref { - t.Fatalf("Wrong preference. 
Expected %s got %s", Blue, pref) - } else if sb.Finalized() { - t.Fatalf("Finalized too early") - } + require.Equal(Blue, sb.Preference()) + require.False(sb.Finalized()) sb.RecordSuccessfulPoll(Red) - - if pref := sb.Preference(); Blue != pref { - t.Fatalf("Wrong preference. Expected %s got %s", Blue, pref) - } else if sb.Finalized() { - t.Fatalf("Finalized too early") - } + require.Equal(Blue, sb.Preference()) + require.False(sb.Finalized()) sb.RecordSuccessfulPoll(Blue) - - if pref := sb.Preference(); Blue != pref { - t.Fatalf("Wrong preference. Expected %s got %s", Blue, pref) - } else if sb.Finalized() { - t.Fatalf("Finalized too early") - } + require.Equal(Blue, sb.Preference()) + require.False(sb.Finalized()) sb.RecordSuccessfulPoll(Blue) - - if pref := sb.Preference(); Blue != pref { - t.Fatalf("Wrong preference. Expected %s got %s", Blue, pref) - } else if !sb.Finalized() { - t.Fatalf("Should be finalized") - } + require.Equal(Blue, sb.Preference()) + require.True(sb.Finalized()) } func TestVirtuousNnarySnowball(t *testing.T) { + require := require.New(t) + betaVirtuous := 1 betaRogue := 2 sb := nnarySnowball{} sb.Initialize(betaVirtuous, betaRogue, Red) - if pref := sb.Preference(); Red != pref { - t.Fatalf("Wrong preference. Expected %s got %s", Red, pref) - } else if sb.Finalized() { - t.Fatalf("Finalized too early") - } + require.Equal(Red, sb.Preference()) + require.False(sb.Finalized()) sb.RecordSuccessfulPoll(Red) - - if pref := sb.Preference(); Red != pref { - t.Fatalf("Wrong preference. Expected %s got %s", Red, pref) - } else if !sb.Finalized() { - t.Fatalf("Should be finalized") - } + require.Equal(Red, sb.Preference()) + require.True(sb.Finalized()) } func TestNarySnowballRecordUnsuccessfulPoll(t *testing.T) { + require := require.New(t) + betaVirtuous := 2 betaRogue := 2 @@ -83,55 +67,39 @@ func TestNarySnowballRecordUnsuccessfulPoll(t *testing.T) { sb.Initialize(betaVirtuous, betaRogue, Red) sb.Add(Blue) - if pref := sb.Preference(); Red != pref { - t.Fatalf("Wrong preference. Expected %s got %s", Red, pref) - } else if sb.Finalized() { - t.Fatalf("Finalized too early") - } + require.Equal(Red, sb.Preference()) + require.False(sb.Finalized()) sb.RecordSuccessfulPoll(Blue) - - if pref := sb.Preference(); Blue != pref { - t.Fatalf("Wrong preference. Expected %s got %s", Blue, pref) - } else if sb.Finalized() { - t.Fatalf("Finalized too early") - } + require.Equal(Blue, sb.Preference()) + require.False(sb.Finalized()) sb.RecordUnsuccessfulPoll() sb.RecordSuccessfulPoll(Blue) - if pref := sb.Preference(); Blue != pref { - t.Fatalf("Wrong preference. Expected %s got %s", Blue, pref) - } else if sb.Finalized() { - t.Fatalf("Finalized too early") - } + require.Equal(Blue, sb.Preference()) + require.False(sb.Finalized()) sb.RecordSuccessfulPoll(Blue) - if pref := sb.Preference(); Blue != pref { - t.Fatalf("Wrong preference. Expected %s got %s", Blue, pref) - } else if !sb.Finalized() { - t.Fatalf("Finalized too late") - } + require.Equal(Blue, sb.Preference()) + require.True(sb.Finalized()) expected := "SB(Preference = TtF4d2QWbk5vzQGTEPrN48x6vwgAoAmKQ9cbp79inpQmcRKES, NumSuccessfulPolls = 3, SF(Confidence = 2, Finalized = true, SL(Preference = TtF4d2QWbk5vzQGTEPrN48x6vwgAoAmKQ9cbp79inpQmcRKES)))" - if str := sb.String(); str != expected { - t.Fatalf("Wrong state. Expected:\n%s\nGot:\n%s", expected, str) - } + require.Equal(expected, sb.String()) for i := 0; i < 4; i++ { sb.RecordSuccessfulPoll(Red) - if pref := sb.Preference(); Blue != pref { - t.Fatalf("Wrong preference. 
Expected %s got %s", Blue, pref) - } else if !sb.Finalized() { - t.Fatalf("Finalized too late") - } + require.Equal(Blue, sb.Preference()) + require.True(sb.Finalized()) } } func TestNarySnowballDifferentSnowflakeColor(t *testing.T) { + require := require.New(t) + betaVirtuous := 2 betaRogue := 2 @@ -139,23 +107,15 @@ func TestNarySnowballDifferentSnowflakeColor(t *testing.T) { sb.Initialize(betaVirtuous, betaRogue, Red) sb.Add(Blue) - if pref := sb.Preference(); Red != pref { - t.Fatalf("Wrong preference. Expected %s got %s", Red, pref) - } else if sb.Finalized() { - t.Fatalf("Finalized too early") - } + require.Equal(Red, sb.Preference()) + require.False(sb.Finalized()) sb.RecordSuccessfulPoll(Blue) - if pref := sb.nnarySnowflake.Preference(); Blue != pref { - t.Fatalf("Wrong preference. Expected %s got %s", Blue, pref) - } + require.Equal(Blue, sb.nnarySnowflake.Preference()) sb.RecordSuccessfulPoll(Red) - if pref := sb.Preference(); Blue != pref { - t.Fatalf("Wrong preference. Expected %s got %s", Blue, pref) - } else if pref := sb.nnarySnowflake.Preference(); Red != pref { - t.Fatalf("Wrong preference. Expected %s got %s", Blue, pref) - } + require.Equal(Blue, sb.Preference()) + require.Equal(Red, sb.nnarySnowflake.Preference()) } diff --git a/snow/consensus/snowball/nnary_snowflake_test.go b/snow/consensus/snowball/nnary_snowflake_test.go index 5a958d55cf0..40c4f5e248e 100644 --- a/snow/consensus/snowball/nnary_snowflake_test.go +++ b/snow/consensus/snowball/nnary_snowflake_test.go @@ -3,9 +3,15 @@ package snowball -import "testing" +import ( + "testing" + + "github.com/stretchr/testify/require" +) func TestNnarySnowflake(t *testing.T) { + require := require.New(t) + betaVirtuous := 2 betaRogue := 2 @@ -14,119 +20,74 @@ func TestNnarySnowflake(t *testing.T) { sf.Add(Blue) sf.Add(Green) - if pref := sf.Preference(); Red != pref { - t.Fatalf("Wrong preference. Expected %s got %s", Red, pref) - } else if sf.Finalized() { - t.Fatalf("Finalized too early") - } + require.Equal(Red, sf.Preference()) + require.False(sf.Finalized()) sf.RecordSuccessfulPoll(Blue) - - if pref := sf.Preference(); Blue != pref { - t.Fatalf("Wrong preference. Expected %s got %s", Blue, pref) - } else if sf.Finalized() { - t.Fatalf("Finalized too early") - } + require.Equal(Blue, sf.Preference()) + require.False(sf.Finalized()) sf.RecordSuccessfulPoll(Red) - - if pref := sf.Preference(); Red != pref { - t.Fatalf("Wrong preference. Expected %s got %s", Red, pref) - } else if sf.Finalized() { - t.Fatalf("Finalized too early") - } + require.Equal(Red, sf.Preference()) + require.False(sf.Finalized()) sf.RecordSuccessfulPoll(Red) - - if pref := sf.Preference(); Red != pref { - t.Fatalf("Wrong preference. Expected %s got %s", Red, pref) - } else if !sf.Finalized() { - t.Fatalf("Should be finalized") - } + require.Equal(Red, sf.Preference()) + require.True(sf.Finalized()) sf.RecordSuccessfulPoll(Blue) - - if pref := sf.Preference(); Red != pref { - t.Fatalf("Wrong preference. Expected %s got %s", Red, pref) - } else if !sf.Finalized() { - t.Fatalf("Should be finalized") - } + require.Equal(Red, sf.Preference()) + require.True(sf.Finalized()) } func TestVirtuousNnarySnowflake(t *testing.T) { + require := require.New(t) + betaVirtuous := 2 betaRogue := 3 sb := nnarySnowflake{} sb.Initialize(betaVirtuous, betaRogue, Red) - if pref := sb.Preference(); Red != pref { - t.Fatalf("Wrong preference. 
Expected %s got %s", Red, pref) - } else if sb.Finalized() { - t.Fatalf("Finalized too early") - } + require.Equal(Red, sb.Preference()) + require.False(sb.Finalized()) sb.RecordSuccessfulPoll(Red) - - if pref := sb.Preference(); Red != pref { - t.Fatalf("Wrong preference. Expected %s got %s", Red, pref) - } else if sb.Finalized() { - t.Fatalf("Finalized too early") - } + require.Equal(Red, sb.Preference()) + require.False(sb.Finalized()) sb.RecordSuccessfulPoll(Red) - - if pref := sb.Preference(); Red != pref { - t.Fatalf("Wrong preference. Expected %s got %s", Red, pref) - } else if !sb.Finalized() { - t.Fatalf("Should be finalized") - } + require.Equal(Red, sb.Preference()) + require.True(sb.Finalized()) } func TestRogueNnarySnowflake(t *testing.T) { + require := require.New(t) + betaVirtuous := 1 betaRogue := 2 sb := nnarySnowflake{} sb.Initialize(betaVirtuous, betaRogue, Red) - if sb.rogue { - t.Fatalf("Shouldn't be rogue") - } + require.False(sb.rogue) sb.Add(Red) - if sb.rogue { - t.Fatalf("Shouldn't be rogue") - } + require.False(sb.rogue) sb.Add(Blue) - if !sb.rogue { - t.Fatalf("Should be rogue") - } + require.True(sb.rogue) sb.Add(Red) - if !sb.rogue { - t.Fatalf("Should be rogue") - } + require.True(sb.rogue) - if pref := sb.Preference(); Red != pref { - t.Fatalf("Wrong preference. Expected %s got %s", Red, pref) - } else if sb.Finalized() { - t.Fatalf("Finalized too early") - } + require.Equal(Red, sb.Preference()) + require.False(sb.Finalized()) sb.RecordSuccessfulPoll(Red) - - if pref := sb.Preference(); Red != pref { - t.Fatalf("Wrong preference. Expected %s got %s", Red, pref) - } else if sb.Finalized() { - t.Fatalf("Finalized too early") - } + require.Equal(Red, sb.Preference()) + require.False(sb.Finalized()) sb.RecordSuccessfulPoll(Red) - - if pref := sb.Preference(); Red != pref { - t.Fatalf("Wrong preference. 
Expected %s got %s", Red, pref) - } else if !sb.Finalized() { - t.Fatalf("Should be finalized") - } + require.Equal(Red, sb.Preference()) + require.True(sb.Finalized()) } diff --git a/snow/consensus/snowball/parameters_test.go b/snow/consensus/snowball/parameters_test.go index 3fc2632cdc6..da75749115b 100644 --- a/snow/consensus/snowball/parameters_test.go +++ b/snow/consensus/snowball/parameters_test.go @@ -5,11 +5,14 @@ package snowball import ( "fmt" - "strings" "testing" + + "github.com/stretchr/testify/require" ) func TestParametersVerify(t *testing.T) { + require := require.New(t) + p := Parameters{ K: 1, Alpha: 1, @@ -21,12 +24,12 @@ func TestParametersVerify(t *testing.T) { MaxItemProcessingTime: 1, } - if err := p.Verify(); err != nil { - t.Fatal(err) - } + require.NoError(p.Verify()) } func TestParametersAnotherVerify(t *testing.T) { + require := require.New(t) + p := Parameters{ K: 1, Alpha: 1, @@ -38,12 +41,12 @@ func TestParametersAnotherVerify(t *testing.T) { MaxItemProcessingTime: 1, } - if err := p.Verify(); err != nil { - t.Fatal(err) - } + require.NoError(p.Verify()) } func TestParametersYetAnotherVerify(t *testing.T) { + require := require.New(t) + p := Parameters{ K: 1, Alpha: 1, @@ -55,9 +58,7 @@ func TestParametersYetAnotherVerify(t *testing.T) { MaxItemProcessingTime: 1, } - if err := p.Verify(); err != nil { - t.Fatal(err) - } + require.NoError(p.Verify()) } func TestParametersInvalidK(t *testing.T) { @@ -72,9 +73,8 @@ func TestParametersInvalidK(t *testing.T) { MaxItemProcessingTime: 1, } - if err := p.Verify(); err == nil { - t.Fatalf("Should have failed due to invalid k") - } + err := p.Verify() + require.ErrorIs(t, err, ErrParametersInvalid) } func TestParametersInvalidAlpha(t *testing.T) { @@ -89,9 +89,8 @@ func TestParametersInvalidAlpha(t *testing.T) { MaxItemProcessingTime: 1, } - if err := p.Verify(); err == nil { - t.Fatalf("Should have failed due to invalid alpha") - } + err := p.Verify() + require.ErrorIs(t, err, ErrParametersInvalid) } func TestParametersInvalidBetaVirtuous(t *testing.T) { @@ -106,9 +105,8 @@ func TestParametersInvalidBetaVirtuous(t *testing.T) { MaxItemProcessingTime: 1, } - if err := p.Verify(); err == nil { - t.Fatalf("Should have failed due to invalid beta virtuous") - } + err := p.Verify() + require.ErrorIs(t, err, ErrParametersInvalid) } func TestParametersInvalidBetaRogue(t *testing.T) { @@ -123,9 +121,8 @@ func TestParametersInvalidBetaRogue(t *testing.T) { MaxItemProcessingTime: 1, } - if err := p.Verify(); err == nil { - t.Fatalf("Should have failed due to invalid beta rogue") - } + err := p.Verify() + require.ErrorIs(t, err, ErrParametersInvalid) } func TestParametersAnotherInvalidBetaRogue(t *testing.T) { @@ -140,11 +137,8 @@ func TestParametersAnotherInvalidBetaRogue(t *testing.T) { MaxItemProcessingTime: 1, } - if err := p.Verify(); err == nil { - t.Fatalf("Should have failed due to invalid beta rogue") - } else if !strings.Contains(err.Error(), "\n") { - t.Fatalf("Should have described the extensive error") - } + err := p.Verify() + require.ErrorIs(t, err, ErrParametersInvalid) } func TestParametersInvalidConcurrentRepolls(t *testing.T) { @@ -173,9 +167,8 @@ func TestParametersInvalidConcurrentRepolls(t *testing.T) { for _, p := range tests { label := fmt.Sprintf("ConcurrentRepolls=%d", p.ConcurrentRepolls) t.Run(label, func(t *testing.T) { - if err := p.Verify(); err == nil { - t.Error("Should have failed due to invalid concurrent repolls") - } + err := p.Verify() + require.ErrorIs(t, err, ErrParametersInvalid) }) } } @@ 
-192,9 +185,8 @@ func TestParametersInvalidOptimalProcessing(t *testing.T) { MaxItemProcessingTime: 1, } - if err := p.Verify(); err == nil { - t.Fatalf("Should have failed due to invalid optimal processing") - } + err := p.Verify() + require.ErrorIs(t, err, ErrParametersInvalid) } func TestParametersInvalidMaxOutstandingItems(t *testing.T) { @@ -209,9 +201,8 @@ func TestParametersInvalidMaxOutstandingItems(t *testing.T) { MaxItemProcessingTime: 1, } - if err := p.Verify(); err == nil { - t.Fatalf("Should have failed due to invalid max outstanding items") - } + err := p.Verify() + require.ErrorIs(t, err, ErrParametersInvalid) } func TestParametersInvalidMaxItemProcessingTime(t *testing.T) { @@ -226,7 +217,6 @@ func TestParametersInvalidMaxItemProcessingTime(t *testing.T) { MaxItemProcessingTime: 0, } - if err := p.Verify(); err == nil { - t.Fatalf("Should have failed due to invalid max item processing time") - } + err := p.Verify() + require.ErrorIs(t, err, ErrParametersInvalid) } diff --git a/snow/consensus/snowball/unary_snowball_test.go b/snow/consensus/snowball/unary_snowball_test.go index 54c5e47b443..99ba4ec81c4 100644 --- a/snow/consensus/snowball/unary_snowball_test.go +++ b/snow/consensus/snowball/unary_snowball_test.go @@ -10,13 +10,11 @@ import ( ) func UnarySnowballStateTest(t *testing.T, sb *unarySnowball, expectedNumSuccessfulPolls, expectedConfidence int, expectedFinalized bool) { - if numSuccessfulPolls := sb.numSuccessfulPolls; numSuccessfulPolls != expectedNumSuccessfulPolls { - t.Fatalf("Wrong numSuccessfulPolls. Expected %d got %d", expectedNumSuccessfulPolls, numSuccessfulPolls) - } else if confidence := sb.confidence; confidence != expectedConfidence { - t.Fatalf("Wrong confidence. Expected %d got %d", expectedConfidence, confidence) - } else if finalized := sb.Finalized(); finalized != expectedFinalized { - t.Fatalf("Wrong finalized status. 
Expected %v got %v", expectedFinalized, finalized) - } + require := require.New(t) + + require.Equal(expectedNumSuccessfulPolls, sb.numSuccessfulPolls) + require.Equal(expectedConfidence, sb.confidence) + require.Equal(expectedFinalized, sb.Finalized()) } func TestUnarySnowball(t *testing.T) { @@ -45,44 +43,27 @@ func TestUnarySnowball(t *testing.T) { binarySnowball := sbClone.Extend(beta, 0) expected := "SB(Preference = 0, NumSuccessfulPolls[0] = 2, NumSuccessfulPolls[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0)))" - if result := binarySnowball.String(); result != expected { - t.Fatalf("Expected:\n%s\nReturned:\n%s", expected, result) - } + require.Equal(expected, binarySnowball.String()) binarySnowball.RecordUnsuccessfulPoll() for i := 0; i < 3; i++ { - if binarySnowball.Preference() != 0 { - t.Fatalf("Wrong preference") - } else if binarySnowball.Finalized() { - t.Fatalf("Should not have finalized") - } + require.Zero(binarySnowball.Preference()) + require.False(binarySnowball.Finalized()) binarySnowball.RecordSuccessfulPoll(1) binarySnowball.RecordUnsuccessfulPoll() } - if binarySnowball.Preference() != 1 { - t.Fatalf("Wrong preference") - } else if binarySnowball.Finalized() { - t.Fatalf("Should not have finalized") - } + require.Equal(1, binarySnowball.Preference()) + require.False(binarySnowball.Finalized()) binarySnowball.RecordSuccessfulPoll(1) - if binarySnowball.Preference() != 1 { - t.Fatalf("Wrong preference") - } else if binarySnowball.Finalized() { - t.Fatalf("Should not have finalized") - } + require.Equal(1, binarySnowball.Preference()) + require.False(binarySnowball.Finalized()) binarySnowball.RecordSuccessfulPoll(1) - - if binarySnowball.Preference() != 1 { - t.Fatalf("Wrong preference") - } else if !binarySnowball.Finalized() { - t.Fatalf("Should have finalized") - } + require.Equal(1, binarySnowball.Preference()) + require.True(binarySnowball.Finalized()) expected = "SB(NumSuccessfulPolls = 2, SF(Confidence = 1, Finalized = false))" - if str := sb.String(); str != expected { - t.Fatalf("Wrong state. Expected:\n%s\nGot:\n%s", expected, str) - } + require.Equal(expected, sb.String()) } diff --git a/snow/consensus/snowball/unary_snowflake_test.go b/snow/consensus/snowball/unary_snowflake_test.go index 850c3116b81..7694070ef90 100644 --- a/snow/consensus/snowball/unary_snowflake_test.go +++ b/snow/consensus/snowball/unary_snowflake_test.go @@ -10,11 +10,10 @@ import ( ) func UnarySnowflakeStateTest(t *testing.T, sf *unarySnowflake, expectedConfidence int, expectedFinalized bool) { - if confidence := sf.confidence; confidence != expectedConfidence { - t.Fatalf("Wrong confidence. Expected %d got %d", expectedConfidence, confidence) - } else if finalized := sf.Finalized(); finalized != expectedFinalized { - t.Fatalf("Wrong finalized status. 
Expected %v got %v", expectedFinalized, finalized) - } + require := require.New(t) + + require.Equal(expectedConfidence, sf.confidence) + require.Equal(expectedFinalized, sf.Finalized()) } func TestUnarySnowflake(t *testing.T) { @@ -46,17 +45,12 @@ func TestUnarySnowflake(t *testing.T) { binarySnowflake.RecordSuccessfulPoll(1) - if binarySnowflake.Finalized() { - t.Fatalf("Should not have finalized") - } + require.False(binarySnowflake.Finalized()) binarySnowflake.RecordSuccessfulPoll(1) - if binarySnowflake.Preference() != 1 { - t.Fatalf("Wrong preference") - } else if !binarySnowflake.Finalized() { - t.Fatalf("Should have finalized") - } + require.Equal(1, binarySnowflake.Preference()) + require.True(binarySnowflake.Finalized()) sf.RecordSuccessfulPoll() UnarySnowflakeStateTest(t, sf, 2, true) diff --git a/snow/consensus/snowman/consensus_test.go b/snow/consensus/snowman/consensus_test.go index 0affcf87155..d5e77647b4a 100644 --- a/snow/consensus/snowman/consensus_test.go +++ b/snow/consensus/snowman/consensus_test.go @@ -87,6 +87,8 @@ func getTestName(i interface{}) string { // Make sure that initialize sets the state correctly func InitializeTest(t *testing.T, factory Factory) { + require := require.New(t) + sm := factory.New() ctx := snow.DefaultConsensusContextTest() @@ -101,19 +103,16 @@ func InitializeTest(t *testing.T, factory Factory) { MaxItemProcessingTime: 1, } - if err := sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp); err != nil { - t.Fatal(err) - } + require.NoError(sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp)) - if pref := sm.Preference(); pref != GenesisID { - t.Fatalf("Wrong preference returned") - } else if !sm.Finalized() { - t.Fatalf("Wrong should have marked the instance as being finalized") - } + require.Equal(GenesisID, sm.Preference()) + require.True(sm.Finalized()) } // Make sure that the number of processing blocks is tracked correctly func NumProcessingTest(t *testing.T, factory Factory) { + require := require.New(t) + sm := factory.New() ctx := snow.DefaultConsensusContextTest() @@ -127,9 +126,7 @@ func NumProcessingTest(t *testing.T, factory Factory) { MaxOutstandingItems: 1, MaxItemProcessingTime: 1, } - if err := sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp); err != nil { - t.Fatal(err) - } + require.NoError(sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp)) block := &TestBlock{ TestDecidable: choices.TestDecidable{ @@ -140,32 +137,24 @@ func NumProcessingTest(t *testing.T, factory Factory) { HeightV: Genesis.HeightV + 1, } - if numProcessing := sm.NumProcessing(); numProcessing != 0 { - t.Fatalf("expected %d blocks to be processing but returned %d", 0, numProcessing) - } + require.Zero(sm.NumProcessing()) // Adding to the previous preference will update the preference - if err := sm.Add(context.Background(), block); err != nil { - t.Fatal(err) - } + require.NoError(sm.Add(context.Background(), block)) - if numProcessing := sm.NumProcessing(); numProcessing != 1 { - t.Fatalf("expected %d blocks to be processing but returned %d", 1, numProcessing) - } + require.Equal(1, sm.NumProcessing()) votes := bag.Bag[ids.ID]{} votes.Add(block.ID()) - if err := sm.RecordPoll(context.Background(), votes); err != nil { - t.Fatal(err) - } + require.NoError(sm.RecordPoll(context.Background(), votes)) - if numProcessing := sm.NumProcessing(); numProcessing != 0 { - t.Fatalf("expected %d blocks to be processing but returned %d", 0, numProcessing) - } + 
require.Zero(sm.NumProcessing()) } // Make sure that adding a block to the tail updates the preference func AddToTailTest(t *testing.T, factory Factory) { + require := require.New(t) + sm := factory.New() ctx := snow.DefaultConsensusContextTest() @@ -179,9 +168,7 @@ func AddToTailTest(t *testing.T, factory Factory) { MaxOutstandingItems: 1, MaxItemProcessingTime: 1, } - if err := sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp); err != nil { - t.Fatal(err) - } + require.NoError(sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp)) block := &TestBlock{ TestDecidable: choices.TestDecidable{ @@ -193,17 +180,15 @@ func AddToTailTest(t *testing.T, factory Factory) { } // Adding to the previous preference will update the preference - if err := sm.Add(context.Background(), block); err != nil { - t.Fatal(err) - } else if pref := sm.Preference(); pref != block.ID() { - t.Fatalf("Wrong preference. Expected %s, got %s", block.ID(), pref) - } else if !sm.IsPreferred(block) { - t.Fatalf("Should have marked %s as being Preferred", pref) - } + require.NoError(sm.Add(context.Background(), block)) + require.Equal(block.ID(), sm.Preference()) + require.True(sm.IsPreferred(block)) } // Make sure that adding a block not to the tail doesn't change the preference func AddToNonTailTest(t *testing.T, factory Factory) { + require := require.New(t) + sm := factory.New() ctx := snow.DefaultConsensusContextTest() @@ -217,9 +202,7 @@ func AddToNonTailTest(t *testing.T, factory Factory) { MaxOutstandingItems: 1, MaxItemProcessingTime: 1, } - if err := sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp); err != nil { - t.Fatal(err) - } + require.NoError(sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp)) firstBlock := &TestBlock{ TestDecidable: choices.TestDecidable{ @@ -239,24 +222,20 @@ func AddToNonTailTest(t *testing.T, factory Factory) { } // Adding to the previous preference will update the preference - if err := sm.Add(context.Background(), firstBlock); err != nil { - t.Fatal(err) - } else if pref := sm.Preference(); pref != firstBlock.IDV { - t.Fatalf("Wrong preference. Expected %s, got %s", firstBlock.IDV, pref) - } + require.NoError(sm.Add(context.Background(), firstBlock)) + require.Equal(firstBlock.IDV, sm.Preference()) // Adding to something other than the previous preference won't update the // preference - if err := sm.Add(context.Background(), secondBlock); err != nil { - t.Fatal(err) - } else if pref := sm.Preference(); pref != firstBlock.IDV { - t.Fatalf("Wrong preference. 
Expected %s, got %s", firstBlock.IDV, pref) - } + require.NoError(sm.Add(context.Background(), secondBlock)) + require.Equal(firstBlock.IDV, sm.Preference()) } // Make sure that adding a block that is detached from the rest of the tree // rejects the block func AddToUnknownTest(t *testing.T, factory Factory) { + require := require.New(t) + sm := factory.New() ctx := snow.DefaultConsensusContextTest() @@ -270,9 +249,7 @@ func AddToUnknownTest(t *testing.T, factory Factory) { MaxOutstandingItems: 1, MaxItemProcessingTime: 1, } - if err := sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp); err != nil { - t.Fatal(err) - } + require.NoError(sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp)) parent := &TestBlock{TestDecidable: choices.TestDecidable{ IDV: ids.Empty.Prefix(1), @@ -290,16 +267,14 @@ func AddToUnknownTest(t *testing.T, factory Factory) { // Adding a block with an unknown parent means the parent must have already // been rejected. Therefore the block should be immediately rejected - if err := sm.Add(context.Background(), block); err != nil { - t.Fatal(err) - } else if pref := sm.Preference(); pref != GenesisID { - t.Fatalf("Wrong preference. Expected %s, got %s", GenesisID, pref) - } else if status := block.Status(); status != choices.Rejected { - t.Fatalf("Should have rejected the block") - } + require.NoError(sm.Add(context.Background(), block)) + require.Equal(GenesisID, sm.Preference()) + require.Equal(choices.Rejected, block.Status()) } func StatusOrProcessingPreviouslyAcceptedTest(t *testing.T, factory Factory) { + require := require.New(t) + sm := factory.New() ctx := snow.DefaultConsensusContextTest() @@ -313,25 +288,17 @@ func StatusOrProcessingPreviouslyAcceptedTest(t *testing.T, factory Factory) { MaxOutstandingItems: 1, MaxItemProcessingTime: 1, } - if err := sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp); err != nil { - t.Fatal(err) - } + require.NoError(sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp)) - if Genesis.Status() != choices.Accepted { - t.Fatalf("Should have marked an accepted block as having been accepted") - } - if sm.Processing(Genesis.ID()) { - t.Fatalf("Shouldn't have marked an accepted block as having been processing") - } - if !sm.Decided(Genesis) { - t.Fatalf("Should have marked an accepted block as having been decided") - } - if !sm.IsPreferred(Genesis) { - t.Fatalf("Should have marked an accepted block as being preferred") - } + require.Equal(choices.Accepted, Genesis.Status()) + require.False(sm.Processing(Genesis.ID())) + require.True(sm.Decided(Genesis)) + require.True(sm.IsPreferred(Genesis)) } func StatusOrProcessingPreviouslyRejectedTest(t *testing.T, factory Factory) { + require := require.New(t) + sm := factory.New() ctx := snow.DefaultConsensusContextTest() @@ -345,9 +312,7 @@ func StatusOrProcessingPreviouslyRejectedTest(t *testing.T, factory Factory) { MaxOutstandingItems: 1, MaxItemProcessingTime: 1, } - if err := sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp); err != nil { - t.Fatal(err) - } + require.NoError(sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp)) block := &TestBlock{ TestDecidable: choices.TestDecidable{ @@ -358,21 +323,15 @@ func StatusOrProcessingPreviouslyRejectedTest(t *testing.T, factory Factory) { HeightV: Genesis.HeightV + 1, } - if block.Status() == choices.Accepted { - t.Fatalf("Shouldn't have marked a rejected block as having been accepted") - } - if sm.Processing(block.ID()) { - 
t.Fatalf("Shouldn't have marked a rejected block as having been processing") - } - if !sm.Decided(block) { - t.Fatalf("Should have marked a rejected block as having been decided") - } - if sm.IsPreferred(block) { - t.Fatalf("Shouldn't have marked a rejected block as being preferred") - } + require.NotEqual(choices.Accepted, block.Status()) + require.False(sm.Processing(block.ID())) + require.True(sm.Decided(block)) + require.False(sm.IsPreferred(block)) } func StatusOrProcessingUnissuedTest(t *testing.T, factory Factory) { + require := require.New(t) + sm := factory.New() ctx := snow.DefaultConsensusContextTest() @@ -386,9 +345,7 @@ func StatusOrProcessingUnissuedTest(t *testing.T, factory Factory) { MaxOutstandingItems: 1, MaxItemProcessingTime: 1, } - if err := sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp); err != nil { - t.Fatal(err) - } + require.NoError(sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp)) block := &TestBlock{ TestDecidable: choices.TestDecidable{ @@ -399,21 +356,15 @@ func StatusOrProcessingUnissuedTest(t *testing.T, factory Factory) { HeightV: Genesis.HeightV + 1, } - if block.Status() == choices.Accepted { - t.Fatalf("Shouldn't have marked an unissued block as having been accepted") - } - if sm.Processing(block.ID()) { - t.Fatalf("Shouldn't have marked an unissued block as having been processing") - } - if sm.Decided(block) { - t.Fatalf("Should't have marked an unissued block as having been decided") - } - if sm.IsPreferred(block) { - t.Fatalf("Shouldn't have marked an unissued block as being preferred") - } + require.Equal(choices.Processing, block.Status()) + require.False(sm.Processing(block.ID())) + require.False(sm.Decided(block)) + require.False(sm.IsPreferred(block)) } func StatusOrProcessingIssuedTest(t *testing.T, factory Factory) { + require := require.New(t) + sm := factory.New() ctx := snow.DefaultConsensusContextTest() @@ -427,9 +378,7 @@ func StatusOrProcessingIssuedTest(t *testing.T, factory Factory) { MaxOutstandingItems: 1, MaxItemProcessingTime: 1, } - if err := sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp); err != nil { - t.Fatal(err) - } + require.NoError(sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp)) block := &TestBlock{ TestDecidable: choices.TestDecidable{ @@ -440,24 +389,16 @@ func StatusOrProcessingIssuedTest(t *testing.T, factory Factory) { HeightV: Genesis.HeightV + 1, } - if err := sm.Add(context.Background(), block); err != nil { - t.Fatal(err) - } - if block.Status() == choices.Accepted { - t.Fatalf("Shouldn't have marked the block as accepted") - } - if !sm.Processing(block.ID()) { - t.Fatalf("Should have marked the block as processing") - } - if sm.Decided(block) { - t.Fatalf("Shouldn't have marked the block as decided") - } - if !sm.IsPreferred(block) { - t.Fatalf("Should have marked the tail as being preferred") - } + require.NoError(sm.Add(context.Background(), block)) + require.NotEqual(choices.Accepted, block.Status()) + require.True(sm.Processing(block.ID())) + require.False(sm.Decided(block)) + require.True(sm.IsPreferred(block)) } func RecordPollAcceptSingleBlockTest(t *testing.T, factory Factory) { + require := require.New(t) + sm := factory.New() ctx := snow.DefaultConsensusContextTest() @@ -471,9 +412,7 @@ func RecordPollAcceptSingleBlockTest(t *testing.T, factory Factory) { MaxOutstandingItems: 1, MaxItemProcessingTime: 1, } - if err := sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp); err != nil { - 
t.Fatal(err) - } + require.NoError(sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp)) block := &TestBlock{ TestDecidable: choices.TestDecidable{ @@ -484,32 +423,24 @@ func RecordPollAcceptSingleBlockTest(t *testing.T, factory Factory) { HeightV: Genesis.HeightV + 1, } - if err := sm.Add(context.Background(), block); err != nil { - t.Fatal(err) - } + require.NoError(sm.Add(context.Background(), block)) votes := bag.Bag[ids.ID]{} votes.Add(block.ID()) - if err := sm.RecordPoll(context.Background(), votes); err != nil { - t.Fatal(err) - } else if pref := sm.Preference(); pref != block.ID() { - t.Fatalf("Preference returned the wrong block") - } else if sm.Finalized() { - t.Fatalf("Snowman instance finalized too soon") - } else if status := block.Status(); status != choices.Processing { - t.Fatalf("Block's status changed unexpectedly") - } else if err := sm.RecordPoll(context.Background(), votes); err != nil { - t.Fatal(err) - } else if pref := sm.Preference(); pref != block.ID() { - t.Fatalf("Preference returned the wrong block") - } else if !sm.Finalized() { - t.Fatalf("Snowman instance didn't finalize") - } else if status := block.Status(); status != choices.Accepted { - t.Fatalf("Block's status should have been set to accepted") - } + require.NoError(sm.RecordPoll(context.Background(), votes)) + require.Equal(block.ID(), sm.Preference()) + require.False(sm.Finalized()) + require.Equal(choices.Processing, block.Status()) + + require.NoError(sm.RecordPoll(context.Background(), votes)) + require.Equal(block.ID(), sm.Preference()) + require.True(sm.Finalized()) + require.Equal(choices.Accepted, block.Status()) } func RecordPollAcceptAndRejectTest(t *testing.T, factory Factory) { + require := require.New(t) + sm := factory.New() ctx := snow.DefaultConsensusContextTest() @@ -523,9 +454,7 @@ func RecordPollAcceptAndRejectTest(t *testing.T, factory Factory) { MaxOutstandingItems: 1, MaxItemProcessingTime: 1, } - if err := sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp); err != nil { - t.Fatal(err) - } + require.NoError(sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp)) firstBlock := &TestBlock{ TestDecidable: choices.TestDecidable{ @@ -544,36 +473,23 @@ func RecordPollAcceptAndRejectTest(t *testing.T, factory Factory) { HeightV: Genesis.HeightV + 1, } - if err := sm.Add(context.Background(), firstBlock); err != nil { - t.Fatal(err) - } else if err := sm.Add(context.Background(), secondBlock); err != nil { - t.Fatal(err) - } + require.NoError(sm.Add(context.Background(), firstBlock)) + require.NoError(sm.Add(context.Background(), secondBlock)) votes := bag.Bag[ids.ID]{} votes.Add(firstBlock.ID()) - if err := sm.RecordPoll(context.Background(), votes); err != nil { - t.Fatal(err) - } else if pref := sm.Preference(); pref != firstBlock.ID() { - t.Fatalf("Preference returned the wrong block") - } else if sm.Finalized() { - t.Fatalf("Snowman instance finalized too soon") - } else if status := firstBlock.Status(); status != choices.Processing { - t.Fatalf("Block's status changed unexpectedly") - } else if status := secondBlock.Status(); status != choices.Processing { - t.Fatalf("Block's status changed unexpectedly") - } else if err := sm.RecordPoll(context.Background(), votes); err != nil { - t.Fatal(err) - } else if pref := sm.Preference(); pref != firstBlock.ID() { - t.Fatalf("Preference returned the wrong block") - } else if !sm.Finalized() { - t.Fatalf("Snowman instance didn't finalize") - } else if status := firstBlock.Status(); 
status != choices.Accepted { - t.Fatalf("Block's status should have been set to accepted") - } else if status := secondBlock.Status(); status != choices.Rejected { - t.Fatalf("Block's status should have been set to rejected") - } + require.NoError(sm.RecordPoll(context.Background(), votes)) + require.Equal(firstBlock.ID(), sm.Preference()) + require.False(sm.Finalized()) + require.Equal(choices.Processing, firstBlock.Status()) + require.Equal(choices.Processing, secondBlock.Status()) + + require.NoError(sm.RecordPoll(context.Background(), votes)) + require.Equal(firstBlock.ID(), sm.Preference()) + require.True(sm.Finalized()) + require.Equal(choices.Accepted, firstBlock.Status()) + require.Equal(choices.Rejected, secondBlock.Status()) } func RecordPollSplitVoteNoChangeTest(t *testing.T, factory Factory) { @@ -640,6 +556,8 @@ func RecordPollSplitVoteNoChangeTest(t *testing.T, factory Factory) { } func RecordPollWhenFinalizedTest(t *testing.T, factory Factory) { + require := require.New(t) + sm := factory.New() ctx := snow.DefaultConsensusContextTest() @@ -653,22 +571,18 @@ func RecordPollWhenFinalizedTest(t *testing.T, factory Factory) { MaxOutstandingItems: 1, MaxItemProcessingTime: 1, } - if err := sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp); err != nil { - t.Fatal(err) - } + require.NoError(sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp)) votes := bag.Bag[ids.ID]{} votes.Add(GenesisID) - if err := sm.RecordPoll(context.Background(), votes); err != nil { - t.Fatal(err) - } else if !sm.Finalized() { - t.Fatalf("Consensus should still be finalized") - } else if pref := sm.Preference(); GenesisID != pref { - t.Fatalf("Wrong preference listed") - } + require.NoError(sm.RecordPoll(context.Background(), votes)) + require.True(sm.Finalized()) + require.Equal(GenesisID, sm.Preference()) } func RecordPollRejectTransitivelyTest(t *testing.T, factory Factory) { + require := require.New(t) + sm := factory.New() ctx := snow.DefaultConsensusContextTest() @@ -682,9 +596,7 @@ func RecordPollRejectTransitivelyTest(t *testing.T, factory Factory) { MaxOutstandingItems: 1, MaxItemProcessingTime: 1, } - if err := sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp); err != nil { - t.Fatal(err) - } + require.NoError(sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp)) block0 := &TestBlock{ TestDecidable: choices.TestDecidable{ @@ -711,13 +623,9 @@ func RecordPollRejectTransitivelyTest(t *testing.T, factory Factory) { HeightV: block1.HeightV + 1, } - if err := sm.Add(context.Background(), block0); err != nil { - t.Fatal(err) - } else if err := sm.Add(context.Background(), block1); err != nil { - t.Fatal(err) - } else if err := sm.Add(context.Background(), block2); err != nil { - t.Fatal(err) - } + require.NoError(sm.Add(context.Background(), block0)) + require.NoError(sm.Add(context.Background(), block1)) + require.NoError(sm.Add(context.Background(), block2)) // Current graph structure: // G @@ -729,28 +637,22 @@ func RecordPollRejectTransitivelyTest(t *testing.T, factory Factory) { votes := bag.Bag[ids.ID]{} votes.Add(block0.ID()) - if err := sm.RecordPoll(context.Background(), votes); err != nil { - t.Fatal(err) - } + require.NoError(sm.RecordPoll(context.Background(), votes)) // Current graph structure: // 0 // Tail = 0 - if !sm.Finalized() { - t.Fatalf("Finalized too late") - } else if pref := sm.Preference(); block0.ID() != pref { - t.Fatalf("Wrong preference listed") - } else if status := block0.Status(); status != 
choices.Accepted { - t.Fatalf("Wrong status returned") - } else if status := block1.Status(); status != choices.Rejected { - t.Fatalf("Wrong status returned") - } else if status := block2.Status(); status != choices.Rejected { - t.Fatalf("Wrong status returned") - } + require.True(sm.Finalized()) + require.Equal(block0.ID(), sm.Preference()) + require.Equal(choices.Accepted, block0.Status()) + require.Equal(choices.Rejected, block1.Status()) + require.Equal(choices.Rejected, block2.Status()) } func RecordPollTransitivelyResetConfidenceTest(t *testing.T, factory Factory) { + require := require.New(t) + sm := factory.New() ctx := snow.DefaultConsensusContextTest() @@ -764,9 +666,7 @@ func RecordPollTransitivelyResetConfidenceTest(t *testing.T, factory Factory) { MaxOutstandingItems: 1, MaxItemProcessingTime: 1, } - if err := sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp); err != nil { - t.Fatal(err) - } + require.NoError(sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp)) block0 := &TestBlock{ TestDecidable: choices.TestDecidable{ @@ -801,15 +701,10 @@ func RecordPollTransitivelyResetConfidenceTest(t *testing.T, factory Factory) { HeightV: block1.HeightV + 1, } - if err := sm.Add(context.Background(), block0); err != nil { - t.Fatal(err) - } else if err := sm.Add(context.Background(), block1); err != nil { - t.Fatal(err) - } else if err := sm.Add(context.Background(), block2); err != nil { - t.Fatal(err) - } else if err := sm.Add(context.Background(), block3); err != nil { - t.Fatal(err) - } + require.NoError(sm.Add(context.Background(), block0)) + require.NoError(sm.Add(context.Background(), block1)) + require.NoError(sm.Add(context.Background(), block2)) + require.NoError(sm.Add(context.Background(), block3)) // Current graph structure: // G @@ -820,55 +715,37 @@ func RecordPollTransitivelyResetConfidenceTest(t *testing.T, factory Factory) { votesFor2 := bag.Bag[ids.ID]{} votesFor2.Add(block2.ID()) - if err := sm.RecordPoll(context.Background(), votesFor2); err != nil { - t.Fatal(err) - } else if sm.Finalized() { - t.Fatalf("Finalized too early") - } else if pref := sm.Preference(); block2.ID() != pref { - t.Fatalf("Wrong preference listed") - } + require.NoError(sm.RecordPoll(context.Background(), votesFor2)) + require.False(sm.Finalized()) + require.Equal(block2.ID(), sm.Preference()) emptyVotes := bag.Bag[ids.ID]{} - if err := sm.RecordPoll(context.Background(), emptyVotes); err != nil { - t.Fatal(err) - } else if sm.Finalized() { - t.Fatalf("Finalized too early") - } else if pref := sm.Preference(); block2.ID() != pref { - t.Fatalf("Wrong preference listed") - } else if err := sm.RecordPoll(context.Background(), votesFor2); err != nil { - t.Fatal(err) - } else if sm.Finalized() { - t.Fatalf("Finalized too early") - } else if pref := sm.Preference(); block2.ID() != pref { - t.Fatalf("Wrong preference listed") - } + require.NoError(sm.RecordPoll(context.Background(), emptyVotes)) + require.False(sm.Finalized()) + require.Equal(block2.ID(), sm.Preference()) + + require.NoError(sm.RecordPoll(context.Background(), votesFor2)) + require.False(sm.Finalized()) + require.Equal(block2.ID(), sm.Preference()) votesFor3 := bag.Bag[ids.ID]{} votesFor3.Add(block3.ID()) - if err := sm.RecordPoll(context.Background(), votesFor3); err != nil { - t.Fatal(err) - } else if sm.Finalized() { - t.Fatalf("Finalized too early") - } else if pref := sm.Preference(); block2.ID() != pref { - t.Fatalf("Wrong preference listed") - } else if err := 
sm.RecordPoll(context.Background(), votesFor3); err != nil { - t.Fatal(err) - } else if !sm.Finalized() { - t.Fatalf("Finalized too late") - } else if pref := sm.Preference(); block3.ID() != pref { - t.Fatalf("Wrong preference listed") - } else if status := block0.Status(); status != choices.Rejected { - t.Fatalf("Wrong status returned") - } else if status := block1.Status(); status != choices.Accepted { - t.Fatalf("Wrong status returned") - } else if status := block2.Status(); status != choices.Rejected { - t.Fatalf("Wrong status returned") - } else if status := block3.Status(); status != choices.Accepted { - t.Fatalf("Wrong status returned") - } + require.NoError(sm.RecordPoll(context.Background(), votesFor3)) + require.False(sm.Finalized()) + require.Equal(block2.ID(), sm.Preference()) + + require.NoError(sm.RecordPoll(context.Background(), votesFor3)) + require.True(sm.Finalized()) + require.Equal(block3.ID(), sm.Preference()) + require.Equal(choices.Rejected, block0.Status()) + require.Equal(choices.Accepted, block1.Status()) + require.Equal(choices.Rejected, block2.Status()) + require.Equal(choices.Accepted, block3.Status()) } func RecordPollInvalidVoteTest(t *testing.T, factory Factory) { + require := require.New(t) + sm := factory.New() ctx := snow.DefaultConsensusContextTest() @@ -882,9 +759,7 @@ func RecordPollInvalidVoteTest(t *testing.T, factory Factory) { MaxOutstandingItems: 1, MaxItemProcessingTime: 1, } - if err := sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp); err != nil { - t.Fatal(err) - } + require.NoError(sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp)) block := &TestBlock{ TestDecidable: choices.TestDecidable{ @@ -896,30 +771,23 @@ func RecordPollInvalidVoteTest(t *testing.T, factory Factory) { } unknownBlockID := ids.Empty.Prefix(2) - if err := sm.Add(context.Background(), block); err != nil { - t.Fatal(err) - } + require.NoError(sm.Add(context.Background(), block)) validVotes := bag.Bag[ids.ID]{} validVotes.Add(block.ID()) - if err := sm.RecordPoll(context.Background(), validVotes); err != nil { - t.Fatal(err) - } + require.NoError(sm.RecordPoll(context.Background(), validVotes)) invalidVotes := bag.Bag[ids.ID]{} invalidVotes.Add(unknownBlockID) - if err := sm.RecordPoll(context.Background(), invalidVotes); err != nil { - t.Fatal(err) - } else if err := sm.RecordPoll(context.Background(), validVotes); err != nil { - t.Fatal(err) - } else if sm.Finalized() { - t.Fatalf("Finalized too early") - } else if pref := sm.Preference(); block.ID() != pref { - t.Fatalf("Wrong preference listed") - } + require.NoError(sm.RecordPoll(context.Background(), invalidVotes)) + require.NoError(sm.RecordPoll(context.Background(), validVotes)) + require.False(sm.Finalized()) + require.Equal(block.ID(), sm.Preference()) } func RecordPollTransitiveVotingTest(t *testing.T, factory Factory) { + require := require.New(t) + sm := factory.New() ctx := snow.DefaultConsensusContextTest() @@ -933,9 +801,7 @@ func RecordPollTransitiveVotingTest(t *testing.T, factory Factory) { MaxOutstandingItems: 1, MaxItemProcessingTime: 1, } - if err := sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp); err != nil { - t.Fatal(err) - } + require.NoError(sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp)) block0 := &TestBlock{ TestDecidable: choices.TestDecidable{ @@ -978,17 +844,11 @@ func RecordPollTransitiveVotingTest(t *testing.T, factory Factory) { HeightV: block3.HeightV + 1, } - if err := sm.Add(context.Background(), 
block0); err != nil { - t.Fatal(err) - } else if err := sm.Add(context.Background(), block1); err != nil { - t.Fatal(err) - } else if err := sm.Add(context.Background(), block2); err != nil { - t.Fatal(err) - } else if err := sm.Add(context.Background(), block3); err != nil { - t.Fatal(err) - } else if err := sm.Add(context.Background(), block4); err != nil { - t.Fatal(err) - } + require.NoError(sm.Add(context.Background(), block0)) + require.NoError(sm.Add(context.Background(), block1)) + require.NoError(sm.Add(context.Background(), block2)) + require.NoError(sm.Add(context.Background(), block3)) + require.NoError(sm.Add(context.Background(), block4)) // Current graph structure: // G @@ -1006,9 +866,7 @@ func RecordPollTransitiveVotingTest(t *testing.T, factory Factory) { block2.ID(), block4.ID(), ) - if err := sm.RecordPoll(context.Background(), votes0_2_4); err != nil { - t.Fatal(err) - } + require.NoError(sm.RecordPoll(context.Background(), votes0_2_4)) // Current graph structure: // 0 @@ -1018,51 +876,29 @@ func RecordPollTransitiveVotingTest(t *testing.T, factory Factory) { // 2 4 // Tail = 2 - pref := sm.Preference() - switch { - case block2.ID() != pref: - t.Fatalf("Wrong preference listed") - case sm.Finalized(): - t.Fatalf("Finalized too early") - case block0.Status() != choices.Accepted: - t.Fatalf("Should have accepted") - case block1.Status() != choices.Processing: - t.Fatalf("Should have accepted") - case block2.Status() != choices.Processing: - t.Fatalf("Should have accepted") - case block3.Status() != choices.Processing: - t.Fatalf("Should have rejected") - case block4.Status() != choices.Processing: - t.Fatalf("Should have rejected") - } + require.False(sm.Finalized()) + require.Equal(block2.ID(), sm.Preference()) + require.Equal(choices.Accepted, block0.Status()) + require.Equal(choices.Processing, block1.Status()) + require.Equal(choices.Processing, block2.Status()) + require.Equal(choices.Processing, block3.Status()) + require.Equal(choices.Processing, block4.Status()) dep2_2_2 := bag.Bag[ids.ID]{} dep2_2_2.AddCount(block2.ID(), 3) - if err := sm.RecordPoll(context.Background(), dep2_2_2); err != nil { - t.Fatal(err) - } + require.NoError(sm.RecordPoll(context.Background(), dep2_2_2)) // Current graph structure: // 2 // Tail = 2 - pref = sm.Preference() - switch { - case block2.ID() != pref: - t.Fatalf("Wrong preference listed") - case !sm.Finalized(): - t.Fatalf("Finalized too late") - case block0.Status() != choices.Accepted: - t.Fatalf("Should have accepted") - case block1.Status() != choices.Accepted: - t.Fatalf("Should have accepted") - case block2.Status() != choices.Accepted: - t.Fatalf("Should have accepted") - case block3.Status() != choices.Rejected: - t.Fatalf("Should have rejected") - case block4.Status() != choices.Rejected: - t.Fatalf("Should have rejected") - } + require.True(sm.Finalized()) + require.Equal(block2.ID(), sm.Preference()) + require.Equal(choices.Accepted, block0.Status()) + require.Equal(choices.Accepted, block1.Status()) + require.Equal(choices.Accepted, block2.Status()) + require.Equal(choices.Rejected, block3.Status()) + require.Equal(choices.Rejected, block4.Status()) } func RecordPollDivergedVotingTest(t *testing.T, factory Factory) { @@ -1274,6 +1110,8 @@ func RecordPollDivergedVotingWithNoConflictingBitTest(t *testing.T, factory Fact } func RecordPollChangePreferredChainTest(t *testing.T, factory Factory) { + require := require.New(t) + sm := factory.New() ctx := snow.DefaultConsensusContextTest() @@ -1287,9 +1125,7 @@ func 
RecordPollChangePreferredChainTest(t *testing.T, factory Factory) { MaxOutstandingItems: 1, MaxItemProcessingTime: 1, } - if err := sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp); err != nil { - t.Fatal(err) - } + require.NoError(sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp)) a1Block := &TestBlock{ TestDecidable: choices.TestDecidable{ @@ -1324,89 +1160,45 @@ func RecordPollChangePreferredChainTest(t *testing.T, factory Factory) { HeightV: b1Block.HeightV + 1, } - if err := sm.Add(context.Background(), a1Block); err != nil { - t.Fatal(err) - } - if err := sm.Add(context.Background(), a2Block); err != nil { - t.Fatal(err) - } - if err := sm.Add(context.Background(), b1Block); err != nil { - t.Fatal(err) - } - if err := sm.Add(context.Background(), b2Block); err != nil { - t.Fatal(err) - } + require.NoError(sm.Add(context.Background(), a1Block)) + require.NoError(sm.Add(context.Background(), a2Block)) + require.NoError(sm.Add(context.Background(), b1Block)) + require.NoError(sm.Add(context.Background(), b2Block)) - if sm.Preference() != a2Block.ID() { - t.Fatal("Wrong preference reported") - } + require.Equal(a2Block.ID(), sm.Preference()) - if !sm.IsPreferred(a1Block) { - t.Fatalf("Should have reported a1 as being preferred") - } - if !sm.IsPreferred(a2Block) { - t.Fatalf("Should have reported a2 as being preferred") - } - if sm.IsPreferred(b1Block) { - t.Fatalf("Shouldn't have reported b1 as being preferred") - } - if sm.IsPreferred(b2Block) { - t.Fatalf("Shouldn't have reported b2 as being preferred") - } + require.True(sm.IsPreferred(a1Block)) + require.True(sm.IsPreferred(a2Block)) + require.False(sm.IsPreferred(b1Block)) + require.False(sm.IsPreferred(b2Block)) b2Votes := bag.Bag[ids.ID]{} b2Votes.Add(b2Block.ID()) - if err := sm.RecordPoll(context.Background(), b2Votes); err != nil { - t.Fatal(err) - } + require.NoError(sm.RecordPoll(context.Background(), b2Votes)) - if sm.Preference() != b2Block.ID() { - t.Fatal("Wrong preference reported") - } - - if sm.IsPreferred(a1Block) { - t.Fatalf("Shouldn't have reported a1 as being preferred") - } - if sm.IsPreferred(a2Block) { - t.Fatalf("Shouldn't have reported a2 as being preferred") - } - if !sm.IsPreferred(b1Block) { - t.Fatalf("Should have reported b1 as being preferred") - } - if !sm.IsPreferred(b2Block) { - t.Fatalf("Should have reported b2 as being preferred") - } + require.Equal(b2Block.ID(), sm.Preference()) + require.False(sm.IsPreferred(a1Block)) + require.False(sm.IsPreferred(a2Block)) + require.True(sm.IsPreferred(b1Block)) + require.True(sm.IsPreferred(b2Block)) a1Votes := bag.Bag[ids.ID]{} a1Votes.Add(a1Block.ID()) - if err := sm.RecordPoll(context.Background(), a1Votes); err != nil { - t.Fatal(err) - } - if err := sm.RecordPoll(context.Background(), a1Votes); err != nil { - t.Fatal(err) - } - - if sm.Preference() != a2Block.ID() { - t.Fatal("Wrong preference reported") - } + require.NoError(sm.RecordPoll(context.Background(), a1Votes)) + require.NoError(sm.RecordPoll(context.Background(), a1Votes)) - if !sm.IsPreferred(a1Block) { - t.Fatalf("Should have reported a1 as being preferred") - } - if !sm.IsPreferred(a2Block) { - t.Fatalf("Should have reported a2 as being preferred") - } - if sm.IsPreferred(b1Block) { - t.Fatalf("Shouldn't have reported b1 as being preferred") - } - if sm.IsPreferred(b2Block) { - t.Fatalf("Shouldn't have reported b2 as being preferred") - } + require.Equal(a2Block.ID(), sm.Preference()) + require.True(sm.IsPreferred(a1Block)) + 
require.True(sm.IsPreferred(a2Block)) + require.False(sm.IsPreferred(b1Block)) + require.False(sm.IsPreferred(b2Block)) } func MetricsProcessingErrorTest(t *testing.T, factory Factory) { + require := require.New(t) + sm := factory.New() ctx := snow.DefaultConsensusContextTest() @@ -1426,16 +1218,15 @@ func MetricsProcessingErrorTest(t *testing.T, factory Factory) { Name: "blks_processing", }) - if err := ctx.Registerer.Register(numProcessing); err != nil { - t.Fatal(err) - } + require.NoError(ctx.Registerer.Register(numProcessing)) - if err := sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp); err == nil { - t.Fatalf("should have errored during initialization due to a duplicate metric") - } + err := sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp) + require.Error(err) //nolint:forbidigo // error is not exported https://github.com/prometheus/client_golang/blob/main/prometheus/registry.go#L315 } func MetricsAcceptedErrorTest(t *testing.T, factory Factory) { + require := require.New(t) + sm := factory.New() ctx := snow.DefaultConsensusContextTest() @@ -1455,16 +1246,15 @@ func MetricsAcceptedErrorTest(t *testing.T, factory Factory) { Name: "blks_accepted_count", }) - if err := ctx.Registerer.Register(numAccepted); err != nil { - t.Fatal(err) - } + require.NoError(ctx.Registerer.Register(numAccepted)) - if err := sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp); err == nil { - t.Fatalf("should have errored during initialization due to a duplicate metric") - } + err := sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp) + require.Error(err) //nolint:forbidigo // error is not exported https://github.com/prometheus/client_golang/blob/main/prometheus/registry.go#L315 } func MetricsRejectedErrorTest(t *testing.T, factory Factory) { + require := require.New(t) + sm := factory.New() ctx := snow.DefaultConsensusContextTest() @@ -1484,16 +1274,15 @@ func MetricsRejectedErrorTest(t *testing.T, factory Factory) { Name: "blks_rejected_count", }) - if err := ctx.Registerer.Register(numRejected); err != nil { - t.Fatal(err) - } + require.NoError(ctx.Registerer.Register(numRejected)) - if err := sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp); err == nil { - t.Fatalf("should have errored during initialization due to a duplicate metric") - } + err := sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp) + require.Error(err) //nolint:forbidigo // error is not exported https://github.com/prometheus/client_golang/blob/main/prometheus/registry.go#L315 } func ErrorOnInitialRejectionTest(t *testing.T, factory Factory) { + require := require.New(t) + sm := factory.New() ctx := snow.DefaultConsensusContextTest() @@ -1508,9 +1297,7 @@ func ErrorOnInitialRejectionTest(t *testing.T, factory Factory) { MaxItemProcessingTime: 1, } - if err := sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp); err != nil { - t.Fatal(err) - } + require.NoError(sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp)) rejectedBlock := &TestBlock{TestDecidable: choices.TestDecidable{ IDV: ids.Empty.Prefix(1), @@ -1527,12 +1314,13 @@ func ErrorOnInitialRejectionTest(t *testing.T, factory Factory) { HeightV: rejectedBlock.HeightV + 1, } - if err := sm.Add(context.Background(), block); err == nil { - t.Fatalf("Should have errored on rejecting the rejectable block") - } + err := sm.Add(context.Background(), block) + require.ErrorIs(err, errTest) } func ErrorOnAcceptTest(t *testing.T, 
factory Factory) { + require := require.New(t) + sm := factory.New() ctx := snow.DefaultConsensusContextTest() @@ -1547,9 +1335,7 @@ func ErrorOnAcceptTest(t *testing.T, factory Factory) { MaxItemProcessingTime: 1, } - if err := sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp); err != nil { - t.Fatal(err) - } + require.NoError(sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp)) block := &TestBlock{ TestDecidable: choices.TestDecidable{ @@ -1561,18 +1347,17 @@ func ErrorOnAcceptTest(t *testing.T, factory Factory) { HeightV: Genesis.HeightV + 1, } - if err := sm.Add(context.Background(), block); err != nil { - t.Fatal(err) - } + require.NoError(sm.Add(context.Background(), block)) votes := bag.Bag[ids.ID]{} votes.Add(block.ID()) - if err := sm.RecordPoll(context.Background(), votes); err == nil { - t.Fatalf("Should have errored on accepted the block") - } + err := sm.RecordPoll(context.Background(), votes) + require.ErrorIs(err, errTest) } func ErrorOnRejectSiblingTest(t *testing.T, factory Factory) { + require := require.New(t) + sm := factory.New() ctx := snow.DefaultConsensusContextTest() @@ -1587,9 +1372,7 @@ func ErrorOnRejectSiblingTest(t *testing.T, factory Factory) { MaxItemProcessingTime: 1, } - if err := sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp); err != nil { - t.Fatal(err) - } + require.NoError(sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp)) block0 := &TestBlock{ TestDecidable: choices.TestDecidable{ @@ -1609,20 +1392,18 @@ func ErrorOnRejectSiblingTest(t *testing.T, factory Factory) { HeightV: Genesis.HeightV + 1, } - if err := sm.Add(context.Background(), block0); err != nil { - t.Fatal(err) - } else if err := sm.Add(context.Background(), block1); err != nil { - t.Fatal(err) - } + require.NoError(sm.Add(context.Background(), block0)) + require.NoError(sm.Add(context.Background(), block1)) votes := bag.Bag[ids.ID]{} votes.Add(block0.ID()) - if err := sm.RecordPoll(context.Background(), votes); err == nil { - t.Fatalf("Should have errored on rejecting the block's sibling") - } + err := sm.RecordPoll(context.Background(), votes) + require.ErrorIs(err, errTest) } func ErrorOnTransitiveRejectionTest(t *testing.T, factory Factory) { + require := require.New(t) + sm := factory.New() ctx := snow.DefaultConsensusContextTest() @@ -1637,9 +1418,7 @@ func ErrorOnTransitiveRejectionTest(t *testing.T, factory Factory) { MaxItemProcessingTime: 1, } - if err := sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp); err != nil { - t.Fatal(err) - } + require.NoError(sm.Initialize(ctx, params, GenesisID, GenesisHeight, GenesisTimestamp)) block0 := &TestBlock{ TestDecidable: choices.TestDecidable{ @@ -1667,22 +1446,19 @@ func ErrorOnTransitiveRejectionTest(t *testing.T, factory Factory) { HeightV: block1.HeightV + 1, } - if err := sm.Add(context.Background(), block0); err != nil { - t.Fatal(err) - } else if err := sm.Add(context.Background(), block1); err != nil { - t.Fatal(err) - } else if err := sm.Add(context.Background(), block2); err != nil { - t.Fatal(err) - } + require.NoError(sm.Add(context.Background(), block0)) + require.NoError(sm.Add(context.Background(), block1)) + require.NoError(sm.Add(context.Background(), block2)) votes := bag.Bag[ids.ID]{} votes.Add(block0.ID()) - if err := sm.RecordPoll(context.Background(), votes); err == nil { - t.Fatalf("Should have errored on transitively rejecting the block") - } + err := sm.RecordPoll(context.Background(), votes) + 
require.ErrorIs(err, errTest) } func RandomizedConsistencyTest(t *testing.T, factory Factory) { + require := require.New(t) + numColors := 50 numNodes := 100 params := snowball.Parameters{ @@ -1703,20 +1479,14 @@ func RandomizedConsistencyTest(t *testing.T, factory Factory) { n.Initialize(params, numColors) for i := 0; i < numNodes; i++ { - if err := n.AddNode(factory.New()); err != nil { - t.Fatal(err) - } + require.NoError(n.AddNode(factory.New())) } for !n.Finalized() { - if err := n.Round(); err != nil { - t.Fatal(err) - } + require.NoError(n.Round()) } - if !n.Agreement() { - t.Fatalf("Network agreed on inconsistent values") - } + require.True(n.Agreement()) } func ErrorOnAddDecidedBlock(t *testing.T, factory Factory) { @@ -1788,10 +1558,10 @@ func ErrorOnAddDuplicateBlockID(t *testing.T, factory Factory) { } func gatherCounterGauge(t *testing.T, reg *prometheus.Registry) map[string]float64 { + require := require.New(t) + ms, err := reg.Gather() - if err != nil { - t.Fatal(err) - } + require.NoError(err) mss := make(map[string]float64) for _, mf := range ms { name := mf.GetName() diff --git a/snow/consensus/snowman/poll/early_term_no_traversal_test.go b/snow/consensus/snowman/poll/early_term_no_traversal_test.go index 63cca569543..c1737b00ce9 100644 --- a/snow/consensus/snowman/poll/early_term_no_traversal_test.go +++ b/snow/consensus/snowman/poll/early_term_no_traversal_test.go @@ -6,11 +6,15 @@ package poll import ( "testing" + "github.com/stretchr/testify/require" + "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/bag" ) func TestEarlyTermNoTraversalResults(t *testing.T) { + require := require.New(t) + alpha := 1 vtxID := ids.ID{1} @@ -24,21 +28,18 @@ func TestEarlyTermNoTraversalResults(t *testing.T) { poll := factory.New(vdrs) poll.Vote(vdr1, vtxID) - if !poll.Finished() { - t.Fatalf("Poll did not terminate after receiving k votes") - } + require.True(poll.Finished()) result := poll.Result() - if list := result.List(); len(list) != 1 { - t.Fatalf("Wrong number of vertices returned") - } else if retVtxID := list[0]; retVtxID != vtxID { - t.Fatalf("Wrong vertex returned") - } else if result.Count(vtxID) != 1 { - t.Fatalf("Wrong number of votes returned") - } + list := result.List() + require.Len(list, 1) + require.Equal(vtxID, list[0]) + require.Equal(1, result.Count(vtxID)) } func TestEarlyTermNoTraversalString(t *testing.T) { + require := require.New(t) + alpha := 2 vtxID := ids.ID{1} @@ -61,12 +62,11 @@ func TestEarlyTermNoTraversalString(t *testing.T) { NodeID-BaMPFdqMUQ46BV8iRcwbVfsam55kMqcp: 1 received Bag: (Size = 1) SYXsAycDPUu4z2ZksJD5fh5nTDcH3vCFHnpcVye5XuJ2jArg: 1` - if result := poll.String(); expected != result { - t.Fatalf("Poll should have returned %s but returned %s", expected, result) - } + require.Equal(expected, poll.String()) } func TestEarlyTermNoTraversalDropsDuplicatedVotes(t *testing.T) { + require := require.New(t) alpha := 2 vtxID := ids.ID{1} @@ -84,20 +84,18 @@ func TestEarlyTermNoTraversalDropsDuplicatedVotes(t *testing.T) { poll := factory.New(vdrs) poll.Vote(vdr1, vtxID) - if poll.Finished() { - t.Fatalf("Poll finished after less than alpha votes") - } + require.False(poll.Finished()) + poll.Vote(vdr1, vtxID) - if poll.Finished() { - t.Fatalf("Poll finished after getting a duplicated vote") - } + require.False(poll.Finished()) + poll.Vote(vdr2, vtxID) - if !poll.Finished() { - t.Fatalf("Poll did not terminate after receiving k votes") - } + require.True(poll.Finished()) } func TestEarlyTermNoTraversalTerminatesEarly(t 
*testing.T) { + require := require.New(t) + alpha := 3 vtxID := ids.ID{1} @@ -121,20 +119,18 @@ func TestEarlyTermNoTraversalTerminatesEarly(t *testing.T) { poll := factory.New(vdrs) poll.Vote(vdr1, vtxID) - if poll.Finished() { - t.Fatalf("Poll finished after less than alpha votes") - } + require.False(poll.Finished()) + poll.Vote(vdr2, vtxID) - if poll.Finished() { - t.Fatalf("Poll finished after less than alpha votes") - } + require.False(poll.Finished()) + poll.Vote(vdr3, vtxID) - if !poll.Finished() { - t.Fatalf("Poll did not terminate early after receiving alpha votes for one vertex and none for other vertices") - } + require.True(poll.Finished()) } func TestEarlyTermNoTraversalForSharedAncestor(t *testing.T) { + require := require.New(t) + alpha := 4 vtxA := ids.ID{1} @@ -164,24 +160,21 @@ func TestEarlyTermNoTraversalForSharedAncestor(t *testing.T) { poll := factory.New(vdrs) poll.Vote(vdr1, vtxB) - if poll.Finished() { - t.Fatalf("Poll finished early after receiving one vote") - } + require.False(poll.Finished()) + poll.Vote(vdr2, vtxC) - if poll.Finished() { - t.Fatalf("Poll finished early after receiving two votes") - } + require.False(poll.Finished()) + poll.Vote(vdr3, vtxD) - if poll.Finished() { - t.Fatalf("Poll terminated early, when a shared ancestor could have received alpha votes") - } + require.False(poll.Finished()) + poll.Vote(vdr4, vtxA) - if !poll.Finished() { - t.Fatalf("Poll did not terminate after receiving all outstanding votes") - } + require.True(poll.Finished()) } func TestEarlyTermNoTraversalWithFastDrops(t *testing.T) { + require := require.New(t) + alpha := 2 vdr1 := ids.NodeID{1} @@ -199,16 +192,15 @@ func TestEarlyTermNoTraversalWithFastDrops(t *testing.T) { poll := factory.New(vdrs) poll.Drop(vdr1) - if poll.Finished() { - t.Fatalf("Poll finished early after dropping one vote") - } + require.False(poll.Finished()) + poll.Drop(vdr2) - if !poll.Finished() { - t.Fatalf("Poll did not terminate after dropping two votes") - } + require.True(poll.Finished()) } func TestEarlyTermNoTraversalWithWeightedResponses(t *testing.T) { + require := require.New(t) + alpha := 2 vtxID := ids.ID{1} @@ -227,21 +219,17 @@ func TestEarlyTermNoTraversalWithWeightedResponses(t *testing.T) { poll := factory.New(vdrs) poll.Vote(vdr2, vtxID) - if !poll.Finished() { - t.Fatalf("Poll did not terminate after receiving two votes") - } + require.True(poll.Finished()) result := poll.Result() - if list := result.List(); len(list) != 1 { - t.Fatalf("Wrong number of vertices returned") - } else if retVtxID := list[0]; retVtxID != vtxID { - t.Fatalf("Wrong vertex returned") - } else if result.Count(vtxID) != 2 { - t.Fatalf("Wrong number of votes returned") - } + list := result.List() + require.Len(list, 1) + require.Equal(vtxID, list[0]) + require.Equal(2, result.Count(vtxID)) } func TestEarlyTermNoTraversalDropWithWeightedResponses(t *testing.T) { + require := require.New(t) alpha := 2 vdr1 := ids.NodeID{1} @@ -258,7 +246,5 @@ func TestEarlyTermNoTraversalDropWithWeightedResponses(t *testing.T) { poll := factory.New(vdrs) poll.Drop(vdr2) - if !poll.Finished() { - t.Fatalf("Poll did not terminate after dropping two votes") - } + require.True(poll.Finished()) } diff --git a/snow/consensus/snowman/poll/no_early_term_test.go b/snow/consensus/snowman/poll/no_early_term_test.go index fdc42a57bbc..8e7199fac1d 100644 --- a/snow/consensus/snowman/poll/no_early_term_test.go +++ b/snow/consensus/snowman/poll/no_early_term_test.go @@ -6,11 +6,15 @@ package poll import ( "testing" + 
"github.com/stretchr/testify/require" + "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/bag" ) func TestNoEarlyTermResults(t *testing.T) { + require := require.New(t) + vtxID := ids.ID{1} vdr1 := ids.NodeID{1} // k = 1 @@ -22,21 +26,18 @@ func TestNoEarlyTermResults(t *testing.T) { poll := factory.New(vdrs) poll.Vote(vdr1, vtxID) - if !poll.Finished() { - t.Fatalf("Poll did not terminate after receiving k votes") - } + require.True(poll.Finished()) result := poll.Result() - if list := result.List(); len(list) != 1 { - t.Fatalf("Wrong number of vertices returned") - } else if retVtxID := list[0]; retVtxID != vtxID { - t.Fatalf("Wrong vertex returned") - } else if result.Count(vtxID) != 1 { - t.Fatalf("Wrong number of votes returned") - } + list := result.List() + require.Len(list, 1) + require.Equal(vtxID, list[0]) + require.Equal(1, result.Count(vtxID)) } func TestNoEarlyTermString(t *testing.T) { + require := require.New(t) + vtxID := ids.ID{1} vdr1 := ids.NodeID{1} @@ -57,12 +58,11 @@ func TestNoEarlyTermString(t *testing.T) { NodeID-BaMPFdqMUQ46BV8iRcwbVfsam55kMqcp: 1 received Bag: (Size = 1) SYXsAycDPUu4z2ZksJD5fh5nTDcH3vCFHnpcVye5XuJ2jArg: 1` - if result := poll.String(); expected != result { - t.Fatalf("Poll should have returned %s but returned %s", expected, result) - } + require.Equal(expected, poll.String()) } func TestNoEarlyTermDropsDuplicatedVotes(t *testing.T) { + require := require.New(t) vtxID := ids.ID{1} vdr1 := ids.NodeID{1} @@ -78,19 +78,14 @@ func TestNoEarlyTermDropsDuplicatedVotes(t *testing.T) { poll := factory.New(vdrs) poll.Vote(vdr1, vtxID) - if poll.Finished() { - t.Fatalf("Poll finished after less than alpha votes") - } + require.False(poll.Finished()) + poll.Vote(vdr1, vtxID) - if poll.Finished() { - t.Fatalf("Poll finished after getting a duplicated vote") - } + require.False(poll.Finished()) + poll.Drop(vdr1) - if poll.Finished() { - t.Fatalf("Poll finished after getting a duplicated vote") - } + require.False(poll.Finished()) + poll.Vote(vdr2, vtxID) - if !poll.Finished() { - t.Fatalf("Poll did not terminate after receiving k votes") - } + require.True(poll.Finished()) } diff --git a/snow/consensus/snowman/poll/set_test.go b/snow/consensus/snowman/poll/set_test.go index 75d82355eec..09a2e13c192 100644 --- a/snow/consensus/snowman/poll/set_test.go +++ b/snow/consensus/snowman/poll/set_test.go @@ -13,35 +13,30 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/bag" "github.com/ava-labs/avalanchego/utils/logging" - "github.com/ava-labs/avalanchego/utils/wrappers" ) func TestNewSetErrorOnMetrics(t *testing.T) { + require := require.New(t) + factory := NewNoEarlyTermFactory() log := logging.NoLog{} namespace := "" registerer := prometheus.NewRegistry() - errs := wrappers.Errs{} - errs.Add( - registerer.Register(prometheus.NewCounter(prometheus.CounterOpts{ - Name: "polls", - })), - registerer.Register(prometheus.NewCounter(prometheus.CounterOpts{ - Name: "poll_duration", - })), - ) - if errs.Errored() { - t.Fatal(errs.Err) - } + require.NoError(registerer.Register(prometheus.NewCounter(prometheus.CounterOpts{ + Name: "polls", + }))) + require.NoError(registerer.Register(prometheus.NewCounter(prometheus.CounterOpts{ + Name: "poll_duration", + }))) s := NewSet(factory, log, namespace, registerer) - if s == nil { - t.Fatalf("shouldn't have failed due to a metrics initialization err") - } + require.NotNil(s) } func TestCreateAndFinishPollOutOfOrder_NewerFinishesFirst(t *testing.T) { + require := 
require.New(t) + factory := NewNoEarlyTermFactory() log := logging.NoLog{} namespace := "" @@ -59,13 +54,13 @@ func TestCreateAndFinishPollOutOfOrder_NewerFinishesFirst(t *testing.T) { vdrBag := bag.Bag[ids.NodeID]{} vdrBag.Add(vdrs...) added := s.Add(1, vdrBag) - require.True(t, added) + require.True(added) vdrBag = bag.Bag[ids.NodeID]{} vdrBag.Add(vdrs...) added = s.Add(2, vdrBag) - require.True(t, added) - require.Equal(t, s.Len(), 2) + require.True(added) + require.Equal(s.Len(), 2) // vote vtx1 for poll 1 // vote vtx2 for poll 2 @@ -75,26 +70,24 @@ func TestCreateAndFinishPollOutOfOrder_NewerFinishesFirst(t *testing.T) { var results []bag.Bag[ids.ID] // vote out of order - results = s.Vote(1, vdr1, vtx1) - require.Empty(t, results) - results = s.Vote(2, vdr2, vtx2) - require.Empty(t, results) - results = s.Vote(2, vdr3, vtx2) - require.Empty(t, results) + require.Empty(s.Vote(1, vdr1, vtx1)) + require.Empty(s.Vote(2, vdr2, vtx2)) + require.Empty(s.Vote(2, vdr3, vtx2)) - results = s.Vote(2, vdr1, vtx2) // poll 2 finished - require.Empty(t, results) // expect 2 to not have finished because 1 is still pending + // poll 2 finished + require.Empty(s.Vote(2, vdr1, vtx2)) // expect 2 to not have finished because 1 is still pending - results = s.Vote(1, vdr2, vtx1) - require.Empty(t, results) + require.Empty(s.Vote(1, vdr2, vtx1)) results = s.Vote(1, vdr3, vtx1) // poll 1 finished, poll 2 should be finished as well - require.Len(t, results, 2) - require.Equal(t, vtx1, results[0].List()[0]) - require.Equal(t, vtx2, results[1].List()[0]) + require.Len(results, 2) + require.Equal(vtx1, results[0].List()[0]) + require.Equal(vtx2, results[1].List()[0]) } func TestCreateAndFinishPollOutOfOrder_OlderFinishesFirst(t *testing.T) { + require := require.New(t) + factory := NewNoEarlyTermFactory() log := logging.NoLog{} namespace := "" @@ -112,13 +105,13 @@ func TestCreateAndFinishPollOutOfOrder_OlderFinishesFirst(t *testing.T) { vdrBag := bag.Bag[ids.NodeID]{} vdrBag.Add(vdrs...) added := s.Add(1, vdrBag) - require.True(t, added) + require.True(added) vdrBag = bag.Bag[ids.NodeID]{} vdrBag.Add(vdrs...) 
added = s.Add(2, vdrBag) - require.True(t, added) - require.Equal(t, s.Len(), 2) + require.True(added) + require.Equal(s.Len(), 2) // vote vtx1 for poll 1 // vote vtx2 for poll 2 @@ -128,26 +121,24 @@ func TestCreateAndFinishPollOutOfOrder_OlderFinishesFirst(t *testing.T) { var results []bag.Bag[ids.ID] // vote out of order - results = s.Vote(1, vdr1, vtx1) - require.Empty(t, results) - results = s.Vote(2, vdr2, vtx2) - require.Empty(t, results) - results = s.Vote(2, vdr3, vtx2) - require.Empty(t, results) + require.Empty(s.Vote(1, vdr1, vtx1)) + require.Empty(s.Vote(2, vdr2, vtx2)) + require.Empty(s.Vote(2, vdr3, vtx2)) - results = s.Vote(1, vdr2, vtx1) - require.Empty(t, results) + require.Empty(s.Vote(1, vdr2, vtx1)) results = s.Vote(1, vdr3, vtx1) // poll 1 finished, poll 2 still remaining - require.Len(t, results, 1) // because 1 is the oldest - require.Equal(t, vtx1, results[0].List()[0]) + require.Len(results, 1) // because 1 is the oldest + require.Equal(vtx1, results[0].List()[0]) results = s.Vote(2, vdr1, vtx2) // poll 2 finished - require.Len(t, results, 1) // because 2 is the oldest now - require.Equal(t, vtx2, results[0].List()[0]) + require.Len(results, 1) // because 2 is the oldest now + require.Equal(vtx2, results[0].List()[0]) } func TestCreateAndFinishPollOutOfOrder_UnfinishedPollsGaps(t *testing.T) { + require := require.New(t) + factory := NewNoEarlyTermFactory() log := logging.NoLog{} namespace := "" @@ -165,18 +156,18 @@ func TestCreateAndFinishPollOutOfOrder_UnfinishedPollsGaps(t *testing.T) { vdrBag := bag.Bag[ids.NodeID]{} vdrBag.Add(vdrs...) added := s.Add(1, vdrBag) - require.True(t, added) + require.True(added) vdrBag = bag.Bag[ids.NodeID]{} vdrBag.Add(vdrs...) added = s.Add(2, vdrBag) - require.True(t, added) + require.True(added) vdrBag = bag.Bag[ids.NodeID]{} vdrBag.Add(vdrs...) 
added = s.Add(3, vdrBag) - require.True(t, added) - require.Equal(t, s.Len(), 3) + require.True(added) + require.Equal(s.Len(), 3) // vote vtx1 for poll 1 // vote vtx2 for poll 2 @@ -189,34 +180,28 @@ func TestCreateAndFinishPollOutOfOrder_UnfinishedPollsGaps(t *testing.T) { // vote out of order // 2 finishes first to create a gap of finished poll between two unfinished polls 1 and 3 - results = s.Vote(2, vdr3, vtx2) - require.Empty(t, results) - results = s.Vote(2, vdr2, vtx2) - require.Empty(t, results) - results = s.Vote(2, vdr1, vtx2) - require.Empty(t, results) + require.Empty(s.Vote(2, vdr3, vtx2)) + require.Empty(s.Vote(2, vdr2, vtx2)) + require.Empty(s.Vote(2, vdr1, vtx2)) // 3 finishes now, 2 has already finished but 1 is not finished so we expect to receive no results still - results = s.Vote(3, vdr2, vtx3) - require.Empty(t, results) - results = s.Vote(3, vdr3, vtx3) - require.Empty(t, results) - results = s.Vote(3, vdr1, vtx3) - require.Empty(t, results) + require.Empty(s.Vote(3, vdr2, vtx3)) + require.Empty(s.Vote(3, vdr3, vtx3)) + require.Empty(s.Vote(3, vdr1, vtx3)) // 1 finishes now, 2 and 3 have already finished so we expect 3 items in results - results = s.Vote(1, vdr1, vtx1) - require.Empty(t, results) - results = s.Vote(1, vdr2, vtx1) - require.Empty(t, results) + require.Empty(s.Vote(1, vdr1, vtx1)) + require.Empty(s.Vote(1, vdr2, vtx1)) results = s.Vote(1, vdr3, vtx1) - require.Len(t, results, 3) - require.Equal(t, vtx1, results[0].List()[0]) - require.Equal(t, vtx2, results[1].List()[0]) - require.Equal(t, vtx3, results[2].List()[0]) + require.Len(results, 3) + require.Equal(vtx1, results[0].List()[0]) + require.Equal(vtx2, results[1].List()[0]) + require.Equal(vtx3, results[2].List()[0]) } func TestCreateAndFinishSuccessfulPoll(t *testing.T) { + require := require.New(t) + factory := NewNoEarlyTermFactory() log := logging.NoLog{} namespace := "" @@ -234,36 +219,25 @@ func TestCreateAndFinishSuccessfulPoll(t *testing.T) { vdr2, ) - if s.Len() != 0 { - t.Fatalf("Shouldn't have any active polls yet") - } else if !s.Add(0, vdrs) { - t.Fatalf("Should have been able to add a new poll") - } else if s.Len() != 1 { - t.Fatalf("Should only have one active poll") - } else if s.Add(0, vdrs) { - t.Fatalf("Shouldn't have been able to add a duplicated poll") - } else if s.Len() != 1 { - t.Fatalf("Should only have one active poll") - } else if results := s.Vote(1, vdr1, vtxID); len(results) > 0 { - t.Fatalf("Shouldn't have been able to finish a non-existent poll") - } else if results := s.Vote(0, vdr1, vtxID); len(results) > 0 { - t.Fatalf("Shouldn't have been able to finish an ongoing poll") - } else if results := s.Vote(0, vdr1, vtxID); len(results) > 0 { - t.Fatalf("Should have dropped a duplicated poll") - } else if results := s.Vote(0, vdr2, vtxID); len(results) == 0 { - t.Fatalf("Should have finished the") - } else if len(results) != 1 { - t.Fatalf("Wrong number of results returned") - } else if list := results[0].List(); len(list) != 1 { - t.Fatalf("Wrong number of vertices returned") - } else if retVtxID := list[0]; retVtxID != vtxID { - t.Fatalf("Wrong vertex returned") - } else if results[0].Count(vtxID) != 2 { - t.Fatalf("Wrong number of votes returned") - } + require.Zero(s.Len()) + require.True(s.Add(0, vdrs)) + require.Equal(1, s.Len()) + require.False(s.Add(0, vdrs)) + require.Equal(1, s.Len()) + require.Empty(s.Vote(1, vdr1, vtxID)) + require.Empty(s.Vote(0, vdr1, vtxID)) + require.Empty(s.Vote(0, vdr1, vtxID)) + results := s.Vote(0, vdr2, vtxID) + 
require.Len(results, 1) + list := results[0].List() + require.Len(list, 1) + require.Equal(vtxID, list[0]) + require.Equal(2, results[0].Count(vtxID)) } func TestCreateAndFinishFailedPoll(t *testing.T) { + require := require.New(t) + factory := NewNoEarlyTermFactory() log := logging.NoLog{} namespace := "" @@ -279,30 +253,22 @@ func TestCreateAndFinishFailedPoll(t *testing.T) { vdr2, ) - if s.Len() != 0 { - t.Fatalf("Shouldn't have any active polls yet") - } else if !s.Add(0, vdrs) { - t.Fatalf("Should have been able to add a new poll") - } else if s.Len() != 1 { - t.Fatalf("Should only have one active poll") - } else if s.Add(0, vdrs) { - t.Fatalf("Shouldn't have been able to add a duplicated poll") - } else if s.Len() != 1 { - t.Fatalf("Should only have one active poll") - } else if results := s.Drop(1, vdr1); len(results) > 0 { - t.Fatalf("Shouldn't have been able to finish a non-existent poll") - } else if results := s.Drop(0, vdr1); len(results) > 0 { - t.Fatalf("Shouldn't have been able to finish an ongoing poll") - } else if results := s.Drop(0, vdr1); len(results) > 0 { - t.Fatalf("Should have dropped a duplicated poll") - } else if results := s.Drop(0, vdr2); len(results) == 0 { - t.Fatalf("Should have finished the") - } else if list := results[0].List(); len(list) != 0 { - t.Fatalf("Wrong number of vertices returned") - } + require.Zero(s.Len()) + require.True(s.Add(0, vdrs)) + require.Equal(1, s.Len()) + require.False(s.Add(0, vdrs)) + require.Equal(1, s.Len()) + require.Empty(s.Drop(1, vdr1)) + require.Empty(s.Drop(0, vdr1)) + require.Empty(s.Drop(0, vdr1)) + results := s.Drop(0, vdr2) + require.Len(results, 1) + require.Empty(results[0].List()) } func TestSetString(t *testing.T) { + require := require.New(t) + factory := NewNoEarlyTermFactory() log := logging.NoLog{} namespace := "" @@ -319,11 +285,6 @@ func TestSetString(t *testing.T) { waiting on Bag: (Size = 1) NodeID-6HgC8KRBEhXYbF4riJyJFLSHt37UNuRt: 1 received Bag: (Size = 0)` - if !s.Add(0, vdrs) { - t.Fatalf("Should have been able to add a new poll") - } else if str := s.String(); expected != str { - t.Fatalf("Set return wrong string, Expected:\n%s\nReturned:\n%s", - expected, - str) - } + require.True(s.Add(0, vdrs)) + require.Equal(expected, s.String()) } diff --git a/snow/engine/avalanche/bootstrap/bootstrapper_test.go b/snow/engine/avalanche/bootstrap/bootstrapper_test.go index 6e03702ec39..0c702014db3 100644 --- a/snow/engine/avalanche/bootstrap/bootstrapper_test.go +++ b/snow/engine/avalanche/bootstrap/bootstrapper_test.go @@ -169,7 +169,7 @@ func TestBootstrapperSingleFrontier(t *testing.T) { return vtx2, nil default: require.FailNow(errUnknownVertex.Error()) - panic(errUnknownVertex) + return nil, errUnknownVertex } } @@ -183,7 +183,7 @@ func TestBootstrapperSingleFrontier(t *testing.T) { return vtx2, nil default: require.FailNow(errParsedUnknownVertex.Error()) - panic(errParsedUnknownVertex) + return nil, errParsedUnknownVertex } } @@ -275,7 +275,7 @@ func TestBootstrapperByzantineResponses(t *testing.T) { return nil, errUnknownVertex default: require.FailNow(errUnknownVertex.Error()) - panic(errUnknownVertex) + return nil, errUnknownVertex } } @@ -302,7 +302,7 @@ func TestBootstrapperByzantineResponses(t *testing.T) { return vtx2, nil default: require.FailNow(errParsedUnknownVertex.Error()) - panic(errParsedUnknownVertex) + return nil, errParsedUnknownVertex } } @@ -322,7 +322,7 @@ func TestBootstrapperByzantineResponses(t *testing.T) { return vtx0, nil default: require.FailNow(errUnknownVertex.Error()) - 
panic(errUnknownVertex) + return nil, errUnknownVertex } } @@ -443,7 +443,7 @@ func TestBootstrapperTxDependencies(t *testing.T) { return vtx0, nil default: require.FailNow(errParsedUnknownVertex.Error()) - panic(errParsedUnknownVertex) + return nil, errParsedUnknownVertex } } manager.GetVtxF = func(_ context.Context, vtxID ids.ID) (avalanche.Vertex, error) { @@ -454,7 +454,7 @@ func TestBootstrapperTxDependencies(t *testing.T) { return nil, errUnknownVertex default: require.FailNow(errUnknownVertex.Error()) - panic(errUnknownVertex) + return nil, errUnknownVertex } } @@ -477,7 +477,7 @@ func TestBootstrapperTxDependencies(t *testing.T) { return vtx0, nil default: require.FailNow(errParsedUnknownVertex.Error()) - panic(errParsedUnknownVertex) + return nil, errParsedUnknownVertex } } @@ -570,7 +570,7 @@ func TestBootstrapperIncompleteAncestors(t *testing.T) { return vtx2, nil default: require.FailNow(errUnknownVertex.Error()) - panic(errUnknownVertex) + return nil, errUnknownVertex } } manager.ParseVtxF = func(_ context.Context, vtxBytes []byte) (avalanche.Vertex, error) { @@ -585,7 +585,7 @@ func TestBootstrapperIncompleteAncestors(t *testing.T) { return vtx2, nil default: require.FailNow(errParsedUnknownVertex.Error()) - panic(errParsedUnknownVertex) + return nil, errParsedUnknownVertex } } reqIDPtr := new(uint32) @@ -687,7 +687,7 @@ func TestBootstrapperFinalized(t *testing.T) { return nil, errUnknownVertex default: require.FailNow(errUnknownVertex.Error()) - panic(errUnknownVertex) + return nil, errUnknownVertex } } manager.ParseVtxF = func(_ context.Context, vtxBytes []byte) (avalanche.Vertex, error) { @@ -702,7 +702,7 @@ func TestBootstrapperFinalized(t *testing.T) { return vtx1, nil default: require.FailNow(errUnknownVertex.Error()) - panic(errUnknownVertex) + return nil, errUnknownVertex } } @@ -817,7 +817,7 @@ func TestBootstrapperAcceptsAncestorsParents(t *testing.T) { } default: require.FailNow(errUnknownVertex.Error()) - panic(errUnknownVertex) + return nil, errUnknownVertex } return nil, errUnknownVertex } @@ -837,7 +837,7 @@ func TestBootstrapperAcceptsAncestorsParents(t *testing.T) { return vtx2, nil default: require.FailNow(errUnknownVertex.Error()) - panic(errUnknownVertex) + return nil, errUnknownVertex } } @@ -997,7 +997,7 @@ func TestRestartBootstrapping(t *testing.T) { } default: require.FailNow(errUnknownVertex.Error()) - panic(errUnknownVertex) + return nil, errUnknownVertex } return nil, errUnknownVertex } @@ -1029,7 +1029,7 @@ func TestRestartBootstrapping(t *testing.T) { return vtx5, nil default: require.FailNow(errUnknownVertex.Error()) - panic(errUnknownVertex) + return nil, errUnknownVertex } } diff --git a/snow/engine/avalanche/getter/getter_test.go b/snow/engine/avalanche/getter/getter_test.go index b4028d7cd8e..62337933f91 100644 --- a/snow/engine/avalanche/getter/getter_test.go +++ b/snow/engine/avalanche/getter/getter_test.go @@ -23,11 +23,11 @@ import ( var errUnknownVertex = errors.New("unknown vertex") func testSetup(t *testing.T) (*vertex.TestManager, *common.SenderTest, common.Config) { + require := require.New(t) + peers := validators.NewSet() peer := ids.GenerateTestNodeID() - if err := peers.Add(peer, nil, ids.Empty, 1); err != nil { - t.Fatal(err) - } + require.NoError(peers.Add(peer, nil, ids.Empty, 1)) sender := &common.SenderTest{T: t} sender.Default(true) @@ -73,9 +73,7 @@ func TestAcceptedFrontier(t *testing.T) { vtxID2 := ids.GenerateTestID() bsIntf, err := New(manager, config) - if err != nil { - t.Fatal(err) - } + require.NoError(err) 
require.IsType(&getter{}, bsIntf) bs := bsIntf.(*getter) @@ -91,24 +89,16 @@ func TestAcceptedFrontier(t *testing.T) { accepted = frontier } - if err := bs.GetAcceptedFrontier(context.Background(), ids.EmptyNodeID, 0); err != nil { - t.Fatal(err) - } + require.NoError(bs.GetAcceptedFrontier(context.Background(), ids.EmptyNodeID, 0)) acceptedSet := set.Set[ids.ID]{} acceptedSet.Add(accepted...) manager.EdgeF = nil - if !acceptedSet.Contains(vtxID0) { - t.Fatalf("Vtx should be accepted") - } - if !acceptedSet.Contains(vtxID1) { - t.Fatalf("Vtx should be accepted") - } - if acceptedSet.Contains(vtxID2) { - t.Fatalf("Vtx shouldn't be accepted") - } + require.Contains(acceptedSet, vtxID0) + require.Contains(acceptedSet, vtxID1) + require.NotContains(acceptedSet, vtxID2) } func TestFilterAccepted(t *testing.T) { @@ -130,9 +120,7 @@ func TestFilterAccepted(t *testing.T) { }} bsIntf, err := New(manager, config) - if err != nil { - t.Fatal(err) - } + require.NoError(err) require.IsType(&getter{}, bsIntf) bs := bsIntf.(*getter) @@ -147,7 +135,7 @@ func TestFilterAccepted(t *testing.T) { case vtxID2: return nil, errUnknownVertex } - t.Fatal(errUnknownVertex) + require.FailNow(errUnknownVertex.Error()) return nil, errUnknownVertex } @@ -156,22 +144,14 @@ func TestFilterAccepted(t *testing.T) { accepted = frontier } - if err := bs.GetAccepted(context.Background(), ids.EmptyNodeID, 0, vtxIDs); err != nil { - t.Fatal(err) - } + require.NoError(bs.GetAccepted(context.Background(), ids.EmptyNodeID, 0, vtxIDs)) acceptedSet := set.Set[ids.ID]{} acceptedSet.Add(accepted...) manager.GetVtxF = nil - if !acceptedSet.Contains(vtxID0) { - t.Fatalf("Vtx should be accepted") - } - if !acceptedSet.Contains(vtxID1) { - t.Fatalf("Vtx should be accepted") - } - if acceptedSet.Contains(vtxID2) { - t.Fatalf("Vtx shouldn't be accepted") - } + require.Contains(acceptedSet, vtxID0) + require.Contains(acceptedSet, vtxID1) + require.NotContains(acceptedSet, vtxID2) } diff --git a/snow/engine/avalanche/state/unique_vertex.go b/snow/engine/avalanche/state/unique_vertex.go index 9f44d258630..73c1ef94ccd 100644 --- a/snow/engine/avalanche/state/unique_vertex.go +++ b/snow/engine/avalanche/state/unique_vertex.go @@ -5,6 +5,7 @@ package state import ( "context" + "errors" "fmt" "strings" @@ -21,6 +22,10 @@ import ( var ( _ cache.Evictable[ids.ID] = (*uniqueVertex)(nil) _ avalanche.Vertex = (*uniqueVertex)(nil) + + errGetParents = errors.New("failed to get parents for vertex") + errGetHeight = errors.New("failed to get height for vertex") + errGetTxs = errors.New("failed to get txs for vertex") ) // uniqueVertex acts as a cache for vertices in the database. 
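The hunks that follow rewrap these sentinels at each call site. As a minimal standalone sketch of the pattern (reusing the errGetParents name declared above; not taken verbatim from this change): fmt.Errorf with the %w verb keeps the dynamic status text in the message while preserving the sentinel in the error chain, so callers and tests can match on error identity with errors.Is or require.ErrorIs instead of comparing formatted strings.

package main

import (
	"errors"
	"fmt"
)

var errGetParents = errors.New("failed to get parents for vertex")

// parentsErr mimics the rewrapping done in uniqueVertex.Parents:
// the sentinel stays in the chain, the status is appended to the message.
func parentsErr(status string) error {
	return fmt.Errorf("%w with status: %s", errGetParents, status)
}

func main() {
	err := parentsErr("Unknown")
	fmt.Println(err)                           // failed to get parents for vertex with status: Unknown
	fmt.Println(errors.Is(err, errGetParents)) // true
}

This identity check is what the test conversions below rely on when they replace bare err == nil checks with require.ErrorIs(err, errGetParents) and the other sentinels.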
@@ -220,7 +225,7 @@ func (vtx *uniqueVertex) Parents() ([]avalanche.Vertex, error) { vtx.refresh() if vtx.v.vtx == nil { - return nil, fmt.Errorf("failed to get parents for vertex with status: %s", vtx.v.status) + return nil, fmt.Errorf("%w with status: %s", errGetParents, vtx.v.status) } parentIDs := vtx.v.vtx.ParentIDs() @@ -241,7 +246,7 @@ func (vtx *uniqueVertex) Height() (uint64, error) { vtx.refresh() if vtx.v.vtx == nil { - return 0, fmt.Errorf("failed to get height for vertex with status: %s", vtx.v.status) + return 0, fmt.Errorf("%w with status: %s", errGetHeight, vtx.v.status) } return vtx.v.vtx.Height(), nil @@ -251,7 +256,7 @@ func (vtx *uniqueVertex) Txs(ctx context.Context) ([]snowstorm.Tx, error) { vtx.refresh() if vtx.v.vtx == nil { - return nil, fmt.Errorf("failed to get txs for vertex with status: %s", vtx.v.status) + return nil, fmt.Errorf("%w with status: %s", errGetTxs, vtx.v.status) } txs := vtx.v.vtx.Txs() diff --git a/snow/engine/avalanche/state/unique_vertex_test.go b/snow/engine/avalanche/state/unique_vertex_test.go index bf50dd2e778..d3c55c24035 100644 --- a/snow/engine/avalanche/state/unique_vertex_test.go +++ b/snow/engine/avalanche/state/unique_vertex_test.go @@ -9,6 +9,8 @@ import ( "errors" "testing" + "github.com/stretchr/testify/require" + "github.com/ava-labs/avalanchego/database/memdb" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" @@ -41,6 +43,7 @@ func newTestSerializer(t *testing.T, parse func(context.Context, []byte) (snowst } func TestUnknownUniqueVertexErrors(t *testing.T) { + require := require.New(t) s := newTestSerializer(t, nil) uVtx := &uniqueVertex{ @@ -49,35 +52,27 @@ func TestUnknownUniqueVertexErrors(t *testing.T) { } status := uVtx.Status() - if status != choices.Unknown { - t.Fatalf("Expected vertex to have Unknown status") - } + require.Equal(choices.Unknown, status) _, err := uVtx.Parents() - if err == nil { - t.Fatalf("Parents should have produced error for unknown vertex") - } + require.ErrorIs(err, errGetParents) _, err = uVtx.Height() - if err == nil { - t.Fatalf("Height should have produced error for unknown vertex") - } + require.ErrorIs(err, errGetHeight) _, err = uVtx.Txs(context.Background()) - if err == nil { - t.Fatalf("Txs should have produced an error for unknown vertex") - } + require.ErrorIs(err, errGetTxs) } func TestUniqueVertexCacheHit(t *testing.T) { + require := require.New(t) + testTx := &snowstorm.TestTx{TestDecidable: choices.TestDecidable{ IDV: ids.ID{1}, }} s := newTestSerializer(t, func(_ context.Context, b []byte) (snowstorm.Tx, error) { - if !bytes.Equal(b, []byte{0}) { - t.Fatal("unknown tx") - } + require.Equal([]byte{0}, b) return testTx, nil }) @@ -92,17 +87,13 @@ func TestUniqueVertexCacheHit(t *testing.T) { parentIDs, [][]byte{{0}}, ) - if err != nil { - t.Fatal(err) - } + require.NoError(err) uVtx := &uniqueVertex{ id: id, serializer: s, } - if err := uVtx.setVertex(context.Background(), vtx); err != nil { - t.Fatalf("Failed to set vertex due to: %s", err) - } + require.NoError(uVtx.setVertex(context.Background(), vtx)) newUVtx := &uniqueVertex{ id: id, @@ -110,41 +101,25 @@ func TestUniqueVertexCacheHit(t *testing.T) { } parents, err := newUVtx.Parents() - if err != nil { - t.Fatalf("Error while retrieving parents of known vertex") - } - if len(parents) != 1 { - t.Fatalf("Parents should have length 1") - } - if parents[0].ID() != parentID { - t.Fatalf("ParentID is incorrect") - } + require.NoError(err) + require.Len(parents, 1) + require.Equal(parentID, parents[0].ID()) 
newHeight, err := newUVtx.Height() - if err != nil { - t.Fatalf("Error while retrieving height of known vertex") - } - if height != newHeight { - t.Fatalf("Vertex height should have been %d, but was: %d", height, newHeight) - } + require.NoError(err) + require.Equal(height, newHeight) txs, err := newUVtx.Txs(context.Background()) - if err != nil { - t.Fatalf("Error while retrieving txs of known vertex: %s", err) - } - if len(txs) != 1 { - t.Fatalf("Incorrect number of transactions") - } - if txs[0] != testTx { - t.Fatalf("Txs retrieved the wrong Tx") - } + require.NoError(err) + require.Len(txs, 1) + require.Equal(testTx, txs[0]) - if newUVtx.v != uVtx.v { - t.Fatalf("Unique vertex failed to get corresponding vertex state from cache") - } + require.Equal(uVtx.v, newUVtx.v) } func TestUniqueVertexCacheMiss(t *testing.T) { + require := require.New(t) + txBytesParent := []byte{1, 2, 3, 4, 5, 6, 7, 8, 9} testTxParent := &snowstorm.TestTx{ TestDecidable: choices.TestDecidable{ @@ -168,16 +143,14 @@ func TestUniqueVertexCacheMiss(t *testing.T) { if bytes.Equal(txBytes, b) { return testTx, nil } - t.Fatal("asked to parse unexpected transaction") + require.FailNow("asked to parse unexpected transaction") return nil, nil } s := newTestSerializer(t, parseTx) uvtxParent := newTestUniqueVertex(t, s, nil, [][]byte{txBytesParent}, false) - if err := uvtxParent.Accept(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(uvtxParent.Accept(context.Background())) parentID := uvtxParent.ID() parentIDs := []ids.ID{parentID} @@ -189,9 +162,7 @@ func TestUniqueVertexCacheMiss(t *testing.T) { parentIDs, [][]byte{txBytes}, ) - if err != nil { - t.Fatal(err) - } + require.NoError(err) id := innerVertex.ID() vtxBytes := innerVertex.Bytes() @@ -202,56 +173,34 @@ func TestUniqueVertexCacheMiss(t *testing.T) { } // Register a cache miss - if status := uVtx.Status(); status != choices.Unknown { - t.Fatalf("expected status to be unknown, but found: %s", status) - } + require.Equal(choices.Unknown, uVtx.Status()) // Register cache hit vtx, err := newUniqueVertex(context.Background(), s, vtxBytes) - if err != nil { - t.Fatal(err) - } + require.NoError(err) - if status := vtx.Status(); status != choices.Processing { - t.Fatalf("expected status to be processing, but found: %s", status) - } + require.Equal(choices.Processing, vtx.Status()) validateVertex := func(vtx *uniqueVertex, expectedStatus choices.Status) { - if status := vtx.Status(); status != expectedStatus { - t.Fatalf("expected status to be %s, but found: %s", expectedStatus, status) - } + require.Equal(expectedStatus, vtx.Status()) // Call bytes first to check for regression bug // where it's unsafe to call Bytes or Verify directly // after calling Status to refresh a vertex - if !bytes.Equal(vtx.Bytes(), vtxBytes) { - t.Fatalf("Found unexpected vertex bytes") - } + require.Equal(vtxBytes, vtx.Bytes()) vtxParents, err := vtx.Parents() - if err != nil { - t.Fatalf("Fetching vertex parents errored with: %s", err) - } + require.NoError(err) vtxHeight, err := vtx.Height() - if err != nil { - t.Fatalf("Fetching vertex height errored with: %s", err) - } + require.NoError(err) vtxTxs, err := vtx.Txs(context.Background()) - if err != nil { - t.Fatalf("Fetching vertx txs errored with: %s", err) - } - switch { - case vtxHeight != height: - t.Fatalf("Expected vertex height to be %d, but found %d", height, vtxHeight) - case len(vtxParents) != 1: - t.Fatalf("Expected vertex to have 1 parent, but found %d", len(vtxParents)) - case vtxParents[0].ID() != 
parentID: - t.Fatalf("Found unexpected parentID: %s, expected: %s", vtxParents[0].ID(), parentID) - case len(vtxTxs) != 1: - t.Fatalf("Exepcted vertex to have 1 transaction, but found %d", len(vtxTxs)) - case !bytes.Equal(vtxTxs[0].Bytes(), txBytes): - t.Fatalf("Found unexpected transaction bytes") - } + require.NoError(err) + + require.Equal(height, vtxHeight) + require.Len(vtxParents, 1) + require.Equal(parentID, vtxParents[0].ID()) + require.Len(vtxTxs, 1) + require.Equal(txBytes, vtxTxs[0].Bytes()) } // Replace the vertex, so that it loses reference to parents, etc. @@ -265,9 +214,7 @@ func TestUniqueVertexCacheMiss(t *testing.T) { // Check that a newly parsed vertex refreshed from the cache is valid vtx, err = newUniqueVertex(context.Background(), s, vtxBytes) - if err != nil { - t.Fatal(err) - } + require.NoError(err) validateVertex(vtx, choices.Processing) // Check that refreshing a vertex when it has been removed from @@ -282,22 +229,20 @@ func TestUniqueVertexCacheMiss(t *testing.T) { s.state.uniqueVtx.Flush() vtx, err = newUniqueVertex(context.Background(), s, vtxBytes) - if err != nil { - t.Fatal(err) - } + require.NoError(err) validateVertex(vtx, choices.Processing) } func TestParseVertexWithIncorrectChainID(t *testing.T) { + require := require.New(t) + statelessVertex, err := vertex.Build( // regular, non-stop vertex ids.GenerateTestID(), 0, nil, [][]byte{{1}}, ) - if err != nil { - t.Fatal(err) - } + require.NoError(err) vtxBytes := statelessVertex.Bytes() s := newTestSerializer(t, func(_ context.Context, b []byte) (snowstorm.Tx, error) { @@ -307,12 +252,13 @@ func TestParseVertexWithIncorrectChainID(t *testing.T) { return nil, errUnknownTx }) - if _, err := s.ParseVtx(context.Background(), vtxBytes); err == nil { - t.Fatal("should have failed to parse the vertex due to invalid chainID") - } + _, err = s.ParseVtx(context.Background(), vtxBytes) + require.ErrorIs(err, errWrongChainID) } func TestParseVertexWithInvalidTxs(t *testing.T) { + require := require.New(t) + ctx := snow.DefaultContextTest() statelessVertex, err := vertex.Build( // regular, non-stop vertex ctx.ChainID, @@ -320,9 +266,7 @@ func TestParseVertexWithInvalidTxs(t *testing.T) { nil, [][]byte{{1}}, ) - if err != nil { - t.Fatal(err) - } + require.NoError(err) vtxBytes := statelessVertex.Bytes() s := newTestSerializer(t, func(_ context.Context, b []byte) (snowstorm.Tx, error) { @@ -334,18 +278,15 @@ func TestParseVertexWithInvalidTxs(t *testing.T) { } }) - if _, err := s.ParseVtx(context.Background(), vtxBytes); err == nil { - t.Fatal("should have failed to parse the vertex due to invalid transactions") - } + _, err = s.ParseVtx(context.Background(), vtxBytes) + require.ErrorIs(err, errUnknownTx) - if _, err := s.ParseVtx(context.Background(), vtxBytes); err == nil { - t.Fatal("should have failed to parse the vertex after previously error on parsing invalid transactions") - } + _, err = s.ParseVtx(context.Background(), vtxBytes) + require.ErrorIs(err, errUnknownTx) id := hashing.ComputeHash256Array(vtxBytes) - if _, err := s.GetVtx(context.Background(), id); err == nil { - t.Fatal("should have failed to lookup invalid vertex after previously error on parsing invalid transactions") - } + _, err = s.GetVtx(context.Background(), id) + require.ErrorIs(err, errUnknownVertex) childStatelessVertex, err := vertex.Build( // regular, non-stop vertex ctx.ChainID, @@ -353,28 +294,18 @@ func TestParseVertexWithInvalidTxs(t *testing.T) { []ids.ID{id}, [][]byte{{2}}, ) - if err != nil { - t.Fatal(err) - } + 
require.NoError(err) childVtxBytes := childStatelessVertex.Bytes() childVtx, err := s.ParseVtx(context.Background(), childVtxBytes) - if err != nil { - t.Fatal(err) - } + require.NoError(err) parents, err := childVtx.Parents() - if err != nil { - t.Fatal(err) - } - if len(parents) != 1 { - t.Fatal("wrong number of parents") - } + require.NoError(err) + require.Len(parents, 1) parent := parents[0] - if parent.Status().Fetched() { - t.Fatal("the parent is invalid, so it shouldn't be marked as fetched") - } + require.False(parent.Status().Fetched()) } func newTestUniqueVertex( @@ -384,6 +315,8 @@ func newTestUniqueVertex( txs [][]byte, stopVertex bool, ) *uniqueVertex { + require := require.New(t) + var ( vtx vertex.StatelessVertex err error @@ -402,12 +335,8 @@ func newTestUniqueVertex( parentIDs, ) } - if err != nil { - t.Fatal(err) - } + require.NoError(err) uvtx, err := newUniqueVertex(context.Background(), s, vtx.Bytes()) - if err != nil { - t.Fatal(err) - } + require.NoError(err) return uvtx } diff --git a/snow/engine/avalanche/vertex/heap_test.go b/snow/engine/avalanche/vertex/heap_test.go index b4e049b5505..66df601942b 100644 --- a/snow/engine/avalanche/vertex/heap_test.go +++ b/snow/engine/avalanche/vertex/heap_test.go @@ -6,6 +6,8 @@ package vertex import ( "testing" + "github.com/stretchr/testify/require" + "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/choices" "github.com/ava-labs/avalanchego/snow/consensus/avalanche" @@ -14,6 +16,8 @@ import ( // This example inserts several ints into an IntHeap, checks the minimum, // and removes them in order of priority. func TestUniqueVertexHeapReturnsOrdered(t *testing.T) { + require := require.New(t) + h := NewHeap() vtx0 := &avalanche.TestVertex{ @@ -59,48 +63,40 @@ func TestUniqueVertexHeapReturnsOrdered(t *testing.T) { } vtxZ := h.Pop() - if vtxZ.ID() != vtx4.ID() { - t.Fatalf("Heap did not pop unknown element first") - } + require.Equal(vtx4.ID(), vtxZ.ID()) vtxA := h.Pop() - if height, err := vtxA.Height(); err != nil || height != 3 { - t.Fatalf("First height from heap was incorrect") - } else if vtxA.ID() != vtx3.ID() { - t.Fatalf("Incorrect ID on vertex popped from heap") - } + height, err := vtxA.Height() + require.NoError(err) + require.Equal(uint64(3), height) + require.Equal(vtx3.ID(), vtxA.ID()) vtxB := h.Pop() - if height, err := vtxB.Height(); err != nil || height != 1 { - t.Fatalf("First height from heap was incorrect") - } else if vtxB.ID() != vtx1.ID() && vtxB.ID() != vtx2.ID() { - t.Fatalf("Incorrect ID on vertex popped from heap") - } + height, err = vtxB.Height() + require.NoError(err) + require.Equal(uint64(1), height) + require.Contains([]ids.ID{vtx1.ID(), vtx2.ID()}, vtxB.ID()) vtxC := h.Pop() - if height, err := vtxC.Height(); err != nil || height != 1 { - t.Fatalf("First height from heap was incorrect") - } else if vtxC.ID() != vtx1.ID() && vtxC.ID() != vtx2.ID() { - t.Fatalf("Incorrect ID on vertex popped from heap") - } + height, err = vtxC.Height() + require.NoError(err) + require.Equal(uint64(1), height) + require.Contains([]ids.ID{vtx1.ID(), vtx2.ID()}, vtxC.ID()) - if vtxB.ID() == vtxC.ID() { - t.Fatalf("Heap returned same element more than once") - } + require.NotEqual(vtxB.ID(), vtxC.ID()) vtxD := h.Pop() - if height, err := vtxD.Height(); err != nil || height != 0 { - t.Fatalf("Last height returned was incorrect") - } else if vtxD.ID() != vtx0.ID() { - t.Fatalf("Last item from heap had incorrect ID") - } + height, err = vtxD.Height() + require.NoError(err) + 
require.Zero(height) + require.Equal(vtx0.ID(), vtxD.ID()) - if h.Len() != 0 { - t.Fatalf("Heap was not empty after popping all of its elements") - } + require.Zero(h.Len()) } func TestUniqueVertexHeapRemainsUnique(t *testing.T) { + require := require.New(t) + h := NewHeap() vtx0 := &avalanche.TestVertex{ @@ -138,12 +134,10 @@ func TestUniqueVertexHeapRemainsUnique(t *testing.T) { pushed2 := h.Push(vtx1) pushed3 := h.Push(vtx2) pushed4 := h.Push(vtx3) - switch { - case h.Len() != 3: - t.Fatalf("Unique Vertex Heap has incorrect length: %d", h.Len()) - case !(pushed1 && pushed2 && pushed3): - t.Fatalf("Failed to push a new unique element") - case pushed4: - t.Fatalf("Pushed non-unique element to the unique vertex heap") - } + + require.Equal(3, h.Len()) + require.True(pushed1) + require.True(pushed2) + require.True(pushed3) + require.False(pushed4) } diff --git a/snow/engine/avalanche/vertex/parser_test.go b/snow/engine/avalanche/vertex/parser_test.go index acffb5c8d66..16c2c9f425f 100644 --- a/snow/engine/avalanche/vertex/parser_test.go +++ b/snow/engine/avalanche/vertex/parser_test.go @@ -13,12 +13,16 @@ import ( ) func TestParseInvalid(t *testing.T) { + require := require.New(t) + vtxBytes := []byte{1, 2, 3, 4, 5} _, err := Parse(vtxBytes) - require.ErrorIs(t, err, codec.ErrUnknownVersion) + require.ErrorIs(err, codec.ErrUnknownVersion) } func TestParseValid(t *testing.T) { + require := require.New(t) + chainID := ids.ID{1} height := uint64(2) parentIDs := []ids.ID{{4}, {5}} @@ -29,10 +33,10 @@ func TestParseValid(t *testing.T) { parentIDs, txs, ) - require.NoError(t, err) + require.NoError(err) vtxBytes := vtx.Bytes() parsedVtx, err := Parse(vtxBytes) - require.NoError(t, err) - require.Equal(t, vtx, parsedVtx) + require.NoError(err) + require.Equal(vtx, parsedVtx) } diff --git a/snow/engine/avalanche/vertex/stateless_vertex.go b/snow/engine/avalanche/vertex/stateless_vertex.go index f87996a4352..fb92565f281 100644 --- a/snow/engine/avalanche/vertex/stateless_vertex.go +++ b/snow/engine/avalanche/vertex/stateless_vertex.go @@ -23,7 +23,7 @@ const ( var ( errBadVersion = errors.New("invalid version") errBadEpoch = errors.New("invalid epoch") - errTooManyparentIDs = fmt.Errorf("vertex contains more than %d parentIDs", maxNumParents) + errTooManyParentIDs = fmt.Errorf("vertex contains more than %d parentIDs", maxNumParents) errNoOperations = errors.New("vertex contains no operations") errTooManyTxs = fmt.Errorf("vertex contains more than %d transactions", maxTxsPerVtx) errInvalidParents = errors.New("vertex contains non-sorted or duplicated parentIDs") @@ -115,7 +115,7 @@ func (v innerStatelessVertex) verify() error { case v.Epoch != 0: return errBadEpoch case len(v.ParentIDs) > maxNumParents: - return errTooManyparentIDs + return errTooManyParentIDs case len(v.Txs) == 0: return errNoOperations case len(v.Txs) > maxTxsPerVtx: @@ -136,7 +136,7 @@ func (v innerStatelessVertex) verifyStopVertex() error { case v.Epoch != 0: return errBadEpoch case len(v.ParentIDs) > maxNumParents: - return errTooManyparentIDs + return errTooManyParentIDs case len(v.Txs) != 0: return errTooManyTxs case !utils.IsSortedAndUniqueSortable(v.ParentIDs): diff --git a/snow/engine/avalanche/vertex/stateless_vertex_test.go b/snow/engine/avalanche/vertex/stateless_vertex_test.go index af9819da988..a18a045a95d 100644 --- a/snow/engine/avalanche/vertex/stateless_vertex_test.go +++ b/snow/engine/avalanche/vertex/stateless_vertex_test.go @@ -6,6 +6,8 @@ package vertex import ( "testing" + "github.com/stretchr/testify/require" 
+ "github.com/ava-labs/avalanchego/ids" ) @@ -24,14 +26,14 @@ func TestVertexVerify(t *testing.T) { } tests := []struct { - name string - vertex StatelessVertex - shouldErr bool + name string + vertex StatelessVertex + expectedErr error }{ { - name: "zero vertex", - vertex: statelessVertex{innerStatelessVertex: innerStatelessVertex{}}, - shouldErr: true, + name: "zero vertex", + vertex: statelessVertex{innerStatelessVertex: innerStatelessVertex{}}, + expectedErr: errNoOperations, }, { name: "valid vertex", @@ -43,7 +45,7 @@ func TestVertexVerify(t *testing.T) { ParentIDs: []ids.ID{}, Txs: [][]byte{{}}, }}, - shouldErr: false, + expectedErr: nil, }, { name: "invalid vertex epoch", @@ -55,7 +57,7 @@ func TestVertexVerify(t *testing.T) { ParentIDs: []ids.ID{}, Txs: [][]byte{{}}, }}, - shouldErr: true, + expectedErr: errBadEpoch, }, { name: "too many vertex parents", @@ -67,7 +69,7 @@ func TestVertexVerify(t *testing.T) { ParentIDs: tooManyParents, Txs: [][]byte{{}}, }}, - shouldErr: true, + expectedErr: errTooManyParentIDs, }, { name: "no vertex txs", @@ -79,7 +81,7 @@ func TestVertexVerify(t *testing.T) { ParentIDs: []ids.ID{}, Txs: [][]byte{}, }}, - shouldErr: true, + expectedErr: errNoOperations, }, { name: "too many vertex txs", @@ -91,7 +93,7 @@ func TestVertexVerify(t *testing.T) { ParentIDs: []ids.ID{}, Txs: tooManyTxs, }}, - shouldErr: true, + expectedErr: errTooManyTxs, }, { name: "unsorted vertex parents", @@ -103,7 +105,7 @@ func TestVertexVerify(t *testing.T) { ParentIDs: []ids.ID{{1}, {0}}, Txs: [][]byte{{}}, }}, - shouldErr: true, + expectedErr: errInvalidParents, }, { name: "unsorted vertex txs", @@ -115,7 +117,7 @@ func TestVertexVerify(t *testing.T) { ParentIDs: []ids.ID{}, Txs: [][]byte{{0}, {1}}, // note that txs are sorted by their hashes }}, - shouldErr: true, + expectedErr: errInvalidTxs, }, { name: "duplicate vertex parents", @@ -127,7 +129,7 @@ func TestVertexVerify(t *testing.T) { ParentIDs: []ids.ID{{0}, {0}}, Txs: [][]byte{{}}, }}, - shouldErr: true, + expectedErr: errInvalidParents, }, { name: "duplicate vertex txs", @@ -139,17 +141,13 @@ func TestVertexVerify(t *testing.T) { ParentIDs: []ids.ID{}, Txs: [][]byte{{0}, {0}}, // note that txs are sorted by their hashes }}, - shouldErr: true, + expectedErr: errInvalidTxs, }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { err := test.vertex.Verify() - if test.shouldErr && err == nil { - t.Fatal("expected verify to return an error but it didn't") - } else if !test.shouldErr && err != nil { - t.Fatalf("expected verify to pass but it returned: %s", err) - } + require.ErrorIs(t, err, test.expectedErr) }) } } diff --git a/snow/engine/avalanche/vertex/test_builder.go b/snow/engine/avalanche/vertex/test_builder.go index f50dea2ae00..c8203138e48 100644 --- a/snow/engine/avalanche/vertex/test_builder.go +++ b/snow/engine/avalanche/vertex/test_builder.go @@ -8,6 +8,8 @@ import ( "errors" "testing" + "github.com/stretchr/testify/require" + "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/consensus/avalanche" ) @@ -33,7 +35,8 @@ func (b *TestBuilder) BuildStopVtx(ctx context.Context, parentIDs []ids.ID) (ava return b.BuildStopVtxF(ctx, parentIDs) } if b.CantBuildVtx && b.T != nil { - b.T.Fatal(errBuild) + require := require.New(b.T) + require.FailNow(errBuild.Error()) } return nil, errBuild } diff --git a/snow/engine/avalanche/vertex/test_parser.go b/snow/engine/avalanche/vertex/test_parser.go index ef680ee8fa2..56a2fdae2fe 100644 --- a/snow/engine/avalanche/vertex/test_parser.go +++ 
b/snow/engine/avalanche/vertex/test_parser.go @@ -8,6 +8,8 @@ import ( "errors" "testing" + "github.com/stretchr/testify/require" + "github.com/ava-labs/avalanchego/snow/consensus/avalanche" ) @@ -32,7 +34,8 @@ func (p *TestParser) ParseVtx(ctx context.Context, b []byte) (avalanche.Vertex, return p.ParseVtxF(ctx, b) } if p.CantParseVtx && p.T != nil { - p.T.Fatal(errParse) + require := require.New(p.T) + require.FailNow(errParse.Error()) } return nil, errParse } diff --git a/snow/engine/avalanche/vertex/test_storage.go b/snow/engine/avalanche/vertex/test_storage.go index 10403a92001..9fc4b3d2274 100644 --- a/snow/engine/avalanche/vertex/test_storage.go +++ b/snow/engine/avalanche/vertex/test_storage.go @@ -8,6 +8,8 @@ import ( "errors" "testing" + "github.com/stretchr/testify/require" + "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/consensus/avalanche" ) @@ -38,7 +40,8 @@ func (s *TestStorage) GetVtx(ctx context.Context, vtxID ids.ID) (avalanche.Verte return s.GetVtxF(ctx, vtxID) } if s.CantGetVtx && s.T != nil { - s.T.Fatal(errGet) + require := require.New(s.T) + require.FailNow(errGet.Error()) } return nil, errGet } @@ -48,7 +51,8 @@ func (s *TestStorage) Edge(ctx context.Context) []ids.ID { return s.EdgeF(ctx) } if s.CantEdge && s.T != nil { - s.T.Fatal(errEdge) + require := require.New(s.T) + require.FailNow(errEdge.Error()) } return nil } @@ -58,7 +62,8 @@ func (s *TestStorage) StopVertexAccepted(ctx context.Context) (bool, error) { return s.StopVertexAcceptedF(ctx) } if s.CantStopVertexAccepted && s.T != nil { - s.T.Fatal(errStopVertexAccepted) + require := require.New(s.T) + require.FailNow(errStopVertexAccepted.Error()) } return false, nil } diff --git a/snow/engine/avalanche/vertex/test_vm.go b/snow/engine/avalanche/vertex/test_vm.go index 576cfba16d8..1a3a346db86 100644 --- a/snow/engine/avalanche/vertex/test_vm.go +++ b/snow/engine/avalanche/vertex/test_vm.go @@ -7,6 +7,8 @@ import ( "context" "errors" + "github.com/stretchr/testify/require" + "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/consensus/snowstorm" "github.com/ava-labs/avalanchego/snow/engine/snowman/block" @@ -43,7 +45,8 @@ func (vm *TestVM) Linearize(ctx context.Context, stopVertexID ids.ID) error { return vm.LinearizeF(ctx, stopVertexID) } if vm.CantLinearize && vm.T != nil { - vm.T.Fatal(errLinearize) + require := require.New(vm.T) + require.FailNow(errLinearize.Error()) } return errLinearize } @@ -53,7 +56,8 @@ func (vm *TestVM) PendingTxs(ctx context.Context) []snowstorm.Tx { return vm.PendingTxsF(ctx) } if vm.CantPendingTxs && vm.T != nil { - vm.T.Fatal(errPending) + require := require.New(vm.T) + require.FailNow(errPending.Error()) } return nil } @@ -63,7 +67,8 @@ func (vm *TestVM) ParseTx(ctx context.Context, b []byte) (snowstorm.Tx, error) { return vm.ParseTxF(ctx, b) } if vm.CantParse && vm.T != nil { - vm.T.Fatal(errParse) + require := require.New(vm.T) + require.FailNow(errParse.Error()) } return nil, errParse } @@ -73,7 +78,8 @@ func (vm *TestVM) GetTx(ctx context.Context, txID ids.ID) (snowstorm.Tx, error) return vm.GetTxF(ctx, txID) } if vm.CantGet && vm.T != nil { - vm.T.Fatal(errGet) + require := require.New(vm.T) + require.FailNow(errGet.Error()) } return nil, errGet } diff --git a/snow/engine/common/queue/jobs_test.go b/snow/engine/common/queue/jobs_test.go index 68bc8a1b7d8..1694bf0cb16 100644 --- a/snow/engine/common/queue/jobs_test.go +++ b/snow/engine/common/queue/jobs_test.go @@ -62,12 +62,8 @@ func TestNew(t *testing.T) { db := 
memdb.New() jobs, err := New(db, "", prometheus.NewRegistry()) - if err != nil { - t.Fatal(err) - } - if err := jobs.SetParser(parser); err != nil { - t.Fatal(err) - } + require.NoError(err) + require.NoError(jobs.SetParser(parser)) dbSize, err := database.Size(db) require.NoError(err) @@ -83,12 +79,8 @@ func TestPushAndExecute(t *testing.T) { db := memdb.New() jobs, err := New(db, "", prometheus.NewRegistry()) - if err != nil { - t.Fatal(err) - } - if err := jobs.SetParser(parser); err != nil { - t.Fatal(err) - } + require.NoError(err) + require.NoError(jobs.SetParser(parser)) jobID := ids.GenerateTestID() job := testJob(t, jobID, nil, ids.Empty, nil) @@ -108,9 +100,7 @@ func TestPushAndExecute(t *testing.T) { jobs, err = New(db, "", prometheus.NewRegistry()) require.NoError(err) - if err := jobs.SetParser(parser); err != nil { - t.Fatal(err) - } + require.NoError(jobs.SetParser(parser)) has, err = jobs.Has(jobID) require.NoError(err) @@ -151,12 +141,8 @@ func TestRemoveDependency(t *testing.T) { db := memdb.New() jobs, err := New(db, "", prometheus.NewRegistry()) - if err != nil { - t.Fatal(err) - } - if err := jobs.SetParser(parser); err != nil { - t.Fatal(err) - } + require.NoError(err) + require.NoError(jobs.SetParser(parser)) job0ID, executed0 := ids.GenerateTestID(), false job1ID, executed1 := ids.GenerateTestID(), false @@ -217,9 +203,7 @@ func TestDuplicatedExecutablePush(t *testing.T) { db := memdb.New() jobs, err := New(db, "", prometheus.NewRegistry()) - if err != nil { - t.Fatal(err) - } + require.NoError(err) jobID := ids.GenerateTestID() job := testJob(t, jobID, nil, ids.Empty, nil) @@ -249,9 +233,7 @@ func TestDuplicatedNotExecutablePush(t *testing.T) { db := memdb.New() jobs, err := New(db, "", prometheus.NewRegistry()) - if err != nil { - t.Fatal(err) - } + require.NoError(err) job0ID, executed0 := ids.GenerateTestID(), false job1ID := ids.GenerateTestID() @@ -283,9 +265,7 @@ func TestMissingJobs(t *testing.T) { jobs, err := NewWithMissing(db, "", prometheus.NewRegistry()) require.NoError(err) - if err := jobs.SetParser(context.Background(), parser); err != nil { - t.Fatal(err) - } + require.NoError(jobs.SetParser(context.Background(), parser)) job0ID := ids.GenerateTestID() job1ID := ids.GenerateTestID() @@ -313,9 +293,7 @@ func TestMissingJobs(t *testing.T) { jobs, err = NewWithMissing(db, "", prometheus.NewRegistry()) require.NoError(err) - if err := jobs.SetParser(context.Background(), parser); err != nil { - t.Fatal(err) - } + require.NoError(jobs.SetParser(context.Background(), parser)) missingIDSet = set.Set[ids.ID]{} missingIDSet.Add(jobs.MissingIDs()...) @@ -334,12 +312,8 @@ func TestHandleJobWithMissingDependencyOnRunnableStack(t *testing.T) { db := memdb.New() jobs, err := NewWithMissing(db, "", prometheus.NewRegistry()) - if err != nil { - t.Fatal(err) - } - if err := jobs.SetParser(context.Background(), parser); err != nil { - t.Fatal(err) - } + require.NoError(err) + require.NoError(jobs.SetParser(context.Background(), parser)) job0ID, executed0 := ids.GenerateTestID(), false job1ID, executed1 := ids.GenerateTestID(), false @@ -398,12 +372,8 @@ func TestHandleJobWithMissingDependencyOnRunnableStack(t *testing.T) { // Create jobs queue from the same database and ensure that the jobs queue // recovers correctly. 
jobs, err = NewWithMissing(db, "", prometheus.NewRegistry()) - if err != nil { - t.Fatal(err) - } - if err := jobs.SetParser(context.Background(), parser); err != nil { - t.Fatal(err) - } + require.NoError(err) + require.NoError(jobs.SetParser(context.Background(), parser)) missingIDs := jobs.MissingIDs() require.Len(missingIDs, 1) @@ -431,12 +401,8 @@ func TestInitializeNumJobs(t *testing.T) { db := memdb.New() jobs, err := NewWithMissing(db, "", prometheus.NewRegistry()) - if err != nil { - t.Fatal(err) - } - if err := jobs.SetParser(context.Background(), parser); err != nil { - t.Fatal(err) - } + require.NoError(err) + require.NoError(jobs.SetParser(context.Background(), parser)) job0ID := ids.GenerateTestID() job1ID := ids.GenerateTestID() @@ -485,13 +451,13 @@ func TestInitializeNumJobs(t *testing.T) { require.Equal(uint64(2), jobs.state.numJobs) require.NoError(jobs.Commit()) + require.NoError(database.Clear(jobs.state.metadataDB, jobs.state.metadataDB)) + require.NoError(jobs.Commit()) jobs, err = NewWithMissing(db, "", prometheus.NewRegistry()) - if err != nil { - t.Fatal(err) - } + require.NoError(err) require.Equal(uint64(2), jobs.state.numJobs) } @@ -502,12 +468,8 @@ func TestClearAll(t *testing.T) { db := memdb.New() jobs, err := NewWithMissing(db, "", prometheus.NewRegistry()) - if err != nil { - t.Fatal(err) - } - if err := jobs.SetParser(context.Background(), parser); err != nil { - t.Fatal(err) - } + require.NoError(err) + require.NoError(jobs.SetParser(context.Background(), parser)) job0ID, executed0 := ids.GenerateTestID(), false job1ID, executed1 := ids.GenerateTestID(), false job0 := testJob(t, job0ID, &executed0, ids.Empty, nil) diff --git a/snow/engine/common/queue/test_job.go b/snow/engine/common/queue/test_job.go index 09e51855acc..66ff7990011 100644 --- a/snow/engine/common/queue/test_job.go +++ b/snow/engine/common/queue/test_job.go @@ -8,6 +8,8 @@ import ( "errors" "testing" + "github.com/stretchr/testify/require" + "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/set" ) @@ -47,7 +49,8 @@ func (j *TestJob) ID() ids.ID { return j.IDF() } if j.CantID && j.T != nil { - j.T.Fatalf("Unexpectedly called ID") + require := require.New(j.T) + require.FailNow("Unexpectedly called ID") } return ids.ID{} } @@ -57,7 +60,8 @@ func (j *TestJob) MissingDependencies(ctx context.Context) (set.Set[ids.ID], err return j.MissingDependenciesF(ctx) } if j.CantMissingDependencies && j.T != nil { - j.T.Fatalf("Unexpectedly called MissingDependencies") + require := require.New(j.T) + require.FailNow("Unexpectedly called MissingDependencies") } return set.Set[ids.ID]{}, nil } @@ -67,7 +71,8 @@ func (j *TestJob) Execute(ctx context.Context) error { return j.ExecuteF(ctx) } if j.CantExecute && j.T != nil { - j.T.Fatal(errExecute) + require := require.New(j.T) + require.FailNow(errExecute.Error()) } return errExecute } @@ -77,7 +82,8 @@ func (j *TestJob) Bytes() []byte { return j.BytesF() } if j.CantBytes && j.T != nil { - j.T.Fatalf("Unexpectedly called Bytes") + require := require.New(j.T) + require.FailNow("Unexpectedly called Bytes") } return nil } @@ -87,7 +93,8 @@ func (j *TestJob) HasMissingDependencies(ctx context.Context) (bool, error) { return j.HasMissingDependenciesF(ctx) } if j.CantHasMissingDependencies && j.T != nil { - j.T.Fatal(errHasMissingDependencies) + require := require.New(j.T) + require.FailNow(errHasMissingDependencies.Error()) } return false, errHasMissingDependencies } diff --git a/snow/engine/common/queue/test_parser.go 
b/snow/engine/common/queue/test_parser.go index 1e7fa9cdacc..d71f9730396 100644 --- a/snow/engine/common/queue/test_parser.go +++ b/snow/engine/common/queue/test_parser.go @@ -7,6 +7,8 @@ import ( "context" "errors" "testing" + + "github.com/stretchr/testify/require" ) var errParse = errors.New("unexpectedly called Parse") @@ -29,7 +31,8 @@ func (p *TestParser) Parse(ctx context.Context, b []byte) (Job, error) { return p.ParseF(ctx, b) } if p.CantParse && p.T != nil { - p.T.Fatal(errParse) + require := require.New(p.T) + require.FailNow(errParse.Error()) } return nil, errParse } diff --git a/snow/engine/common/requests_test.go b/snow/engine/common/requests_test.go index 02213469c95..2e3bf2d5a6a 100644 --- a/snow/engine/common/requests_test.go +++ b/snow/engine/common/requests_test.go @@ -12,79 +12,61 @@ import ( ) func TestRequests(t *testing.T) { + require := require.New(t) + req := Requests{} - length := req.Len() - require.Zero(t, length, "should have had no outstanding requests") + require.Empty(req) _, removed := req.Remove(ids.EmptyNodeID, 0) - require.False(t, removed, "shouldn't have removed the request") + require.False(removed) removed = req.RemoveAny(ids.Empty) - require.False(t, removed, "shouldn't have removed the request") - - constains := req.Contains(ids.Empty) - require.False(t, constains, "shouldn't contain this request") + require.False(removed) + require.False(req.Contains(ids.Empty)) req.Add(ids.EmptyNodeID, 0, ids.Empty) - - length = req.Len() - require.Equal(t, 1, length, "should have had one outstanding request") + require.Equal(1, req.Len()) _, removed = req.Remove(ids.EmptyNodeID, 1) - require.False(t, removed, "shouldn't have removed the request") + require.False(removed) _, removed = req.Remove(ids.NodeID{1}, 0) - require.False(t, removed, "shouldn't have removed the request") + require.False(removed) - constains = req.Contains(ids.Empty) - require.True(t, constains, "should contain this request") - - length = req.Len() - require.Equal(t, 1, length, "should have had one outstanding request") + require.True(req.Contains(ids.Empty)) + require.Equal(1, req.Len()) req.Add(ids.EmptyNodeID, 10, ids.Empty.Prefix(0)) - - length = req.Len() - require.Equal(t, 2, length, "should have had two outstanding requests") + require.Equal(2, req.Len()) _, removed = req.Remove(ids.EmptyNodeID, 1) - require.False(t, removed, "shouldn't have removed the request") + require.False(removed) _, removed = req.Remove(ids.NodeID{1}, 0) - require.False(t, removed, "shouldn't have removed the request") + require.False(removed) - constains = req.Contains(ids.Empty) - require.True(t, constains, "should contain this request") - - length = req.Len() - require.Equal(t, 2, length, "should have had two outstanding requests") + require.True(req.Contains(ids.Empty)) + require.Equal(2, req.Len()) removedID, removed := req.Remove(ids.EmptyNodeID, 0) - require.Equal(t, ids.Empty, removedID, "should have removed the requested ID") - require.True(t, removed, "should have removed the request") + require.True(removed) + require.Equal(ids.Empty, removedID) removedID, removed = req.Remove(ids.EmptyNodeID, 10) - require.Equal(t, ids.Empty.Prefix(0), removedID, "should have removed the requested ID") - require.True(t, removed, "should have removed the request") + require.True(removed) + require.Equal(ids.Empty.Prefix(0), removedID) - length = req.Len() - require.Zero(t, length, "should have had no outstanding requests") + require.Zero(req.Len()) req.Add(ids.EmptyNodeID, 0, ids.Empty) - - length = req.Len() - 
require.Equal(t, 1, length, "should have had one outstanding request") + require.Equal(1, req.Len()) removed = req.RemoveAny(ids.Empty) - require.True(t, removed, "should have removed the request") - - length = req.Len() - require.Zero(t, length, "should have had no outstanding requests") + require.True(removed) + require.Zero(req.Len()) removed = req.RemoveAny(ids.Empty) - require.False(t, removed, "shouldn't have removed the request") - - length = req.Len() - require.Zero(t, length, "should have had no outstanding requests") + require.False(removed) + require.Zero(req.Len()) } diff --git a/snow/engine/common/test_bootstrap_tracker.go b/snow/engine/common/test_bootstrap_tracker.go index 5c5ec4d7ebc..3e6670cb40b 100644 --- a/snow/engine/common/test_bootstrap_tracker.go +++ b/snow/engine/common/test_bootstrap_tracker.go @@ -6,6 +6,8 @@ package common import ( "testing" + "github.com/stretchr/testify/require" + "github.com/ava-labs/avalanchego/ids" ) @@ -36,7 +38,8 @@ func (s *BootstrapTrackerTest) IsBootstrapped() bool { return s.IsBootstrappedF() } if s.CantIsBootstrapped && s.T != nil { - s.T.Fatalf("Unexpectedly called IsBootstrapped") + require := require.New(s.T) + require.FailNow("Unexpectedly called IsBootstrapped") } return false } @@ -48,7 +51,8 @@ func (s *BootstrapTrackerTest) Bootstrapped(chainID ids.ID) { if s.BootstrappedF != nil { s.BootstrappedF(chainID) } else if s.CantBootstrapped && s.T != nil { - s.T.Fatalf("Unexpectedly called Bootstrapped") + require := require.New(s.T) + require.FailNow("Unexpectedly called Bootstrapped") } } @@ -56,7 +60,8 @@ func (s *BootstrapTrackerTest) OnBootstrapCompleted() chan struct{} { if s.OnBootstrapCompletedF != nil { return s.OnBootstrapCompletedF() } else if s.CantOnBootstrapCompleted && s.T != nil { - s.T.Fatalf("Unexpectedly called OnBootstrapCompleted") + require := require.New(s.T) + require.FailNow("Unexpectedly called OnBootstrapCompleted") } return nil } diff --git a/snow/engine/common/test_bootstrapable.go b/snow/engine/common/test_bootstrapable.go index ddc67b48b17..c9829ab793d 100644 --- a/snow/engine/common/test_bootstrapable.go +++ b/snow/engine/common/test_bootstrapable.go @@ -8,6 +8,8 @@ import ( "errors" "testing" + "github.com/stretchr/testify/require" + "github.com/ava-labs/avalanchego/ids" ) @@ -36,23 +38,19 @@ func (b *BootstrapableTest) Default(cant bool) { func (b *BootstrapableTest) Clear() error { if b.ClearF != nil { return b.ClearF() - } else if b.CantClear { - if b.T != nil { - b.T.Fatalf("Unexpectedly called Clear") - } - return errClear + } else if b.CantClear && b.T != nil { + require := require.New(b.T) + require.FailNow(errClear.Error()) } - return nil + return errClear } func (b *BootstrapableTest) ForceAccepted(ctx context.Context, containerIDs []ids.ID) error { if b.ForceAcceptedF != nil { return b.ForceAcceptedF(ctx, containerIDs) - } else if b.CantForceAccepted { - if b.T != nil { - b.T.Fatalf("Unexpectedly called ForceAccepted") - } - return errForceAccepted + } else if b.CantForceAccepted && b.T != nil { + require := require.New(b.T) + require.FailNow(errForceAccepted.Error()) } - return nil + return errForceAccepted } diff --git a/snow/engine/common/test_engine.go b/snow/engine/common/test_engine.go index 6645efad33e..e9f51b4652b 100644 --- a/snow/engine/common/test_engine.go +++ b/snow/engine/common/test_engine.go @@ -9,6 +9,8 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" 
"github.com/ava-labs/avalanchego/version" @@ -187,7 +189,8 @@ func (e *EngineTest) Start(ctx context.Context, startReqID uint32) error { return nil } if e.T != nil { - e.T.Fatalf("Unexpectedly called Start") + require := require.New(e.T) + require.FailNow(errStart.Error()) } return errStart } @@ -200,7 +203,8 @@ func (e *EngineTest) Context() *snow.ConsensusContext { return nil } if e.T != nil { - e.T.Fatalf("Unexpectedly called Context") + require := require.New(e.T) + require.FailNow("Unexpectedly called Context") } return nil } @@ -213,7 +217,8 @@ func (e *EngineTest) Timeout(ctx context.Context) error { return nil } if e.T != nil { - e.T.Fatal(errTimeout) + require := require.New(e.T) + require.FailNow(errTimeout.Error()) } return errTimeout } @@ -226,7 +231,8 @@ func (e *EngineTest) Gossip(ctx context.Context) error { return nil } if e.T != nil { - e.T.Fatal(errGossip) + require := require.New(e.T) + require.FailNow(errGossip.Error()) } return errGossip } @@ -236,8 +242,12 @@ func (e *EngineTest) Halt(ctx context.Context) { e.HaltF(ctx) return } - if e.CantHalt && e.T != nil { - e.T.Fatalf("Unexpectedly called Halt") + if !e.CantHalt { + return + } + if e.T != nil { + require := require.New(e.T) + require.FailNow("Unexpectedly called Halt") } } @@ -249,7 +259,8 @@ func (e *EngineTest) Shutdown(ctx context.Context) error { return nil } if e.T != nil { - e.T.Fatal(errShutdown) + require := require.New(e.T) + require.FailNow(errShutdown.Error()) } return errShutdown } @@ -262,7 +273,8 @@ func (e *EngineTest) Notify(ctx context.Context, msg Message) error { return nil } if e.T != nil { - e.T.Fatal(errNotify) + require := require.New(e.T) + require.FailNow(errNotify.Error()) } return errNotify } @@ -275,7 +287,8 @@ func (e *EngineTest) GetStateSummaryFrontier(ctx context.Context, validatorID id return nil } if e.T != nil { - e.T.Fatalf("Unexpectedly called GetStateSummaryFrontier") + require := require.New(e.T) + require.FailNow(errGetStateSummaryFrontier.Error()) } return errGetStateSummaryFrontier } @@ -288,7 +301,8 @@ func (e *EngineTest) StateSummaryFrontier(ctx context.Context, validatorID ids.N return nil } if e.T != nil { - e.T.Fatalf("Unexpectedly called CantStateSummaryFrontier") + require := require.New(e.T) + require.FailNow(errStateSummaryFrontier.Error()) } return errStateSummaryFrontier } @@ -301,7 +315,8 @@ func (e *EngineTest) GetStateSummaryFrontierFailed(ctx context.Context, validato return nil } if e.T != nil { - e.T.Fatalf("Unexpectedly called GetStateSummaryFrontierFailed") + require := require.New(e.T) + require.FailNow(errGetStateSummaryFrontierFailed.Error()) } return errGetStateSummaryFrontierFailed } @@ -314,7 +329,8 @@ func (e *EngineTest) GetAcceptedStateSummary(ctx context.Context, validatorID id return nil } if e.T != nil { - e.T.Fatalf("Unexpectedly called GetAcceptedStateSummary") + require := require.New(e.T) + require.FailNow(errGetAcceptedStateSummary.Error()) } return errGetAcceptedStateSummary } @@ -327,7 +343,8 @@ func (e *EngineTest) AcceptedStateSummary(ctx context.Context, validatorID ids.N return nil } if e.T != nil { - e.T.Fatalf("Unexpectedly called AcceptedStateSummary") + require := require.New(e.T) + require.FailNow(errAcceptedStateSummary.Error()) } return errAcceptedStateSummary } @@ -340,7 +357,8 @@ func (e *EngineTest) GetAcceptedStateSummaryFailed(ctx context.Context, validato return nil } if e.T != nil { - e.T.Fatalf("Unexpectedly called GetAcceptedStateSummaryFailed") + require := require.New(e.T) + 
require.FailNow(errGetAcceptedStateSummaryFailed.Error()) } return errGetAcceptedStateSummaryFailed } @@ -353,7 +371,8 @@ func (e *EngineTest) GetAcceptedFrontier(ctx context.Context, nodeID ids.NodeID, return nil } if e.T != nil { - e.T.Fatal(errGetAcceptedFrontier) + require := require.New(e.T) + require.FailNow(errGetAcceptedFrontier.Error()) } return errGetAcceptedFrontier } @@ -366,7 +385,8 @@ func (e *EngineTest) GetAcceptedFrontierFailed(ctx context.Context, nodeID ids.N return nil } if e.T != nil { - e.T.Fatal(errGetAcceptedFrontierFailed) + require := require.New(e.T) + require.FailNow(errGetAcceptedFrontierFailed.Error()) } return errGetAcceptedFrontierFailed } @@ -379,7 +399,8 @@ func (e *EngineTest) AcceptedFrontier(ctx context.Context, nodeID ids.NodeID, re return nil } if e.T != nil { - e.T.Fatal(errAcceptedFrontier) + require := require.New(e.T) + require.FailNow(errAcceptedFrontier.Error()) } return errAcceptedFrontier } @@ -392,7 +413,8 @@ func (e *EngineTest) GetAccepted(ctx context.Context, nodeID ids.NodeID, request return nil } if e.T != nil { - e.T.Fatal(errGetAccepted) + require := require.New(e.T) + require.FailNow(errGetAccepted.Error()) } return errGetAccepted } @@ -405,7 +427,8 @@ func (e *EngineTest) GetAcceptedFailed(ctx context.Context, nodeID ids.NodeID, r return nil } if e.T != nil { - e.T.Fatal(errGetAcceptedFailed) + require := require.New(e.T) + require.FailNow(errGetAcceptedFailed.Error()) } return errGetAcceptedFailed } @@ -418,7 +441,8 @@ func (e *EngineTest) Accepted(ctx context.Context, nodeID ids.NodeID, requestID return nil } if e.T != nil { - e.T.Fatal(errAccepted) + require := require.New(e.T) + require.FailNow(errAccepted.Error()) } return errAccepted } @@ -431,7 +455,8 @@ func (e *EngineTest) Get(ctx context.Context, nodeID ids.NodeID, requestID uint3 return nil } if e.T != nil { - e.T.Fatal(errGet) + require := require.New(e.T) + require.FailNow(errGet.Error()) } return errGet } @@ -444,7 +469,8 @@ func (e *EngineTest) GetAncestors(ctx context.Context, nodeID ids.NodeID, reques return nil } if e.T != nil { - e.T.Fatal(errGetAncestors) + require := require.New(e.T) + require.FailNow(errGetAncestors.Error()) } return errGetAncestors } @@ -457,7 +483,8 @@ func (e *EngineTest) GetFailed(ctx context.Context, nodeID ids.NodeID, requestID return nil } if e.T != nil { - e.T.Fatal(errGetFailed) + require := require.New(e.T) + require.FailNow(errGetFailed.Error()) } return errGetFailed } @@ -466,11 +493,12 @@ func (e *EngineTest) GetAncestorsFailed(ctx context.Context, nodeID ids.NodeID, if e.GetAncestorsFailedF != nil { return e.GetAncestorsFailedF(ctx, nodeID, requestID) } - if e.CantGetAncestorsFailed { + if !e.CantGetAncestorsFailed { return nil } if e.T != nil { - e.T.Fatal(errGetAncestorsFailed) + require := require.New(e.T) + require.FailNow(errGetAncestorsFailed.Error()) } return errGetAncestorsFailed } @@ -483,7 +511,8 @@ func (e *EngineTest) Put(ctx context.Context, nodeID ids.NodeID, requestID uint3 return nil } if e.T != nil { - e.T.Fatal(errPut) + require := require.New(e.T) + require.FailNow(errPut.Error()) } return errPut } @@ -496,7 +525,8 @@ func (e *EngineTest) Ancestors(ctx context.Context, nodeID ids.NodeID, requestID return nil } if e.T != nil { - e.T.Fatal(errAncestors) + require := require.New(e.T) + require.FailNow(errAncestors.Error()) } return errAncestors } @@ -509,7 +539,8 @@ func (e *EngineTest) PushQuery(ctx context.Context, nodeID ids.NodeID, requestID return nil } if e.T != nil { - e.T.Fatal(errPushQuery) + require := 
require.New(e.T) + require.FailNow(errPushQuery.Error()) } return errPushQuery } @@ -522,7 +553,8 @@ func (e *EngineTest) PullQuery(ctx context.Context, nodeID ids.NodeID, requestID return nil } if e.T != nil { - e.T.Fatal(errPullQuery) + require := require.New(e.T) + require.FailNow(errPullQuery.Error()) } return errPullQuery } @@ -535,7 +567,8 @@ func (e *EngineTest) QueryFailed(ctx context.Context, nodeID ids.NodeID, request return nil } if e.T != nil { - e.T.Fatal(errQueryFailed) + require := require.New(e.T) + require.FailNow(errQueryFailed.Error()) } return errQueryFailed } @@ -548,7 +581,8 @@ func (e *EngineTest) CrossChainAppRequest(ctx context.Context, chainID ids.ID, r return nil } if e.T != nil { - e.T.Fatal(errCrossChainAppRequest) + require := require.New(e.T) + require.FailNow(errCrossChainAppRequest.Error()) } return errCrossChainAppRequest } @@ -561,7 +595,8 @@ func (e *EngineTest) CrossChainAppRequestFailed(ctx context.Context, chainID ids return nil } if e.T != nil { - e.T.Fatal(errCrossChainAppRequestFailed) + require := require.New(e.T) + require.FailNow(errCrossChainAppRequestFailed.Error()) } return errCrossChainAppRequestFailed } @@ -574,7 +609,8 @@ func (e *EngineTest) CrossChainAppResponse(ctx context.Context, chainID ids.ID, return nil } if e.T != nil { - e.T.Fatal(errCrossChainAppResponse) + require := require.New(e.T) + require.FailNow(errCrossChainAppResponse.Error()) } return errCrossChainAppResponse } @@ -587,7 +623,8 @@ func (e *EngineTest) AppRequest(ctx context.Context, nodeID ids.NodeID, requestI return nil } if e.T != nil { - e.T.Fatal(errAppRequest) + require := require.New(e.T) + require.FailNow(errAppRequest.Error()) } return errAppRequest } @@ -600,7 +637,8 @@ func (e *EngineTest) AppResponse(ctx context.Context, nodeID ids.NodeID, request return nil } if e.T != nil { - e.T.Fatal(errAppResponse) + require := require.New(e.T) + require.FailNow(errAppResponse.Error()) } return errAppResponse } @@ -613,7 +651,8 @@ func (e *EngineTest) AppRequestFailed(ctx context.Context, nodeID ids.NodeID, re return nil } if e.T != nil { - e.T.Fatal(errAppRequestFailed) + require := require.New(e.T) + require.FailNow(errAppRequestFailed.Error()) } return errAppRequestFailed } @@ -626,7 +665,8 @@ func (e *EngineTest) AppGossip(ctx context.Context, nodeID ids.NodeID, msg []byt return nil } if e.T != nil { - e.T.Fatal(errAppGossip) + require := require.New(e.T) + require.FailNow(errAppGossip.Error()) } return errAppGossip } @@ -639,7 +679,8 @@ func (e *EngineTest) Chits(ctx context.Context, nodeID ids.NodeID, requestID uin return nil } if e.T != nil { - e.T.Fatal(errChits) + require := require.New(e.T) + require.FailNow(errChits.Error()) } return errChits } @@ -652,7 +693,8 @@ func (e *EngineTest) Connected(ctx context.Context, nodeID ids.NodeID, nodeVersi return nil } if e.T != nil { - e.T.Fatal(errConnected) + require := require.New(e.T) + require.FailNow(errConnected.Error()) } return errConnected } @@ -665,7 +707,8 @@ func (e *EngineTest) Disconnected(ctx context.Context, nodeID ids.NodeID) error return nil } if e.T != nil { - e.T.Fatal(errDisconnected) + require := require.New(e.T) + require.FailNow(errDisconnected.Error()) } return errDisconnected } @@ -678,7 +721,8 @@ func (e *EngineTest) HealthCheck(ctx context.Context) (interface{}, error) { return nil, nil } if e.T != nil { - e.T.Fatal(errHealthCheck) + require := require.New(e.T) + require.FailNow(errHealthCheck.Error()) } return nil, errHealthCheck } @@ -687,8 +731,12 @@ func (e *EngineTest) GetVM() VM { if 
e.GetVMF != nil { return e.GetVMF() } - if e.CantGetVM && e.T != nil { - e.T.Fatalf("Unexpectedly called GetVM") + if !e.CantGetVM { + return nil + } + if e.T != nil { + require := require.New(e.T) + require.FailNow("Unexpectedly called GetVM") } return nil } diff --git a/snow/engine/common/test_sender.go b/snow/engine/common/test_sender.go index 0a32dcc00ea..79fda55cc9e 100644 --- a/snow/engine/common/test_sender.go +++ b/snow/engine/common/test_sender.go @@ -8,6 +8,8 @@ import ( "errors" "testing" + "github.com/stretchr/testify/require" + "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/utils/set" @@ -101,7 +103,8 @@ func (s *SenderTest) Accept(ctx *snow.ConsensusContext, containerID ids.ID, cont return nil } if s.T != nil { - s.T.Fatal(errAccept) + require := require.New(s.T) + require.FailNow(errAccept.Error()) } return errAccept } @@ -113,7 +116,8 @@ func (s *SenderTest) SendGetStateSummaryFrontier(ctx context.Context, validatorI if s.SendGetStateSummaryFrontierF != nil { s.SendGetStateSummaryFrontierF(ctx, validatorIDs, requestID) } else if s.CantSendGetStateSummaryFrontier && s.T != nil { - s.T.Fatalf("Unexpectedly called SendGetStateSummaryFrontier") + require := require.New(s.T) + require.FailNow("Unexpectedly called SendGetStateSummaryFrontier") } } @@ -124,7 +128,8 @@ func (s *SenderTest) SendStateSummaryFrontier(ctx context.Context, validatorID i if s.SendStateSummaryFrontierF != nil { s.SendStateSummaryFrontierF(ctx, validatorID, requestID, summary) } else if s.CantSendStateSummaryFrontier && s.T != nil { - s.T.Fatalf("Unexpectedly called SendStateSummaryFrontier") + require := require.New(s.T) + require.FailNow("Unexpectedly called SendStateSummaryFrontier") } } @@ -135,7 +140,8 @@ func (s *SenderTest) SendGetAcceptedStateSummary(ctx context.Context, nodeIDs se if s.SendGetAcceptedStateSummaryF != nil { s.SendGetAcceptedStateSummaryF(ctx, nodeIDs, requestID, heights) } else if s.CantSendGetAcceptedStateSummary && s.T != nil { - s.T.Fatalf("Unexpectedly called SendGetAcceptedStateSummaryF") + require := require.New(s.T) + require.FailNow("Unexpectedly called SendGetAcceptedStateSummaryF") } } @@ -146,7 +152,8 @@ func (s *SenderTest) SendAcceptedStateSummary(ctx context.Context, validatorID i if s.SendAcceptedStateSummaryF != nil { s.SendAcceptedStateSummaryF(ctx, validatorID, requestID, summaryIDs) } else if s.CantSendAcceptedStateSummary && s.T != nil { - s.T.Fatalf("Unexpectedly called SendAcceptedStateSummary") + require := require.New(s.T) + require.FailNow("Unexpectedly called SendAcceptedStateSummary") } } @@ -157,7 +164,8 @@ func (s *SenderTest) SendGetAcceptedFrontier(ctx context.Context, validatorIDs s if s.SendGetAcceptedFrontierF != nil { s.SendGetAcceptedFrontierF(ctx, validatorIDs, requestID) } else if s.CantSendGetAcceptedFrontier && s.T != nil { - s.T.Fatalf("Unexpectedly called SendGetAcceptedFrontier") + require := require.New(s.T) + require.FailNow("Unexpectedly called SendGetAcceptedFrontier") } } @@ -168,7 +176,8 @@ func (s *SenderTest) SendAcceptedFrontier(ctx context.Context, validatorID ids.N if s.SendAcceptedFrontierF != nil { s.SendAcceptedFrontierF(ctx, validatorID, requestID, containerIDs) } else if s.CantSendAcceptedFrontier && s.T != nil { - s.T.Fatalf("Unexpectedly called SendAcceptedFrontier") + require := require.New(s.T) + require.FailNow("Unexpectedly called SendAcceptedFrontier") } } @@ -179,7 +188,8 @@ func (s *SenderTest) SendGetAccepted(ctx context.Context, nodeIDs set.Set[ids.No 
if s.SendGetAcceptedF != nil { s.SendGetAcceptedF(ctx, nodeIDs, requestID, containerIDs) } else if s.CantSendGetAccepted && s.T != nil { - s.T.Fatalf("Unexpectedly called SendGetAccepted") + require := require.New(s.T) + require.FailNow("Unexpectedly called SendGetAccepted") } } @@ -190,7 +200,8 @@ func (s *SenderTest) SendAccepted(ctx context.Context, validatorID ids.NodeID, r if s.SendAcceptedF != nil { s.SendAcceptedF(ctx, validatorID, requestID, containerIDs) } else if s.CantSendAccepted && s.T != nil { - s.T.Fatalf("Unexpectedly called SendAccepted") + require := require.New(s.T) + require.FailNow("Unexpectedly called SendAccepted") } } @@ -201,7 +212,8 @@ func (s *SenderTest) SendGet(ctx context.Context, vdr ids.NodeID, requestID uint if s.SendGetF != nil { s.SendGetF(ctx, vdr, requestID, vtxID) } else if s.CantSendGet && s.T != nil { - s.T.Fatalf("Unexpectedly called SendGet") + require := require.New(s.T) + require.FailNow("Unexpectedly called SendGet") } } @@ -212,7 +224,8 @@ func (s *SenderTest) SendGetAncestors(ctx context.Context, validatorID ids.NodeI if s.SendGetAncestorsF != nil { s.SendGetAncestorsF(ctx, validatorID, requestID, vtxID) } else if s.CantSendGetAncestors && s.T != nil { - s.T.Fatalf("Unexpectedly called SendCantSendGetAncestors") + require := require.New(s.T) + require.FailNow("Unexpectedly called SendGetAncestors") } } @@ -223,7 +236,8 @@ func (s *SenderTest) SendPut(ctx context.Context, vdr ids.NodeID, requestID uint if s.SendPutF != nil { s.SendPutF(ctx, vdr, requestID, vtx) } else if s.CantSendPut && s.T != nil { - s.T.Fatalf("Unexpectedly called SendPut") + require := require.New(s.T) + require.FailNow("Unexpectedly called SendPut") } } @@ -234,7 +248,8 @@ func (s *SenderTest) SendAncestors(ctx context.Context, vdr ids.NodeID, requestI if s.SendAncestorsF != nil { s.SendAncestorsF(ctx, vdr, requestID, vtxs) } else if s.CantSendAncestors && s.T != nil { - s.T.Fatalf("Unexpectedly called SendAncestors") + require := require.New(s.T) + require.FailNow("Unexpectedly called SendAncestors") } } @@ -245,7 +260,8 @@ func (s *SenderTest) SendPushQuery(ctx context.Context, vdrs set.Set[ids.NodeID] if s.SendPushQueryF != nil { s.SendPushQueryF(ctx, vdrs, requestID, vtx) } else if s.CantSendPushQuery && s.T != nil { - s.T.Fatalf("Unexpectedly called SendPushQuery") + require := require.New(s.T) + require.FailNow("Unexpectedly called SendPushQuery") } } @@ -256,7 +272,8 @@ func (s *SenderTest) SendPullQuery(ctx context.Context, vdrs set.Set[ids.NodeID] if s.SendPullQueryF != nil { s.SendPullQueryF(ctx, vdrs, requestID, vtxID) } else if s.CantSendPullQuery && s.T != nil { - s.T.Fatalf("Unexpectedly called SendPullQuery") + require := require.New(s.T) + require.FailNow("Unexpectedly called SendPullQuery") } } @@ -267,7 +284,8 @@ func (s *SenderTest) SendChits(ctx context.Context, vdr ids.NodeID, requestID ui if s.SendChitsF != nil { s.SendChitsF(ctx, vdr, requestID, votes, accepted) } else if s.CantSendChits && s.T != nil { - s.T.Fatalf("Unexpectedly called SendChits") + require := require.New(s.T) + require.FailNow("Unexpectedly called SendChits") } } @@ -278,7 +296,8 @@ func (s *SenderTest) SendGossip(ctx context.Context, container []byte) { if s.SendGossipF != nil { s.SendGossipF(ctx, container) } else if s.CantSendGossip && s.T != nil { - s.T.Fatalf("Unexpectedly called SendGossip") + require := require.New(s.T) + require.FailNow("Unexpectedly called SendGossip") } } @@ -286,7 +305,8 @@ func (s *SenderTest) SendCrossChainAppRequest(ctx context.Context,
chainID ids.I if s.SendCrossChainAppRequestF != nil { s.SendCrossChainAppRequestF(ctx, chainID, requestID, appRequestBytes) } else if s.CantSendCrossChainAppRequest && s.T != nil { - s.T.Fatal("Unexpectedly called SendCrossChainAppRequest") + require := require.New(s.T) + require.FailNow("Unexpectedly called SendCrossChainAppRequest") } return nil } @@ -295,7 +315,8 @@ func (s *SenderTest) SendCrossChainAppResponse(ctx context.Context, chainID ids. if s.SendCrossChainAppResponseF != nil { s.SendCrossChainAppResponseF(ctx, chainID, requestID, appResponseBytes) } else if s.CantSendCrossChainAppResponse && s.T != nil { - s.T.Fatal("Unexpectedly called SendCrossChainAppResponse") + require := require.New(s.T) + require.FailNow("Unexpectedly called SendCrossChainAppResponse") } return nil } @@ -308,7 +329,8 @@ func (s *SenderTest) SendAppRequest(ctx context.Context, nodeIDs set.Set[ids.Nod case s.SendAppRequestF != nil: return s.SendAppRequestF(ctx, nodeIDs, requestID, appRequestBytes) case s.CantSendAppRequest && s.T != nil: - s.T.Fatal(errSendAppRequest) + require := require.New(s.T) + require.FailNow(errSendAppRequest.Error()) } return errSendAppRequest } @@ -321,7 +343,8 @@ func (s *SenderTest) SendAppResponse(ctx context.Context, nodeID ids.NodeID, req case s.SendAppResponseF != nil: return s.SendAppResponseF(ctx, nodeID, requestID, appResponseBytes) case s.CantSendAppResponse && s.T != nil: - s.T.Fatal(errSendAppResponse) + require := require.New(s.T) + require.FailNow(errSendAppResponse.Error()) } return errSendAppResponse } @@ -334,7 +357,8 @@ func (s *SenderTest) SendAppGossip(ctx context.Context, appGossipBytes []byte) e case s.SendAppGossipF != nil: return s.SendAppGossipF(ctx, appGossipBytes) case s.CantSendAppGossip && s.T != nil: - s.T.Fatal(errSendAppGossip) + require := require.New(s.T) + require.FailNow(errSendAppGossip.Error()) } return errSendAppGossip } @@ -347,7 +371,8 @@ func (s *SenderTest) SendAppGossipSpecific(ctx context.Context, nodeIDs set.Set[ case s.SendAppGossipSpecificF != nil: return s.SendAppGossipSpecificF(ctx, nodeIDs, appGossipBytes) case s.CantSendAppGossipSpecific && s.T != nil: - s.T.Fatal(errSendAppGossipSpecific) + require := require.New(s.T) + require.FailNow(errSendAppGossipSpecific.Error()) } return errSendAppGossipSpecific } diff --git a/snow/engine/common/test_timer.go b/snow/engine/common/test_timer.go index a563e65c575..bca461caf02 100644 --- a/snow/engine/common/test_timer.go +++ b/snow/engine/common/test_timer.go @@ -6,6 +6,8 @@ package common import ( "testing" "time" + + "github.com/stretchr/testify/require" ) var _ Timer = (*TimerTest)(nil) @@ -28,6 +30,7 @@ func (t *TimerTest) RegisterTimeout(delay time.Duration) { if t.RegisterTimeoutF != nil { t.RegisterTimeoutF(delay) } else if t.CantRegisterTimout && t.T != nil { - t.T.Fatalf("Unexpectedly called RegisterTimeout") + require := require.New(t.T) + require.FailNow("Unexpectedly called RegisterTimeout") } } diff --git a/snow/engine/common/test_vm.go b/snow/engine/common/test_vm.go index bbf10d4d0ec..aeeb3c0b845 100644 --- a/snow/engine/common/test_vm.go +++ b/snow/engine/common/test_vm.go @@ -9,11 +9,12 @@ import ( "testing" "time" - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/version" + "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/database/manager" + "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/version" ) var ( @@ -109,7 +110,8 @@ func (vm *TestVM) Initialize( ) } if 
vm.CantInitialize && vm.T != nil { - vm.T.Fatal(errInitialize) + require := require.New(vm.T) + require.FailNow(errInitialize.Error()) } return errInitialize } @@ -120,7 +122,8 @@ func (vm *TestVM) SetState(ctx context.Context, state snow.State) error { } if vm.CantSetState { if vm.T != nil { - vm.T.Fatal(errSetState) + require := require.New(vm.T) + require.FailNow(errSetState.Error()) } return errSetState } @@ -133,7 +136,8 @@ func (vm *TestVM) Shutdown(ctx context.Context) error { } if vm.CantShutdown { if vm.T != nil { - vm.T.Fatal(errShutdown) + require := require.New(vm.T) + require.FailNow(errShutdown.Error()) } return errShutdown } @@ -145,7 +149,8 @@ func (vm *TestVM) CreateHandlers(ctx context.Context) (map[string]*HTTPHandler, return vm.CreateHandlersF(ctx) } if vm.CantCreateHandlers && vm.T != nil { - vm.T.Fatal(errCreateHandlers) + require := require.New(vm.T) + require.FailNow(errCreateHandlers.Error()) } return nil, nil } @@ -155,7 +160,8 @@ func (vm *TestVM) CreateStaticHandlers(ctx context.Context) (map[string]*HTTPHan return vm.CreateStaticHandlersF(ctx) } if vm.CantCreateStaticHandlers && vm.T != nil { - vm.T.Fatal(errCreateStaticHandlers) + require := require.New(vm.T) + require.FailNow(errCreateStaticHandlers.Error()) } return nil, nil } @@ -165,7 +171,8 @@ func (vm *TestVM) HealthCheck(ctx context.Context) (interface{}, error) { return vm.HealthCheckF(ctx) } if vm.CantHealthCheck && vm.T != nil { - vm.T.Fatal(errHealthCheck) + require := require.New(vm.T) + require.FailNow(errHealthCheck.Error()) } return nil, errHealthCheck } @@ -178,7 +185,8 @@ func (vm *TestVM) AppRequest(ctx context.Context, nodeID ids.NodeID, requestID u return nil } if vm.T != nil { - vm.T.Fatal(errAppRequest) + require := require.New(vm.T) + require.FailNow(errAppRequest.Error()) } return errAppRequest } @@ -191,7 +199,8 @@ func (vm *TestVM) AppRequestFailed(ctx context.Context, nodeID ids.NodeID, reque return nil } if vm.T != nil { - vm.T.Fatal(errAppRequestFailed) + require := require.New(vm.T) + require.FailNow(errAppRequestFailed.Error()) } return errAppRequestFailed } @@ -204,7 +213,8 @@ func (vm *TestVM) AppResponse(ctx context.Context, nodeID ids.NodeID, requestID return nil } if vm.T != nil { - vm.T.Fatal(errAppResponse) + require := require.New(vm.T) + require.FailNow(errAppResponse.Error()) } return errAppResponse } @@ -217,7 +227,8 @@ func (vm *TestVM) AppGossip(ctx context.Context, nodeID ids.NodeID, msg []byte) return nil } if vm.T != nil { - vm.T.Fatal(errAppGossip) + require := require.New(vm.T) + require.FailNow(errAppGossip.Error()) } return errAppGossip } @@ -230,7 +241,8 @@ func (vm *TestVM) CrossChainAppRequest(ctx context.Context, chainID ids.ID, requ return nil } if vm.T != nil { - vm.T.Fatal(errCrossChainAppRequest) + require := require.New(vm.T) + require.FailNow(errCrossChainAppRequest.Error()) } return errCrossChainAppRequest } @@ -243,7 +255,8 @@ func (vm *TestVM) CrossChainAppRequestFailed(ctx context.Context, chainID ids.ID return nil } if vm.T != nil { - vm.T.Fatal(errCrossChainAppRequestFailed) + require := require.New(vm.T) + require.FailNow(errCrossChainAppRequestFailed.Error()) } return errCrossChainAppRequestFailed } @@ -256,7 +269,8 @@ func (vm *TestVM) CrossChainAppResponse(ctx context.Context, chainID ids.ID, req return nil } if vm.T != nil { - vm.T.Fatal(errCrossChainAppResponse) + require := require.New(vm.T) + require.FailNow(errCrossChainAppResponse.Error()) } return errCrossChainAppResponse } @@ -266,7 +280,8 @@ func (vm *TestVM) Connected(ctx 
context.Context, id ids.NodeID, nodeVersion *ver return vm.ConnectedF(ctx, id, nodeVersion) } if vm.CantConnected && vm.T != nil { - vm.T.Fatal(errConnected) + require := require.New(vm.T) + require.FailNow(errConnected.Error()) } return nil } @@ -276,7 +291,8 @@ func (vm *TestVM) Disconnected(ctx context.Context, id ids.NodeID) error { return vm.DisconnectedF(ctx, id) } if vm.CantDisconnected && vm.T != nil { - vm.T.Fatal(errDisconnected) + require := require.New(vm.T) + require.FailNow(errDisconnected.Error()) } return nil } @@ -286,7 +302,8 @@ func (vm *TestVM) Version(ctx context.Context) (string, error) { return vm.VersionF(ctx) } if vm.CantVersion && vm.T != nil { - vm.T.Fatal(errVersion) + require := require.New(vm.T) + require.FailNow(errVersion.Error()) } return "", nil } diff --git a/snow/engine/snowman/block/batched_vm_test.go b/snow/engine/snowman/block/batched_vm_test.go index 553490b00cb..6f56830db43 100644 --- a/snow/engine/snowman/block/batched_vm_test.go +++ b/snow/engine/snowman/block/batched_vm_test.go @@ -19,27 +19,31 @@ import ( var errTest = errors.New("non-nil error") func TestGetAncestorsDatabaseNotFound(t *testing.T) { + require := require.New(t) + vm := &TestVM{} someID := ids.GenerateTestID() vm.GetBlockF = func(_ context.Context, id ids.ID) (snowman.Block, error) { - require.Equal(t, someID, id) + require.Equal(someID, id) return nil, database.ErrNotFound } containers, err := GetAncestors(context.Background(), vm, someID, 10, 10, 1*time.Second) - require.NoError(t, err) - require.Empty(t, containers) + require.NoError(err) + require.Empty(containers) } // TestGetAncestorsPropagatesErrors checks errors other than // database.ErrNotFound propagate to caller. func TestGetAncestorsPropagatesErrors(t *testing.T) { + require := require.New(t) + vm := &TestVM{} someID := ids.GenerateTestID() vm.GetBlockF = func(_ context.Context, id ids.ID) (snowman.Block, error) { - require.Equal(t, someID, id) + require.Equal(someID, id) return nil, errTest } containers, err := GetAncestors(context.Background(), vm, someID, 10, 10, 1*time.Second) - require.Nil(t, containers) - require.ErrorIs(t, err, errTest) + require.Nil(containers) + require.ErrorIs(err, errTest) } diff --git a/snow/engine/snowman/block/test_batched_vm.go b/snow/engine/snowman/block/test_batched_vm.go index f5a94bb9c4d..46627c68332 100644 --- a/snow/engine/snowman/block/test_batched_vm.go +++ b/snow/engine/snowman/block/test_batched_vm.go @@ -9,6 +9,8 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/consensus/snowman" ) @@ -63,7 +65,8 @@ func (vm *TestBatchedVM) GetAncestors( ) } if vm.CantGetAncestors && vm.T != nil { - vm.T.Fatal(errGetAncestor) + require := require.New(vm.T) + require.FailNow(errGetAncestor.Error()) } return nil, errGetAncestor } @@ -76,7 +79,8 @@ func (vm *TestBatchedVM) BatchedParseBlock( return vm.BatchedParseBlockF(ctx, blks) } if vm.CantBatchParseBlock && vm.T != nil { - vm.T.Fatal(errBatchedParseBlock) + require := require.New(vm.T) + require.FailNow(errBatchedParseBlock.Error()) } return nil, errBatchedParseBlock } diff --git a/snow/engine/snowman/block/test_height_indexed_vm.go b/snow/engine/snowman/block/test_height_indexed_vm.go index c1587a72996..3d72cf4cc2c 100644 --- a/snow/engine/snowman/block/test_height_indexed_vm.go +++ b/snow/engine/snowman/block/test_height_indexed_vm.go @@ -8,6 +8,8 @@ import ( "errors" "testing" + "github.com/stretchr/testify/require" + 
"github.com/ava-labs/avalanchego/ids" ) @@ -34,7 +36,8 @@ func (vm *TestHeightIndexedVM) VerifyHeightIndex(ctx context.Context) error { return vm.VerifyHeightIndexF(ctx) } if vm.CantVerifyHeightIndex && vm.T != nil { - vm.T.Fatal(errVerifyHeightIndex) + require := require.New(vm.T) + require.FailNow(errVerifyHeightIndex.Error()) } return errVerifyHeightIndex } @@ -44,7 +47,8 @@ func (vm *TestHeightIndexedVM) GetBlockIDAtHeight(ctx context.Context, height ui return vm.GetBlockIDAtHeightF(ctx, height) } if vm.CantGetBlockIDAtHeight && vm.T != nil { - vm.T.Fatal(errGetAncestor) + require := require.New(vm.T) + require.FailNow(errGetAncestor.Error()) } return ids.Empty, errGetBlockIDAtHeight } diff --git a/snow/engine/snowman/block/test_state_summary.go b/snow/engine/snowman/block/test_state_summary.go index 26cd9fccba8..fb37c7b94be 100644 --- a/snow/engine/snowman/block/test_state_summary.go +++ b/snow/engine/snowman/block/test_state_summary.go @@ -8,6 +8,8 @@ import ( "errors" "testing" + "github.com/stretchr/testify/require" + "github.com/ava-labs/avalanchego/ids" ) @@ -44,7 +46,8 @@ func (s *TestStateSummary) Accept(ctx context.Context) (StateSyncMode, error) { return s.AcceptF(ctx) } if s.CantAccept && s.T != nil { - s.T.Fatal(errAccept) + require := require.New(s.T) + require.FailNow(errAccept.Error()) } return StateSyncSkipped, errAccept } diff --git a/snow/engine/snowman/block/test_state_syncable_vm.go b/snow/engine/snowman/block/test_state_syncable_vm.go index 60e179e5e63..ce02dc08a59 100644 --- a/snow/engine/snowman/block/test_state_syncable_vm.go +++ b/snow/engine/snowman/block/test_state_syncable_vm.go @@ -7,6 +7,8 @@ import ( "context" "errors" "testing" + + "github.com/stretchr/testify/require" ) var ( @@ -40,7 +42,8 @@ func (vm *TestStateSyncableVM) StateSyncEnabled(ctx context.Context) (bool, erro return vm.StateSyncEnabledF(ctx) } if vm.CantStateSyncEnabled && vm.T != nil { - vm.T.Fatal(errStateSyncEnabled) + require := require.New(vm.T) + require.FailNow(errStateSyncEnabled.Error()) } return false, errStateSyncEnabled } @@ -50,7 +53,8 @@ func (vm *TestStateSyncableVM) GetOngoingSyncStateSummary(ctx context.Context) ( return vm.GetOngoingSyncStateSummaryF(ctx) } if vm.CantStateSyncGetOngoingSummary && vm.T != nil { - vm.T.Fatal(errStateSyncGetOngoingSummary) + require := require.New(vm.T) + require.FailNow(errStateSyncGetOngoingSummary.Error()) } return nil, errStateSyncGetOngoingSummary } @@ -60,7 +64,8 @@ func (vm *TestStateSyncableVM) GetLastStateSummary(ctx context.Context) (StateSu return vm.GetLastStateSummaryF(ctx) } if vm.CantGetLastStateSummary && vm.T != nil { - vm.T.Fatal(errGetLastStateSummary) + require := require.New(vm.T) + require.FailNow(errGetLastStateSummary.Error()) } return nil, errGetLastStateSummary } @@ -70,7 +75,8 @@ func (vm *TestStateSyncableVM) ParseStateSummary(ctx context.Context, summaryByt return vm.ParseStateSummaryF(ctx, summaryBytes) } if vm.CantParseStateSummary && vm.T != nil { - vm.T.Fatal(errParseStateSummary) + require := require.New(vm.T) + require.FailNow(errParseStateSummary.Error()) } return nil, errParseStateSummary } @@ -80,7 +86,8 @@ func (vm *TestStateSyncableVM) GetStateSummary(ctx context.Context, summaryHeigh return vm.GetStateSummaryF(ctx, summaryHeight) } if vm.CantGetStateSummary && vm.T != nil { - vm.T.Fatal(errGetStateSummary) + require := require.New(vm.T) + require.FailNow(errGetStateSummary.Error()) } return nil, errGetStateSummary } diff --git a/snow/engine/snowman/block/test_vm.go 
b/snow/engine/snowman/block/test_vm.go index b2cce5e2efc..b6ffe4fcaac 100644 --- a/snow/engine/snowman/block/test_vm.go +++ b/snow/engine/snowman/block/test_vm.go @@ -7,6 +7,8 @@ import ( "context" "errors" + "github.com/stretchr/testify/require" + "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/consensus/snowman" "github.com/ava-labs/avalanchego/snow/engine/common" @@ -53,7 +55,8 @@ func (vm *TestVM) BuildBlock(ctx context.Context) (snowman.Block, error) { return vm.BuildBlockF(ctx) } if vm.CantBuildBlock && vm.T != nil { - vm.T.Fatal(errBuildBlock) + require := require.New(vm.T) + require.FailNow(errBuildBlock.Error()) } return nil, errBuildBlock } @@ -63,7 +66,8 @@ func (vm *TestVM) ParseBlock(ctx context.Context, b []byte) (snowman.Block, erro return vm.ParseBlockF(ctx, b) } if vm.CantParseBlock && vm.T != nil { - vm.T.Fatal(errParseBlock) + require := require.New(vm.T) + require.FailNow(errParseBlock.Error()) } return nil, errParseBlock } @@ -73,7 +77,8 @@ func (vm *TestVM) GetBlock(ctx context.Context, id ids.ID) (snowman.Block, error return vm.GetBlockF(ctx, id) } if vm.CantGetBlock && vm.T != nil { - vm.T.Fatal(errGetBlock) + require := require.New(vm.T) + require.FailNow(errGetBlock.Error()) } return nil, errGetBlock } @@ -83,7 +88,8 @@ func (vm *TestVM) SetPreference(ctx context.Context, id ids.ID) error { return vm.SetPreferenceF(ctx, id) } if vm.CantSetPreference && vm.T != nil { - vm.T.Fatalf("Unexpectedly called SetPreference") + require := require.New(vm.T) + require.FailNow("Unexpectedly called SetPreference") } return nil } @@ -93,7 +99,8 @@ func (vm *TestVM) LastAccepted(ctx context.Context) (ids.ID, error) { return vm.LastAcceptedF(ctx) } if vm.CantLastAccepted && vm.T != nil { - vm.T.Fatal(errLastAccepted) + require := require.New(vm.T) + require.FailNow(errLastAccepted.Error()) } return ids.ID{}, errLastAccepted } diff --git a/snow/engine/snowman/bootstrap/bootstrapper_test.go b/snow/engine/snowman/bootstrap/bootstrapper_test.go index d3f98fb55b1..281ae2623ac 100644 --- a/snow/engine/snowman/bootstrap/bootstrapper_test.go +++ b/snow/engine/snowman/bootstrap/bootstrapper_test.go @@ -35,6 +35,8 @@ import ( var errUnknownBlock = errors.New("unknown block") func newConfig(t *testing.T) (Config, ids.NodeID, *common.SenderTest, *block.TestVM) { + require := require.New(t) + ctx := snow.DefaultConsensusContextTest() peers := validators.NewSet() @@ -62,17 +64,13 @@ func newConfig(t *testing.T) (Config, ids.NodeID, *common.SenderTest, *block.Tes sender.CantSendGetAcceptedFrontier = false peer := ids.GenerateTestNodeID() - if err := peers.Add(peer, nil, ids.Empty, 1); err != nil { - t.Fatal(err) - } + require.NoError(peers.Add(peer, nil, ids.Empty, 1)) peerTracker := tracker.NewPeers() startupTracker := tracker.NewStartup(peerTracker, peers.Weight()/2+1) peers.RegisterCallbackListener(startupTracker) - if err := startupTracker.Connected(context.Background(), peer, version.CurrentApp); err != nil { - t.Fatal(err) - } + require.NoError(startupTracker.Connected(context.Background(), peer, version.CurrentApp)) commonConfig := common.Config{ Ctx: ctx, @@ -89,9 +87,7 @@ func newConfig(t *testing.T) (Config, ids.NodeID, *common.SenderTest, *block.Tes } snowGetHandler, err := getter.New(vm, commonConfig) - if err != nil { - t.Fatal(err) - } + require.NoError(err) blocker, _ := queue.NewWithMissing(memdb.New(), "", prometheus.NewRegistry()) return Config{ @@ -210,6 +206,8 @@ func TestBootstrapperStartsOnlyIfEnoughStakeIsConnected(t *testing.T) { // Single node 
in the accepted frontier; no need to fetch parent func TestBootstrapperSingleFrontier(t *testing.T) { + require := require.New(t) + config, _, _, vm := newConfig(t) blkID0 := ids.Empty.Prefix(0) @@ -241,7 +239,7 @@ func TestBootstrapperSingleFrontier(t *testing.T) { return blk0.ID(), nil } vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { - require.Equal(t, blk0.ID(), blkID) + require.Equal(blk0.ID(), blkID) return blk0, nil } @@ -255,14 +253,10 @@ func TestBootstrapperSingleFrontier(t *testing.T) { return nil }, ) - if err != nil { - t.Fatal(err) - } + require.NoError(err) vm.CantSetState = false - if err := bs.Start(context.Background(), 0); err != nil { - t.Fatal(err) - } + require.NoError(bs.Start(context.Background(), 0)) acceptedIDs := []ids.ID{blkID1} @@ -273,8 +267,8 @@ func TestBootstrapperSingleFrontier(t *testing.T) { case blkID0: return blk0, nil default: - t.Fatal(database.ErrNotFound) - panic(database.ErrNotFound) + require.FailNow(database.ErrNotFound.Error()) + return nil, database.ErrNotFound } } vm.ParseBlockF = func(_ context.Context, blkBytes []byte) (snowman.Block, error) { @@ -284,19 +278,13 @@ func TestBootstrapperSingleFrontier(t *testing.T) { case bytes.Equal(blkBytes, blkBytes0): return blk0, nil } - t.Fatal(errUnknownBlock) + require.FailNow(errUnknownBlock.Error()) return nil, errUnknownBlock } - err = bs.ForceAccepted(context.Background(), acceptedIDs) - switch { - case err != nil: // should finish - t.Fatal(err) - case config.Ctx.State.Get().State != snow.NormalOp: - t.Fatalf("Bootstrapping should have finished") - case blk1.Status() != choices.Accepted: - t.Fatalf("Block should be accepted") - } + require.NoError(bs.ForceAccepted(context.Background(), acceptedIDs)) + require.Equal(snow.NormalOp, config.Ctx.State.Get().State) + require.Equal(choices.Accepted, blk1.Status()) } // Requests the unknown block and gets back an Ancestors with unexpected request ID. @@ -304,6 +292,8 @@ func TestBootstrapperSingleFrontier(t *testing.T) { // Requests again and gets an unexpected block. // Requests again and gets the expected block.
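The conversions above all target the same test-double idiom: each mocked method has an optional MethodF callback that overrides its behavior and a CantMethod flag that turns an unexpected call into an immediate test failure, now raised through testify instead of t.Fatalf. A minimal, self-contained sketch of that idiom, used here and in the tests that follow (the PingerTest type and every name in it are illustrative, not part of this patch):

package example

import (
	"testing"

	"github.com/stretchr/testify/require"
)

// PingerTest is a hypothetical test double in the style of SenderTest and
// TestVM: PingF overrides behavior, CantPing makes unexpected calls fatal.
type PingerTest struct {
	T        *testing.T
	CantPing bool
	PingF    func() error
}

func (p *PingerTest) Ping() error {
	if p.PingF != nil {
		return p.PingF()
	}
	if p.CantPing && p.T != nil {
		require := require.New(p.T)
		// Fails the test immediately, like t.Fatalf; as with t.Fatal, this
		// must run on the goroutine executing the test.
		require.FailNow("Unexpectedly called Ping")
	}
	return nil
}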
func TestBootstrapperUnknownByzantineResponse(t *testing.T) { + require := require.New(t) + config, peerID, sender, vm := newConfig(t) blkID0 := ids.Empty.Prefix(0) @@ -347,7 +337,7 @@ func TestBootstrapperUnknownByzantineResponse(t *testing.T) { return blk0.ID(), nil } vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { - require.Equal(t, blk0.ID(), blkID) + require.Equal(blk0.ID(), blkID) return blk0, nil } @@ -361,13 +351,9 @@ func TestBootstrapperUnknownByzantineResponse(t *testing.T) { return nil }, ) - if err != nil { - t.Fatal(err) - } + require.NoError(err) - if err := bs.Start(context.Background(), 0); err != nil { - t.Fatal(err) - } + require.NoError(bs.Start(context.Background(), 0)) acceptedIDs := []ids.ID{blkID2} @@ -384,8 +370,8 @@ func TestBootstrapperUnknownByzantineResponse(t *testing.T) { case blkID2: return blk2, nil default: - t.Fatal(database.ErrNotFound) - panic(database.ErrNotFound) + require.FailNow(database.ErrNotFound.Error()) + return nil, database.ErrNotFound } } vm.ParseBlockF = func(_ context.Context, blkBytes []byte) (snowman.Block, error) { @@ -399,64 +385,41 @@ func TestBootstrapperUnknownByzantineResponse(t *testing.T) { case bytes.Equal(blkBytes, blkBytes2): return blk2, nil } - t.Fatal(errUnknownBlock) + require.FailNow(errUnknownBlock.Error()) return nil, errUnknownBlock } requestID := new(uint32) sender.SendGetAncestorsF = func(_ context.Context, vdr ids.NodeID, reqID uint32, vtxID ids.ID) { - if vdr != peerID { - t.Fatalf("Should have requested block from %s, requested from %s", peerID, vdr) - } - switch { - case vtxID == blkID1: - default: - t.Fatalf("should have requested blk1") - } + require.Equal(peerID, vdr) + require.Equal(blkID1, vtxID) *requestID = reqID } vm.CantSetState = false - if err := bs.ForceAccepted(context.Background(), acceptedIDs); err != nil { // should request blk1 - t.Fatal(err) - } + require.NoError(bs.ForceAccepted(context.Background(), acceptedIDs)) // should request blk1 oldReqID := *requestID - if err := bs.Ancestors(context.Background(), peerID, *requestID+1, [][]byte{blkBytes1}); err != nil { // respond with wrong request ID - t.Fatal(err) - } else if oldReqID != *requestID { - t.Fatal("should not have sent new request") - } + require.NoError(bs.Ancestors(context.Background(), peerID, *requestID+1, [][]byte{blkBytes1})) // respond with wrong request ID + require.Equal(oldReqID, *requestID) - if err := bs.Ancestors(context.Background(), ids.NodeID{1, 2, 3}, *requestID, [][]byte{blkBytes1}); err != nil { // respond from wrong peer - t.Fatal(err) - } else if oldReqID != *requestID { - t.Fatal("should not have sent new request") - } + require.NoError(bs.Ancestors(context.Background(), ids.NodeID{1, 2, 3}, *requestID, [][]byte{blkBytes1})) // respond from wrong peer + require.Equal(oldReqID, *requestID) - if err := bs.Ancestors(context.Background(), peerID, *requestID, [][]byte{blkBytes0}); err != nil { // respond with wrong block - t.Fatal(err) - } else if oldReqID == *requestID { - t.Fatal("should have sent new request") - } + require.NoError(bs.Ancestors(context.Background(), peerID, *requestID, [][]byte{blkBytes0})) // respond with wrong block + require.NotEqual(oldReqID, *requestID) - err = bs.Ancestors(context.Background(), peerID, *requestID, [][]byte{blkBytes1}) - switch { - case err != nil: // respond with right block - t.Fatal(err) - case config.Ctx.State.Get().State != snow.NormalOp: - t.Fatalf("Bootstrapping should have finished") - case blk0.Status() != choices.Accepted: - t.Fatalf("Block 
should be accepted") - case blk1.Status() != choices.Accepted: - t.Fatalf("Block should be accepted") - case blk2.Status() != choices.Accepted: - t.Fatalf("Block should be accepted") - } + require.NoError(bs.Ancestors(context.Background(), peerID, *requestID, [][]byte{blkBytes1})) + require.Equal(snow.NormalOp, config.Ctx.State.Get().State) + require.Equal(choices.Accepted, blk0.Status()) + require.Equal(choices.Accepted, blk1.Status()) + require.Equal(choices.Accepted, blk2.Status()) } // There are multiple needed blocks and Ancestors returns one at a time func TestBootstrapperPartialFetch(t *testing.T) { + require := require.New(t) + config, peerID, sender, vm := newConfig(t) blkID0 := ids.Empty.Prefix(0) @@ -510,7 +473,7 @@ func TestBootstrapperPartialFetch(t *testing.T) { return blk0.ID(), nil } vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { - require.Equal(t, blk0.ID(), blkID) + require.Equal(blk0.ID(), blkID) return blk0, nil } @@ -524,14 +487,10 @@ func TestBootstrapperPartialFetch(t *testing.T) { return nil }, ) - if err != nil { - t.Fatal(err) - } + require.NoError(err) vm.CantSetState = false - if err := bs.Start(context.Background(), 0); err != nil { - t.Fatal(err) - } + require.NoError(bs.Start(context.Background(), 0)) acceptedIDs := []ids.ID{blkID3} @@ -554,8 +513,8 @@ func TestBootstrapperPartialFetch(t *testing.T) { case blkID3: return blk3, nil default: - t.Fatal(database.ErrNotFound) - panic(database.ErrNotFound) + require.FailNow(database.ErrNotFound.Error()) + return nil, database.ErrNotFound } } vm.ParseBlockF = func(_ context.Context, blkBytes []byte) (snowman.Block, error) { @@ -573,56 +532,38 @@ func TestBootstrapperPartialFetch(t *testing.T) { case bytes.Equal(blkBytes, blkBytes3): return blk3, nil } - t.Fatal(errUnknownBlock) + require.FailNow(errUnknownBlock.Error()) return nil, errUnknownBlock } requestID := new(uint32) requested := ids.Empty sender.SendGetAncestorsF = func(_ context.Context, vdr ids.NodeID, reqID uint32, vtxID ids.ID) { - if vdr != peerID { - t.Fatalf("Should have requested block from %s, requested from %s", peerID, vdr) - } - switch vtxID { - case blkID1, blkID2: - default: - t.Fatalf("should have requested blk1 or blk2") - } + require.Equal(peerID, vdr) + require.Contains([]ids.ID{blkID1, blkID2}, vtxID) *requestID = reqID requested = vtxID } - if err := bs.ForceAccepted(context.Background(), acceptedIDs); err != nil { // should request blk2 - t.Fatal(err) - } + require.NoError(bs.ForceAccepted(context.Background(), acceptedIDs)) // should request blk2 - if err := bs.Ancestors(context.Background(), peerID, *requestID, [][]byte{blkBytes2}); err != nil { // respond with blk2 - t.Fatal(err) - } else if requested != blkID1 { - t.Fatal("should have requested blk1") - } + require.NoError(bs.Ancestors(context.Background(), peerID, *requestID, [][]byte{blkBytes2})) // respond with blk2 + require.Equal(blkID1, requested) - if err := bs.Ancestors(context.Background(), peerID, *requestID, [][]byte{blkBytes1}); err != nil { // respond with blk1 - t.Fatal(err) - } else if requested != blkID1 { - t.Fatal("should not have requested another block") - } + require.NoError(bs.Ancestors(context.Background(), peerID, *requestID, [][]byte{blkBytes1})) // respond with blk1 + require.Equal(blkID1, requested) - switch { - case config.Ctx.State.Get().State != snow.NormalOp: - t.Fatalf("Bootstrapping should have finished") - case blk0.Status() != choices.Accepted: - t.Fatalf("Block should be accepted") - case blk1.Status() != 
choices.Accepted: - t.Fatalf("Block should be accepted") - case blk2.Status() != choices.Accepted: - t.Fatalf("Block should be accepted") - } + require.Equal(snow.NormalOp, config.Ctx.State.Get().State) + require.Equal(choices.Accepted, blk0.Status()) + require.Equal(choices.Accepted, blk1.Status()) + require.Equal(choices.Accepted, blk2.Status()) } // There are multiple needed blocks and some validators do not have all the blocks // This test was modeled after TestBootstrapperPartialFetch. func TestBootstrapperEmptyResponse(t *testing.T) { + require := require.New(t) + config, peerID, sender, vm := newConfig(t) blkID0 := ids.Empty.Prefix(0) @@ -676,7 +617,7 @@ func TestBootstrapperEmptyResponse(t *testing.T) { return blk0.ID(), nil } vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { - require.Equal(t, blk0.ID(), blkID) + require.Equal(blk0.ID(), blkID) return blk0, nil } @@ -690,14 +631,10 @@ func TestBootstrapperEmptyResponse(t *testing.T) { return nil }, ) - if err != nil { - t.Fatal(err) - } + require.NoError(err) vm.CantSetState = false - if err := bs.Start(context.Background(), 0); err != nil { - t.Fatal(err) - } + require.NoError(bs.Start(context.Background(), 0)) acceptedIDs := []ids.ID{blkID3} @@ -720,8 +657,8 @@ func TestBootstrapperEmptyResponse(t *testing.T) { case blkID3: return blk3, nil default: - t.Fatal(database.ErrNotFound) - panic(database.ErrNotFound) + require.FailNow(database.ErrNotFound.Error()) + return nil, database.ErrNotFound } } vm.ParseBlockF = func(_ context.Context, blkBytes []byte) (snowman.Block, error) { @@ -739,7 +676,7 @@ func TestBootstrapperEmptyResponse(t *testing.T) { case bytes.Equal(blkBytes, blkBytes3): return blk3, nil } - t.Fatal(errUnknownBlock) + require.FailNow(errUnknownBlock.Error()) return nil, errUnknownBlock } @@ -753,15 +690,9 @@ func TestBootstrapperEmptyResponse(t *testing.T) { } // should request blk2 - err = bs.ForceAccepted(context.Background(), acceptedIDs) - switch { - case err != nil: - t.Fatal(err) - case requestedVdr != peerID: - t.Fatal("should have requested from peerID") - case requestedBlock != blkID2: - t.Fatal("should have requested blk2") - } + require.NoError(bs.ForceAccepted(context.Background(), acceptedIDs)) + require.Equal(peerID, requestedVdr) + require.Equal(blkID2, requestedBlock) // add another two validators to the fetch set to test behavior on empty response newPeerID := ids.GenerateTestNodeID() @@ -770,46 +701,31 @@ func TestBootstrapperEmptyResponse(t *testing.T) { newPeerID = ids.GenerateTestNodeID() bs.(*bootstrapper).fetchFrom.Add(newPeerID) - if err := bs.Ancestors(context.Background(), peerID, requestID, [][]byte{blkBytes2}); err != nil { // respond with blk2 - t.Fatal(err) - } else if requestedBlock != blkID1 { - t.Fatal("should have requested blk1") - } + require.NoError(bs.Ancestors(context.Background(), peerID, requestID, [][]byte{blkBytes2})) + require.Equal(blkID1, requestedBlock) peerToBlacklist := requestedVdr // respond with empty - err = bs.Ancestors(context.Background(), peerToBlacklist, requestID, nil) - switch { - case err != nil: - t.Fatal(err) - case requestedVdr == peerToBlacklist: - t.Fatal("shouldn't have requested from peerToBlacklist") - case requestedBlock != blkID1: - t.Fatal("should have requested blk1") - } + require.NoError(bs.Ancestors(context.Background(), peerToBlacklist, requestID, nil)) + require.NotEqual(peerToBlacklist, requestedVdr) + require.Equal(blkID1, requestedBlock) - if err := bs.Ancestors(context.Background(), requestedVdr, requestID, 
[][]byte{blkBytes1}); err != nil { // respond with blk1 - t.Fatal(err) - } + require.NoError(bs.Ancestors(context.Background(), requestedVdr, requestID, [][]byte{blkBytes1})) // respond with blk1 - switch { - case config.Ctx.State.Get().State != snow.NormalOp: - t.Fatalf("Bootstrapping should have finished") - case blk0.Status() != choices.Accepted: - t.Fatalf("Block should be accepted") - case blk1.Status() != choices.Accepted: - t.Fatalf("Block should be accepted") - case blk2.Status() != choices.Accepted: - t.Fatalf("Block should be accepted") - } + require.Equal(snow.NormalOp, config.Ctx.State.Get().State) + require.Equal(choices.Accepted, blk0.Status()) + require.Equal(choices.Accepted, blk1.Status()) + require.Equal(choices.Accepted, blk2.Status()) // check peerToBlacklist was removed from the fetch set - require.False(t, bs.(*bootstrapper).fetchFrom.Contains(peerToBlacklist)) + require.NotContains(bs.(*bootstrapper).fetchFrom, peerToBlacklist) } // There are multiple needed blocks and Ancestors returns all at once func TestBootstrapperAncestors(t *testing.T) { + require := require.New(t) + config, peerID, sender, vm := newConfig(t) blkID0 := ids.Empty.Prefix(0) @@ -864,7 +780,7 @@ func TestBootstrapperAncestors(t *testing.T) { return blk0.ID(), nil } vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { - require.Equal(t, blk0.ID(), blkID) + require.Equal(blk0.ID(), blkID) return blk0, nil } @@ -878,13 +794,9 @@ func TestBootstrapperAncestors(t *testing.T) { return nil }, ) - if err != nil { - t.Fatal(err) - } + require.NoError(err) - if err := bs.Start(context.Background(), 0); err != nil { - t.Fatal(err) - } + require.NoError(bs.Start(context.Background(), 0)) acceptedIDs := []ids.ID{blkID3} @@ -907,8 +819,8 @@ func TestBootstrapperAncestors(t *testing.T) { case blkID3: return blk3, nil default: - t.Fatal(database.ErrNotFound) - panic(database.ErrNotFound) + require.FailNow(database.ErrNotFound.Error()) + return nil, database.ErrNotFound } } vm.ParseBlockF = func(_ context.Context, blkBytes []byte) (snowman.Block, error) { @@ -926,48 +838,32 @@ func TestBootstrapperAncestors(t *testing.T) { case bytes.Equal(blkBytes, blkBytes3): return blk3, nil } - t.Fatal(errUnknownBlock) + require.FailNow(errUnknownBlock.Error()) return nil, errUnknownBlock } requestID := new(uint32) requested := ids.Empty sender.SendGetAncestorsF = func(_ context.Context, vdr ids.NodeID, reqID uint32, vtxID ids.ID) { - if vdr != peerID { - t.Fatalf("Should have requested block from %s, requested from %s", peerID, vdr) - } - switch vtxID { - case blkID1, blkID2: - default: - t.Fatalf("should have requested blk1 or blk2") - } + require.Equal(peerID, vdr) + require.Contains([]ids.ID{blkID1, blkID2}, vtxID) *requestID = reqID requested = vtxID } - if err := bs.ForceAccepted(context.Background(), acceptedIDs); err != nil { // should request blk2 - t.Fatal(err) - } - - if err := bs.Ancestors(context.Background(), peerID, *requestID, [][]byte{blkBytes2, blkBytes1}); err != nil { // respond with blk2 and blk1 - t.Fatal(err) - } else if requested != blkID2 { - t.Fatal("should not have requested another block") - } + require.NoError(bs.ForceAccepted(context.Background(), acceptedIDs)) // should request blk2 + require.NoError(bs.Ancestors(context.Background(), peerID, *requestID, [][]byte{blkBytes2, blkBytes1})) // respond with blk2 and blk1 + require.Equal(blkID2, requested) - switch { - case config.Ctx.State.Get().State != snow.NormalOp: - t.Fatalf("Bootstrapping should have finished") - case 
blk0.Status() != choices.Accepted: - t.Fatalf("Block should be accepted") - case blk1.Status() != choices.Accepted: - t.Fatalf("Block should be accepted") - case blk2.Status() != choices.Accepted: - t.Fatalf("Block should be accepted") - } + require.Equal(snow.NormalOp, config.Ctx.State.Get().State) + require.Equal(choices.Accepted, blk0.Status()) + require.Equal(choices.Accepted, blk1.Status()) + require.Equal(choices.Accepted, blk2.Status()) } func TestBootstrapperFinalized(t *testing.T) { + require := require.New(t) + config, peerID, sender, vm := newConfig(t) blkID0 := ids.Empty.Prefix(0) @@ -1010,7 +906,7 @@ func TestBootstrapperFinalized(t *testing.T) { return blk0.ID(), nil } vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { - require.Equal(t, blk0.ID(), blkID) + require.Equal(blk0.ID(), blkID) return blk0, nil } bs, err := New( @@ -1023,14 +919,10 @@ func TestBootstrapperFinalized(t *testing.T) { return nil }, ) - if err != nil { - t.Fatal(err) - } + require.NoError(err) vm.CantSetState = false - if err := bs.Start(context.Background(), 0); err != nil { - t.Fatal(err) - } + require.NoError(bs.Start(context.Background(), 0)) parsedBlk1 := false parsedBlk2 := false @@ -1049,8 +941,8 @@ func TestBootstrapperFinalized(t *testing.T) { } return nil, database.ErrNotFound default: - t.Fatal(database.ErrNotFound) - panic(database.ErrNotFound) + require.FailNow(database.ErrNotFound.Error()) + return nil, database.ErrNotFound } } vm.ParseBlockF = func(_ context.Context, blkBytes []byte) (snowman.Block, error) { @@ -1066,41 +958,27 @@ func TestBootstrapperFinalized(t *testing.T) { parsedBlk2 = true return blk2, nil } - t.Fatal(errUnknownBlock) + require.FailNow(errUnknownBlock.Error()) return nil, errUnknownBlock } requestIDs := map[ids.ID]uint32{} sender.SendGetAncestorsF = func(_ context.Context, vdr ids.NodeID, reqID uint32, vtxID ids.ID) { - if vdr != peerID { - t.Fatalf("Should have requested block from %s, requested from %s", peerID, vdr) - } + require.Equal(peerID, vdr) requestIDs[vtxID] = reqID } - if err := bs.ForceAccepted(context.Background(), []ids.ID{blkID1, blkID2}); err != nil { // should request blk2 and blk1 - t.Fatal(err) - } + require.NoError(bs.ForceAccepted(context.Background(), []ids.ID{blkID1, blkID2})) // should request blk2 and blk1 reqIDBlk2, ok := requestIDs[blkID2] - if !ok { - t.Fatalf("should have requested blk2") - } + require.True(ok) - if err := bs.Ancestors(context.Background(), peerID, reqIDBlk2, [][]byte{blkBytes2, blkBytes1}); err != nil { - t.Fatal(err) - } + require.NoError(bs.Ancestors(context.Background(), peerID, reqIDBlk2, [][]byte{blkBytes2, blkBytes1})) - switch { - case config.Ctx.State.Get().State != snow.NormalOp: - t.Fatalf("Bootstrapping should have finished") - case blk0.Status() != choices.Accepted: - t.Fatalf("Block should be accepted") - case blk1.Status() != choices.Accepted: - t.Fatalf("Block should be accepted") - case blk2.Status() != choices.Accepted: - t.Fatalf("Block should be accepted") - } + require.Equal(snow.NormalOp, config.Ctx.State.Get().State) + require.Equal(choices.Accepted, blk0.Status()) + require.Equal(choices.Accepted, blk1.Status()) + require.Equal(choices.Accepted, blk2.Status()) } func TestRestartBootstrapping(t *testing.T) { @@ -1198,8 +1076,8 @@ func TestRestartBootstrapping(t *testing.T) { } return nil, database.ErrNotFound default: - t.Fatal(database.ErrNotFound) - panic(database.ErrNotFound) + require.FailNow(database.ErrNotFound.Error()) + return nil, database.ErrNotFound } } 
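In the default branches above, a return still follows require.FailNow even though it can never execute: FailNow stops the test goroutine, so the trailing return only satisfies the compiler, playing the role the redundant panic(...) played after t.Fatal. A condensed, hypothetical sketch of this lookup-mock shape:

package example

import (
	"errors"
	"testing"

	"github.com/stretchr/testify/require"
)

var errNotFound = errors.New("not found")

// lookupMock returns a GetBlockF-style closure that serves known values and
// fails the test on any unexpected ID. All names here are illustrative.
func lookupMock(t *testing.T, known map[string]int) func(string) (int, error) {
	require := require.New(t)
	return func(id string) (int, error) {
		if v, ok := known[id]; ok {
			return v, nil
		}
		require.FailNow(errNotFound.Error()) // unexpected ID: fail immediately
		return 0, errNotFound                // unreachable; keeps the compiler satisfied
	}
}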
vm.ParseBlockF = func(_ context.Context, blkBytes []byte) (snowman.Block, error) { @@ -1223,7 +1101,7 @@ func TestRestartBootstrapping(t *testing.T) { parsedBlk4 = true return blk4, nil } - t.Fatal(errUnknownBlock) + require.FailNow(errUnknownBlock.Error()) return nil, errUnknownBlock } @@ -1237,88 +1115,53 @@ func TestRestartBootstrapping(t *testing.T) { return nil }, ) - if err != nil { - t.Fatal(err) - } + require.NoError(err) require.IsType(&bootstrapper{}, bsIntf) bs := bsIntf.(*bootstrapper) vm.CantSetState = false - if err := bs.Start(context.Background(), 0); err != nil { - t.Fatal(err) - } + require.NoError(bs.Start(context.Background(), 0)) requestIDs := map[ids.ID]uint32{} sender.SendGetAncestorsF = func(_ context.Context, vdr ids.NodeID, reqID uint32, vtxID ids.ID) { - if vdr != peerID { - t.Fatalf("Should have requested block from %s, requested from %s", peerID, vdr) - } + require.Equal(peerID, vdr) requestIDs[vtxID] = reqID } // Force Accept blk3 - if err := bs.ForceAccepted(context.Background(), []ids.ID{blkID3}); err != nil { // should request blk3 - t.Fatal(err) - } + require.NoError(bs.ForceAccepted(context.Background(), []ids.ID{blkID3})) // should request blk3 reqID, ok := requestIDs[blkID3] - if !ok { - t.Fatalf("should have requested blk3") - } + require.True(ok) - if err := bs.Ancestors(context.Background(), peerID, reqID, [][]byte{blkBytes3, blkBytes2}); err != nil { - t.Fatal(err) - } + require.NoError(bs.Ancestors(context.Background(), peerID, reqID, [][]byte{blkBytes3, blkBytes2})) - if _, ok := requestIDs[blkID1]; !ok { - t.Fatal("should have requested blk1") - } + _, ok = requestIDs[blkID1] + require.True(ok) // Remove request, so we can restart bootstrapping via ForceAccepted - if removed := bs.OutstandingRequests.RemoveAny(blkID1); !removed { - t.Fatal("Expected to find an outstanding request for blk1") - } + require.True(bs.OutstandingRequests.RemoveAny(blkID1)) requestIDs = map[ids.ID]uint32{} - if err := bs.ForceAccepted(context.Background(), []ids.ID{blkID4}); err != nil { - t.Fatal(err) - } + require.NoError(bs.ForceAccepted(context.Background(), []ids.ID{blkID4})) blk1RequestID, ok := requestIDs[blkID1] - if !ok { - t.Fatal("should have re-requested blk1 on restart") - } + require.True(ok) blk4RequestID, ok := requestIDs[blkID4] - if !ok { - t.Fatal("should have requested blk4 as new accepted frontier") - } + require.True(ok) - if err := bs.Ancestors(context.Background(), peerID, blk1RequestID, [][]byte{blkBytes1}); err != nil { - t.Fatal(err) - } + require.NoError(bs.Ancestors(context.Background(), peerID, blk1RequestID, [][]byte{blkBytes1})) - if config.Ctx.State.Get().State == snow.NormalOp { - t.Fatal("Bootstrapping should not have finished with outstanding request for blk4") - } + require.NotEqual(snow.NormalOp, config.Ctx.State.Get().State) - if err := bs.Ancestors(context.Background(), peerID, blk4RequestID, [][]byte{blkBytes4}); err != nil { - t.Fatal(err) - } + require.NoError(bs.Ancestors(context.Background(), peerID, blk4RequestID, [][]byte{blkBytes4})) - switch { - case config.Ctx.State.Get().State != snow.NormalOp: - t.Fatalf("Bootstrapping should have finished") - case blk0.Status() != choices.Accepted: - t.Fatalf("Block should be accepted") - case blk1.Status() != choices.Accepted: - t.Fatalf("Block should be accepted") - case blk2.Status() != choices.Accepted: - t.Fatalf("Block should be accepted") - case blk3.Status() != choices.Accepted: - t.Fatalf("Block should be accepted") - case blk4.Status() != choices.Accepted: - t.Fatalf("Block 
should be accepted") - } + require.Equal(snow.NormalOp, config.Ctx.State.Get().State) + require.Equal(choices.Accepted, blk0.Status()) + require.Equal(choices.Accepted, blk1.Status()) + require.Equal(choices.Accepted, blk2.Status()) + require.Equal(choices.Accepted, blk3.Status()) + require.Equal(choices.Accepted, blk4.Status()) } func TestBootstrapOldBlockAfterStateSync(t *testing.T) { @@ -1354,8 +1197,8 @@ func TestBootstrapOldBlockAfterStateSync(t *testing.T) { case blk1.ID(): return blk1, nil default: - t.Fatal(database.ErrNotFound) - panic(database.ErrNotFound) + require.FailNow(database.ErrNotFound.Error()) + return nil, database.ErrNotFound } } vm.ParseBlockF = func(_ context.Context, blkBytes []byte) (snowman.Block, error) { @@ -1365,7 +1208,7 @@ func TestBootstrapOldBlockAfterStateSync(t *testing.T) { case bytes.Equal(blkBytes, blk1.Bytes()): return blk1, nil } - t.Fatal(errUnknownBlock) + require.FailNow(errUnknownBlock.Error()) return nil, errUnknownBlock } @@ -1379,47 +1222,29 @@ func TestBootstrapOldBlockAfterStateSync(t *testing.T) { return nil }, ) - if err != nil { - t.Fatal(err) - } + require.NoError(err) require.IsType(&bootstrapper{}, bsIntf) bs := bsIntf.(*bootstrapper) vm.CantSetState = false - if err := bs.Start(context.Background(), 0); err != nil { - t.Fatal(err) - } + require.NoError(bs.Start(context.Background(), 0)) requestIDs := map[ids.ID]uint32{} sender.SendGetAncestorsF = func(_ context.Context, vdr ids.NodeID, reqID uint32, vtxID ids.ID) { - if vdr != peerID { - t.Fatalf("Should have requested block from %s, requested from %s", peerID, vdr) - } + require.Equal(peerID, vdr) requestIDs[vtxID] = reqID } // Force Accept, the already transitively accepted, blk0 - if err := bs.ForceAccepted(context.Background(), []ids.ID{blk0.ID()}); err != nil { // should request blk0 - t.Fatal(err) - } + require.NoError(bs.ForceAccepted(context.Background(), []ids.ID{blk0.ID()})) // should request blk0 reqID, ok := requestIDs[blk0.ID()] - if !ok { - t.Fatalf("should have requested blk0") - } - - if err := bs.Ancestors(context.Background(), peerID, reqID, [][]byte{blk0.Bytes()}); err != nil { - t.Fatal(err) - } + require.True(ok) + require.NoError(bs.Ancestors(context.Background(), peerID, reqID, [][]byte{blk0.Bytes()})) - switch { - case config.Ctx.State.Get().State != snow.NormalOp: - t.Fatalf("Bootstrapping should have finished") - case blk0.Status() != choices.Processing: - t.Fatalf("Block should be processing") - case blk1.Status() != choices.Accepted: - t.Fatalf("Block should be accepted") - } + require.Equal(snow.NormalOp, config.Ctx.State.Get().State) + require.Equal(choices.Processing, blk0.Status()) + require.Equal(choices.Accepted, blk1.Status()) } func TestBootstrapContinueAfterHalt(t *testing.T) { @@ -1468,9 +1293,7 @@ func TestBootstrapContinueAfterHalt(t *testing.T) { return nil }, ) - if err != nil { - t.Fatal(err) - } + require.NoError(err) require.IsType(&bootstrapper{}, bsIntf) bs := bsIntf.(*bootstrapper) @@ -1484,23 +1307,17 @@ func TestBootstrapContinueAfterHalt(t *testing.T) { case blk2.ID(): return blk2, nil default: - t.Fatal(database.ErrNotFound) - panic(database.ErrNotFound) + require.FailNow(database.ErrNotFound.Error()) + return nil, database.ErrNotFound } } vm.CantSetState = false - if err := bs.Start(context.Background(), 0); err != nil { - t.Fatal(err) - } + require.NoError(bs.Start(context.Background(), 0)) - if err := bs.ForceAccepted(context.Background(), []ids.ID{blk2.ID()}); err != nil { - t.Fatal(err) - } + 
require.NoError(bs.ForceAccepted(context.Background(), []ids.ID{blk2.ID()})) - if bs.Blocked.NumMissingIDs() != 1 { - t.Fatal("Should have left blk1 as missing") - } + require.Equal(1, bs.Blocked.NumMissingIDs()) } func TestBootstrapNoParseOnNew(t *testing.T) { diff --git a/snow/engine/snowman/getter/getter_test.go b/snow/engine/snowman/getter/getter_test.go index 3dfdb9ad560..7cf0044fd5f 100644 --- a/snow/engine/snowman/getter/getter_test.go +++ b/snow/engine/snowman/getter/getter_test.go @@ -34,6 +34,8 @@ func testSetup( t *testing.T, ctrl *gomock.Controller, ) (StateSyncEnabledMock, *common.SenderTest, common.Config) { + require := require.New(t) + ctx := snow.DefaultConsensusContextTest() peers := validators.NewSet() @@ -61,9 +63,7 @@ func testSetup( sender.CantSendGetAcceptedFrontier = false peer := ids.GenerateTestNodeID() - if err := peers.Add(peer, nil, ids.Empty, 1); err != nil { - t.Fatal(err) - } + require.NoError(peers.Add(peer, nil, ids.Empty, 1)) commonConfig := common.Config{ Ctx: ctx, @@ -82,6 +82,8 @@ func testSetup( } func TestAcceptedFrontier(t *testing.T) { + require := require.New(t) + ctrl := gomock.NewController(t) defer ctrl.Finish() @@ -102,15 +104,13 @@ func TestAcceptedFrontier(t *testing.T) { return blkID, nil } vm.GetBlockF = func(_ context.Context, bID ids.ID) (snowman.Block, error) { - require.Equal(t, blkID, bID) + require.Equal(blkID, bID) return dummyBlk, nil } bsIntf, err := New(vm, config) - if err != nil { - t.Fatal(err) - } - require.IsType(t, &getter{}, bsIntf) + require.NoError(err) + require.IsType(&getter{}, bsIntf) bs := bsIntf.(*getter) var accepted []ids.ID @@ -118,19 +118,15 @@ func TestAcceptedFrontier(t *testing.T) { accepted = frontier } - if err := bs.GetAcceptedFrontier(context.Background(), ids.EmptyNodeID, 0); err != nil { - t.Fatal(err) - } + require.NoError(bs.GetAcceptedFrontier(context.Background(), ids.EmptyNodeID, 0)) - if len(accepted) != 1 { - t.Fatalf("Only one block should be accepted") - } - if accepted[0] != blkID { - t.Fatalf("Blk should be accepted") - } + require.Len(accepted, 1) + require.Equal(blkID, accepted[0]) } func TestFilterAccepted(t *testing.T) { + require := require.New(t) + ctrl := gomock.NewController(t) defer ctrl.Finish() @@ -154,15 +150,13 @@ func TestFilterAccepted(t *testing.T) { return blk1.ID(), nil } vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { - require.Equal(t, blk1.ID(), blkID) + require.Equal(blk1.ID(), blkID) return blk1, nil } bsIntf, err := New(vm, config) - if err != nil { - t.Fatal(err) - } - require.IsType(t, &getter{}, bsIntf) + require.NoError(err) + require.IsType(&getter{}, bsIntf) bs := bsIntf.(*getter) blkIDs := []ids.ID{blkID0, blkID1, blkID2} @@ -175,7 +169,7 @@ func TestFilterAccepted(t *testing.T) { case blkID2: return nil, errUnknownBlock } - t.Fatal(errUnknownBlock) + require.FailNow(errUnknownBlock.Error()) return nil, errUnknownBlock } @@ -184,23 +178,13 @@ func TestFilterAccepted(t *testing.T) { accepted = frontier } - if err := bs.GetAccepted(context.Background(), ids.EmptyNodeID, 0, blkIDs); err != nil { - t.Fatal(err) - } + require.NoError(bs.GetAccepted(context.Background(), ids.EmptyNodeID, 0, blkIDs)) acceptedSet := set.Set[ids.ID]{} acceptedSet.Add(accepted...) 
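The next hunk collapses hand-rolled length and membership checks on the accepted set into single testify assertions, which also print the offending collection on failure. A small illustrative sketch of the same rewrites (names and values are made up):

package example

import (
	"testing"

	"github.com/stretchr/testify/require"
)

func TestMembership(t *testing.T) {
	require := require.New(t)

	// Stands in for the set.Set[ids.ID] built from the accepted frontier.
	accepted := map[string]struct{}{"blk0": {}, "blk1": {}}

	// Before: if accepted.Len() != 2 { t.Fatalf("Two blocks should be accepted") }
	require.Len(accepted, 2)
	require.Contains(accepted, "blk0")    // Contains checks map keys
	require.NotContains(accepted, "blk2") // was: if accepted.Contains(...) { t.Fatalf(...) }
}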
- if acceptedSet.Len() != 2 { - t.Fatalf("Two blocks should be accepted") - } - if !acceptedSet.Contains(blkID0) { - t.Fatalf("Blk should be accepted") - } - if !acceptedSet.Contains(blkID1) { - t.Fatalf("Blk should be accepted") - } - if acceptedSet.Contains(blkID2) { - t.Fatalf("Blk shouldn't be accepted") - } + require.Len(acceptedSet, 2) + require.Contains(acceptedSet, blkID0) + require.Contains(acceptedSet, blkID1) + require.NotContains(acceptedSet, blkID2) } diff --git a/snow/engine/snowman/syncer/state_syncer_test.go b/snow/engine/snowman/syncer/state_syncer_test.go index 18db546b6de..c93d9116007 100644 --- a/snow/engine/snowman/syncer/state_syncer_test.go +++ b/snow/engine/snowman/syncer/state_syncer_test.go @@ -181,7 +181,7 @@ func TestStateSyncLocalSummaryIsIncludedAmongFrontiersIfAvailable(t *testing.T) require.Equal(localSummary, syncer.locallyAvailableSummary) ws, ok := syncer.weightedSummaries[summaryID] require.True(ok) - require.True(bytes.Equal(ws.summary.Bytes(), summaryBytes)) + require.Equal(summaryBytes, ws.summary.Bytes()) require.Zero(ws.weight) } @@ -254,7 +254,7 @@ func TestBeaconsAreReachedForFrontiersUponStartup(t *testing.T) { require.Len(contactedFrontiersProviders, safeMath.Min(vdrs.Len(), common.MaxOutstandingBroadcastRequests)) for beaconID := range contactedFrontiersProviders { // check that beacon is duly marked as reached out - require.True(syncer.pendingSeeders.Contains(beaconID)) + require.Contains(syncer.pendingSeeders, beaconID) } // check that, obviously, no summary is yet registered @@ -295,8 +295,8 @@ func TestUnRequestedStateSummaryFrontiersAreDropped(t *testing.T) { } initiallyReachedOutBeaconsSize := len(contactedFrontiersProviders) - require.True(initiallyReachedOutBeaconsSize > 0) - require.True(initiallyReachedOutBeaconsSize <= common.MaxOutstandingBroadcastRequests) + require.Positive(initiallyReachedOutBeaconsSize) + require.LessOrEqual(initiallyReachedOutBeaconsSize, common.MaxOutstandingBroadcastRequests) // mock VM to simulate a valid summary is returned fullVM.CantParseStateSummary = true @@ -388,8 +388,8 @@ func TestMalformedStateSummaryFrontiersAreDropped(t *testing.T) { } initiallyReachedOutBeaconsSize := len(contactedFrontiersProviders) - require.True(initiallyReachedOutBeaconsSize > 0) - require.True(initiallyReachedOutBeaconsSize <= common.MaxOutstandingBroadcastRequests) + require.Positive(initiallyReachedOutBeaconsSize) + require.LessOrEqual(initiallyReachedOutBeaconsSize, common.MaxOutstandingBroadcastRequests) // mock VM to simulate an invalid summary is returned summary := []byte{'s', 'u', 'm', 'm', 'a', 'r', 'y'} @@ -460,8 +460,8 @@ func TestLateResponsesFromUnresponsiveFrontiersAreNotRecorded(t *testing.T) { } initiallyReachedOutBeaconsSize := len(contactedFrontiersProviders) - require.True(initiallyReachedOutBeaconsSize > 0) - require.True(initiallyReachedOutBeaconsSize <= common.MaxOutstandingBroadcastRequests) + require.Positive(initiallyReachedOutBeaconsSize) + require.LessOrEqual(initiallyReachedOutBeaconsSize, common.MaxOutstandingBroadcastRequests) // pick one of the vdrs that have been reached out unresponsiveBeaconID := pickRandomFrom(contactedFrontiersProviders) @@ -571,7 +571,7 @@ func TestStateSyncIsRestartedIfTooManyFrontierSeedersTimeout(t *testing.T) { for _, vdr := range vdrs.List() { require.NoError(syncer.Connected(context.Background(), vdr.NodeID, version.CurrentApp)) } - require.True(syncer.pendingSeeders.Len() != 0) + require.NotEmpty(syncer.pendingSeeders) // let just one node respond and all others 
timeout maxResponses := 1 @@ -637,7 +637,7 @@ func TestVoteRequestsAreSentAsAllFrontierBeaconsResponded(t *testing.T) { // mock VM to simulate a valid summary is returned fullVM.CantParseStateSummary = true fullVM.ParseStateSummaryF = func(_ context.Context, b []byte) (block.StateSummary, error) { - require.True(bytes.Equal(b, summaryBytes)) + require.Equal(summaryBytes, b) return &block.TestStateSummary{ HeightV: key, IDV: summaryID, @@ -657,7 +657,7 @@ func TestVoteRequestsAreSentAsAllFrontierBeaconsResponded(t *testing.T) { for _, vdr := range vdrs.List() { require.NoError(syncer.Connected(context.Background(), vdr.NodeID, version.CurrentApp)) } - require.True(syncer.pendingSeeders.Len() != 0) + require.NotEmpty(syncer.pendingSeeders) // let all contacted vdrs respond for syncer.pendingSeeders.Len() != 0 { @@ -672,12 +672,12 @@ func TestVoteRequestsAreSentAsAllFrontierBeaconsResponded(t *testing.T) { summaryBytes, )) } - require.False(syncer.pendingSeeders.Len() != 0) + require.Empty(syncer.pendingSeeders) // check that vote requests are issued initiallyContactedVotersSize := len(contactedVoters) - require.True(initiallyContactedVotersSize > 0) - require.True(initiallyContactedVotersSize <= common.MaxOutstandingBroadcastRequests) + require.Positive(initiallyContactedVotersSize) + require.LessOrEqual(initiallyContactedVotersSize, common.MaxOutstandingBroadcastRequests) } func TestUnRequestedVotesAreDropped(t *testing.T) { @@ -730,7 +730,7 @@ func TestUnRequestedVotesAreDropped(t *testing.T) { for _, vdr := range vdrs.List() { require.NoError(syncer.Connected(context.Background(), vdr.NodeID, version.CurrentApp)) } - require.True(syncer.pendingSeeders.Len() != 0) + require.NotEmpty(syncer.pendingSeeders) // let all contacted vdrs respond for syncer.pendingSeeders.Len() != 0 { @@ -745,12 +745,12 @@ func TestUnRequestedVotesAreDropped(t *testing.T) { summaryBytes, )) } - require.False(syncer.pendingSeeders.Len() != 0) + require.Empty(syncer.pendingSeeders) // check that vote requests are issued initiallyContactedVotersSize := len(contactedVoters) - require.True(initiallyContactedVotersSize > 0) - require.True(initiallyContactedVotersSize <= common.MaxOutstandingBroadcastRequests) + require.Positive(initiallyContactedVotersSize) + require.LessOrEqual(initiallyContactedVotersSize, common.MaxOutstandingBroadcastRequests) _, found := syncer.weightedSummaries[summaryID] require.True(found) @@ -850,7 +850,7 @@ func TestVotesForUnknownSummariesAreDropped(t *testing.T) { for _, vdr := range vdrs.List() { require.NoError(syncer.Connected(context.Background(), vdr.NodeID, version.CurrentApp)) } - require.True(syncer.pendingSeeders.Len() != 0) + require.NotEmpty(syncer.pendingSeeders) // let all contacted vdrs respond for syncer.pendingSeeders.Len() != 0 { @@ -865,12 +865,12 @@ func TestVotesForUnknownSummariesAreDropped(t *testing.T) { summaryBytes, )) } - require.False(syncer.pendingSeeders.Len() != 0) + require.Empty(syncer.pendingSeeders) // check that vote requests are issued initiallyContactedVotersSize := len(contactedVoters) - require.True(initiallyContactedVotersSize > 0) - require.True(initiallyContactedVotersSize <= common.MaxOutstandingBroadcastRequests) + require.Positive(initiallyContactedVotersSize) + require.LessOrEqual(initiallyContactedVotersSize, common.MaxOutstandingBroadcastRequests) _, found := syncer.weightedSummaries[summaryID] require.True(found) @@ -972,7 +972,7 @@ func TestStateSummaryIsPassedToVMAsMajorityOfVotesIsCastedForIt(t *testing.T) { for _, vdr := range vdrs.List() 
{ require.NoError(syncer.Connected(context.Background(), vdr.NodeID, version.CurrentApp)) } - require.True(syncer.pendingSeeders.Len() != 0) + require.NotEmpty(syncer.pendingSeeders) // let all contacted vdrs respond with majority or minority summaries for { @@ -1000,7 +1000,7 @@ func TestStateSummaryIsPassedToVMAsMajorityOfVotesIsCastedForIt(t *testing.T) { )) } } - require.False(syncer.pendingSeeders.Len() != 0) + require.Empty(syncer.pendingSeeders) majoritySummaryCalled := false minoritySummaryCalled := false @@ -1107,7 +1107,7 @@ func TestVotingIsRestartedIfMajorityIsNotReachedDueToTimeouts(t *testing.T) { for _, vdr := range vdrs.List() { require.NoError(syncer.Connected(context.Background(), vdr.NodeID, version.CurrentApp)) } - require.True(syncer.pendingSeeders.Len() != 0) + require.NotEmpty(syncer.pendingSeeders) // let all contacted vdrs respond for syncer.pendingSeeders.Len() != 0 { @@ -1122,7 +1122,7 @@ func TestVotingIsRestartedIfMajorityIsNotReachedDueToTimeouts(t *testing.T) { summaryBytes, )) } - require.False(syncer.pendingSeeders.Len() != 0) + require.Empty(syncer.pendingSeeders) minoritySummaryCalled := false minoritySummary.AcceptF = func(context.Context) (block.StateSyncMode, error) { @@ -1159,8 +1159,8 @@ func TestVotingIsRestartedIfMajorityIsNotReachedDueToTimeouts(t *testing.T) { require.False(minoritySummaryCalled) // instead the whole process is restared - require.False(syncer.pendingVoters.Len() != 0) // no voters reached - require.True(syncer.pendingSeeders.Len() != 0) // frontiers providers reached again + require.Empty(syncer.pendingVoters) // no voters reached + require.NotEmpty(syncer.pendingSeeders) // frontiers providers reached again } func TestStateSyncIsStoppedIfEnoughVotesAreCastedWithNoClearMajority(t *testing.T) { @@ -1229,7 +1229,7 @@ func TestStateSyncIsStoppedIfEnoughVotesAreCastedWithNoClearMajority(t *testing. for _, vdr := range vdrs.List() { require.NoError(syncer.Connected(context.Background(), vdr.NodeID, version.CurrentApp)) } - require.True(syncer.pendingSeeders.Len() != 0) + require.NotEmpty(syncer.pendingSeeders) // let all contacted vdrs respond with majority or minority summaries for { @@ -1257,7 +1257,7 @@ func TestStateSyncIsStoppedIfEnoughVotesAreCastedWithNoClearMajority(t *testing. 
)) } } - require.False(syncer.pendingSeeders.Len() != 0) + require.Empty(syncer.pendingSeeders) majoritySummaryCalled := false minoritySummaryCalled := false diff --git a/snow/engine/snowman/syncer/utils_test.go b/snow/engine/snowman/syncer/utils_test.go index a8d24297927..08f62cb8877 100644 --- a/snow/engine/snowman/syncer/utils_test.go +++ b/snow/engine/snowman/syncer/utils_test.go @@ -55,12 +55,14 @@ type fullVM struct { } func buildTestPeers(t *testing.T) validators.Set { + require := require.New(t) + // we consider more than common.MaxOutstandingBroadcastRequests peers // so to test the effect of cap on number of requests sent out vdrs := validators.NewSet() for idx := 0; idx < 2*common.MaxOutstandingBroadcastRequests; idx++ { beaconID := ids.GenerateTestNodeID() - require.NoError(t, vdrs.Add(beaconID, nil, ids.Empty, 1)) + require.NoError(vdrs.Add(beaconID, nil, ids.Empty, 1)) } return vdrs } @@ -70,6 +72,8 @@ func buildTestsObjects(t *testing.T, commonCfg *common.Config) ( *fullVM, *common.SenderTest, ) { + require := require.New(t) + sender := &common.SenderTest{T: t} commonCfg.Sender = sender @@ -82,16 +86,16 @@ func buildTestsObjects(t *testing.T, commonCfg *common.Config) ( }, } dummyGetter, err := getter.New(fullVM, *commonCfg) - require.NoError(t, err) + require.NoError(err) cfg, err := NewConfig(*commonCfg, nil, dummyGetter, fullVM) - require.NoError(t, err) + require.NoError(err) commonSyncer := New(cfg, func(context.Context, uint32) error { return nil }) - require.IsType(t, &stateSyncer{}, commonSyncer) + require.IsType(&stateSyncer{}, commonSyncer) syncer := commonSyncer.(*stateSyncer) - require.NotNil(t, syncer.stateSyncVM) + require.NotNil(syncer.stateSyncVM) fullVM.GetOngoingSyncStateSummaryF = func(context.Context) (block.StateSummary, error) { return nil, database.ErrNotFound diff --git a/snow/engine/snowman/test_engine.go b/snow/engine/snowman/test_engine.go index c5c897b4a4e..ca6b9da462a 100644 --- a/snow/engine/snowman/test_engine.go +++ b/snow/engine/snowman/test_engine.go @@ -7,6 +7,8 @@ import ( "context" "errors" + "github.com/stretchr/testify/require" + "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/consensus/snowman" "github.com/ava-labs/avalanchego/snow/engine/common" @@ -36,7 +38,8 @@ func (e *EngineTest) GetBlock(ctx context.Context, blkID ids.ID) (snowman.Block, return e.GetBlockF(ctx, blkID) } if e.CantGetBlock && e.T != nil { - e.T.Fatalf("Unexpectedly called GetBlock") + require := require.New(e.T) + require.FailNow(errGetBlock.Error()) } return nil, errGetBlock } diff --git a/snow/engine/snowman/transitive_test.go b/snow/engine/snowman/transitive_test.go index 52f345e5e39..3c9d8d59447 100644 --- a/snow/engine/snowman/transitive_test.go +++ b/snow/engine/snowman/transitive_test.go @@ -22,7 +22,6 @@ import ( "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/set" - "github.com/ava-labs/avalanchego/utils/wrappers" ) var ( @@ -35,13 +34,13 @@ var ( ) func setup(t *testing.T, commonCfg common.Config, engCfg Config) (ids.NodeID, validators.Set, *common.SenderTest, *block.TestVM, *Transitive, snowman.Block) { + require := require.New(t) + vals := validators.NewSet() engCfg.Validators = vals vdr := ids.GenerateTestNodeID() - if err := vals.Add(vdr, nil, ids.Empty, 1); err != nil { - t.Fatal(err) - } + require.NoError(vals.Add(vdr, nil, ids.Empty, 1)) sender := &common.SenderTest{T: t} engCfg.Sender = sender @@ -53,9 +52,7 @@ func setup(t 
*testing.T, commonCfg common.Config, engCfg Config) (ids.NodeID, va engCfg.VM = vm snowGetHandler, err := getter.New(vm, commonCfg) - if err != nil { - t.Fatal(err) - } + require.NoError(err) engCfg.AllGetsServer = snowGetHandler vm.Default(true) @@ -81,13 +78,9 @@ func setup(t *testing.T, commonCfg common.Config, engCfg Config) (ids.NodeID, va } te, err := newTransitive(engCfg) - if err != nil { - t.Fatal(err) - } + require.NoError(err) - if err := te.Start(context.Background(), 0); err != nil { - t.Fatal(err) - } + require.NoError(te.Start(context.Background(), 0)) vm.GetBlockF = nil vm.LastAcceptedF = nil @@ -101,6 +94,8 @@ func setupDefaultConfig(t *testing.T) (ids.NodeID, validators.Set, *common.Sende } func TestEngineShutdown(t *testing.T) { + require := require.New(t) + _, _, _, vm, transitive, _ := setupDefaultConfig(t) vmShutdownCalled := false vm.ShutdownF = func(context.Context) error { @@ -108,20 +103,16 @@ func TestEngineShutdown(t *testing.T) { return nil } vm.CantShutdown = false - if err := transitive.Shutdown(context.Background()); err != nil { - t.Fatal(err) - } - if !vmShutdownCalled { - t.Fatal("Shutting down the Transitive did not shutdown the VM") - } + require.NoError(transitive.Shutdown(context.Background())) + require.True(vmShutdownCalled) } func TestEngineAdd(t *testing.T) { + require := require.New(t) + vdr, _, sender, vm, te, gBlk := setupDefaultConfig(t) - if te.Ctx.ChainID != ids.Empty { - t.Fatalf("Wrong chain ID") - } + require.Equal(ids.Empty, te.Ctx.ChainID) parent := &snowman.TestBlock{TestDecidable: choices.TestDecidable{ IDV: ids.GenerateTestID(), @@ -141,22 +132,14 @@ func TestEngineAdd(t *testing.T) { reqID := new(uint32) sender.SendGetF = func(_ context.Context, inVdr ids.NodeID, requestID uint32, blkID ids.ID) { *reqID = requestID - if *asked { - t.Fatalf("Asked multiple times") - } + require.False(*asked) *asked = true - if vdr != inVdr { - t.Fatalf("Asking wrong validator for block") - } - if blkID != blk.Parent() { - t.Fatalf("Asking for wrong block") - } + require.Equal(vdr, inVdr) + require.Equal(blk.Parent(), blkID) } vm.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { - if !bytes.Equal(b, blk.Bytes()) { - t.Fatalf("Wrong bytes") - } + require.Equal(blk.Bytes(), b) return blk, nil } @@ -171,36 +154,27 @@ func TestEngineAdd(t *testing.T) { } } - if err := te.Put(context.Background(), vdr, 0, blk.Bytes()); err != nil { - t.Fatal(err) - } + require.NoError(te.Put(context.Background(), vdr, 0, blk.Bytes())) vm.ParseBlockF = nil - if !*asked { - t.Fatalf("Didn't ask for a missing block") - } - - if len(te.blocked) != 1 { - t.Fatalf("Should have been blocking on request") - } + require.True(*asked) + require.Len(te.blocked, 1) vm.ParseBlockF = func(context.Context, []byte) (snowman.Block, error) { return nil, errUnknownBytes } - if err := te.Put(context.Background(), vdr, *reqID, nil); err != nil { - t.Fatal(err) - } + require.NoError(te.Put(context.Background(), vdr, *reqID, nil)) vm.ParseBlockF = nil - if len(te.blocked) != 0 { - t.Fatalf("Should have finished blocking issue") - } + require.Empty(te.blocked) } func TestEngineQuery(t *testing.T) { + require := require.New(t) + vdr, _, sender, vm, te, gBlk := setupDefaultConfig(t) blk := &snowman.TestBlock{ @@ -215,25 +189,13 @@ func TestEngineQuery(t *testing.T) { chitted := new(bool) sender.SendChitsF = func(_ context.Context, inVdr ids.NodeID, requestID uint32, prefSet []ids.ID, accepted []ids.ID) { - if *chitted { - t.Fatalf("Sent multiple chits") - } + 
require.False(*chitted) *chitted = true - if requestID != 15 { - t.Fatalf("Wrong request ID") - } - if len(prefSet) != 1 { - t.Fatal("Should only be one vote") - } - if gBlk.ID() != prefSet[0] { - t.Fatalf("Wrong chits block") - } - if len(accepted) != 1 { - t.Fatal("accepted should only have one element") - } - if gBlk.ID() != accepted[0] { - t.Fatalf("Wrong accepted frontier") - } + require.Equal(uint32(15), requestID) + require.Len(prefSet, 1) + require.Equal(gBlk.ID(), prefSet[0]) + require.Len(accepted, 1) + require.Equal(gBlk.ID(), accepted[0]) } blocked := new(bool) @@ -250,64 +212,41 @@ func TestEngineQuery(t *testing.T) { asked := new(bool) getRequestID := new(uint32) sender.SendGetF = func(_ context.Context, inVdr ids.NodeID, requestID uint32, blkID ids.ID) { - if *asked { - t.Fatalf("Asked multiple times") - } + require.False(*asked) *asked = true *getRequestID = requestID - if vdr != inVdr { - t.Fatalf("Asking wrong validator for block") - } - if blk.ID() != blkID && gBlk.ID() != blkID { - t.Fatalf("Asking for wrong block") - } + require.Equal(vdr, inVdr) + require.Contains([]ids.ID{ + blk.ID(), + gBlk.ID(), + }, blkID) } - if err := te.PullQuery(context.Background(), vdr, 15, blk.ID()); err != nil { - t.Fatal(err) - } - if !*chitted { - t.Fatalf("Didn't respond with chits") - } - if !*blocked { - t.Fatalf("Didn't request block") - } - if !*asked { - t.Fatalf("Didn't request block from validator") - } + require.NoError(te.PullQuery(context.Background(), vdr, 15, blk.ID())) + require.True(*chitted) + require.True(*blocked) + require.True(*asked) queried := new(bool) queryRequestID := new(uint32) sender.SendPushQueryF = func(_ context.Context, inVdrs set.Set[ids.NodeID], requestID uint32, blkBytes []byte) { - if *queried { - t.Fatalf("Asked multiple times") - } + require.False(*queried) *queried = true *queryRequestID = requestID vdrSet := set.Set[ids.NodeID]{} vdrSet.Add(vdr) - if !inVdrs.Equals(vdrSet) { - t.Fatalf("Asking wrong validator for preference") - } - if !bytes.Equal(blk.Bytes(), blkBytes) { - t.Fatalf("Asking for wrong block") - } + require.Equal(vdrSet, inVdrs) + require.Equal(blk.Bytes(), blkBytes) } vm.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { - if !bytes.Equal(b, blk.Bytes()) { - t.Fatalf("Wrong bytes") - } + require.Equal(blk.Bytes(), b) return blk, nil } - if err := te.Put(context.Background(), vdr, *getRequestID, blk.Bytes()); err != nil { - t.Fatal(err) - } + require.NoError(te.Put(context.Background(), vdr, *getRequestID, blk.Bytes())) vm.ParseBlockF = nil - if !*queried { - t.Fatalf("Didn't ask for preferences") - } + require.True(*queried) blk1 := &snowman.TestBlock{ TestDecidable: choices.TestDecidable{ @@ -326,49 +265,33 @@ func TestEngineQuery(t *testing.T) { case blk1.ID(): return nil, errUnknownBlock } - t.Fatalf("Wrong block requested") - panic("Should have failed") + require.FailNow("Wrong block requested") + return nil, errUnknownBlock } *asked = false sender.SendGetF = func(_ context.Context, inVdr ids.NodeID, requestID uint32, blkID ids.ID) { - if *asked { - t.Fatalf("Asked multiple times") - } + require.False(*asked) *asked = true *getRequestID = requestID - if vdr != inVdr { - t.Fatalf("Asking wrong validator for block") - } - if blk1.ID() != blkID { - t.Fatalf("Asking for wrong block") - } - } - if err := te.Chits(context.Background(), vdr, *queryRequestID, []ids.ID{blk1.ID()}, nil); err != nil { - t.Fatal(err) + require.Equal(vdr, inVdr) + require.Equal(blk1.ID(), blkID) } + 
require.NoError(te.Chits(context.Background(), vdr, *queryRequestID, []ids.ID{blk1.ID()}, nil)) *queried = false sender.SendPushQueryF = func(_ context.Context, inVdrs set.Set[ids.NodeID], requestID uint32, blkBytes []byte) { - if *queried { - t.Fatalf("Asked multiple times") - } + require.False(*queried) *queried = true *queryRequestID = requestID vdrSet := set.Set[ids.NodeID]{} vdrSet.Add(vdr) - if !inVdrs.Equals(vdrSet) { - t.Fatalf("Asking wrong validator for preference") - } - if !bytes.Equal(blk1.Bytes(), blkBytes) { - t.Fatalf("Asking for wrong block") - } + require.Equal(vdrSet, inVdrs) + require.Equal(blk1.Bytes(), blkBytes) } vm.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { - if !bytes.Equal(b, blk1.Bytes()) { - t.Fatalf("Wrong bytes") - } + require.Equal(blk1.Bytes(), b) vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { @@ -377,35 +300,27 @@ func TestEngineQuery(t *testing.T) { case blk1.ID(): return blk1, nil } - t.Fatalf("Wrong block requested") - panic("Should have failed") + require.FailNow("Wrong block requested") + return nil, nil } return blk1, nil } - if err := te.Put(context.Background(), vdr, *getRequestID, blk1.Bytes()); err != nil { - t.Fatal(err) - } + require.NoError(te.Put(context.Background(), vdr, *getRequestID, blk1.Bytes())) vm.ParseBlockF = nil - if blk1.Status() != choices.Accepted { - t.Fatalf("Should have executed block") - } - if len(te.blocked) != 0 { - t.Fatalf("Should have finished blocking") - } + require.Equal(choices.Accepted, blk1.Status()) + require.Empty(te.blocked) _ = te.polls.String() // Shouldn't panic - if err := te.QueryFailed(context.Background(), vdr, *queryRequestID); err != nil { - t.Fatal(err) - } - if len(te.blocked) != 0 { - t.Fatalf("Should have finished blocking") - } + require.NoError(te.QueryFailed(context.Background(), vdr, *queryRequestID)) + require.Empty(te.blocked) } func TestEngineMultipleQuery(t *testing.T) { + require := require.New(t) + engCfg := DefaultConfigs() engCfg.Params = snowball.Parameters{ K: 3, @@ -426,15 +341,9 @@ func TestEngineMultipleQuery(t *testing.T) { vdr1 := ids.GenerateTestNodeID() vdr2 := ids.GenerateTestNodeID() - errs := wrappers.Errs{} - errs.Add( - vals.Add(vdr0, nil, ids.Empty, 1), - vals.Add(vdr1, nil, ids.Empty, 1), - vals.Add(vdr2, nil, ids.Empty, 1), - ) - if errs.Errored() { - t.Fatal(errs.Err) - } + require.NoError(vals.Add(vdr0, nil, ids.Empty, 1)) + require.NoError(vals.Add(vdr1, nil, ids.Empty, 1)) + require.NoError(vals.Add(vdr2, nil, ids.Empty, 1)) sender := &common.SenderTest{T: t} engCfg.Sender = sender @@ -457,20 +366,14 @@ func TestEngineMultipleQuery(t *testing.T) { return gBlk.ID(), nil } vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { - if blkID != gBlk.ID() { - t.Fatalf("Wrong block requested") - } + require.Equal(gBlk.ID(), blkID) return gBlk, nil } te, err := newTransitive(engCfg) - if err != nil { - t.Fatal(err) - } + require.NoError(err) - if err := te.Start(context.Background(), 0); err != nil { - t.Fatal(err) - } + require.NoError(te.Start(context.Background(), 0)) vm.GetBlockF = nil vm.LastAcceptedF = nil @@ -488,19 +391,13 @@ func TestEngineMultipleQuery(t *testing.T) { queried := new(bool) queryRequestID := new(uint32) sender.SendPushQueryF = func(_ context.Context, inVdrs set.Set[ids.NodeID], requestID uint32, blkBytes []byte) { - if *queried { - t.Fatalf("Asked multiple times") - } + require.False(*queried) *queried = true *queryRequestID = requestID vdrSet := 
set.Set[ids.NodeID]{} vdrSet.Add(vdr0, vdr1, vdr2) - if !inVdrs.Equals(vdrSet) { - t.Fatalf("Asking wrong validator for preference") - } - if !bytes.Equal(blk0.Bytes(), blkBytes) { - t.Fatalf("Asking for wrong block") - } + require.Equal(vdrSet, inVdrs) + require.Equal(blk0.Bytes(), blkBytes) } vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { @@ -512,9 +409,7 @@ func TestEngineMultipleQuery(t *testing.T) { } } - if err := te.issue(context.Background(), blk0); err != nil { - t.Fatal(err) - } + require.NoError(te.issue(context.Background(), blk0)) blk1 := &snowman.TestBlock{ TestDecidable: choices.TestDecidable{ @@ -535,32 +430,22 @@ func TestEngineMultipleQuery(t *testing.T) { case blk1.ID(): return nil, errUnknownBlock } - t.Fatalf("Unknown block") - panic("Should have errored") + require.FailNow("Unknown block") + return nil, nil } asked := new(bool) getRequestID := new(uint32) sender.SendGetF = func(_ context.Context, inVdr ids.NodeID, requestID uint32, blkID ids.ID) { - if *asked { - t.Fatalf("Asked multiple times") - } + require.False(*asked) *asked = true *getRequestID = requestID - if vdr0 != inVdr { - t.Fatalf("Asking wrong validator for block") - } - if blk1.ID() != blkID { - t.Fatalf("Asking for wrong block") - } + require.Equal(vdr0, inVdr) + require.Equal(blk1.ID(), blkID) } blkSet := []ids.ID{blk1.ID()} - if err := te.Chits(context.Background(), vdr0, *queryRequestID, blkSet, nil); err != nil { - t.Fatal(err) - } - if err := te.Chits(context.Background(), vdr1, *queryRequestID, blkSet, nil); err != nil { - t.Fatal(err) - } + require.NoError(te.Chits(context.Background(), vdr0, *queryRequestID, blkSet, nil)) + require.NoError(te.Chits(context.Background(), vdr1, *queryRequestID, blkSet, nil)) vm.ParseBlockF = func(context.Context, []byte) (snowman.Block, error) { vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { @@ -570,8 +455,8 @@ func TestEngineMultipleQuery(t *testing.T) { case blkID == blk1.ID(): return blk1, nil } - t.Fatalf("Wrong block requested") - panic("Should have failed") + require.FailNow("Wrong block requested") + return nil, nil } return blk1, nil @@ -580,39 +465,27 @@ func TestEngineMultipleQuery(t *testing.T) { *queried = false secondQueryRequestID := new(uint32) sender.SendPushQueryF = func(_ context.Context, inVdrs set.Set[ids.NodeID], requestID uint32, blkBytes []byte) { - if *queried { - t.Fatalf("Asked multiple times") - } + require.False(*queried) *queried = true *secondQueryRequestID = requestID vdrSet := set.Set[ids.NodeID]{} vdrSet.Add(vdr0, vdr1, vdr2) - if !inVdrs.Equals(vdrSet) { - t.Fatalf("Asking wrong validator for preference") - } - if !bytes.Equal(blk1.Bytes(), blkBytes) { - t.Fatalf("Asking for wrong block") - } - } - if err := te.Put(context.Background(), vdr0, *getRequestID, blk1.Bytes()); err != nil { - t.Fatal(err) + require.Equal(vdrSet, inVdrs) + require.Equal(blk1.Bytes(), blkBytes) } + require.NoError(te.Put(context.Background(), vdr0, *getRequestID, blk1.Bytes())) // Should be dropped because the query was already filled blkSet = []ids.ID{blk0.ID()} - if err := te.Chits(context.Background(), vdr2, *queryRequestID, blkSet, nil); err != nil { - t.Fatal(err) - } + require.NoError(te.Chits(context.Background(), vdr2, *queryRequestID, blkSet, nil)) - if blk1.Status() != choices.Accepted { - t.Fatalf("Should have executed block") - } - if len(te.blocked) != 0 { - t.Fatalf("Should have finished blocking") - } + require.Equal(choices.Accepted, blk1.Status()) + require.Empty(te.blocked) } 
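The conversions above all reduce to the same few substitutions: hoist `require := require.New(t)` once at the top of the test, replace `if err != nil { t.Fatal(err) }` with `require.NoError(err)`, and replace hand-rolled boolean comparisons with the dedicated assertion that names the intent. A minimal, self-contained sketch of the target idioms follows; the variable names and values are hypothetical stand-ins for illustration, not code from this repository:

package example

import (
	"testing"

	"github.com/stretchr/testify/require"
)

func TestAssertionIdioms(t *testing.T) {
	require := require.New(t) // hoisted once; later assertions drop the t argument

	pending := []int{}
	version := uint16(0)
	contacted := 3
	limit := 5

	require.Zero(version)                   // instead of require.Equal(uint16(0), version)
	require.Empty(pending)                  // instead of require.Equal(0, len(pending))
	require.Positive(contacted)             // instead of require.True(contacted > 0)
	require.LessOrEqual(contacted, limit)   // instead of require.True(contacted <= limit)
	require.Contains([]int{1, 3}, contacted) // instead of require.True(x == a || x == b)
}

Each replacement also improves the failure output: `require.Equal`, `require.Len`, and friends print the expected and actual values, where the old `t.Fatalf("Wrong request ID")` style reported only a fixed message.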
func TestEngineBlockedIssue(t *testing.T) { + require := require.New(t) + _, _, sender, vm, te, gBlk := setupDefaultConfig(t) sender.Default(false) @@ -648,21 +521,16 @@ func TestEngineBlockedIssue(t *testing.T) { } } - if err := te.issue(context.Background(), blk1); err != nil { - t.Fatal(err) - } + require.NoError(te.issue(context.Background(), blk1)) blk0.StatusV = choices.Processing - if err := te.issue(context.Background(), blk0); err != nil { - t.Fatal(err) - } - - if blk1.ID() != te.Consensus.Preference() { - t.Fatalf("Should have issued blk1") - } + require.NoError(te.issue(context.Background(), blk0)) + require.Equal(blk1.ID(), te.Consensus.Preference()) } func TestEngineAbandonResponse(t *testing.T) { + require := require.New(t) + vdr, _, sender, vm, te, gBlk := setupDefaultConfig(t) sender.Default(false) @@ -684,59 +552,44 @@ func TestEngineAbandonResponse(t *testing.T) { case blkID == blk.ID(): return nil, errUnknownBlock } - t.Fatalf("Wrong block requested") + require.FailNow("Wrong block requested") return nil, errUnknownBlock } - if err := te.issue(context.Background(), blk); err != nil { - t.Fatal(err) - } - if err := te.QueryFailed(context.Background(), vdr, 1); err != nil { - t.Fatal(err) - } + require.NoError(te.issue(context.Background(), blk)) + require.NoError(te.QueryFailed(context.Background(), vdr, 1)) - if len(te.blocked) != 0 { - t.Fatalf("Should have removed blocking event") - } + require.Empty(te.blocked) } func TestEngineFetchBlock(t *testing.T) { + require := require.New(t) + vdr, _, sender, vm, te, gBlk := setupDefaultConfig(t) sender.Default(false) vm.GetBlockF = func(_ context.Context, id ids.ID) (snowman.Block, error) { - if id == gBlk.ID() { - return gBlk, nil - } - t.Fatalf("Unknown block") - panic("Should have failed") + require.Equal(gBlk.ID(), id) + return gBlk, nil } added := new(bool) sender.SendPutF = func(_ context.Context, inVdr ids.NodeID, requestID uint32, blk []byte) { - if vdr != inVdr { - t.Fatalf("Wrong validator") - } - if requestID != 123 { - t.Fatalf("Wrong request id") - } - if !bytes.Equal(gBlk.Bytes(), blk) { - t.Fatalf("Asking for wrong block") - } + require.Equal(vdr, inVdr) + require.Equal(uint32(123), requestID) + require.Equal(gBlk.Bytes(), blk) *added = true } - if err := te.Get(context.Background(), vdr, 123, gBlk.ID()); err != nil { - t.Fatal(err) - } + require.NoError(te.Get(context.Background(), vdr, 123, gBlk.ID())) - if !*added { - t.Fatalf("Should have sent block to peer") - } + require.True(*added) } func TestEnginePushQuery(t *testing.T) { + require := require.New(t) + vdr, _, sender, vm, te, gBlk := setupDefaultConfig(t) sender.Default(true) @@ -771,59 +624,35 @@ func TestEnginePushQuery(t *testing.T) { chitted := new(bool) sender.SendChitsF = func(_ context.Context, inVdr ids.NodeID, requestID uint32, votes []ids.ID, accepted []ids.ID) { - if *chitted { - t.Fatalf("Sent chit multiple times") - } + require.False(*chitted) *chitted = true - if inVdr != vdr { - t.Fatalf("Asking wrong validator for preference") - } - if requestID != 20 { - t.Fatalf("Wrong request id") - } - if len(votes) != 1 { - t.Fatal("votes should only have one element") - } - if gBlk.ID() != votes[0] { - t.Fatalf("Asking for wrong block") - } - if len(accepted) != 1 { - t.Fatal("accepted should only have one element") - } - if gBlk.ID() != accepted[0] { - t.Fatalf("Wrong accepted frontier") - } + require.Equal(vdr, inVdr) + require.Equal(uint32(20), requestID) + require.Len(votes, 1) + require.Equal(gBlk.ID(), votes[0]) + require.Len(accepted, 1) + 
require.Equal(gBlk.ID(), accepted[0]) } queried := new(bool) sender.SendPushQueryF = func(_ context.Context, inVdrs set.Set[ids.NodeID], _ uint32, blkBytes []byte) { - if *queried { - t.Fatalf("Asked multiple times") - } + require.False(*queried) *queried = true vdrSet := set.Set[ids.NodeID]{} vdrSet.Add(vdr) - if !inVdrs.Equals(vdrSet) { - t.Fatalf("Asking wrong validator for preference") - } - if !bytes.Equal(blk.Bytes(), blkBytes) { - t.Fatalf("Asking for wrong block") - } + require.Equal(vdrSet, inVdrs) + require.Equal(blk.Bytes(), blkBytes) } - if err := te.PushQuery(context.Background(), vdr, 20, blk.Bytes()); err != nil { - t.Fatal(err) - } + require.NoError(te.PushQuery(context.Background(), vdr, 20, blk.Bytes())) - if !*chitted { - t.Fatalf("Should have sent a chit to the peer") - } - if !*queried { - t.Fatalf("Should have sent a query to the peer") - } + require.True(*chitted) + require.True(*queried) } func TestEngineBuildBlock(t *testing.T) { + require := require.New(t) + vdr, _, sender, vm, te, gBlk := setupDefaultConfig(t) sender.Default(true) @@ -849,55 +678,44 @@ func TestEngineBuildBlock(t *testing.T) { queried := new(bool) sender.SendPushQueryF = func(_ context.Context, inVdrs set.Set[ids.NodeID], _ uint32, blkBytes []byte) { - if *queried { - t.Fatalf("Asked multiple times") - } + require.False(*queried) *queried = true vdrSet := set.Set[ids.NodeID]{} vdrSet.Add(vdr) - if !inVdrs.Equals(vdrSet) { - t.Fatalf("Asking wrong validator for preference") - } + require.Equal(vdrSet, inVdrs) } vm.BuildBlockF = func(context.Context) (snowman.Block, error) { return blk, nil } - if err := te.Notify(context.Background(), common.PendingTxs); err != nil { - t.Fatal(err) - } + require.NoError(te.Notify(context.Background(), common.PendingTxs)) - if !*queried { - t.Fatalf("Should have sent a query to the peer") - } + require.True(*queried) } func TestEngineRepoll(t *testing.T) { + require := require.New(t) vdr, _, sender, _, te, _ := setupDefaultConfig(t) sender.Default(true) queried := new(bool) sender.SendPullQueryF = func(_ context.Context, inVdrs set.Set[ids.NodeID], _ uint32, blkID ids.ID) { - if *queried { - t.Fatalf("Asked multiple times") - } + require.False(*queried) *queried = true vdrSet := set.Set[ids.NodeID]{} vdrSet.Add(vdr) - if !inVdrs.Equals(vdrSet) { - t.Fatalf("Asking wrong validator for preference") - } + require.Equal(vdrSet, inVdrs) } te.repoll(context.Background()) - if !*queried { - t.Fatalf("Should have sent a query to the peer") - } + require.True(*queried) } func TestVoteCanceling(t *testing.T) { + require := require.New(t) + engCfg := DefaultConfigs() engCfg.Params = snowball.Parameters{ K: 3, @@ -918,15 +736,9 @@ func TestVoteCanceling(t *testing.T) { vdr1 := ids.GenerateTestNodeID() vdr2 := ids.GenerateTestNodeID() - errs := wrappers.Errs{} - errs.Add( - vals.Add(vdr0, nil, ids.Empty, 1), - vals.Add(vdr1, nil, ids.Empty, 1), - vals.Add(vdr2, nil, ids.Empty, 1), - ) - if errs.Errored() { - t.Fatal(errs.Err) - } + require.NoError(vals.Add(vdr0, nil, ids.Empty, 1)) + require.NoError(vals.Add(vdr1, nil, ids.Empty, 1)) + require.NoError(vals.Add(vdr2, nil, ids.Empty, 1)) sender := &common.SenderTest{T: t} engCfg.Sender = sender @@ -949,23 +761,14 @@ func TestVoteCanceling(t *testing.T) { return gBlk.ID(), nil } vm.GetBlockF = func(_ context.Context, id ids.ID) (snowman.Block, error) { - switch id { - case gBlk.ID(): - return gBlk, nil - default: - t.Fatalf("Loaded unknown block") - panic("Should have failed") - } + require.Equal(gBlk.ID(), id) + return gBlk, nil 
} te, err := newTransitive(engCfg) - if err != nil { - t.Fatal(err) - } + require.NoError(err) - if err := te.Start(context.Background(), 0); err != nil { - t.Fatal(err) - } + require.NoError(te.Start(context.Background(), 0)) vm.LastAcceptedF = nil @@ -982,51 +785,35 @@ func TestVoteCanceling(t *testing.T) { queried := new(bool) queryRequestID := new(uint32) sender.SendPushQueryF = func(_ context.Context, inVdrs set.Set[ids.NodeID], requestID uint32, blkBytes []byte) { - if *queried { - t.Fatalf("Asked multiple times") - } + require.False(*queried) *queried = true *queryRequestID = requestID vdrSet := set.Set[ids.NodeID]{} vdrSet.Add(vdr0, vdr1, vdr2) - if !inVdrs.Equals(vdrSet) { - t.Fatalf("Asking wrong validator for preference") - } - if !bytes.Equal(blk.Bytes(), blkBytes) { - t.Fatalf("Asking for wrong block") - } + require.Equal(vdrSet, inVdrs) + require.Equal(blk.Bytes(), blkBytes) } - if err := te.issue(context.Background(), blk); err != nil { - t.Fatal(err) - } + require.NoError(te.issue(context.Background(), blk)) - if te.polls.Len() != 1 { - t.Fatalf("Shouldn't have finished blocking issue") - } + require.Equal(1, te.polls.Len()) - if err := te.QueryFailed(context.Background(), vdr0, *queryRequestID); err != nil { - t.Fatal(err) - } + require.NoError(te.QueryFailed(context.Background(), vdr0, *queryRequestID)) - if te.polls.Len() != 1 { - t.Fatalf("Shouldn't have finished blocking issue") - } + require.Equal(1, te.polls.Len()) repolled := new(bool) sender.SendPullQueryF = func(_ context.Context, inVdrs set.Set[ids.NodeID], requestID uint32, blkID ids.ID) { *repolled = true } - if err := te.QueryFailed(context.Background(), vdr1, *queryRequestID); err != nil { - t.Fatal(err) - } + require.NoError(te.QueryFailed(context.Background(), vdr1, *queryRequestID)) - if !*repolled { - t.Fatalf("Should have finished blocking issue and repolled the network") - } + require.True(*repolled) } func TestEngineNoQuery(t *testing.T) { + require := require.New(t) + engCfg := DefaultConfigs() sender := &common.SenderTest{T: t} @@ -1054,13 +841,9 @@ func TestEngineNoQuery(t *testing.T) { engCfg.VM = vm te, err := newTransitive(engCfg) - if err != nil { - t.Fatal(err) - } + require.NoError(err) - if err := te.Start(context.Background(), 0); err != nil { - t.Fatal(err) - } + require.NoError(te.Start(context.Background(), 0)) blk := &snowman.TestBlock{ TestDecidable: choices.TestDecidable{ @@ -1072,12 +855,12 @@ func TestEngineNoQuery(t *testing.T) { BytesV: []byte{1}, } - if err := te.issue(context.Background(), blk); err != nil { - t.Fatal(err) - } + require.NoError(te.issue(context.Background(), blk)) } func TestEngineNoRepollQuery(t *testing.T) { + require := require.New(t) + engCfg := DefaultConfigs() sender := &common.SenderTest{T: t} @@ -1105,18 +888,16 @@ func TestEngineNoRepollQuery(t *testing.T) { engCfg.VM = vm te, err := newTransitive(engCfg) - if err != nil { - t.Fatal(err) - } + require.NoError(err) - if err := te.Start(context.Background(), 0); err != nil { - t.Fatal(err) - } + require.NoError(te.Start(context.Background(), 0)) te.repoll(context.Background()) } func TestEngineAbandonQuery(t *testing.T) { + require := require.New(t) + vdr, _, sender, vm, te, _ := setupDefaultConfig(t) sender.Default(true) @@ -1124,13 +905,8 @@ func TestEngineAbandonQuery(t *testing.T) { blkID := ids.GenerateTestID() vm.GetBlockF = func(_ context.Context, id ids.ID) (snowman.Block, error) { - switch id { - case blkID: - return nil, errUnknownBlock - default: - t.Fatalf("Loaded unknown block") - 
panic("Should have failed") - } + require.Equal(blkID, id) + return nil, errUnknownBlock } reqID := new(uint32) @@ -1140,21 +916,13 @@ func TestEngineAbandonQuery(t *testing.T) { sender.CantSendChits = false - if err := te.PullQuery(context.Background(), vdr, 0, blkID); err != nil { - t.Fatal(err) - } + require.NoError(te.PullQuery(context.Background(), vdr, 0, blkID)) - if te.blkReqs.Len() != 1 { - t.Fatalf("Should have issued request") - } + require.Equal(1, te.blkReqs.Len()) - if err := te.GetFailed(context.Background(), vdr, *reqID); err != nil { - t.Fatal(err) - } + require.NoError(te.GetFailed(context.Background(), vdr, *reqID)) - if te.blkReqs.Len() != 0 { - t.Fatalf("Should have removed request") - } + require.Zero(te.blkReqs.Len()) } func TestEngineAbandonChit(t *testing.T) { @@ -1181,7 +949,7 @@ func TestEngineAbandonChit(t *testing.T) { case blk.ID(): return nil, errUnknownBlock } - t.Fatalf("Wrong block requested") + require.FailNow("Wrong block requested") return nil, errUnknownBlock } @@ -1236,7 +1004,7 @@ func TestEngineAbandonChitWithUnexpectedPutBlock(t *testing.T) { case blk.ID(): return nil, errUnknownBlock } - t.Fatalf("Wrong block requested") + require.FailNow("Wrong block requested") return nil, errUnknownBlock } @@ -1276,6 +1044,8 @@ func TestEngineAbandonChitWithUnexpectedPutBlock(t *testing.T) { } func TestEngineBlockingChitRequest(t *testing.T) { + require := require.New(t) + vdr, _, sender, vm, te, gBlk := setupDefaultConfig(t) sender.Default(true) @@ -1322,42 +1092,29 @@ func TestEngineBlockingChitRequest(t *testing.T) { sender.SendGetF = func(context.Context, ids.NodeID, uint32, ids.ID) {} vm.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { - switch { - case bytes.Equal(b, blockingBlk.Bytes()): - return blockingBlk, nil - default: - t.Fatalf("Loaded unknown block") - panic("Should have failed") - } + require.Equal(blockingBlk.Bytes(), b) + return blockingBlk, nil } - if err := te.issue(context.Background(), parentBlk); err != nil { - t.Fatal(err) - } + require.NoError(te.issue(context.Background(), parentBlk)) sender.CantSendChits = false - if err := te.PushQuery(context.Background(), vdr, 0, blockingBlk.Bytes()); err != nil { - t.Fatal(err) - } + require.NoError(te.PushQuery(context.Background(), vdr, 0, blockingBlk.Bytes())) - if len(te.blocked) != 2 { - t.Fatalf("Both inserts should be blocking") - } + require.Len(te.blocked, 2) sender.CantSendPushQuery = false missingBlk.StatusV = choices.Processing - if err := te.issue(context.Background(), missingBlk); err != nil { - t.Fatal(err) - } + require.NoError(te.issue(context.Background(), missingBlk)) - if len(te.blocked) != 0 { - t.Fatalf("Both inserts should not longer be blocking") - } + require.Empty(te.blocked) } func TestEngineBlockingChitResponse(t *testing.T) { + require := require.New(t) + vdr, _, sender, vm, te, gBlk := setupDefaultConfig(t) sender.Default(true) @@ -1401,44 +1158,34 @@ func TestEngineBlockingChitResponse(t *testing.T) { } } - if err := te.issue(context.Background(), blockingBlk); err != nil { - t.Fatal(err) - } + require.NoError(te.issue(context.Background(), blockingBlk)) queryRequestID := new(uint32) sender.SendPushQueryF = func(_ context.Context, inVdrs set.Set[ids.NodeID], requestID uint32, blkBytes []byte) { *queryRequestID = requestID vdrSet := set.Set[ids.NodeID]{} vdrSet.Add(vdr) - if !inVdrs.Equals(vdrSet) { - t.Fatalf("Asking wrong validator for preference") - } - if !bytes.Equal(issuedBlk.Bytes(), blkBytes) { - t.Fatalf("Asking for wrong block") - } + 
require.Equal(vdrSet, inVdrs) + require.Equal(issuedBlk.Bytes(), blkBytes) } - if err := te.issue(context.Background(), issuedBlk); err != nil { - t.Fatal(err) - } + require.NoError(te.issue(context.Background(), issuedBlk)) sender.SendPushQueryF = nil sender.CantSendPushQuery = false - if err := te.Chits(context.Background(), vdr, *queryRequestID, []ids.ID{blockingBlk.ID()}, nil); err != nil { - t.Fatal(err) - } + require.NoError(te.Chits(context.Background(), vdr, *queryRequestID, []ids.ID{blockingBlk.ID()}, nil)) - require.Len(t, te.blocked, 2) + require.Len(te.blocked, 2) sender.CantSendPullQuery = false missingBlk.StatusV = choices.Processing - if err := te.issue(context.Background(), missingBlk); err != nil { - t.Fatal(err) - } + require.NoError(te.issue(context.Background(), missingBlk)) } func TestEngineRetryFetch(t *testing.T) { + require := require.New(t) + vdr, _, sender, vm, te, gBlk := setupDefaultConfig(t) sender.Default(true) @@ -1461,16 +1208,12 @@ func TestEngineRetryFetch(t *testing.T) { } sender.CantSendChits = false - if err := te.PullQuery(context.Background(), vdr, 0, missingBlk.ID()); err != nil { - t.Fatal(err) - } + require.NoError(te.PullQuery(context.Background(), vdr, 0, missingBlk.ID())) vm.CantGetBlock = true sender.SendGetF = nil - if err := te.GetFailed(context.Background(), vdr, *reqID); err != nil { - t.Fatal(err) - } + require.NoError(te.GetFailed(context.Background(), vdr, *reqID)) vm.CantGetBlock = false @@ -1479,19 +1222,17 @@ func TestEngineRetryFetch(t *testing.T) { *called = true } - if err := te.PullQuery(context.Background(), vdr, 0, missingBlk.ID()); err != nil { - t.Fatal(err) - } + require.NoError(te.PullQuery(context.Background(), vdr, 0, missingBlk.ID())) vm.CantGetBlock = true sender.SendGetF = nil - if !*called { - t.Fatalf("Should have requested the block again") - } + require.True(*called) } func TestEngineUndeclaredDependencyDeadlock(t *testing.T) { + require := require.New(t) + vdr, _, sender, vm, te, gBlk := setupDefaultConfig(t) sender.Default(true) @@ -1536,62 +1277,46 @@ func TestEngineUndeclaredDependencyDeadlock(t *testing.T) { return nil, errUnknownBlock } } - if err := te.issue(context.Background(), validBlk); err != nil { - t.Fatal(err) - } + require.NoError(te.issue(context.Background(), validBlk)) sender.SendPushQueryF = nil - if err := te.issue(context.Background(), invalidBlk); err != nil { - t.Fatal(err) - } + require.NoError(te.issue(context.Background(), invalidBlk)) - if err := te.Chits(context.Background(), vdr, *reqID, []ids.ID{invalidBlkID}, nil); err != nil { - t.Fatal(err) - } + require.NoError(te.Chits(context.Background(), vdr, *reqID, []ids.ID{invalidBlkID}, nil)) - if status := validBlk.Status(); status != choices.Accepted { - t.Log(status) - t.Fatalf("Should have bubbled invalid votes to the valid parent") - } + require.Equal(choices.Accepted, validBlk.Status()) } func TestEngineGossip(t *testing.T) { + require := require.New(t) + _, _, sender, vm, te, gBlk := setupDefaultConfig(t) vm.LastAcceptedF = func(context.Context) (ids.ID, error) { return gBlk.ID(), nil } vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { - if blkID == gBlk.ID() { - return gBlk, nil - } - t.Fatal(errUnknownBlock) - return nil, errUnknownBlock + require.Equal(gBlk.ID(), blkID) + return gBlk, nil } called := new(bool) sender.SendGossipF = func(_ context.Context, blkBytes []byte) { *called = true - if !bytes.Equal(blkBytes, gBlk.Bytes()) { - t.Fatal(errUnknownBytes) - } + require.Equal(gBlk.Bytes(), blkBytes) } 
- if err := te.Gossip(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(te.Gossip(context.Background())) - if !*called { - t.Fatalf("Should have gossiped the block") - } + require.True(*called) } func TestEngineInvalidBlockIgnoredFromUnexpectedPeer(t *testing.T) { + require := require.New(t) + vdr, vdrs, sender, vm, te, gBlk := setupDefaultConfig(t) secondVdr := ids.GenerateTestNodeID() - if err := vdrs.Add(secondVdr, nil, ids.Empty, 1); err != nil { - t.Fatal(err) - } + require.NoError(vdrs.Add(secondVdr, nil, ids.Empty, 1)) sender.Default(true) @@ -1640,22 +1365,14 @@ func TestEngineInvalidBlockIgnoredFromUnexpectedPeer(t *testing.T) { reqID := new(uint32) sender.SendGetF = func(_ context.Context, reqVdr ids.NodeID, requestID uint32, blkID ids.ID) { *reqID = requestID - if reqVdr != vdr { - t.Fatalf("Wrong validator requested") - } - if blkID != missingBlk.ID() { - t.Fatalf("Wrong block requested") - } + require.Equal(vdr, reqVdr) + require.Equal(missingBlk.ID(), blkID) } sender.CantSendChits = false - if err := te.PushQuery(context.Background(), vdr, 0, pendingBlk.Bytes()); err != nil { - t.Fatal(err) - } + require.NoError(te.PushQuery(context.Background(), vdr, 0, pendingBlk.Bytes())) - if err := te.Put(context.Background(), secondVdr, *reqID, []byte{3}); err != nil { - t.Fatal(err) - } + require.NoError(te.Put(context.Background(), secondVdr, *reqID, []byte{3})) *parsed = false vm.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { @@ -1682,17 +1399,14 @@ func TestEngineInvalidBlockIgnoredFromUnexpectedPeer(t *testing.T) { missingBlk.StatusV = choices.Processing - if err := te.Put(context.Background(), vdr, *reqID, missingBlk.Bytes()); err != nil { - t.Fatal(err) - } + require.NoError(te.Put(context.Background(), vdr, *reqID, missingBlk.Bytes())) - pref := te.Consensus.Preference() - if pref != pendingBlk.ID() { - t.Fatalf("Shouldn't have abandoned the pending block") - } + require.Equal(pendingBlk.ID(), te.Consensus.Preference()) } func TestEnginePushQueryRequestIDConflict(t *testing.T) { + require := require.New(t) + vdr, _, sender, vm, te, gBlk := setupDefaultConfig(t) sender.Default(true) @@ -1742,25 +1456,17 @@ func TestEnginePushQueryRequestIDConflict(t *testing.T) { reqID := new(uint32) sender.SendGetF = func(_ context.Context, reqVdr ids.NodeID, requestID uint32, blkID ids.ID) { *reqID = requestID - if reqVdr != vdr { - t.Fatalf("Wrong validator requested") - } - if blkID != missingBlk.ID() { - t.Fatalf("Wrong block requested") - } + require.Equal(vdr, reqVdr) + require.Equal(missingBlk.ID(), blkID) } sender.CantSendChits = false - if err := te.PushQuery(context.Background(), vdr, 0, pendingBlk.Bytes()); err != nil { - t.Fatal(err) - } + require.NoError(te.PushQuery(context.Background(), vdr, 0, pendingBlk.Bytes())) sender.SendGetF = nil sender.CantSendGet = false - if err := te.PushQuery(context.Background(), vdr, *reqID, []byte{3}); err != nil { - t.Fatal(err) - } + require.NoError(te.PushQuery(context.Background(), vdr, *reqID, []byte{3})) *parsed = false vm.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { @@ -1785,17 +1491,14 @@ func TestEnginePushQueryRequestIDConflict(t *testing.T) { } sender.CantSendPushQuery = false - if err := te.Put(context.Background(), vdr, *reqID, missingBlk.Bytes()); err != nil { - t.Fatal(err) - } + require.NoError(te.Put(context.Background(), vdr, *reqID, missingBlk.Bytes())) - pref := te.Consensus.Preference() - if pref != pendingBlk.ID() { - t.Fatalf("Shouldn't have abandoned 
the pending block") - } + require.Equal(pendingBlk.ID(), te.Consensus.Preference()) } func TestEngineAggressivePolling(t *testing.T) { + require := require.New(t) + engCfg := DefaultConfigs() engCfg.Params.ConcurrentRepolls = 2 @@ -1803,9 +1506,7 @@ func TestEngineAggressivePolling(t *testing.T) { engCfg.Validators = vals vdr := ids.GenerateTestNodeID() - if err := vals.Add(vdr, nil, ids.Empty, 1); err != nil { - t.Fatal(err) - } + require.NoError(vals.Add(vdr, nil, ids.Empty, 1)) sender := &common.SenderTest{T: t} engCfg.Sender = sender @@ -1828,20 +1529,14 @@ func TestEngineAggressivePolling(t *testing.T) { return gBlk.ID(), nil } vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { - if blkID != gBlk.ID() { - t.Fatalf("Wrong block requested") - } + require.Equal(gBlk.ID(), blkID) return gBlk, nil } te, err := newTransitive(engCfg) - if err != nil { - t.Fatal(err) - } + require.NoError(err) - if err := te.Start(context.Background(), 0); err != nil { - t.Fatal(err) - } + require.NoError(te.Start(context.Background(), 0)) vm.GetBlockF = nil vm.LastAcceptedF = nil @@ -1889,20 +1584,15 @@ func TestEngineAggressivePolling(t *testing.T) { *numPulled++ } - if err := te.Put(context.Background(), vdr, 0, pendingBlk.Bytes()); err != nil { - t.Fatal(err) - } + require.NoError(te.Put(context.Background(), vdr, 0, pendingBlk.Bytes())) - if *numPushed != 1 { - t.Fatalf("Should have initially sent a push query") - } - - if *numPulled != 1 { - t.Fatalf("Should have sent an additional pull query") - } + require.Equal(1, *numPushed) + require.Equal(1, *numPulled) } func TestEngineDoubleChit(t *testing.T) { + require := require.New(t) + engCfg := DefaultConfigs() engCfg.Params = snowball.Parameters{ K: 2, @@ -1922,12 +1612,8 @@ func TestEngineDoubleChit(t *testing.T) { vdr0 := ids.GenerateTestNodeID() vdr1 := ids.GenerateTestNodeID() - if err := vals.Add(vdr0, nil, ids.Empty, 1); err != nil { - t.Fatal(err) - } - if err := vals.Add(vdr1, nil, ids.Empty, 1); err != nil { - t.Fatal(err) - } + require.NoError(vals.Add(vdr0, nil, ids.Empty, 1)) + require.NoError(vals.Add(vdr1, nil, ids.Empty, 1)) sender := &common.SenderTest{T: t} engCfg.Sender = sender @@ -1951,21 +1637,14 @@ func TestEngineDoubleChit(t *testing.T) { return gBlk.ID(), nil } vm.GetBlockF = func(_ context.Context, id ids.ID) (snowman.Block, error) { - if id == gBlk.ID() { - return gBlk, nil - } - t.Fatalf("Unknown block") - panic("Should have errored") + require.Equal(gBlk.ID(), id) + return gBlk, nil } te, err := newTransitive(engCfg) - if err != nil { - t.Fatal(err) - } + require.NoError(err) - if err := te.Start(context.Background(), 0); err != nil { - t.Fatal(err) - } + require.NoError(te.Start(context.Background(), 0)) vm.LastAcceptedF = nil @@ -1982,24 +1661,16 @@ func TestEngineDoubleChit(t *testing.T) { queried := new(bool) queryRequestID := new(uint32) sender.SendPushQueryF = func(_ context.Context, inVdrs set.Set[ids.NodeID], requestID uint32, blkBytes []byte) { - if *queried { - t.Fatalf("Asked multiple times") - } + require.False(*queried) *queried = true *queryRequestID = requestID vdrSet := set.Set[ids.NodeID]{} vdrSet.Add(vdr0, vdr1) - if !inVdrs.Equals(vdrSet) { - t.Fatalf("Asking wrong validator for preference") - } - if !bytes.Equal(blk.Bytes(), blkBytes) { - t.Fatalf("Asking for wrong block") - } + require.Equal(vdrSet, inVdrs) + require.Equal(blk.Bytes(), blkBytes) } - if err := te.issue(context.Background(), blk); err != nil { - t.Fatal(err) - } + require.NoError(te.issue(context.Background(), blk)) 
vm.GetBlockF = func(_ context.Context, id ids.ID) (snowman.Block, error) { switch id { @@ -2008,42 +1679,30 @@ func TestEngineDoubleChit(t *testing.T) { case blk.ID(): return blk, nil } - t.Fatalf("Unknown block") - panic("Should have errored") + require.FailNow("Unknown block") + return nil, nil } blkSet := []ids.ID{blk.ID()} - if status := blk.Status(); status != choices.Processing { - t.Fatalf("Wrong status: %s ; expected: %s", status, choices.Processing) - } + require.Equal(choices.Processing, blk.Status()) - if err := te.Chits(context.Background(), vdr0, *queryRequestID, blkSet, nil); err != nil { - t.Fatal(err) - } + require.NoError(te.Chits(context.Background(), vdr0, *queryRequestID, blkSet, nil)) - if status := blk.Status(); status != choices.Processing { - t.Fatalf("Wrong status: %s ; expected: %s", status, choices.Processing) - } + require.Equal(choices.Processing, blk.Status()) - if err := te.Chits(context.Background(), vdr0, *queryRequestID, blkSet, nil); err != nil { - t.Fatal(err) - } + require.NoError(te.Chits(context.Background(), vdr0, *queryRequestID, blkSet, nil)) - if status := blk.Status(); status != choices.Processing { - t.Fatalf("Wrong status: %s ; expected: %s", status, choices.Processing) - } + require.Equal(choices.Processing, blk.Status()) - if err := te.Chits(context.Background(), vdr1, *queryRequestID, blkSet, nil); err != nil { - t.Fatal(err) - } + require.NoError(te.Chits(context.Background(), vdr1, *queryRequestID, blkSet, nil)) - if status := blk.Status(); status != choices.Accepted { - t.Fatalf("Wrong status: %s ; expected: %s", status, choices.Accepted) - } + require.Equal(choices.Accepted, blk.Status()) } func TestEngineBuildBlockLimit(t *testing.T) { + require := require.New(t) + engCfg := DefaultConfigs() engCfg.Params.K = 1 engCfg.Params.Alpha = 1 @@ -2053,9 +1712,7 @@ func TestEngineBuildBlockLimit(t *testing.T) { engCfg.Validators = vals vdr := ids.GenerateTestNodeID() - if err := vals.Add(vdr, nil, ids.Empty, 1); err != nil { - t.Fatal(err) - } + require.NoError(vals.Add(vdr, nil, ids.Empty, 1)) sender := &common.SenderTest{T: t} engCfg.Sender = sender @@ -2078,20 +1735,14 @@ func TestEngineBuildBlockLimit(t *testing.T) { return gBlk.ID(), nil } vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { - if blkID != gBlk.ID() { - t.Fatalf("Wrong block requested") - } + require.Equal(gBlk.ID(), blkID) return gBlk, nil } te, err := newTransitive(engCfg) - if err != nil { - t.Fatal(err) - } + require.NoError(err) - if err := te.Start(context.Background(), 0); err != nil { - t.Fatal(err) - } + require.NoError(te.Start(context.Background(), 0)) vm.GetBlockF = nil vm.LastAcceptedF = nil @@ -2122,15 +1773,11 @@ func TestEngineBuildBlockLimit(t *testing.T) { ) sender.SendPushQueryF = func(_ context.Context, inVdrs set.Set[ids.NodeID], rID uint32, _ []byte) { reqID = rID - if queried { - t.Fatalf("Asked multiple times") - } + require.False(queried) queried = true vdrSet := set.Set[ids.NodeID]{} vdrSet.Add(vdr) - if !inVdrs.Equals(vdrSet) { - t.Fatalf("Asking wrong validator for preference") - } + require.Equal(vdrSet, inVdrs) } vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { @@ -2144,29 +1791,19 @@ func TestEngineBuildBlockLimit(t *testing.T) { blkToReturn := 0 vm.BuildBlockF = func(context.Context) (snowman.Block, error) { - if blkToReturn >= len(blks) { - t.Fatalf("Built too many blocks") - } + require.Less(blkToReturn, len(blks)) blk := blks[blkToReturn] blkToReturn++ return blk, nil } - if err := 
te.Notify(context.Background(), common.PendingTxs); err != nil { - t.Fatal(err) - } + require.NoError(te.Notify(context.Background(), common.PendingTxs)) - if !queried { - t.Fatalf("Should have sent a query to the peer") - } + require.True(queried) queried = false - if err := te.Notify(context.Background(), common.PendingTxs); err != nil { - t.Fatal(err) - } + require.NoError(te.Notify(context.Background(), common.PendingTxs)) - if queried { - t.Fatalf("Shouldn't have sent a query to the peer") - } + require.False(queried) vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { @@ -2179,16 +1816,14 @@ func TestEngineBuildBlockLimit(t *testing.T) { } } - if err := te.Chits(context.Background(), vdr, reqID, []ids.ID{blk0.ID()}, nil); err != nil { - t.Fatal(err) - } + require.NoError(te.Chits(context.Background(), vdr, reqID, []ids.ID{blk0.ID()}, nil)) - if !queried { - t.Fatalf("Should have sent a query to the peer") - } + require.True(queried) } func TestEngineReceiveNewRejectedBlock(t *testing.T) { + require := require.New(t) + vdr, _, sender, vm, te, gBlk := setupDefaultConfig(t) acceptedBlk := &snowman.TestBlock{ @@ -2228,7 +1863,7 @@ func TestEngineReceiveNewRejectedBlock(t *testing.T) { case bytes.Equal(b, pendingBlk.Bytes()): return pendingBlk, nil default: - t.Fatalf("Unknown block bytes") + require.FailNow("Unknown block bytes") return nil, nil } } @@ -2253,17 +1888,11 @@ func TestEngineReceiveNewRejectedBlock(t *testing.T) { reqID = rID } - if err := te.Put(context.Background(), vdr, 0, acceptedBlk.Bytes()); err != nil { - t.Fatal(err) - } + require.NoError(te.Put(context.Background(), vdr, 0, acceptedBlk.Bytes())) - if !asked { - t.Fatalf("Didn't query for the new block") - } + require.True(asked) - if err := te.Chits(context.Background(), vdr, reqID, []ids.ID{acceptedBlk.ID()}, nil); err != nil { - t.Fatal(err) - } + require.NoError(te.Chits(context.Background(), vdr, reqID, []ids.ID{acceptedBlk.ID()}, nil)) sender.SendPushQueryF = nil asked = false @@ -2273,26 +1902,20 @@ func TestEngineReceiveNewRejectedBlock(t *testing.T) { reqID = rID } - if err := te.Put(context.Background(), vdr, 0, pendingBlk.Bytes()); err != nil { - t.Fatal(err) - } + require.NoError(te.Put(context.Background(), vdr, 0, pendingBlk.Bytes())) - if !asked { - t.Fatalf("Didn't request the missing block") - } + require.True(asked) rejectedBlk.StatusV = choices.Rejected - if err := te.Put(context.Background(), vdr, reqID, rejectedBlk.Bytes()); err != nil { - t.Fatal(err) - } + require.NoError(te.Put(context.Background(), vdr, reqID, rejectedBlk.Bytes())) - if te.blkReqs.Len() != 0 { - t.Fatalf("Should have finished all requests") - } + require.Zero(te.blkReqs.Len()) } func TestEngineRejectionAmplification(t *testing.T) { + require := require.New(t) + vdr, _, sender, vm, te, gBlk := setupDefaultConfig(t) acceptedBlk := &snowman.TestBlock{ @@ -2332,7 +1955,7 @@ func TestEngineRejectionAmplification(t *testing.T) { case bytes.Equal(b, pendingBlk.Bytes()): return pendingBlk, nil default: - t.Fatalf("Unknown block bytes") + require.FailNow("Unknown block bytes") return nil, nil } } @@ -2357,13 +1980,9 @@ func TestEngineRejectionAmplification(t *testing.T) { reqID = rID } - if err := te.Put(context.Background(), vdr, 0, acceptedBlk.Bytes()); err != nil { - t.Fatal(err) - } + require.NoError(te.Put(context.Background(), vdr, 0, acceptedBlk.Bytes())) - if !queried { - t.Fatalf("Didn't query for the new block") - } + require.True(queried) vm.GetBlockF = func(_ context.Context, blkID 
ids.ID) (snowman.Block, error) { switch blkID { @@ -2376,13 +1995,9 @@ func TestEngineRejectionAmplification(t *testing.T) { } } - if err := te.Chits(context.Background(), vdr, reqID, []ids.ID{acceptedBlk.ID()}, nil); err != nil { - t.Fatal(err) - } + require.NoError(te.Chits(context.Background(), vdr, reqID, []ids.ID{acceptedBlk.ID()}, nil)) - if !te.Consensus.Finalized() { - t.Fatalf("Should have finalized the consensus instance") - } + require.True(te.Consensus.Finalized()) queried = false var asked bool @@ -2393,35 +2008,25 @@ func TestEngineRejectionAmplification(t *testing.T) { asked = true reqID = rID - if blkID != rejectedBlk.ID() { - t.Fatalf("requested %s but should have requested %s", blkID, rejectedBlk.ID()) - } + require.Equal(rejectedBlk.ID(), blkID) } - if err := te.Put(context.Background(), vdr, 0, pendingBlk.Bytes()); err != nil { - t.Fatal(err) - } + require.NoError(te.Put(context.Background(), vdr, 0, pendingBlk.Bytes())) - if queried { - t.Fatalf("Queried for the pending block") - } - if !asked { - t.Fatalf("Should have asked for the missing block") - } + require.False(queried) + require.True(asked) rejectedBlk.StatusV = choices.Processing - if err := te.Put(context.Background(), vdr, reqID, rejectedBlk.Bytes()); err != nil { - t.Fatal(err) - } + require.NoError(te.Put(context.Background(), vdr, reqID, rejectedBlk.Bytes())) - if queried { - t.Fatalf("Queried for the rejected block") - } + require.False(queried) } // Test that the node will not issue a block into consensus that it knows will // be rejected because the parent is rejected. func TestEngineTransitiveRejectionAmplificationDueToRejectedParent(t *testing.T) { + require := require.New(t) + vdr, _, sender, vm, te, gBlk := setupDefaultConfig(t) acceptedBlk := &snowman.TestBlock{ @@ -2462,7 +2067,7 @@ func TestEngineTransitiveRejectionAmplificationDueToRejectedParent(t *testing.T) case bytes.Equal(b, pendingBlk.Bytes()): return pendingBlk, nil default: - t.Fatalf("Unknown block bytes") + require.FailNow("Unknown block bytes") return nil, nil } } @@ -2489,38 +2094,26 @@ func TestEngineTransitiveRejectionAmplificationDueToRejectedParent(t *testing.T) reqID = rID } - if err := te.Put(context.Background(), vdr, 0, acceptedBlk.Bytes()); err != nil { - t.Fatal(err) - } + require.NoError(te.Put(context.Background(), vdr, 0, acceptedBlk.Bytes())) - if !queried { - t.Fatalf("Didn't query for the new block") - } + require.True(queried) - if err := te.Chits(context.Background(), vdr, reqID, []ids.ID{acceptedBlk.ID()}, nil); err != nil { - t.Fatal(err) - } + require.NoError(te.Chits(context.Background(), vdr, reqID, []ids.ID{acceptedBlk.ID()}, nil)) - if !te.Consensus.Finalized() { - t.Fatalf("Should have finalized the consensus instance") - } + require.True(te.Consensus.Finalized()) - if err := te.Put(context.Background(), vdr, 0, pendingBlk.Bytes()); err != nil { - t.Fatal(err) - } + require.NoError(te.Put(context.Background(), vdr, 0, pendingBlk.Bytes())) - if !te.Consensus.Finalized() { - t.Fatalf("Should have finalized the consensus instance") - } + require.True(te.Consensus.Finalized()) - if len(te.pending) != 0 { - t.Fatalf("Shouldn't have any pending blocks") - } + require.Empty(te.pending) } // Test that the node will not issue a block into consensus that it knows will // be rejected because the parent is failing verification. 
func TestEngineTransitiveRejectionAmplificationDueToInvalidParent(t *testing.T) { + require := require.New(t) + vdr, _, sender, vm, te, gBlk := setupDefaultConfig(t) acceptedBlk := &snowman.TestBlock{ @@ -2562,7 +2155,7 @@ func TestEngineTransitiveRejectionAmplificationDueToInvalidParent(t *testing.T) case bytes.Equal(b, pendingBlk.Bytes()): return pendingBlk, nil default: - t.Fatalf("Unknown block bytes") + require.FailNow("Unknown block bytes") return nil, nil } } @@ -2585,13 +2178,8 @@ func TestEngineTransitiveRejectionAmplificationDueToInvalidParent(t *testing.T) reqID = rID } - if err := te.Put(context.Background(), vdr, 0, acceptedBlk.Bytes()); err != nil { - t.Fatal(err) - } - - if !queried { - t.Fatalf("Didn't query for the new block") - } + require.NoError(te.Put(context.Background(), vdr, 0, acceptedBlk.Bytes())) + require.True(queried) vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { @@ -2606,29 +2194,18 @@ func TestEngineTransitiveRejectionAmplificationDueToInvalidParent(t *testing.T) } } - if err := te.Chits(context.Background(), vdr, reqID, []ids.ID{acceptedBlk.ID()}, nil); err != nil { - t.Fatal(err) - } - - if !te.Consensus.Finalized() { - t.Fatalf("Should have finalized the consensus instance") - } - - if err := te.Put(context.Background(), vdr, 0, pendingBlk.Bytes()); err != nil { - t.Fatal(err) - } - - if !te.Consensus.Finalized() { - t.Fatalf("Should have finalized the consensus instance") - } + require.NoError(te.Chits(context.Background(), vdr, reqID, []ids.ID{acceptedBlk.ID()}, nil)) + require.True(te.Consensus.Finalized()) - if len(te.pending) != 0 { - t.Fatalf("Shouldn't have any pending blocks") - } + require.NoError(te.Put(context.Background(), vdr, 0, pendingBlk.Bytes())) + require.True(te.Consensus.Finalized()) + require.Empty(te.pending) } // Test that the node will not gossip a block that isn't preferred. 
func TestEngineNonPreferredAmplification(t *testing.T) { + require := require.New(t) + vdr, _, sender, vm, te, gBlk := setupDefaultConfig(t) preferredBlk := &snowman.TestBlock{ @@ -2657,7 +2234,7 @@ func TestEngineNonPreferredAmplification(t *testing.T) { case bytes.Equal(b, nonPreferredBlk.Bytes()): return nonPreferredBlk, nil default: - t.Fatalf("Unknown block bytes") + require.FailNow("Unknown block bytes") return nil, nil } } @@ -2672,23 +2249,15 @@ func TestEngineNonPreferredAmplification(t *testing.T) { } sender.SendPushQueryF = func(_ context.Context, _ set.Set[ids.NodeID], _ uint32, blkBytes []byte) { - if bytes.Equal(nonPreferredBlk.Bytes(), blkBytes) { - t.Fatalf("gossiped non-preferred block") - } + require.NotEqual(nonPreferredBlk.Bytes(), blkBytes) } sender.SendPullQueryF = func(_ context.Context, _ set.Set[ids.NodeID], _ uint32, blkID ids.ID) { - if blkID == nonPreferredBlk.ID() { - t.Fatalf("gossiped non-preferred block") - } + require.NotEqual(nonPreferredBlk.ID(), blkID) } - if err := te.Put(context.Background(), vdr, 0, preferredBlk.Bytes()); err != nil { - t.Fatal(err) - } + require.NoError(te.Put(context.Background(), vdr, 0, preferredBlk.Bytes())) - if err := te.Put(context.Background(), vdr, 0, nonPreferredBlk.Bytes()); err != nil { - t.Fatal(err) - } + require.NoError(te.Put(context.Background(), vdr, 0, nonPreferredBlk.Bytes())) } // Test that in the following scenario, if block B fails verification, votes @@ -2703,6 +2272,8 @@ func TestEngineNonPreferredAmplification(t *testing.T) { // | // B func TestEngineBubbleVotesThroughInvalidBlock(t *testing.T) { + require := require.New(t) + vdr, _, sender, vm, te, gBlk := setupDefaultConfig(t) // [blk1] is a child of [gBlk] and currently passes verification @@ -2736,7 +2307,7 @@ func TestEngineBubbleVotesThroughInvalidBlock(t *testing.T) { case bytes.Equal(b, blk2.Bytes()): return blk2, nil default: - t.Fatalf("Unknown block bytes") + require.FailNow("Unknown block bytes") return nil, nil } } @@ -2756,52 +2327,34 @@ func TestEngineBubbleVotesThroughInvalidBlock(t *testing.T) { reqID := new(uint32) sender.SendGetF = func(_ context.Context, inVdr ids.NodeID, requestID uint32, blkID ids.ID) { *reqID = requestID - if *asked { - t.Fatalf("Asked multiple times") - } - if blkID != blk1.ID() { - t.Fatalf("Expected engine to request blk1") - } - if inVdr != vdr { - t.Fatalf("Expected engine to request blk2 from vdr") - } + require.False(*asked) + require.Equal(blk1.ID(), blkID) + require.Equal(vdr, inVdr) *asked = true } // Receive Gossip message for [blk2] first and expect the sender to issue a Get request for // its ancestor: [blk1]. - if err := te.Put(context.Background(), vdr, constants.GossipMsgRequestID, blk2.Bytes()); err != nil { - t.Fatal(err) - } + require.NoError(te.Put(context.Background(), vdr, constants.GossipMsgRequestID, blk2.Bytes())) - if !*asked { - t.Fatalf("Didn't ask for missing blk1") - } + require.True(*asked) // Prepare to PushQuery [blk1] after our Get request is fulfilled. We should not PushQuery // [blk2] since it currently fails verification. 
queried := new(bool) queryRequestID := new(uint32) sender.SendPushQueryF = func(_ context.Context, inVdrs set.Set[ids.NodeID], requestID uint32, blkBytes []byte) { - if *queried { - t.Fatalf("Asked multiple times") - } + require.False(*queried) *queried = true *queryRequestID = requestID vdrSet := set.Set[ids.NodeID]{} vdrSet.Add(vdr) - if !inVdrs.Equals(vdrSet) { - t.Fatalf("Asking wrong validator for preference") - } - if !bytes.Equal(blk1.Bytes(), blkBytes) { - t.Fatalf("Asking for wrong block") - } + require.Equal(vdrSet, inVdrs) + require.Equal(blk1.Bytes(), blkBytes) } // Answer the request, this should allow [blk1] to be issued and cause [blk2] to // fail verification. - if err := te.Put(context.Background(), vdr, *reqID, blk1.Bytes()); err != nil { - t.Fatal(err) - } + require.NoError(te.Put(context.Background(), vdr, *reqID, blk1.Bytes())) // now blk1 is verified, vm can return it vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { @@ -2815,9 +2368,7 @@ func TestEngineBubbleVotesThroughInvalidBlock(t *testing.T) { } } - if !*queried { - t.Fatalf("Didn't ask for preferences regarding blk1") - } + require.True(*queried) sendReqID := new(uint32) reqVdr := new(ids.NodeID) @@ -2826,41 +2377,35 @@ func TestEngineBubbleVotesThroughInvalidBlock(t *testing.T) { sender.SendGetF = func(_ context.Context, inVdr ids.NodeID, requestID uint32, blkID ids.ID) { switch blkID { case blk1.ID(): - t.Fatal("Unexpectedly sent a Get request for blk1") + require.FailNow("Unexpectedly sent a Get request for blk1") case blk2.ID(): *sendReqID = requestID *reqVdr = inVdr return default: - t.Fatal("Unexpectedly sent a Get request for unknown block") + require.FailNow("Unexpectedly sent a Get request for unknown block") } } sender.SendPullQueryF = func(_ context.Context, _ set.Set[ids.NodeID], _ uint32, blkID ids.ID) { switch blkID { case blk1.ID(): - t.Fatal("Unexpectedly sent a PullQuery request for blk1") + require.FailNow("Unexpectedly sent a PullQuery request for blk1") case blk2.ID(): - t.Fatal("Unexpectedly sent a PullQuery request for blk2") + require.FailNow("Unexpectedly sent a PullQuery request for blk2") default: - t.Fatal("Unexpectedly sent a PullQuery request for unknown block") + require.FailNow("Unexpectedly sent a PullQuery request for unknown block") } } // Now we are expecting a Chits message, and we receive it for blk2 instead of blk1 // The votes should be bubbled through blk2 despite the fact that it is failing verification. - if err := te.Chits(context.Background(), vdr, *queryRequestID, []ids.ID{blk2.ID()}, nil); err != nil { - t.Fatal(err) - } + require.NoError(te.Chits(context.Background(), vdr, *queryRequestID, []ids.ID{blk2.ID()}, nil)) - if err := te.Put(context.Background(), *reqVdr, *sendReqID, blk2.Bytes()); err != nil { - t.Fatal(err) - } + require.NoError(te.Put(context.Background(), *reqVdr, *sendReqID, blk2.Bytes())) // The vote should be bubbled through [blk2], such that [blk1] gets marked as Accepted. - if blk1.Status() != choices.Accepted { - t.Fatalf("Expected blk1 to be Accepted, but found status: %s", blk1.Status()) - } + require.Equal(choices.Accepted, blk1.Status()) // Now that [blk1] has been marked as Accepted, [blk2] can pass verification. blk2.VerifyV = nil @@ -2879,37 +2424,22 @@ func TestEngineBubbleVotesThroughInvalidBlock(t *testing.T) { *queried = false // Prepare to PushQuery [blk2] after receiving a Gossip message with [blk2]. 
sender.SendPushQueryF = func(_ context.Context, inVdrs set.Set[ids.NodeID], requestID uint32, blkBytes []byte) { - if *queried { - t.Fatalf("Asked multiple times") - } + require.False(*queried) *queried = true *queryRequestID = requestID vdrSet := set.Set[ids.NodeID]{} vdrSet.Add(vdr) - if !inVdrs.Equals(vdrSet) { - t.Fatalf("Asking wrong validator for preference") - } - if !bytes.Equal(blk2.Bytes(), blkBytes) { - t.Fatalf("Asking for wrong block") - } + require.Equal(vdrSet, inVdrs) + require.Equal(blk2.Bytes(), blkBytes) } // Expect that the Engine will send a PushQuery after receiving this Gossip message for [blk2]. - if err := te.Put(context.Background(), vdr, constants.GossipMsgRequestID, blk2.Bytes()); err != nil { - t.Fatal(err) - } + require.NoError(te.Put(context.Background(), vdr, constants.GossipMsgRequestID, blk2.Bytes())) - if !*queried { - t.Fatalf("Didn't ask for preferences regarding blk2") - } + require.True(*queried) // After a single vote for [blk2], it should be marked as accepted. - if err := te.Chits(context.Background(), vdr, *queryRequestID, []ids.ID{blk2.ID()}, nil); err != nil { - t.Fatal(err) - } - - if blk2.Status() != choices.Accepted { - t.Fatalf("Expected blk2 to be Accepted, but found status: %s", blk2.Status()) - } + require.NoError(te.Chits(context.Background(), vdr, *queryRequestID, []ids.ID{blk2.ID()}, nil)) + require.Equal(choices.Accepted, blk2.Status()) } // Test that in the following scenario, if block B fails verification, votes @@ -2926,6 +2456,8 @@ func TestEngineBubbleVotesThroughInvalidBlock(t *testing.T) { // | // C func TestEngineBubbleVotesThroughInvalidChain(t *testing.T) { + require := require.New(t) + vdr, _, sender, vm, te, gBlk := setupDefaultConfig(t) // [blk1] is a child of [gBlk] and currently passes verification @@ -2972,7 +2504,7 @@ func TestEngineBubbleVotesThroughInvalidChain(t *testing.T) { case bytes.Equal(b, blk3.Bytes()): return blk3, nil default: - t.Fatalf("Unknown block bytes") + require.FailNow("Unknown block bytes") return nil, nil } } @@ -2993,26 +2525,16 @@ func TestEngineBubbleVotesThroughInvalidChain(t *testing.T) { reqID := new(uint32) sender.SendGetF = func(_ context.Context, inVdr ids.NodeID, requestID uint32, blkID ids.ID) { *reqID = requestID - if *asked { - t.Fatalf("Asked multiple times") - } - if blkID != blk2.ID() { - t.Fatalf("Expected engine to request blk2") - } - if inVdr != vdr { - t.Fatalf("Expected engine to request blk2 from vdr") - } + require.False(*asked) + require.Equal(blk2.ID(), blkID) + require.Equal(vdr, inVdr) *asked = true } // Receive Gossip message for [blk3] first and expect the sender to issue a // Get request for its ancestor: [blk2]. - if err := te.Put(context.Background(), vdr, constants.GossipMsgRequestID, blk3.Bytes()); err != nil { - t.Fatal(err) - } + require.NoError(te.Put(context.Background(), vdr, constants.GossipMsgRequestID, blk3.Bytes())) - if !*asked { - t.Fatalf("Didn't ask for missing blk2") - } + require.True(*asked) // Prepare to PushQuery [blk1] after our request for [blk2] is fulfilled. // We should not PushQuery [blk2] since it currently fails verification. 
@@ -3020,29 +2542,19 @@ func TestEngineBubbleVotesThroughInvalidChain(t *testing.T) {
 	queried := new(bool)
 	queryRequestID := new(uint32)
 	sender.SendPushQueryF = func(_ context.Context, inVdrs set.Set[ids.NodeID], requestID uint32, blkBytes []byte) {
-		if *queried {
-			t.Fatalf("Asked multiple times")
-		}
+		require.False(*queried)
 		*queried = true
 		*queryRequestID = requestID
 		vdrSet := set.Set[ids.NodeID]{}
 		vdrSet.Add(vdr)
-		if !inVdrs.Equals(vdrSet) {
-			t.Fatalf("Asking wrong validator for preference")
-		}
-		if !bytes.Equal(blk1.Bytes(), blkBytes) {
-			t.Fatalf("Asking for wrong block")
-		}
+		require.Equal(vdrSet, inVdrs)
+		require.Equal(blk1.Bytes(), blkBytes)
 	}

 	// Answer the request, this should result in [blk1] being issued as well.
-	if err := te.Put(context.Background(), vdr, *reqID, blk2.Bytes()); err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(te.Put(context.Background(), vdr, *reqID, blk2.Bytes()))

-	if !*queried {
-		t.Fatalf("Didn't ask for preferences regarding blk1")
-	}
+	require.True(*queried)

 	sendReqID := new(uint32)
 	reqVdr := new(ids.NodeID)
@@ -3051,7 +2563,7 @@ func TestEngineBubbleVotesThroughInvalidChain(t *testing.T) {
 	sender.SendGetF = func(_ context.Context, inVdr ids.NodeID, requestID uint32, blkID ids.ID) {
 		switch blkID {
 		case blk1.ID():
-			t.Fatal("Unexpectedly sent a Get request for blk1")
+			require.FailNow("Unexpectedly sent a Get request for blk1")
 		case blk2.ID():
 			t.Logf("sending get for blk2 with %d", requestID)
 			*sendReqID = requestID
@@ -3063,41 +2575,35 @@ func TestEngineBubbleVotesThroughInvalidChain(t *testing.T) {
 			*reqVdr = inVdr
 			return
 		default:
-			t.Fatal("Unexpectedly sent a Get request for unknown block")
+			require.FailNow("Unexpectedly sent a Get request for unknown block")
 		}
 	}
 	sender.SendPullQueryF = func(_ context.Context, _ set.Set[ids.NodeID], _ uint32, blkID ids.ID) {
 		switch blkID {
 		case blk1.ID():
-			t.Fatal("Unexpectedly sent a PullQuery request for blk1")
+			require.FailNow("Unexpectedly sent a PullQuery request for blk1")
 		case blk2.ID():
-			t.Fatal("Unexpectedly sent a PullQuery request for blk2")
+			require.FailNow("Unexpectedly sent a PullQuery request for blk2")
 		case blk3.ID():
-			t.Fatal("Unexpectedly sent a PullQuery request for blk3")
+			require.FailNow("Unexpectedly sent a PullQuery request for blk3")
 		default:
-			t.Fatal("Unexpectedly sent a PullQuery request for unknown block")
+			require.FailNow("Unexpectedly sent a PullQuery request for unknown block")
 		}
 	}

 	// Now we are expecting a Chits message, and we receive it for [blk3]
 	// instead of blk1. This will cause the node to again request [blk3].
-	if err := te.Chits(context.Background(), vdr, *queryRequestID, []ids.ID{blk3.ID()}, nil); err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(te.Chits(context.Background(), vdr, *queryRequestID, []ids.ID{blk3.ID()}, nil))

 	// Drop the re-request for blk3 to cause the poll to terminate. The votes
 	// should be bubbled through blk3 despite the fact that it hasn't been
 	// issued.
-	if err := te.GetFailed(context.Background(), *reqVdr, *sendReqID); err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(te.GetFailed(context.Background(), *reqVdr, *sendReqID))

 	// The vote should be bubbled through [blk3] and [blk2] such that [blk1]
 	// gets marked as Accepted.
- if blk1.Status() != choices.Accepted { - t.Fatalf("Expected blk1 to be Accepted, but found status: %s", blk1.Status()) - } + require.Equal(choices.Accepted, blk1.Status()) } func TestMixedQueryNumPushSet(t *testing.T) { @@ -3105,15 +2611,13 @@ func TestMixedQueryNumPushSet(t *testing.T) { t.Run( fmt.Sprint(i), func(t *testing.T) { + require := require.New(t) + engCfg := DefaultConfigs() engCfg.Params.MixedQueryNumPushVdr = i te, err := newTransitive(engCfg) - if err != nil { - t.Fatal(err) - } - if te.Params.MixedQueryNumPushVdr != i { - t.Fatalf("expected to push query %v validators but got %v", i, te.Config.Params.MixedQueryNumPushVdr) - } + require.NoError(err) + require.Equal(i, te.Params.MixedQueryNumPushVdr) }, ) } @@ -3131,6 +2635,8 @@ func TestSendMixedQuery(t *testing.T) { t.Run( fmt.Sprintf("is validator: %v", tt.isVdr), func(t *testing.T) { + require := require.New(t) + engConfig := DefaultConfigs() commonCfg := common.DefaultConfigTest() // Override the parameters k and MixedQueryNumPushNonVdr, @@ -3146,17 +2652,11 @@ func TestSendMixedQuery(t *testing.T) { for i := 0; i < te.Params.K; i++ { vdrID := ids.GenerateTestNodeID() vdrs.Add(vdrID) - err := te.Validators.Add(vdrID, nil, ids.Empty, 1) - if err != nil { - t.Fatal(err) - } + require.NoError(te.Validators.Add(vdrID, nil, ids.Empty, 1)) } if tt.isVdr { vdrs.Add(te.Ctx.NodeID) - err := te.Validators.Add(te.Ctx.NodeID, nil, ids.Empty, 1) - if err != nil { - t.Fatal(err) - } + require.NoError(te.Validators.Add(te.Ctx.NodeID, nil, ids.Empty, 1)) } // [blk1] is a child of [gBlk] and passes verification @@ -3176,7 +2676,7 @@ func TestSendMixedQuery(t *testing.T) { case bytes.Equal(b, blk1.Bytes()): return blk1, nil default: - t.Fatalf("Unknown block bytes") + require.FailNow("Unknown block bytes") return nil, nil } } @@ -3195,12 +2695,8 @@ func TestSendMixedQuery(t *testing.T) { pullQueryReqID := new(uint32) pullQueriedVdrs := set.Set[ids.NodeID]{} sender.SendPullQueryF = func(_ context.Context, inVdrs set.Set[ids.NodeID], requestID uint32, blkID ids.ID) { - switch { - case *pullQuerySent: - t.Fatalf("Asked multiple times") - case blkID != blk1.ID(): - t.Fatalf("Expected engine to request blk1") - } + require.False(*pullQuerySent) + require.Equal(blk1.ID(), blkID) pullQueriedVdrs.Union(inVdrs) *pullQuerySent = true *pullQueryReqID = requestID @@ -3210,12 +2706,8 @@ func TestSendMixedQuery(t *testing.T) { pushQueryReqID := new(uint32) pushQueriedVdrs := set.Set[ids.NodeID]{} sender.SendPushQueryF = func(_ context.Context, inVdrs set.Set[ids.NodeID], requestID uint32, blkBytes []byte) { - switch { - case *pushQuerySent: - t.Fatal("Asked multiple times") - case !bytes.Equal(blkBytes, blk1.Bytes()): - t.Fatal("got unexpected block bytes instead of blk1") - } + require.False(*pushQuerySent) + require.Equal(blk1.Bytes(), blkBytes) *pushQuerySent = true *pushQueryReqID = requestID pushQueriedVdrs.Union(inVdrs) @@ -3223,30 +2715,21 @@ func TestSendMixedQuery(t *testing.T) { // Give the engine blk1. It should insert it into consensus and send a mixed query // consisting of 12 push queries and 8 pull queries. 
- if err := te.Put(context.Background(), te.Validators.List()[0].NodeID, constants.GossipMsgRequestID, blk1.Bytes()); err != nil { - t.Fatal(err) - } + require.NoError(te.Put(context.Background(), te.Validators.List()[0].NodeID, constants.GossipMsgRequestID, blk1.Bytes())) - switch { - case !*pullQuerySent: - t.Fatal("expected us to send pull queries") - case !*pushQuerySent: - t.Fatal("expected us to send push queries") - case *pushQueryReqID != *pullQueryReqID: - t.Fatalf("expected equal push query (%v) and pull query (%v) req IDs", *pushQueryReqID, *pullQueryReqID) - case pushQueriedVdrs.Len()+pullQueriedVdrs.Len() != te.Config.Params.K: - t.Fatalf("expected num push queried (%d) + num pull queried (%d) to be %d", pushQueriedVdrs.Len(), pullQueriedVdrs.Len(), te.Config.Params.K) - case !tt.isVdr && pushQueriedVdrs.Len() != te.Params.MixedQueryNumPushNonVdr: - t.Fatalf("expected num push queried (%d) to be %d", pushQueriedVdrs.Len(), te.Params.MixedQueryNumPushNonVdr) - case tt.isVdr && pushQueriedVdrs.Len() != te.Params.MixedQueryNumPushVdr: - t.Fatalf("expected num push queried (%d) to be %d", pushQueriedVdrs.Len(), te.Params.MixedQueryNumPushVdr) + require.True(*pullQuerySent) + require.True(*pushQuerySent) + require.Equal(*pushQueryReqID, *pullQueryReqID) + require.Equal(te.Config.Params.K, pushQueriedVdrs.Len()+pullQueriedVdrs.Len()) + expectedPushQueriedVdrs := te.Params.MixedQueryNumPushNonVdr + if tt.isVdr { + expectedPushQueriedVdrs = te.Params.MixedQueryNumPushVdr } + require.Len(pushQueriedVdrs, expectedPushQueriedVdrs) pullQueriedVdrs.Union(pushQueriedVdrs) // Now this holds all queried validators (push and pull) for vdr := range pullQueriedVdrs { - if !vdrs.Contains(vdr) { - t.Fatalf("got unexpected vdr %v", vdr) - } + require.Contains(vdrs, vdr) } }) } @@ -3472,8 +2955,8 @@ func TestEngineApplyAcceptedFrontierInQueryFailed(t *testing.T) { case blk.ID(): return blk, nil } - t.Fatalf("unknown block") - panic("Should have errored") + require.FailNow("unknown block") + return nil, nil } require.Equal(choices.Processing, blk.Status()) diff --git a/snow/events/blocker_test.go b/snow/events/blocker_test.go index d3710be917b..850a8063bbe 100644 --- a/snow/events/blocker_test.go +++ b/snow/events/blocker_test.go @@ -7,11 +7,15 @@ import ( "context" "testing" + "github.com/stretchr/testify/require" + "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/set" ) func TestBlocker(t *testing.T) { + require := require.New(t) + b := Blocker(nil) a := newTestBlockable() @@ -43,39 +47,39 @@ func TestBlocker(t *testing.T) { b.Register(context.Background(), a) - switch { - case !*calledDep, *calledFill, *calledAbandon, !*calledUpdate: - t.Fatalf("Called wrong function") - } + require.True(*calledDep) + require.False(*calledFill) + require.False(*calledAbandon) + require.True(*calledUpdate) b.Fulfill(context.Background(), id2) b.Abandon(context.Background(), id2) - switch { - case !*calledDep, *calledFill, *calledAbandon, !*calledUpdate: - t.Fatalf("Called wrong function") - } + require.True(*calledDep) + require.False(*calledFill) + require.False(*calledAbandon) + require.True(*calledUpdate) b.Fulfill(context.Background(), id0) - switch { - case !*calledDep, !*calledFill, *calledAbandon, !*calledUpdate: - t.Fatalf("Called wrong function") - } + require.True(*calledDep) + require.True(*calledFill) + require.False(*calledAbandon) + require.True(*calledUpdate) b.Abandon(context.Background(), id0) - switch { - case !*calledDep, !*calledFill, *calledAbandon, !*calledUpdate: - 
t.Fatalf("Called wrong function") - } + require.True(*calledDep) + require.True(*calledFill) + require.False(*calledAbandon) + require.True(*calledUpdate) b.Abandon(context.Background(), id1) - switch { - case !*calledDep, !*calledFill, !*calledAbandon, !*calledUpdate: - t.Fatalf("Called wrong function") - } + require.True(*calledDep) + require.True(*calledFill) + require.True(*calledAbandon) + require.True(*calledUpdate) } type testBlockable struct { diff --git a/snow/networking/benchlist/benchlist_test.go b/snow/networking/benchlist/benchlist_test.go index 93b3075daff..15742449ea9 100644 --- a/snow/networking/benchlist/benchlist_test.go +++ b/snow/networking/benchlist/benchlist_test.go @@ -14,13 +14,14 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/utils/logging" - "github.com/ava-labs/avalanchego/utils/wrappers" ) var minimumFailingDuration = 5 * time.Minute // Test that validators are properly added to the bench func TestBenchlistAdd(t *testing.T) { + require := require.New(t) + vdrs := validators.NewSet() vdrID0 := ids.GenerateTestNodeID() vdrID1 := ids.GenerateTestNodeID() @@ -28,17 +29,11 @@ func TestBenchlistAdd(t *testing.T) { vdrID3 := ids.GenerateTestNodeID() vdrID4 := ids.GenerateTestNodeID() - errs := wrappers.Errs{} - errs.Add( - vdrs.Add(vdrID0, nil, ids.Empty, 50), - vdrs.Add(vdrID1, nil, ids.Empty, 50), - vdrs.Add(vdrID2, nil, ids.Empty, 50), - vdrs.Add(vdrID3, nil, ids.Empty, 50), - vdrs.Add(vdrID4, nil, ids.Empty, 50), - ) - if errs.Errored() { - t.Fatal(errs.Err) - } + require.NoError(vdrs.Add(vdrID0, nil, ids.Empty, 50)) + require.NoError(vdrs.Add(vdrID1, nil, ids.Empty, 50)) + require.NoError(vdrs.Add(vdrID2, nil, ids.Empty, 50)) + require.NoError(vdrs.Add(vdrID3, nil, ids.Empty, 50)) + require.NoError(vdrs.Add(vdrID4, nil, ids.Empty, 50)) benchable := &TestBenchable{T: t} benchable.Default(true) @@ -57,9 +52,7 @@ func TestBenchlistAdd(t *testing.T) { maxPortion, prometheus.NewRegistry(), ) - if err != nil { - t.Fatal(err) - } + require.NoError(err) b := benchIntf.(*benchlist) defer b.timer.Stop() now := time.Now() @@ -67,14 +60,14 @@ func TestBenchlistAdd(t *testing.T) { // Nobody should be benched at the start b.lock.Lock() - require.False(t, b.isBenched(vdrID0)) - require.False(t, b.isBenched(vdrID1)) - require.False(t, b.isBenched(vdrID2)) - require.False(t, b.isBenched(vdrID3)) - require.False(t, b.isBenched(vdrID4)) - require.Empty(t, b.failureStreaks) - require.Zero(t, b.benchedQueue.Len()) - require.Zero(t, b.benchlistSet.Len()) + require.False(b.isBenched(vdrID0)) + require.False(b.isBenched(vdrID1)) + require.False(b.isBenched(vdrID2)) + require.False(b.isBenched(vdrID3)) + require.False(b.isBenched(vdrID4)) + require.Empty(b.failureStreaks) + require.Empty(b.benchedQueue) + require.Empty(b.benchlistSet) b.lock.Unlock() // Register [threshold - 1] failures in a row for vdr0 @@ -83,13 +76,13 @@ func TestBenchlistAdd(t *testing.T) { } // Still shouldn't be benched due to not enough consecutive failure - require.False(t, b.isBenched(vdrID0)) - require.Zero(t, b.benchedQueue.Len()) - require.Zero(t, b.benchlistSet.Len()) - require.Len(t, b.failureStreaks, 1) + require.False(b.isBenched(vdrID0)) + require.Zero(b.benchedQueue.Len()) + require.Zero(b.benchlistSet.Len()) + require.Len(b.failureStreaks, 1) fs := b.failureStreaks[vdrID0] - require.Equal(t, threshold-1, fs.consecutive) - require.True(t, fs.firstFailure.Equal(now)) + require.Equal(threshold-1, fs.consecutive) + 
require.True(fs.firstFailure.Equal(now)) // Register another failure b.RegisterFailure(vdrID0) @@ -97,9 +90,9 @@ func TestBenchlistAdd(t *testing.T) { // Still shouldn't be benched because not enough time (any in this case) // has passed since the first failure b.lock.Lock() - require.False(t, b.isBenched(vdrID0)) - require.Zero(t, b.benchedQueue.Len()) - require.Zero(t, b.benchlistSet.Len()) + require.False(b.isBenched(vdrID0)) + require.Zero(b.benchedQueue.Len()) + require.Zero(b.benchlistSet.Len()) b.lock.Unlock() // Move the time up @@ -118,16 +111,16 @@ func TestBenchlistAdd(t *testing.T) { // Now this validator should be benched b.lock.Lock() - require.True(t, b.isBenched(vdrID0)) - require.Equal(t, b.benchedQueue.Len(), 1) - require.Equal(t, b.benchlistSet.Len(), 1) + require.True(b.isBenched(vdrID0)) + require.Equal(b.benchedQueue.Len(), 1) + require.Equal(b.benchlistSet.Len(), 1) next := b.benchedQueue[0] - require.Equal(t, vdrID0, next.nodeID) - require.True(t, !next.benchedUntil.After(now.Add(duration))) - require.True(t, !next.benchedUntil.Before(now.Add(duration/2))) - require.Empty(t, b.failureStreaks) - require.True(t, benched) + require.Equal(vdrID0, next.nodeID) + require.False(next.benchedUntil.After(now.Add(duration))) + require.False(next.benchedUntil.Before(now.Add(duration / 2))) + require.Empty(b.failureStreaks) + require.True(benched) benchable.BenchedF = nil b.lock.Unlock() @@ -142,11 +135,11 @@ func TestBenchlistAdd(t *testing.T) { // vdr1 shouldn't be benched // The response should have cleared its consecutive failures b.lock.Lock() - require.True(t, b.isBenched(vdrID0)) - require.False(t, b.isBenched(vdrID1)) - require.Equal(t, b.benchedQueue.Len(), 1) - require.Equal(t, b.benchlistSet.Len(), 1) - require.Empty(t, b.failureStreaks) + require.True(b.isBenched(vdrID0)) + require.False(b.isBenched(vdrID1)) + require.Equal(b.benchedQueue.Len(), 1) + require.Equal(b.benchlistSet.Len(), 1) + require.Empty(b.failureStreaks) b.lock.Unlock() // Register another failure for vdr0, who is benched @@ -154,12 +147,14 @@ func TestBenchlistAdd(t *testing.T) { // A failure for an already benched validator should not count against it b.lock.Lock() - require.Empty(t, b.failureStreaks) + require.Empty(b.failureStreaks) b.lock.Unlock() } // Test that the benchlist won't bench more than the maximum portion of stake func TestBenchlistMaxStake(t *testing.T) { + require := require.New(t) + vdrs := validators.NewSet() vdrID0 := ids.GenerateTestNodeID() vdrID1 := ids.GenerateTestNodeID() @@ -168,17 +163,11 @@ func TestBenchlistMaxStake(t *testing.T) { vdrID4 := ids.GenerateTestNodeID() // Total weight is 5100 - errs := wrappers.Errs{} - errs.Add( - vdrs.Add(vdrID0, nil, ids.Empty, 1000), - vdrs.Add(vdrID1, nil, ids.Empty, 1000), - vdrs.Add(vdrID2, nil, ids.Empty, 1000), - vdrs.Add(vdrID3, nil, ids.Empty, 2000), - vdrs.Add(vdrID4, nil, ids.Empty, 100), - ) - if errs.Errored() { - t.Fatal(errs.Err) - } + require.NoError(vdrs.Add(vdrID0, nil, ids.Empty, 1000)) + require.NoError(vdrs.Add(vdrID1, nil, ids.Empty, 1000)) + require.NoError(vdrs.Add(vdrID2, nil, ids.Empty, 1000)) + require.NoError(vdrs.Add(vdrID3, nil, ids.Empty, 2000)) + require.NoError(vdrs.Add(vdrID4, nil, ids.Empty, 100)) threshold := 3 duration := 1 * time.Hour @@ -195,9 +184,7 @@ func TestBenchlistMaxStake(t *testing.T) { maxPortion, prometheus.NewRegistry(), ) - if err != nil { - t.Fatal(err) - } + require.NoError(err) b := benchIntf.(*benchlist) defer b.timer.Stop() now := time.Now() @@ -225,12 +212,12 @@ func 
TestBenchlistMaxStake(t *testing.T) { // Benching vdr2 (weight 1000) would cause the amount benched // to exceed the maximum b.lock.Lock() - require.True(t, b.isBenched(vdrID0)) - require.True(t, b.isBenched(vdrID1)) - require.False(t, b.isBenched(vdrID2)) - require.Equal(t, b.benchedQueue.Len(), 2) - require.Equal(t, b.benchlistSet.Len(), 2) - require.Len(t, b.failureStreaks, 1) + require.True(b.isBenched(vdrID0)) + require.True(b.isBenched(vdrID1)) + require.False(b.isBenched(vdrID2)) + require.Equal(b.benchedQueue.Len(), 2) + require.Equal(b.benchlistSet.Len(), 2) + require.Len(b.failureStreaks, 1) fs := b.failureStreaks[vdrID2] fs.consecutive = threshold fs.firstFailure = now @@ -252,15 +239,15 @@ func TestBenchlistMaxStake(t *testing.T) { // vdr4 should be benched now b.lock.Lock() - require.True(t, b.isBenched(vdrID0)) - require.True(t, b.isBenched(vdrID1)) - require.True(t, b.isBenched(vdrID4)) - require.Equal(t, 3, b.benchedQueue.Len()) - require.Equal(t, 3, b.benchlistSet.Len()) - require.Contains(t, b.benchlistSet, vdrID0) - require.Contains(t, b.benchlistSet, vdrID1) - require.Contains(t, b.benchlistSet, vdrID4) - require.Len(t, b.failureStreaks, 1) // for vdr2 + require.True(b.isBenched(vdrID0)) + require.True(b.isBenched(vdrID1)) + require.True(b.isBenched(vdrID4)) + require.Equal(3, b.benchedQueue.Len()) + require.Equal(3, b.benchlistSet.Len()) + require.Contains(b.benchlistSet, vdrID0) + require.Contains(b.benchlistSet, vdrID1) + require.Contains(b.benchlistSet, vdrID4) + require.Len(b.failureStreaks, 1) // for vdr2 b.lock.Unlock() // More failures for vdr2 shouldn't add it to the bench @@ -270,21 +257,21 @@ func TestBenchlistMaxStake(t *testing.T) { } b.lock.Lock() - require.True(t, b.isBenched(vdrID0)) - require.True(t, b.isBenched(vdrID1)) - require.True(t, b.isBenched(vdrID4)) - require.False(t, b.isBenched(vdrID2)) - require.Equal(t, 3, b.benchedQueue.Len()) - require.Equal(t, 3, b.benchlistSet.Len()) - require.Len(t, b.failureStreaks, 1) - require.Contains(t, b.failureStreaks, vdrID2) + require.True(b.isBenched(vdrID0)) + require.True(b.isBenched(vdrID1)) + require.True(b.isBenched(vdrID4)) + require.False(b.isBenched(vdrID2)) + require.Equal(3, b.benchedQueue.Len()) + require.Equal(3, b.benchlistSet.Len()) + require.Len(b.failureStreaks, 1) + require.Contains(b.failureStreaks, vdrID2) // Ensure the benched queue root has the min end time minEndTime := b.benchedQueue[0].benchedUntil benchedIDs := []ids.NodeID{vdrID0, vdrID1, vdrID4} for _, benchedVdr := range b.benchedQueue { - require.Contains(t, benchedIDs, benchedVdr.nodeID) - require.True(t, !benchedVdr.benchedUntil.Before(minEndTime)) + require.Contains(benchedIDs, benchedVdr.nodeID) + require.False(benchedVdr.benchedUntil.Before(minEndTime)) } b.lock.Unlock() @@ -292,6 +279,8 @@ func TestBenchlistMaxStake(t *testing.T) { // Test validators are removed from the bench correctly func TestBenchlistRemove(t *testing.T) { + require := require.New(t) + vdrs := validators.NewSet() vdrID0 := ids.GenerateTestNodeID() vdrID1 := ids.GenerateTestNodeID() @@ -300,17 +289,11 @@ func TestBenchlistRemove(t *testing.T) { vdrID4 := ids.GenerateTestNodeID() // Total weight is 5000 - errs := wrappers.Errs{} - errs.Add( - vdrs.Add(vdrID0, nil, ids.Empty, 1000), - vdrs.Add(vdrID1, nil, ids.Empty, 1000), - vdrs.Add(vdrID2, nil, ids.Empty, 1000), - vdrs.Add(vdrID3, nil, ids.Empty, 1000), - vdrs.Add(vdrID4, nil, ids.Empty, 1000), - ) - if errs.Errored() { - t.Fatal(errs.Err) - } + require.NoError(vdrs.Add(vdrID0, nil, ids.Empty, 1000)) + 
require.NoError(vdrs.Add(vdrID1, nil, ids.Empty, 1000)) + require.NoError(vdrs.Add(vdrID2, nil, ids.Empty, 1000)) + require.NoError(vdrs.Add(vdrID3, nil, ids.Empty, 1000)) + require.NoError(vdrs.Add(vdrID4, nil, ids.Empty, 1000)) count := 0 benchable := &TestBenchable{ @@ -335,9 +318,7 @@ func TestBenchlistRemove(t *testing.T) { maxPortion, prometheus.NewRegistry(), ) - if err != nil { - t.Fatal(err) - } + require.NoError(err) b := benchIntf.(*benchlist) defer b.timer.Stop() now := time.Now() @@ -364,19 +345,19 @@ func TestBenchlistRemove(t *testing.T) { // All 3 should be benched b.lock.Lock() - require.True(t, b.isBenched(vdrID0)) - require.True(t, b.isBenched(vdrID1)) - require.True(t, b.isBenched(vdrID2)) - require.Equal(t, 3, b.benchedQueue.Len()) - require.Equal(t, 3, b.benchlistSet.Len()) - require.Empty(t, b.failureStreaks) + require.True(b.isBenched(vdrID0)) + require.True(b.isBenched(vdrID1)) + require.True(b.isBenched(vdrID2)) + require.Equal(3, b.benchedQueue.Len()) + require.Equal(3, b.benchlistSet.Len()) + require.Empty(b.failureStreaks) // Ensure the benched queue root has the min end time minEndTime := b.benchedQueue[0].benchedUntil benchedIDs := []ids.NodeID{vdrID0, vdrID1, vdrID2} for _, benchedVdr := range b.benchedQueue { - require.Contains(t, benchedIDs, benchedVdr.nodeID) - require.True(t, !benchedVdr.benchedUntil.Before(minEndTime)) + require.Contains(benchedIDs, benchedVdr.nodeID) + require.False(benchedVdr.benchedUntil.Before(minEndTime)) } // Set the benchlist's clock past when all validators should be unbenched @@ -386,7 +367,6 @@ func TestBenchlistRemove(t *testing.T) { // Make sure each validator is eventually removed require.Eventually( - t, func() bool { return !b.IsBenched(vdrID0) }, @@ -395,7 +375,6 @@ func TestBenchlistRemove(t *testing.T) { ) require.Eventually( - t, func() bool { return !b.IsBenched(vdrID1) }, @@ -404,7 +383,6 @@ func TestBenchlistRemove(t *testing.T) { ) require.Eventually( - t, func() bool { return !b.IsBenched(vdrID2) }, @@ -412,5 +390,5 @@ func TestBenchlistRemove(t *testing.T) { 100*time.Millisecond, ) - require.Equal(t, 3, count) + require.Equal(3, count) } diff --git a/snow/networking/benchlist/test_benchable.go b/snow/networking/benchlist/test_benchable.go index 1655d808d49..1e059fa4ebb 100644 --- a/snow/networking/benchlist/test_benchable.go +++ b/snow/networking/benchlist/test_benchable.go @@ -6,6 +6,8 @@ package benchlist import ( "testing" + "github.com/stretchr/testify/require" + "github.com/ava-labs/avalanchego/ids" ) @@ -26,7 +28,8 @@ func (b *TestBenchable) Benched(chainID ids.ID, validatorID ids.NodeID) { if b.BenchedF != nil { b.BenchedF(chainID, validatorID) } else if b.CantBenched && b.T != nil { - b.T.Fatalf("Unexpectedly called Benched") + require := require.New(b.T) + require.FailNow("Unexpectedly called Benched") } } @@ -34,6 +37,7 @@ func (b *TestBenchable) Unbenched(chainID ids.ID, validatorID ids.NodeID) { if b.UnbenchedF != nil { b.UnbenchedF(chainID, validatorID) } else if b.CantUnbenched && b.T != nil { - b.T.Fatalf("Unexpectedly called Unbenched") + require := require.New(b.T) + require.FailNow("Unexpectedly called Unbenched") } } diff --git a/snow/networking/handler/engine_test.go b/snow/networking/handler/engine_test.go index 9eb6752ce46..142441cfda6 100644 --- a/snow/networking/handler/engine_test.go +++ b/snow/networking/handler/engine_test.go @@ -58,14 +58,12 @@ func TestEngineManager_Get(t *testing.T) { } for _, test := range tests { t.Run(test.name, func(t *testing.T) { - r := require.New(t) - e := 
EngineManager{ Avalanche: avalanche, Snowman: snowman, } - r.Equal(test.expected.engine, e.Get(test.args.engineType)) + require.Equal(t, test.expected.engine, e.Get(test.args.engineType)) }) } } diff --git a/snow/networking/handler/handler_test.go b/snow/networking/handler/handler_test.go index ccc694ae199..3a26bd774ca 100644 --- a/snow/networking/handler/handler_test.go +++ b/snow/networking/handler/handler_test.go @@ -33,13 +33,15 @@ const testThreadPoolSize = 2 var errFatal = errors.New("error should cause handler to close") func TestHandlerDropsTimedOutMessages(t *testing.T) { + require := require.New(t) + called := make(chan struct{}) ctx := snow.DefaultConsensusContextTest() vdrs := validators.NewSet() vdr0 := ids.GenerateTestNodeID() - require.NoError(t, vdrs.Add(vdr0, nil, ids.Empty, 1)) + require.NoError(vdrs.Add(vdr0, nil, ids.Empty, 1)) resourceTracker, err := tracker.NewResourceTracker( prometheus.NewRegistry(), @@ -47,7 +49,7 @@ func TestHandlerDropsTimedOutMessages(t *testing.T) { meter.ContinuousFactory{}, time.Second, ) - require.NoError(t, err) + require.NoError(err) handlerIntf, err := New( ctx, vdrs, @@ -58,7 +60,7 @@ func TestHandlerDropsTimedOutMessages(t *testing.T) { validators.UnhandledSubnetConnector, subnets.New(ctx.NodeID, subnets.Config{}), ) - require.NoError(t, err) + require.NoError(err) handler := handlerIntf.(*handler) bootstrapper := &common.BootstrapperTest{ @@ -74,7 +76,7 @@ func TestHandlerDropsTimedOutMessages(t *testing.T) { return ctx } bootstrapper.GetAcceptedFrontierF = func(ctx context.Context, nodeID ids.NodeID, requestID uint32) error { - t.Fatalf("GetAcceptedFrontier message should have timed out") + require.FailNow("GetAcceptedFrontier message should have timed out") return nil } bootstrapper.GetAcceptedF = func(ctx context.Context, nodeID ids.NodeID, requestID uint32, containerIDs []ids.ID) error { @@ -123,17 +125,19 @@ func TestHandlerDropsTimedOutMessages(t *testing.T) { defer ticker.Stop() select { case <-ticker.C: - t.Fatalf("Calling engine function timed out") + require.FailNow("Calling engine function timed out") case <-called: } } func TestHandlerClosesOnError(t *testing.T) { + require := require.New(t) + closed := make(chan struct{}, 1) ctx := snow.DefaultConsensusContextTest() vdrs := validators.NewSet() - require.NoError(t, vdrs.Add(ids.GenerateTestNodeID(), nil, ids.Empty, 1)) + require.NoError(vdrs.Add(ids.GenerateTestNodeID(), nil, ids.Empty, 1)) resourceTracker, err := tracker.NewResourceTracker( prometheus.NewRegistry(), @@ -141,7 +145,7 @@ func TestHandlerClosesOnError(t *testing.T) { meter.ContinuousFactory{}, time.Second, ) - require.NoError(t, err) + require.NoError(err) handlerIntf, err := New( ctx, vdrs, @@ -152,7 +156,7 @@ func TestHandlerClosesOnError(t *testing.T) { validators.UnhandledSubnetConnector, subnets.New(ctx.NodeID, subnets.Config{}), ) - require.NoError(t, err) + require.NoError(err) handler := handlerIntf.(*handler) handler.clock.Set(time.Now()) @@ -214,16 +218,18 @@ func TestHandlerClosesOnError(t *testing.T) { ticker := time.NewTicker(time.Second) select { case <-ticker.C: - t.Fatalf("Handler shutdown timed out before calling toClose") + require.FailNow("Handler shutdown timed out before calling toClose") case <-closed: } } func TestHandlerDropsGossipDuringBootstrapping(t *testing.T) { + require := require.New(t) + closed := make(chan struct{}, 1) ctx := snow.DefaultConsensusContextTest() vdrs := validators.NewSet() - require.NoError(t, vdrs.Add(ids.GenerateTestNodeID(), nil, ids.Empty, 1)) + 
require.NoError(vdrs.Add(ids.GenerateTestNodeID(), nil, ids.Empty, 1)) resourceTracker, err := tracker.NewResourceTracker( prometheus.NewRegistry(), @@ -231,7 +237,7 @@ func TestHandlerDropsGossipDuringBootstrapping(t *testing.T) { meter.ContinuousFactory{}, time.Second, ) - require.NoError(t, err) + require.NoError(err) handlerIntf, err := New( ctx, vdrs, @@ -242,7 +248,7 @@ func TestHandlerDropsGossipDuringBootstrapping(t *testing.T) { validators.UnhandledSubnetConnector, subnets.New(ctx.NodeID, subnets.Config{}), ) - require.NoError(t, err) + require.NoError(err) handler := handlerIntf.(*handler) handler.clock.Set(time.Now()) @@ -291,18 +297,20 @@ func TestHandlerDropsGossipDuringBootstrapping(t *testing.T) { ticker := time.NewTicker(time.Second) select { case <-ticker.C: - t.Fatalf("Handler shutdown timed out before calling toClose") + require.FailNow("Handler shutdown timed out before calling toClose") case <-closed: } } // Test that messages from the VM are handled func TestHandlerDispatchInternal(t *testing.T) { + require := require.New(t) + calledNotify := make(chan struct{}, 1) ctx := snow.DefaultConsensusContextTest() msgFromVMChan := make(chan common.Message) vdrs := validators.NewSet() - require.NoError(t, vdrs.Add(ids.GenerateTestNodeID(), nil, ids.Empty, 1)) + require.NoError(vdrs.Add(ids.GenerateTestNodeID(), nil, ids.Empty, 1)) resourceTracker, err := tracker.NewResourceTracker( prometheus.NewRegistry(), @@ -310,7 +318,7 @@ func TestHandlerDispatchInternal(t *testing.T) { meter.ContinuousFactory{}, time.Second, ) - require.NoError(t, err) + require.NoError(err) handler, err := New( ctx, vdrs, @@ -321,7 +329,7 @@ func TestHandlerDispatchInternal(t *testing.T) { validators.UnhandledSubnetConnector, subnets.New(ctx.NodeID, subnets.Config{}), ) - require.NoError(t, err) + require.NoError(err) bootstrapper := &common.BootstrapperTest{ BootstrapableTest: common.BootstrapableTest{ @@ -364,15 +372,17 @@ func TestHandlerDispatchInternal(t *testing.T) { select { case <-time.After(20 * time.Millisecond): - t.Fatalf("should have called notify") + require.FailNow("should have called notify") case <-calledNotify: } } func TestHandlerSubnetConnector(t *testing.T) { + require := require.New(t) + ctx := snow.DefaultConsensusContextTest() vdrs := validators.NewSet() - require.NoError(t, vdrs.Add(ids.GenerateTestNodeID(), nil, ids.Empty, 1)) + require.NoError(vdrs.Add(ids.GenerateTestNodeID(), nil, ids.Empty, 1)) resourceTracker, err := tracker.NewResourceTracker( prometheus.NewRegistry(), @@ -387,7 +397,7 @@ func TestHandlerSubnetConnector(t *testing.T) { nodeID := ids.GenerateTestNodeID() subnetID := ids.GenerateTestID() - require.NoError(t, err) + require.NoError(err) handler, err := New( ctx, vdrs, @@ -398,7 +408,7 @@ func TestHandlerSubnetConnector(t *testing.T) { connector, subnets.New(ctx.NodeID, subnets.Config{}), ) - require.NoError(t, err) + require.NoError(err) bootstrapper := &common.BootstrapperTest{ BootstrapableTest: common.BootstrapableTest{ @@ -545,10 +555,12 @@ func TestDynamicEngineTypeDispatch(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { + require := require.New(t) + messageReceived := make(chan struct{}) ctx := snow.DefaultConsensusContextTest() vdrs := validators.NewSet() - require.NoError(t, vdrs.Add(ids.GenerateTestNodeID(), nil, ids.Empty, 1)) + require.NoError(vdrs.Add(ids.GenerateTestNodeID(), nil, ids.Empty, 1)) resourceTracker, err := tracker.NewResourceTracker( prometheus.NewRegistry(), @@ -556,7 +568,7 @@ func 
TestDynamicEngineTypeDispatch(t *testing.T) { meter.ContinuousFactory{}, time.Second, ) - require.NoError(t, err) + require.NoError(err) handler, err := New( ctx, vdrs, @@ -567,7 +579,7 @@ func TestDynamicEngineTypeDispatch(t *testing.T) { validators.UnhandledSubnetConnector, subnets.New(ids.EmptyNodeID, subnets.Config{}), ) - require.NoError(t, err) + require.NoError(err) bootstrapper := &common.BootstrapperTest{ BootstrapableTest: common.BootstrapableTest{ diff --git a/snow/networking/router/chain_router_test.go b/snow/networking/router/chain_router_test.go index 8d146efdde7..afc832d7b96 100644 --- a/snow/networking/router/chain_router_test.go +++ b/snow/networking/router/chain_router_test.go @@ -42,8 +42,10 @@ const ( ) func TestShutdown(t *testing.T) { + require := require.New(t) + vdrs := validators.NewSet() - require.NoError(t, vdrs.Add(ids.GenerateTestNodeID(), nil, ids.Empty, 1)) + require.NoError(vdrs.Add(ids.GenerateTestNodeID(), nil, ids.Empty, 1)) benchlist := benchlist.NewNoBenchlist() tm, err := timeout.NewManager( &timer.AdaptiveTimeoutConfig{ @@ -57,11 +59,11 @@ func TestShutdown(t *testing.T) { "", prometheus.NewRegistry(), ) - require.NoError(t, err) + require.NoError(err) go tm.Dispatch() chainRouter := ChainRouter{} - require.NoError(t, chainRouter.Initialize( + require.NoError(chainRouter.Initialize( ids.EmptyNodeID, logging.NoLog{}, tm, @@ -84,7 +86,7 @@ func TestShutdown(t *testing.T) { meter.ContinuousFactory{}, time.Second, ) - require.NoError(t, err) + require.NoError(err) h, err := handler.New( ctx, vdrs, @@ -95,7 +97,7 @@ func TestShutdown(t *testing.T) { validators.UnhandledSubnetConnector, subnets.New(ctx.NodeID, subnets.Config{}), ) - require.NoError(t, err) + require.NoError(err) bootstrapper := &common.BootstrapperTest{ BootstrapableTest: common.BootstrapableTest{ @@ -162,21 +164,23 @@ func TestShutdown(t *testing.T) { ticker := time.NewTicker(250 * time.Millisecond) select { case <-ticker.C: - t.Fatalf("Handler shutdown was not called or timed out after 250ms during chainRouter shutdown") + require.FailNow("Handler shutdown was not called or timed out after 250ms during chainRouter shutdown") case <-shutdownCalled: } select { case <-h.Stopped(): default: - t.Fatal("handler shutdown but never closed its closing channel") + require.FailNow("handler shutdown but never closed its closing channel") } } func TestShutdownTimesOut(t *testing.T) { + require := require.New(t) + nodeID := ids.EmptyNodeID vdrs := validators.NewSet() - require.NoError(t, vdrs.Add(ids.GenerateTestNodeID(), nil, ids.Empty, 1)) + require.NoError(vdrs.Add(ids.GenerateTestNodeID(), nil, ids.Empty, 1)) benchlist := benchlist.NewNoBenchlist() metrics := prometheus.NewRegistry() // Ensure that the Ancestors request does not timeout @@ -192,12 +196,12 @@ func TestShutdownTimesOut(t *testing.T) { "", metrics, ) - require.NoError(t, err) + require.NoError(err) go tm.Dispatch() chainRouter := ChainRouter{} - require.NoError(t, chainRouter.Initialize( + require.NoError(chainRouter.Initialize( ids.EmptyNodeID, logging.NoLog{}, tm, @@ -218,7 +222,7 @@ func TestShutdownTimesOut(t *testing.T) { meter.ContinuousFactory{}, time.Second, ) - require.NoError(t, err) + require.NoError(err) h, err := handler.New( ctx, vdrs, @@ -229,7 +233,7 @@ func TestShutdownTimesOut(t *testing.T) { validators.UnhandledSubnetConnector, subnets.New(ctx.NodeID, subnets.Config{}), ) - require.NoError(t, err) + require.NoError(err) bootstrapFinished := make(chan struct{}, 1) bootstrapper := &common.BootstrapperTest{ @@ -308,7 
+312,7 @@ func TestShutdownTimesOut(t *testing.T) { select { case <-bootstrapFinished: - t.Fatalf("Shutdown should have finished in one millisecond before timing out instead of waiting for engine to finish shutting down.") + require.FailNow("Shutdown should have finished in one millisecond before timing out instead of waiting for engine to finish shutting down.") case <-shutdownFinished: } } @@ -797,6 +801,8 @@ func TestRouterHonorsRequestedEngine(t *testing.T) { } func TestRouterClearTimeouts(t *testing.T) { + require := require.New(t) + // Create a timeout manager tm, err := timeout.NewManager( &timer.AdaptiveTimeoutConfig{ @@ -810,12 +816,12 @@ func TestRouterClearTimeouts(t *testing.T) { "", prometheus.NewRegistry(), ) - require.NoError(t, err) + require.NoError(err) go tm.Dispatch() // Create a router chainRouter := ChainRouter{} - require.NoError(t, chainRouter.Initialize( + require.NoError(chainRouter.Initialize( ids.EmptyNodeID, logging.NoLog{}, tm, @@ -832,7 +838,7 @@ func TestRouterClearTimeouts(t *testing.T) { // Create bootstrapper, engine and handler ctx := snow.DefaultConsensusContextTest() vdrs := validators.NewSet() - require.NoError(t, vdrs.Add(ids.GenerateTestNodeID(), nil, ids.Empty, 1)) + require.NoError(vdrs.Add(ids.GenerateTestNodeID(), nil, ids.Empty, 1)) resourceTracker, err := tracker.NewResourceTracker( prometheus.NewRegistry(), @@ -840,7 +846,7 @@ func TestRouterClearTimeouts(t *testing.T) { meter.ContinuousFactory{}, time.Second, ) - require.NoError(t, err) + require.NoError(err) h, err := handler.New( ctx, vdrs, @@ -851,7 +857,7 @@ func TestRouterClearTimeouts(t *testing.T) { validators.UnhandledSubnetConnector, subnets.New(ctx.NodeID, subnets.Config{}), ) - require.NoError(t, err) + require.NoError(err) bootstrapper := &common.BootstrapperTest{ BootstrapableTest: common.BootstrapableTest{ @@ -1077,10 +1083,12 @@ func TestRouterClearTimeouts(t *testing.T) { chainRouter.HandleInbound(context.Background(), msg) } - require.Zero(t, chainRouter.timedRequests.Len()) + require.Zero(chainRouter.timedRequests.Len()) } func TestValidatorOnlyMessageDrops(t *testing.T) { + require := require.New(t) + // Create a timeout manager maxTimeout := 25 * time.Millisecond tm, err := timeout.NewManager( @@ -1095,12 +1103,12 @@ func TestValidatorOnlyMessageDrops(t *testing.T) { "", prometheus.NewRegistry(), ) - require.NoError(t, err) + require.NoError(err) go tm.Dispatch() // Create a router chainRouter := ChainRouter{} - require.NoError(t, chainRouter.Initialize( + require.NoError(chainRouter.Initialize( ids.EmptyNodeID, logging.NoLog{}, tm, @@ -1122,14 +1130,14 @@ func TestValidatorOnlyMessageDrops(t *testing.T) { sb := subnets.New(ctx.NodeID, subnets.Config{ValidatorOnly: true}) vdrs := validators.NewSet() vID := ids.GenerateTestNodeID() - require.NoError(t, vdrs.Add(vID, nil, ids.Empty, 1)) + require.NoError(vdrs.Add(vID, nil, ids.Empty, 1)) resourceTracker, err := tracker.NewResourceTracker( prometheus.NewRegistry(), resource.NoUsage, meter.ContinuousFactory{}, time.Second, ) - require.NoError(t, err) + require.NoError(err) h, err := handler.New( ctx, vdrs, @@ -1140,7 +1148,7 @@ func TestValidatorOnlyMessageDrops(t *testing.T) { validators.UnhandledSubnetConnector, sb, ) - require.NoError(t, err) + require.NoError(err) bootstrapper := &common.BootstrapperTest{ BootstrapableTest: common.BootstrapableTest{ @@ -1207,7 +1215,7 @@ func TestValidatorOnlyMessageDrops(t *testing.T) { ) chainRouter.HandleInbound(context.Background(), inMsg) - require.False(t, calledF) // should not be 
called + require.False(calledF) // should not be called // Validator case calledF = false @@ -1224,10 +1232,12 @@ func TestValidatorOnlyMessageDrops(t *testing.T) { chainRouter.HandleInbound(context.Background(), inMsg) wg.Wait() - require.True(t, calledF) // should be called since this is a validator request + require.True(calledF) // should be called since this is a validator request } func TestRouterCrossChainMessages(t *testing.T) { + require := require.New(t) + tm, err := timeout.NewManager( &timer.AdaptiveTimeoutConfig{ InitialTimeout: 3 * time.Second, @@ -1240,13 +1250,13 @@ func TestRouterCrossChainMessages(t *testing.T) { "timeoutManager", prometheus.NewRegistry(), ) - require.NoError(t, err) + require.NoError(err) go tm.Dispatch() // Create chain router nodeID := ids.GenerateTestNodeID() chainRouter := ChainRouter{} - require.NoError(t, chainRouter.Initialize( + require.NoError(chainRouter.Initialize( nodeID, logging.NoLog{}, tm, @@ -1262,7 +1272,7 @@ func TestRouterCrossChainMessages(t *testing.T) { // Set up validators vdrs := validators.NewSet() - require.NoError(t, vdrs.Add(ids.GenerateTestNodeID(), nil, ids.Empty, 1)) + require.NoError(vdrs.Add(ids.GenerateTestNodeID(), nil, ids.Empty, 1)) // Create bootstrapper, engine and handler requester := snow.DefaultConsensusContextTest() @@ -1277,7 +1287,7 @@ func TestRouterCrossChainMessages(t *testing.T) { meter.ContinuousFactory{}, time.Second, ) - require.NoError(t, err) + require.NoError(err) requesterHandler, err := handler.New( requester, @@ -1289,7 +1299,7 @@ func TestRouterCrossChainMessages(t *testing.T) { validators.UnhandledSubnetConnector, subnets.New(requester.NodeID, subnets.Config{}), ) - require.NoError(t, err) + require.NoError(err) responder := snow.DefaultConsensusContextTest() responder.ChainID = ids.GenerateTestID() @@ -1307,7 +1317,7 @@ func TestRouterCrossChainMessages(t *testing.T) { validators.UnhandledSubnetConnector, subnets.New(responder.NodeID, subnets.Config{}), ) - require.NoError(t, err) + require.NoError(err) // assumed bootstrapping is done responder.State.Set(snow.EngineState{ @@ -1324,8 +1334,8 @@ func TestRouterCrossChainMessages(t *testing.T) { chainRouter.AddChain(context.Background(), responderHandler) // Each chain should start off with a connected message - require.Equal(t, 1, chainRouter.chainHandlers[requester.ChainID].Len()) - require.Equal(t, 1, chainRouter.chainHandlers[responder.ChainID].Len()) + require.Equal(1, chainRouter.chainHandlers[requester.ChainID].Len()) + require.Equal(1, chainRouter.chainHandlers[responder.ChainID].Len()) // Requester sends a request to the responder msgBytes := []byte("foobar") @@ -1338,7 +1348,7 @@ func TestRouterCrossChainMessages(t *testing.T) { msgBytes, ) chainRouter.HandleInbound(context.Background(), msg) - require.Equal(t, 2, chainRouter.chainHandlers[responder.ChainID].Len()) + require.Equal(2, chainRouter.chainHandlers[responder.ChainID].Len()) // We register the cross-chain response on the requester-side so we don't // drop it. 
@@ -1366,10 +1376,12 @@ func TestRouterCrossChainMessages(t *testing.T) {
 		msgBytes,
 	)
 	chainRouter.HandleInbound(context.Background(), msg)
-	require.Equal(t, 2, chainRouter.chainHandlers[requester.ChainID].Len())
+	require.Equal(2, chainRouter.chainHandlers[requester.ChainID].Len())
 }

 func TestConnectedSubnet(t *testing.T) {
+	require := require.New(t)
+
 	ctrl := gomock.NewController(t)
 	defer ctrl.Finish()

@@ -1385,7 +1397,7 @@ func TestConnectedSubnet(t *testing.T) {
 		"timeoutManager",
 		prometheus.NewRegistry(),
 	)
-	require.NoError(t, err)
+	require.NoError(err)
 	go tm.Dispatch()

 	// Create chain router
@@ -1396,7 +1408,7 @@ func TestConnectedSubnet(t *testing.T) {
 	trackedSubnets := set.Set[ids.ID]{}
 	trackedSubnets.Add(subnetID0, subnetID1)
 	chainRouter := ChainRouter{}
-	require.NoError(t, chainRouter.Initialize(
+	require.NoError(chainRouter.Initialize(
 		myNodeID,
 		logging.NoLog{},
 		tm,
@@ -1488,6 +1500,8 @@ func TestConnectedSubnet(t *testing.T) {
 }

 func TestValidatorOnlyAllowedNodeMessageDrops(t *testing.T) {
+	require := require.New(t)
+
 	// Create a timeout manager
 	maxTimeout := 25 * time.Millisecond
 	tm, err := timeout.NewManager(
@@ -1502,12 +1516,12 @@ func TestValidatorOnlyAllowedNodeMessageDrops(t *testing.T) {
 		"",
 		prometheus.NewRegistry(),
 	)
-	require.NoError(t, err)
+	require.NoError(err)
 	go tm.Dispatch()

 	// Create a router
 	chainRouter := ChainRouter{}
-	require.NoError(t, chainRouter.Initialize(
+	require.NoError(chainRouter.Initialize(
 		ids.EmptyNodeID,
 		logging.NoLog{},
 		tm,
@@ -1533,7 +1547,7 @@ func TestValidatorOnlyAllowedNodeMessageDrops(t *testing.T) {

 	vdrs := validators.NewSet()
 	vID := ids.GenerateTestNodeID()
-	require.NoError(t, vdrs.Add(vID, nil, ids.Empty, 1))
+	require.NoError(vdrs.Add(vID, nil, ids.Empty, 1))

 	resourceTracker, err := tracker.NewResourceTracker(
 		prometheus.NewRegistry(),
@@ -1541,7 +1555,7 @@ func TestValidatorOnlyAllowedNodeMessageDrops(t *testing.T) {
 		meter.ContinuousFactory{},
 		time.Second,
 	)
-	require.NoError(t, err)
+	require.NoError(err)

 	h, err := handler.New(
 		ctx,
@@ -1553,7 +1567,7 @@ func TestValidatorOnlyAllowedNodeMessageDrops(t *testing.T) {
 		validators.UnhandledSubnetConnector,
 		sb,
 	)
-	require.NoError(t, err)
+	require.NoError(err)

 	bootstrapper := &common.BootstrapperTest{
 		BootstrapableTest: common.BootstrapableTest{
@@ -1614,7 +1628,7 @@ func TestValidatorOnlyAllowedNodeMessageDrops(t *testing.T) {
 	)
 	chainRouter.HandleInbound(context.Background(), inMsg)

-	require.False(t, calledF) // should not be called for unallowed node ID
+	require.False(calledF) // should not be called for unallowed node ID

 	// Allowed NodeID case
 	calledF = false
@@ -1631,7 +1645,7 @@ func TestValidatorOnlyAllowedNodeMessageDrops(t *testing.T) {
 	chainRouter.HandleInbound(context.Background(), inMsg)
 	wg.Wait()

-	require.True(t, calledF) // should be called since this is a allowed node request
+	require.True(calledF) // should be called since this is an allowed node request

 	// Validator case
 	calledF = false
@@ -1648,5 +1662,5 @@ func TestValidatorOnlyAllowedNodeMessageDrops(t *testing.T) {
 	chainRouter.HandleInbound(context.Background(), inMsg)
 	wg.Wait()

-	require.True(t, calledF) // should be called since this is a validator request
+	require.True(calledF) // should be called since this is a validator request
 }
diff --git a/snow/networking/sender/sender_test.go b/snow/networking/sender/sender_test.go
index 176ddba3ad4..aa82335aaee 100644
--- a/snow/networking/sender/sender_test.go
+++ b/snow/networking/sender/sender_test.go
@@ -50,6 +50,7 @@ var defaultSubnetConfig =
subnets.Config{ func TestTimeout(t *testing.T) { require := require.New(t) + vdrs := validators.NewSet() require.NoError(vdrs.Add(ids.GenerateTestNodeID(), nil, ids.Empty, 1)) benchlist := benchlist.NewNoBenchlist() @@ -318,8 +319,10 @@ func TestTimeout(t *testing.T) { } func TestReliableMessages(t *testing.T) { + require := require.New(t) + vdrs := validators.NewSet() - require.NoError(t, vdrs.Add(ids.NodeID{1}, nil, ids.Empty, 1)) + require.NoError(vdrs.Add(ids.NodeID{1}, nil, ids.Empty, 1)) benchlist := benchlist.NewNoBenchlist() tm, err := timeout.NewManager( &timer.AdaptiveTimeoutConfig{ @@ -333,7 +336,7 @@ func TestReliableMessages(t *testing.T) { "", prometheus.NewRegistry(), ) - require.NoError(t, err) + require.NoError(err) go tm.Dispatch() @@ -347,9 +350,9 @@ func TestReliableMessages(t *testing.T) { constants.DefaultNetworkCompressionType, 10*time.Second, ) - require.NoError(t, err) + require.NoError(err) - require.NoError(t, chainRouter.Initialize( + require.NoError(chainRouter.Initialize( ids.EmptyNodeID, logging.NoLog{}, tm, @@ -377,7 +380,7 @@ func TestReliableMessages(t *testing.T) { p2p.EngineType_ENGINE_TYPE_SNOWMAN, subnets.New(ctx.NodeID, defaultSubnetConfig), ) - require.NoError(t, err) + require.NoError(err) ctx2 := snow.DefaultConsensusContextTest() resourceTracker, err := tracker.NewResourceTracker( @@ -386,7 +389,7 @@ func TestReliableMessages(t *testing.T) { meter.ContinuousFactory{}, time.Second, ) - require.NoError(t, err) + require.NoError(err) h, err := handler.New( ctx2, vdrs, @@ -397,7 +400,7 @@ func TestReliableMessages(t *testing.T) { validators.UnhandledSubnetConnector, subnets.New(ctx.NodeID, subnets.Config{}), ) - require.NoError(t, err) + require.NoError(err) bootstrapper := &common.BootstrapperTest{ BootstrapableTest: common.BootstrapableTest{ @@ -465,9 +468,11 @@ func TestReliableMessages(t *testing.T) { } func TestReliableMessagesToMyself(t *testing.T) { + require := require.New(t) + benchlist := benchlist.NewNoBenchlist() vdrs := validators.NewSet() - require.NoError(t, vdrs.Add(ids.GenerateTestNodeID(), nil, ids.Empty, 1)) + require.NoError(vdrs.Add(ids.GenerateTestNodeID(), nil, ids.Empty, 1)) tm, err := timeout.NewManager( &timer.AdaptiveTimeoutConfig{ InitialTimeout: 10 * time.Millisecond, @@ -480,7 +485,7 @@ func TestReliableMessagesToMyself(t *testing.T) { "", prometheus.NewRegistry(), ) - require.NoError(t, err) + require.NoError(err) go tm.Dispatch() @@ -494,9 +499,9 @@ func TestReliableMessagesToMyself(t *testing.T) { constants.DefaultNetworkCompressionType, 10*time.Second, ) - require.NoError(t, err) + require.NoError(err) - require.NoError(t, chainRouter.Initialize( + require.NoError(chainRouter.Initialize( ids.EmptyNodeID, logging.NoLog{}, tm, @@ -524,7 +529,7 @@ func TestReliableMessagesToMyself(t *testing.T) { p2p.EngineType_ENGINE_TYPE_SNOWMAN, subnets.New(ctx.NodeID, defaultSubnetConfig), ) - require.NoError(t, err) + require.NoError(err) ctx2 := snow.DefaultConsensusContextTest() resourceTracker, err := tracker.NewResourceTracker( @@ -533,7 +538,7 @@ func TestReliableMessagesToMyself(t *testing.T) { meter.ContinuousFactory{}, time.Second, ) - require.NoError(t, err) + require.NoError(err) h, err := handler.New( ctx2, vdrs, @@ -544,7 +549,7 @@ func TestReliableMessagesToMyself(t *testing.T) { validators.UnhandledSubnetConnector, subnets.New(ctx.NodeID, subnets.Config{}), ) - require.NoError(t, err) + require.NoError(err) bootstrapper := &common.BootstrapperTest{ BootstrapableTest: common.BootstrapableTest{ diff --git 
a/snow/networking/timeout/manager_test.go b/snow/networking/timeout/manager_test.go index 198668174c1..324a96ef338 100644 --- a/snow/networking/timeout/manager_test.go +++ b/snow/networking/timeout/manager_test.go @@ -10,12 +10,16 @@ import ( "github.com/prometheus/client_golang/prometheus" + "github.com/stretchr/testify/require" + "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/networking/benchlist" "github.com/ava-labs/avalanchego/utils/timer" ) func TestManagerFire(t *testing.T) { + require := require.New(t) + benchlist := benchlist.NewNoBenchlist() manager, err := NewManager( &timer.AdaptiveTimeoutConfig{ @@ -29,9 +33,7 @@ func TestManagerFire(t *testing.T) { "", prometheus.NewRegistry(), ) - if err != nil { - t.Fatal(err) - } + require.NoError(err) go manager.Dispatch() wg := sync.WaitGroup{} diff --git a/snow/networking/tracker/resource_tracker_test.go b/snow/networking/tracker/resource_tracker_test.go index a60b9f25a66..b9ecdbcdf3f 100644 --- a/snow/networking/tracker/resource_tracker_test.go +++ b/snow/networking/tracker/resource_tracker_test.go @@ -37,6 +37,8 @@ func TestNewCPUTracker(t *testing.T) { } func TestCPUTracker(t *testing.T) { + require := require.New(t) + halflife := 5 * time.Second ctrl := gomock.NewController(t) @@ -44,7 +46,7 @@ func TestCPUTracker(t *testing.T) { mockUser.EXPECT().CPUUsage().Return(1.0).Times(3) tracker, err := NewResourceTracker(prometheus.NewRegistry(), mockUser, meter.ContinuousFactory{}, time.Second) - require.NoError(t, err) + require.NoError(err) node1 := ids.NodeID{1} node2 := ids.NodeID{2} @@ -66,28 +68,20 @@ func TestCPUTracker(t *testing.T) { node1Utilization := cpuTracker.Usage(node1, endTime2) node2Utilization := cpuTracker.Usage(node2, endTime2) - if node1Utilization >= node2Utilization { - t.Fatalf("Utilization should have been higher for the more recent spender") - } + require.Less(node1Utilization, node2Utilization) cumulative := cpuTracker.TotalUsage() sum := node1Utilization + node2Utilization - if cumulative != sum { - t.Fatalf("Cumulative utilization: %f should have been equal to the sum of the spenders: %f", cumulative, sum) - } + require.Equal(sum, cumulative) mockUser.EXPECT().CPUUsage().Return(.5).Times(3) startTime3 := endTime2 endTime3 := startTime3.Add(halflife) newNode1Utilization := cpuTracker.Usage(node1, endTime3) - if newNode1Utilization >= node1Utilization { - t.Fatalf("node CPU utilization should decrease over time") - } + require.Less(newNode1Utilization, node1Utilization) newCumulative := cpuTracker.TotalUsage() - if newCumulative >= cumulative { - t.Fatal("at-large CPU utilization should decrease over time ") - } + require.Less(newCumulative, cumulative) startTime4 := endTime3 endTime4 := startTime4.Add(halflife) @@ -97,15 +91,15 @@ func TestCPUTracker(t *testing.T) { cumulative = cpuTracker.TotalUsage() sum = node1Utilization + node2Utilization - if cumulative >= sum { - t.Fatal("Sum of CPU usage should exceed cumulative at-large utilization") - } + require.Less(cumulative, sum) } func TestCPUTrackerTimeUntilCPUUtilization(t *testing.T) { + require := require.New(t) + halflife := 5 * time.Second tracker, err := NewResourceTracker(prometheus.NewRegistry(), resource.NoUsage, meter.ContinuousFactory{}, halflife) - require.NoError(t, err) + require.NoError(err) now := time.Now() nodeID := ids.GenerateTestNodeID() // Start the meter @@ -125,11 +119,11 @@ func TestCPUTrackerTimeUntilCPUUtilization(t *testing.T) { now = now.Add(timeUntilDesiredVal) actualVal := cpuTracker.Usage(nodeID, now) 
// Make sure the actual/expected are close - require.InDelta(t, desiredVal, actualVal, .00001) + require.InDelta(desiredVal, actualVal, .00001) // Make sure TimeUntilUsage returns the zero duration if // the value provided >= the current value - require.Zero(t, cpuTracker.TimeUntilUsage(nodeID, now, actualVal)) - require.Zero(t, cpuTracker.TimeUntilUsage(nodeID, now, actualVal+.1)) + require.Zero(cpuTracker.TimeUntilUsage(nodeID, now, actualVal)) + require.Zero(cpuTracker.TimeUntilUsage(nodeID, now, actualVal+.1)) // Make sure it returns the zero duration if the node isn't known - require.Zero(t, cpuTracker.TimeUntilUsage(ids.GenerateTestNodeID(), now, 0.0001)) + require.Zero(cpuTracker.TimeUntilUsage(ids.GenerateTestNodeID(), now, 0.0001)) } diff --git a/snow/networking/tracker/targeter_test.go b/snow/networking/tracker/targeter_test.go index a70afbc8a99..1a6a0354e15 100644 --- a/snow/networking/tracker/targeter_test.go +++ b/snow/networking/tracker/targeter_test.go @@ -50,12 +50,8 @@ func TestTarget(t *testing.T) { totalVdrWeight := uint64(10) nonVdr := ids.NodeID{2} vdrs := validators.NewSet() - if err := vdrs.Add(vdr, nil, ids.Empty, 1); err != nil { - t.Fatal(err) - } - if err := vdrs.Add(ids.GenerateTestNodeID(), nil, ids.Empty, totalVdrWeight-vdrWeight); err != nil { - t.Fatal(err) - } + require.NoError(t, vdrs.Add(vdr, nil, ids.Empty, 1)) + require.NoError(t, vdrs.Add(ids.GenerateTestNodeID(), nil, ids.Empty, totalVdrWeight-vdrWeight)) tracker := NewMockTracker(ctrl) config := &TargeterConfig{ @@ -123,9 +119,11 @@ func TestTarget(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { + require := require.New(t) + tt.setup() target := targeter.TargetUsage(tt.nodeID) - require.Equal(t, tt.expectedTarget, target) + require.Equal(tt.expectedTarget, target) }) } } diff --git a/snow/uptime/locked_calculator_test.go b/snow/uptime/locked_calculator_test.go index 13ae35fb909..69c12f07d15 100644 --- a/snow/uptime/locked_calculator_test.go +++ b/snow/uptime/locked_calculator_test.go @@ -22,7 +22,7 @@ func TestLockedCalculator(t *testing.T) { defer ctrl.Finish() lc := NewLockedCalculator() - require.NotNil(t) + require.NotNil(lc) // Should still error because ctx is nil nodeID := ids.GenerateTestNodeID() diff --git a/snow/validators/gvalidators/validator_state_test.go b/snow/validators/gvalidators/validator_state_test.go index 6f72f03e988..b28eb68287e 100644 --- a/snow/validators/gvalidators/validator_state_test.go +++ b/snow/validators/gvalidators/validator_state_test.go @@ -30,6 +30,8 @@ type testState struct { } func setupState(t testing.TB, ctrl *gomock.Controller) *testState { + require := require.New(t) + t.Helper() state := &testState{ @@ -37,9 +39,7 @@ func setupState(t testing.TB, ctrl *gomock.Controller) *testState { } listener, err := grpcutils.NewListener() - if err != nil { - t.Fatalf("Failed to create listener: %s", err) - } + require.NoError(err) serverCloser := grpcutils.ServerCloser{} server := grpcutils.NewServer() @@ -49,9 +49,7 @@ func setupState(t testing.TB, ctrl *gomock.Controller) *testState { go grpcutils.Serve(listener, server) conn, err := grpcutils.Dial(listener.Addr().String()) - if err != nil { - t.Fatalf("Failed to dial: %s", err) - } + require.NoError(err) state.client = NewClient(pb.NewValidatorStateClient(conn)) state.closeFn = func() { diff --git a/snow/validators/manager_test.go b/snow/validators/manager_test.go index d5f26c8d587..b07bfccd7ad 100644 --- a/snow/validators/manager_test.go +++ b/snow/validators/manager_test.go @@ -87,22 
+87,15 @@ func TestContains(t *testing.T) { subnetID := ids.GenerateTestID() nodeID := ids.GenerateTestNodeID() - contains := Contains(m, subnetID, nodeID) - require.False(contains) + require.False(Contains(m, subnetID, nodeID)) s := NewSet() m.Add(subnetID, s) - contains = Contains(m, subnetID, nodeID) - require.False(contains) - + require.False(Contains(m, subnetID, nodeID)) require.NoError(Add(m, subnetID, nodeID, nil, ids.Empty, 1)) - - contains = Contains(m, subnetID, nodeID) - require.True(contains) + require.True(Contains(m, subnetID, nodeID)) require.NoError(RemoveWeight(m, subnetID, nodeID, 1)) - - contains = Contains(m, subnetID, nodeID) - require.False(contains) + require.False(Contains(m, subnetID, nodeID)) } diff --git a/snow/validators/set_test.go b/snow/validators/set_test.go index fe90c051c44..76242f6f0e8 100644 --- a/snow/validators/set_test.go +++ b/snow/validators/set_test.go @@ -83,8 +83,7 @@ func TestSetGetWeight(t *testing.T) { s := NewSet() nodeID := ids.GenerateTestNodeID() - weight := s.GetWeight(nodeID) - require.Zero(weight) + require.Zero(s.GetWeight(nodeID)) require.NoError(s.Add(nodeID, nil, ids.Empty, 1)) @@ -109,9 +108,7 @@ func TestSetSubsetWeight(t *testing.T) { s := NewSet() require.NoError(s.Add(nodeID0, nil, ids.Empty, weight0)) - require.NoError(s.Add(nodeID1, nil, ids.Empty, weight1)) - require.NoError(s.Add(nodeID2, nil, ids.Empty, weight2)) expectedWeight := weight0 + weight1 @@ -197,18 +194,13 @@ func TestSetContains(t *testing.T) { s := NewSet() nodeID := ids.GenerateTestNodeID() - contains := s.Contains(nodeID) - require.False(contains) + require.False(s.Contains(nodeID)) require.NoError(s.Add(nodeID, nil, ids.Empty, 1)) - contains = s.Contains(nodeID) - require.True(contains) - + require.True(s.Contains(nodeID)) require.NoError(s.RemoveWeight(nodeID, 1)) - - contains = s.Contains(nodeID) - require.False(contains) + require.False(s.Contains(nodeID)) } func TestSetLen(t *testing.T) { @@ -311,8 +303,7 @@ func TestSetList(t *testing.T) { require.NoError(s.RemoveWeight(nodeID1, 1)) - list = s.List() - require.Empty(list) + require.Empty(s.List()) } func TestSetWeight(t *testing.T) { diff --git a/snow/validators/test_state.go b/snow/validators/test_state.go index 6be85dcb6e4..6a2d720363d 100644 --- a/snow/validators/test_state.go +++ b/snow/validators/test_state.go @@ -8,6 +8,8 @@ import ( "errors" "testing" + "github.com/stretchr/testify/require" + "github.com/ava-labs/avalanchego/ids" ) @@ -39,7 +41,8 @@ func (vm *TestState) GetMinimumHeight(ctx context.Context) (uint64, error) { return vm.GetMinimumHeightF(ctx) } if vm.CantGetMinimumHeight && vm.T != nil { - vm.T.Fatal(errMinimumHeight) + require := require.New(vm.T) + require.FailNow(errMinimumHeight.Error()) } return 0, errMinimumHeight } @@ -49,7 +52,8 @@ func (vm *TestState) GetCurrentHeight(ctx context.Context) (uint64, error) { return vm.GetCurrentHeightF(ctx) } if vm.CantGetCurrentHeight && vm.T != nil { - vm.T.Fatal(errCurrentHeight) + require := require.New(vm.T) + require.FailNow(errCurrentHeight.Error()) } return 0, errCurrentHeight } @@ -59,7 +63,8 @@ func (vm *TestState) GetSubnetID(ctx context.Context, chainID ids.ID) (ids.ID, e return vm.GetSubnetIDF(ctx, chainID) } if vm.CantGetSubnetID && vm.T != nil { - vm.T.Fatal(errSubnetID) + require := require.New(vm.T) + require.FailNow(errSubnetID.Error()) } return ids.Empty, errSubnetID } @@ -73,7 +78,8 @@ func (vm *TestState) GetValidatorSet( return vm.GetValidatorSetF(ctx, height, subnetID) } if vm.CantGetValidatorSet && vm.T != nil { - 
vm.T.Fatal(errGetValidatorSet) + require := require.New(vm.T) + require.FailNow(errGetValidatorSet.Error()) } return nil, errGetValidatorSet } From 1331037d7cc9e92446edc03aa71d2c2e269f5e0b Mon Sep 17 00:00:00 2001 From: dhrubabasu <7675102+dhrubabasu@users.noreply.github.com> Date: Sun, 14 May 2023 12:59:23 -0400 Subject: [PATCH 43/79] push --- x/merkledb/codec_test.go | 12 +- x/merkledb/db_test.go | 266 ++++++++------- x/merkledb/history_test.go | 2 +- x/merkledb/maybe_test.go | 8 +- x/merkledb/path_test.go | 64 ++-- x/merkledb/proof_test.go | 596 ++++++++++++++++++---------------- x/merkledb/trie_test.go | 507 +++++++++++++++-------------- x/sync/client_test.go | 3 +- x/sync/network_server_test.go | 6 +- x/sync/syncworkheap_test.go | 19 +- 10 files changed, 797 insertions(+), 686 deletions(-) diff --git a/x/merkledb/codec_test.go b/x/merkledb/codec_test.go index 50790b87df4..257385cdb90 100644 --- a/x/merkledb/codec_test.go +++ b/x/merkledb/codec_test.go @@ -440,6 +440,8 @@ func FuzzCodecRangeProofDeterministic(f *testing.F) { numEndProofNodes uint, numKeyValues uint, ) { + require := require.New(t) + r := rand.New(rand.NewSource(int64(randSeed))) // #nosec G404 var rootID ids.ID @@ -474,19 +476,19 @@ func FuzzCodecRangeProofDeterministic(f *testing.F) { } proofBytes, err := Codec.EncodeRangeProof(Version, &proof) - require.NoError(t, err) + require.NoError(err) var gotProof RangeProof _, err = Codec.DecodeRangeProof(proofBytes, &gotProof) - require.NoError(t, err) + require.NoError(err) nilEmptySlices(&proof) nilEmptySlices(&gotProof) - require.Equal(t, proof, gotProof) + require.Equal(proof, gotProof) proofBytes2, err := Codec.EncodeRangeProof(Version, &gotProof) - require.NoError(t, err) - require.Equal(t, proofBytes, proofBytes2) + require.NoError(err) + require.Equal(proofBytes, proofBytes2) }, ) } diff --git a/x/merkledb/db_test.go b/x/merkledb/db_test.go index 6aa627ed330..9799253a621 100644 --- a/x/merkledb/db_test.go +++ b/x/merkledb/db_test.go @@ -27,36 +27,40 @@ func newNoopTracer() trace.Tracer { } func Test_MerkleDB_Get_Safety(t *testing.T) { + require := require.New(t) + db, err := getBasicDB() - require.NoError(t, err) - require.NoError(t, db.Put([]byte{0}, []byte{0, 1, 2})) + require.NoError(err) + require.NoError(db.Put([]byte{0}, []byte{0, 1, 2})) val, err := db.Get([]byte{0}) - require.NoError(t, err) + require.NoError(err) n, err := db.getNode(newPath([]byte{0})) - require.NoError(t, err) + require.NoError(err) val[0] = 1 // node's value shouldn't be affected by the edit - require.NotEqual(t, val, n.value.value) + require.NotEqual(val, n.value.value) } func Test_MerkleDB_GetValues_Safety(t *testing.T) { + require := require.New(t) + db, err := getBasicDB() - require.NoError(t, err) - require.NoError(t, db.Put([]byte{0}, []byte{0, 1, 2})) + require.NoError(err) + require.NoError(db.Put([]byte{0}, []byte{0, 1, 2})) vals, errs := db.GetValues(context.Background(), [][]byte{{0}}) - require.Len(t, errs, 1) - require.NoError(t, errs[0]) - require.Equal(t, []byte{0, 1, 2}, vals[0]) + require.Len(errs, 1) + require.NoError(errs[0]) + require.Equal([]byte{0, 1, 2}, vals[0]) vals[0][0] = 1 // editing the value array shouldn't affect the db vals, errs = db.GetValues(context.Background(), [][]byte{{0}}) - require.Len(t, errs, 1) - require.NoError(t, errs[0]) - require.Equal(t, []byte{0, 1, 2}, vals[0]) + require.Len(errs, 1) + require.NoError(errs[0]) + require.Equal([]byte{0, 1, 2}, vals[0]) } func Test_MerkleDB_DB_Interface(t *testing.T) { @@ -163,6 +167,8 @@ func 
Test_MerkleDB_DB_Rebuild(t *testing.T) { } func Test_MerkleDB_Failed_Batch_Commit(t *testing.T) { + require := require.New(t) + memDB := memdb.New() db, err := New( context.Background(), @@ -172,20 +178,21 @@ func Test_MerkleDB_Failed_Batch_Commit(t *testing.T) { HistoryLength: 300, }, ) - require.NoError(t, err) + require.NoError(err) _ = memDB.Close() batch := db.NewBatch() - require.NoError(t, batch.Put([]byte("key1"), []byte("1"))) - require.NoError(t, batch.Put([]byte("key2"), []byte("2"))) - require.NoError(t, batch.Put([]byte("key3"), []byte("3"))) + require.NoError(batch.Put([]byte("key1"), []byte("1"))) + require.NoError(batch.Put([]byte("key2"), []byte("2"))) + require.NoError(batch.Put([]byte("key3"), []byte("3"))) err = batch.Write() - // batch fails - require.ErrorIs(t, err, database.ErrClosed) + require.ErrorIs(err, database.ErrClosed) } func Test_MerkleDB_Value_Cache(t *testing.T) { + require := require.New(t) + memDB := memdb.New() db, err := New( context.Background(), @@ -196,156 +203,163 @@ func Test_MerkleDB_Value_Cache(t *testing.T) { NodeCacheSize: minCacheSize, }, ) - require.NoError(t, err) + require.NoError(err) batch := db.NewBatch() - require.NoError(t, batch.Put([]byte("key1"), []byte("1"))) - - require.NoError(t, batch.Put([]byte("key2"), []byte("2"))) - - require.NoError(t, err) - require.NoError(t, batch.Write()) + require.NoError(batch.Put([]byte("key1"), []byte("1"))) + require.NoError(batch.Put([]byte("key2"), []byte("2"))) + require.NoError(batch.Write()) batch = db.NewBatch() // force key2 to be inserted into the cache as not found - require.NoError(t, batch.Delete([]byte("key2"))) - require.NoError(t, batch.Write()) + require.NoError(batch.Delete([]byte("key2"))) + require.NoError(batch.Write()) - _ = memDB.Close() + require.NoError(memDB.Close()) // still works because key1 is read from cache value, err := db.Get([]byte("key1")) - require.NoError(t, err) - require.Equal(t, []byte("1"), value) + require.NoError(err) + require.Equal([]byte("1"), value) // still returns missing instead of closed because key2 is read from cache _, err = db.Get([]byte("key2")) - require.ErrorIs(t, err, database.ErrNotFound) + require.ErrorIs(err, database.ErrNotFound) } func Test_MerkleDB_Invalidate_Siblings_On_Commit(t *testing.T) { + require := require.New(t) + dbTrie, err := getBasicDB() - require.NoError(t, err) - require.NotNil(t, dbTrie) + require.NoError(err) + require.NotNil(dbTrie) viewToCommit, err := dbTrie.NewView() - require.NoError(t, err) + require.NoError(err) sibling1, err := dbTrie.NewView() - require.NoError(t, err) + require.NoError(err) sibling2, err := dbTrie.NewView() - require.NoError(t, err) + require.NoError(err) - require.False(t, sibling1.(*trieView).isInvalid()) - require.False(t, sibling2.(*trieView).isInvalid()) + require.False(sibling1.(*trieView).isInvalid()) + require.False(sibling2.(*trieView).isInvalid()) - require.NoError(t, viewToCommit.Insert(context.Background(), []byte{0}, []byte{0})) - require.NoError(t, viewToCommit.CommitToDB(context.Background())) + require.NoError(viewToCommit.Insert(context.Background(), []byte{0}, []byte{0})) + require.NoError(viewToCommit.CommitToDB(context.Background())) - require.True(t, sibling1.(*trieView).isInvalid()) - require.True(t, sibling2.(*trieView).isInvalid()) - require.False(t, viewToCommit.(*trieView).isInvalid()) + require.True(sibling1.(*trieView).isInvalid()) + require.True(sibling2.(*trieView).isInvalid()) + require.False(viewToCommit.(*trieView).isInvalid()) } func 
Test_MerkleDB_Commit_Proof_To_Empty_Trie(t *testing.T) { + require := require.New(t) + db, err := getBasicDB() - require.NoError(t, err) + require.NoError(err) batch := db.NewBatch() - require.NoError(t, batch.Put([]byte("key1"), []byte("1"))) - require.NoError(t, batch.Put([]byte("key2"), []byte("2"))) - require.NoError(t, batch.Put([]byte("key3"), []byte("3"))) - require.NoError(t, batch.Write()) + require.NoError(batch.Put([]byte("key1"), []byte("1"))) + require.NoError(batch.Put([]byte("key2"), []byte("2"))) + require.NoError(batch.Put([]byte("key3"), []byte("3"))) + require.NoError(batch.Write()) proof, err := db.GetRangeProof(context.Background(), []byte("key1"), []byte("key3"), 10) - require.NoError(t, err) + require.NoError(err) freshDB, err := getBasicDB() - require.NoError(t, err) + require.NoError(err) - require.NoError(t, freshDB.CommitRangeProof(context.Background(), []byte("key1"), proof)) + require.NoError(freshDB.CommitRangeProof(context.Background(), []byte("key1"), proof)) value, err := freshDB.Get([]byte("key2")) - require.NoError(t, err) - require.Equal(t, []byte("2"), value) + require.NoError(err) + require.Equal([]byte("2"), value) freshRoot, err := freshDB.GetMerkleRoot(context.Background()) - require.NoError(t, err) + require.NoError(err) oldRoot, err := db.GetMerkleRoot(context.Background()) - require.NoError(t, err) - require.Equal(t, oldRoot, freshRoot) + require.NoError(err) + require.Equal(oldRoot, freshRoot) } func Test_MerkleDB_Commit_Proof_To_Filled_Trie(t *testing.T) { + require := require.New(t) + db, err := getBasicDB() - require.NoError(t, err) + require.NoError(err) batch := db.NewBatch() - require.NoError(t, batch.Put([]byte("key1"), []byte("1"))) - require.NoError(t, batch.Put([]byte("key2"), []byte("2"))) - require.NoError(t, batch.Put([]byte("key3"), []byte("3"))) - require.NoError(t, batch.Write()) + require.NoError(batch.Put([]byte("key1"), []byte("1"))) + require.NoError(batch.Put([]byte("key2"), []byte("2"))) + require.NoError(batch.Put([]byte("key3"), []byte("3"))) + require.NoError(batch.Write()) proof, err := db.GetRangeProof(context.Background(), []byte("key1"), []byte("key3"), 10) - require.NoError(t, err) + require.NoError(err) freshDB, err := getBasicDB() - require.NoError(t, err) + require.NoError(err) batch = freshDB.NewBatch() - require.NoError(t, batch.Put([]byte("key1"), []byte("3"))) - require.NoError(t, batch.Put([]byte("key2"), []byte("4"))) - require.NoError(t, batch.Put([]byte("key3"), []byte("5"))) - require.NoError(t, batch.Put([]byte("key25"), []byte("5"))) - require.NoError(t, batch.Write()) + require.NoError(batch.Put([]byte("key1"), []byte("3"))) + require.NoError(batch.Put([]byte("key2"), []byte("4"))) + require.NoError(batch.Put([]byte("key3"), []byte("5"))) + require.NoError(batch.Put([]byte("key25"), []byte("5"))) + require.NoError(batch.Write()) - require.NoError(t, freshDB.CommitRangeProof(context.Background(), []byte("key1"), proof)) + require.NoError(freshDB.CommitRangeProof(context.Background(), []byte("key1"), proof)) value, err := freshDB.Get([]byte("key2")) - require.NoError(t, err) - require.Equal(t, []byte("2"), value) + require.NoError(err) + require.Equal([]byte("2"), value) freshRoot, err := freshDB.GetMerkleRoot(context.Background()) - require.NoError(t, err) + require.NoError(err) oldRoot, err := db.GetMerkleRoot(context.Background()) - require.NoError(t, err) - require.Equal(t, oldRoot, freshRoot) + require.NoError(err) + require.Equal(oldRoot, freshRoot) } func Test_MerkleDB_GetValues(t *testing.T) { + 
require := require.New(t) + db, err := getBasicDB() - require.NoError(t, err) + require.NoError(err) writeBasicBatch(t, db) keys := [][]byte{{0}, {1}, {2}, {10}} values, errors := db.GetValues(context.Background(), keys) - require.Len(t, values, len(keys)) - require.Len(t, errors, len(keys)) + require.Len(values, len(keys)) + require.Len(errors, len(keys)) // first 3 have values // last was not found - require.NoError(t, errors[0]) - require.NoError(t, errors[1]) - require.NoError(t, errors[2]) - require.ErrorIs(t, errors[3], database.ErrNotFound) + require.NoError(errors[0]) + require.NoError(errors[1]) + require.NoError(errors[2]) + require.ErrorIs(errors[3], database.ErrNotFound) - require.Equal(t, []byte{0}, values[0]) - require.Equal(t, []byte{1}, values[1]) - require.Equal(t, []byte{2}, values[2]) - require.Nil(t, values[3]) + require.Equal([]byte{0}, values[0]) + require.Equal([]byte{1}, values[1]) + require.Equal([]byte{2}, values[2]) + require.Nil(values[3]) } func Test_MerkleDB_InsertNil(t *testing.T) { + require := require.New(t) + db, err := getBasicDB() - require.NoError(t, err) + require.NoError(err) batch := db.NewBatch() - require.NoError(t, batch.Put([]byte("key0"), nil)) - require.NoError(t, batch.Write()) + require.NoError(batch.Put([]byte("key0"), nil)) + require.NoError(batch.Write()) value, err := db.Get([]byte("key0")) - require.NoError(t, err) - require.Nil(t, value) + require.NoError(err) + require.Nil(value) value, err = getNodeValue(db, "key0") - require.NoError(t, err) - require.Nil(t, value) + require.NoError(err) + require.Nil(value) } func Test_MerkleDB_InsertAndRetrieve(t *testing.T) { @@ -367,52 +381,60 @@ func Test_MerkleDB_InsertAndRetrieve(t *testing.T) { } func Test_MerkleDB_HealthCheck(t *testing.T) { + require := require.New(t) + db, err := getBasicDB() - require.NoError(t, err) + require.NoError(err) val, err := db.HealthCheck(context.Background()) - require.NoError(t, err) - require.Nil(t, val) + require.NoError(err) + require.Nil(val) } func Test_MerkleDB_Overwrite(t *testing.T) { + require := require.New(t) + db, err := getBasicDB() - require.NoError(t, err) + require.NoError(err) - require.NoError(t, db.Put([]byte("key"), []byte("value0"))) + require.NoError(db.Put([]byte("key"), []byte("value0"))) value, err := db.Get([]byte("key")) - require.NoError(t, err) - require.Equal(t, []byte("value0"), value) + require.NoError(err) + require.Equal([]byte("value0"), value) - require.NoError(t, db.Put([]byte("key"), []byte("value1"))) + require.NoError(db.Put([]byte("key"), []byte("value1"))) value, err = db.Get([]byte("key")) - require.NoError(t, err) - require.Equal(t, []byte("value1"), value) + require.NoError(err) + require.Equal([]byte("value1"), value) } func Test_MerkleDB_Delete(t *testing.T) { + require := require.New(t) + db, err := getBasicDB() - require.NoError(t, err) + require.NoError(err) - require.NoError(t, db.Put([]byte("key"), []byte("value0"))) + require.NoError(db.Put([]byte("key"), []byte("value0"))) value, err := db.Get([]byte("key")) - require.NoError(t, err) - require.Equal(t, []byte("value0"), value) + require.NoError(err) + require.Equal([]byte("value0"), value) - require.NoError(t, db.Delete([]byte("key"))) + require.NoError(db.Delete([]byte("key"))) value, err = db.Get([]byte("key")) - require.ErrorIs(t, err, database.ErrNotFound) - require.Nil(t, value) + require.ErrorIs(err, database.ErrNotFound) + require.Nil(value) } func Test_MerkleDB_DeleteMissingKey(t *testing.T) { + require := require.New(t) + db, err := getBasicDB() - 
require.NoError(t, err) + require.NoError(err) - require.NoError(t, db.Delete([]byte("key"))) + require.NoError(db.Delete([]byte("key"))) } // Test that untracked views aren't persisted to [db.childViews]. @@ -587,6 +609,8 @@ func TestDatabaseInvalidateChildrenExcept(t *testing.T) { } func Test_MerkleDB_Random_Insert_Ordering(t *testing.T) { + require := require.New(t) + totalState := 1000 var ( allKeys [][]byte @@ -602,11 +626,11 @@ func Test_MerkleDB_Random_Insert_Ordering(t *testing.T) { key = make([]byte, r.Intn(50)+len(prefix)) copy(key, prefix) _, err := r.Read(key[len(prefix):]) - require.NoError(t, err) + require.NoError(err) } else { key = make([]byte, r.Intn(50)) _, err := r.Read(key) - require.NoError(t, err) + require.NoError(err) } if _, ok := keyMap[string(key)]; !ok { allKeys = append(allKeys, key) @@ -630,25 +654,25 @@ func Test_MerkleDB_Random_Insert_Ordering(t *testing.T) { value = nil } else { _, err := r.Read(value) - require.NoError(t, err) + require.NoError(err) } ops = append(ops, &testOperation{key: key, value: value}) } db, err := getBasicDB() - require.NoError(t, err) + require.NoError(err) result, err := applyOperations(db, ops) - require.NoError(t, err) + require.NoError(err) primaryRoot, err := result.GetMerkleRoot(context.Background()) - require.NoError(t, err) + require.NoError(err) for shuffleIndex := 0; shuffleIndex < 3; shuffleIndex++ { r.Shuffle(totalState, func(i, j int) { ops[i], ops[j] = ops[j], ops[i] }) result, err := applyOperations(db, ops) - require.NoError(t, err) + require.NoError(err) newRoot, err := result.GetMerkleRoot(context.Background()) - require.NoError(t, err) - require.Equal(t, primaryRoot, newRoot) + require.NoError(err) + require.Equal(primaryRoot, newRoot) } } } diff --git a/x/merkledb/history_test.go b/x/merkledb/history_test.go index 951efb3c76c..96f63653a45 100644 --- a/x/merkledb/history_test.go +++ b/x/merkledb/history_test.go @@ -756,7 +756,7 @@ func TestHistoryGetChangesToRoot(t *testing.T) { got, err := history.getChangesToGetToRoot(tt.rootID, tt.start, tt.end) require.ErrorIs(err, tt.expectedErr) - if err != nil { + if tt.expectedErr != nil { return } tt.validateFunc(require, got) diff --git a/x/merkledb/maybe_test.go b/x/merkledb/maybe_test.go index acaf16308c3..3c0bc7c95a2 100644 --- a/x/merkledb/maybe_test.go +++ b/x/merkledb/maybe_test.go @@ -11,6 +11,8 @@ import ( ) func TestMaybeClone(t *testing.T) { + require := require.New(t) + // Case: Value is maybe { val := []byte{1, 2, 3} @@ -18,14 +20,14 @@ func TestMaybeClone(t *testing.T) { m := Some(val) mClone := Clone(m) m.value[0] = 0 - require.NotEqual(t, mClone.value, m.value) - require.Equal(t, originalVal, mClone.value) + require.NotEqual(mClone.value, m.value) + require.Equal(originalVal, mClone.value) } // Case: Value is nothing { m := Nothing[[]byte]() mClone := Clone(m) - require.True(t, mClone.IsNothing()) + require.True(mClone.IsNothing()) } } diff --git a/x/merkledb/path_test.go b/x/merkledb/path_test.go index 851d6ca01f2..e42ce0f0ca2 100644 --- a/x/merkledb/path_test.go +++ b/x/merkledb/path_test.go @@ -10,82 +10,92 @@ import ( ) func Test_SerializedPath_NibbleVal(t *testing.T) { + require := require.New(t) + path := SerializedPath{Value: []byte{240, 237}} - require.Equal(t, byte(15), path.NibbleVal(0)) - require.Equal(t, byte(0), path.NibbleVal(1)) - require.Equal(t, byte(14), path.NibbleVal(2)) - require.Equal(t, byte(13), path.NibbleVal(3)) + require.Equal(byte(15), path.NibbleVal(0)) + require.Equal(byte(0), path.NibbleVal(1)) + require.Equal(byte(14), 
path.NibbleVal(2)) + require.Equal(byte(13), path.NibbleVal(3)) } func Test_SerializedPath_AppendNibble(t *testing.T) { + require := require.New(t) + path := SerializedPath{Value: []byte{}} - require.Zero(t, path.NibbleLength) + require.Zero(path.NibbleLength) path = path.AppendNibble(1) - require.Equal(t, 1, path.NibbleLength) - require.Equal(t, byte(1), path.NibbleVal(0)) + require.Equal(1, path.NibbleLength) + require.Equal(byte(1), path.NibbleVal(0)) path = path.AppendNibble(2) - require.Equal(t, 2, path.NibbleLength) - require.Equal(t, byte(2), path.NibbleVal(1)) + require.Equal(2, path.NibbleLength) + require.Equal(byte(2), path.NibbleVal(1)) } func Test_SerializedPath_Has_Prefix(t *testing.T) { + require := require.New(t) + first := SerializedPath{Value: []byte("FirstKey")} prefix := SerializedPath{Value: []byte("FirstKe")} - require.True(t, first.HasPrefix(prefix)) - require.True(t, first.HasStrictPrefix(prefix)) + require.True(first.HasPrefix(prefix)) + require.True(first.HasStrictPrefix(prefix)) first = SerializedPath{Value: []byte("FirstKey"), NibbleLength: 16} prefix = SerializedPath{Value: []byte("FirstKey"), NibbleLength: 15} - require.True(t, first.HasPrefix(prefix)) - require.True(t, first.HasStrictPrefix(prefix)) + require.True(first.HasPrefix(prefix)) + require.True(first.HasStrictPrefix(prefix)) first = SerializedPath{Value: []byte("FirstKey"), NibbleLength: 15} prefix = SerializedPath{Value: []byte("FirstKey"), NibbleLength: 15} - require.True(t, first.HasPrefix(prefix)) - require.False(t, first.HasStrictPrefix(prefix)) + require.True(first.HasPrefix(prefix)) + require.False(first.HasStrictPrefix(prefix)) first = SerializedPath{Value: []byte{247}, NibbleLength: 2} prefix = SerializedPath{Value: []byte{240}, NibbleLength: 2} - require.False(t, first.HasPrefix(prefix)) - require.False(t, first.HasStrictPrefix(prefix)) + require.False(first.HasPrefix(prefix)) + require.False(first.HasStrictPrefix(prefix)) first = SerializedPath{Value: []byte{247}, NibbleLength: 2} prefix = SerializedPath{Value: []byte{240}, NibbleLength: 1} - require.True(t, first.HasPrefix(prefix)) - require.True(t, first.HasStrictPrefix(prefix)) + require.True(first.HasPrefix(prefix)) + require.True(first.HasStrictPrefix(prefix)) first = SerializedPath{Value: []byte{}, NibbleLength: 0} prefix = SerializedPath{Value: []byte{}, NibbleLength: 0} - require.True(t, first.HasPrefix(prefix)) - require.False(t, first.HasStrictPrefix(prefix)) + require.True(first.HasPrefix(prefix)) + require.False(first.HasStrictPrefix(prefix)) a := SerializedPath{Value: []byte{0x10}, NibbleLength: 1} b := SerializedPath{Value: []byte{0x10}, NibbleLength: 2} - require.False(t, a.HasPrefix(b)) + require.False(a.HasPrefix(b)) } func Test_SerializedPath_HasPrefix_BadInput(t *testing.T) { + require := require.New(t) + a := SerializedPath{Value: []byte{}} b := SerializedPath{Value: []byte{}, NibbleLength: 1} - require.False(t, a.HasPrefix(b)) + require.False(a.HasPrefix(b)) a = SerializedPath{Value: []byte{}, NibbleLength: 10} b = SerializedPath{Value: []byte{0x10}, NibbleLength: 1} - require.False(t, a.HasPrefix(b)) + require.False(a.HasPrefix(b)) } func Test_SerializedPath_Equal(t *testing.T) { + require := require.New(t) + first := SerializedPath{Value: []byte("FirstKey"), NibbleLength: 16} prefix := SerializedPath{Value: []byte("FirstKey"), NibbleLength: 16} - require.True(t, first.Equal(prefix)) + require.True(first.Equal(prefix)) first = SerializedPath{Value: []byte("FirstKey"), NibbleLength: 16} prefix = SerializedPath{Value: 
[]byte("FirstKey"), NibbleLength: 15} - require.False(t, first.Equal(prefix)) + require.False(first.Equal(prefix)) first = SerializedPath{Value: []byte("FirstKey"), NibbleLength: 15} prefix = SerializedPath{Value: []byte("FirstKey"), NibbleLength: 15} - require.True(t, first.Equal(prefix)) + require.True(first.Equal(prefix)) } diff --git a/x/merkledb/proof_test.go b/x/merkledb/proof_test.go index 0f1a41582dd..a0e174cbf98 100644 --- a/x/merkledb/proof_test.go +++ b/x/merkledb/proof_test.go @@ -30,13 +30,15 @@ func getBasicDB() (*Database, error) { } func writeBasicBatch(t *testing.T, db *Database) { + require := require.New(t) + batch := db.NewBatch() - require.NoError(t, batch.Put([]byte{0}, []byte{0})) - require.NoError(t, batch.Put([]byte{1}, []byte{1})) - require.NoError(t, batch.Put([]byte{2}, []byte{2})) - require.NoError(t, batch.Put([]byte{3}, []byte{3})) - require.NoError(t, batch.Put([]byte{4}, []byte{4})) - require.NoError(t, batch.Write()) + require.NoError(batch.Put([]byte{0}, []byte{0})) + require.NoError(batch.Put([]byte{1}, []byte{1})) + require.NoError(batch.Put([]byte{2}, []byte{2})) + require.NoError(batch.Put([]byte{3}, []byte{3})) + require.NoError(batch.Put([]byte{4}, []byte{4})) + require.NoError(batch.Write()) } func Test_Proof_Marshal(t *testing.T) { @@ -68,67 +70,73 @@ func Test_Proof_Empty(t *testing.T) { } func Test_Proof_MissingValue(t *testing.T) { + require := require.New(t) + trie, err := getBasicDB() - require.NoError(t, err) - require.NotNil(t, trie) + require.NoError(err) + require.NotNil(trie) - require.NoError(t, trie.Insert(context.Background(), []byte{1}, []byte{0})) - require.NoError(t, trie.Insert(context.Background(), []byte{1, 2}, []byte{0})) - require.NoError(t, trie.Insert(context.Background(), []byte{1, 2, 4}, []byte{0})) - require.NoError(t, trie.Insert(context.Background(), []byte{1, 3}, []byte{0})) + require.NoError(trie.Insert(context.Background(), []byte{1}, []byte{0})) + require.NoError(trie.Insert(context.Background(), []byte{1, 2}, []byte{0})) + require.NoError(trie.Insert(context.Background(), []byte{1, 2, 4}, []byte{0})) + require.NoError(trie.Insert(context.Background(), []byte{1, 3}, []byte{0})) // get a proof for a value not in the db proof, err := trie.GetProof(context.Background(), []byte{1, 2, 3}) - require.NoError(t, err) - require.NotNil(t, proof) + require.NoError(err) + require.NotNil(proof) - require.True(t, proof.Value.IsNothing()) + require.True(proof.Value.IsNothing()) proofBytes, err := Codec.EncodeProof(Version, proof) - require.NoError(t, err) + require.NoError(err) parsedProof := &Proof{} _, err = Codec.DecodeProof(proofBytes, parsedProof) - require.NoError(t, err) + require.NoError(err) verifyPath(t, proof.Path, parsedProof.Path) } func Test_Proof_Marshal_Errors(t *testing.T) { + require := require.New(t) + trie, err := getBasicDB() - require.NoError(t, err) - require.NotNil(t, trie) + require.NoError(err) + require.NotNil(trie) writeBasicBatch(t, trie) proof, err := trie.GetProof(context.Background(), []byte{1}) - require.NoError(t, err) - require.NotNil(t, proof) + require.NoError(err) + require.NotNil(proof) proofBytes, err := Codec.EncodeProof(Version, proof) - require.NoError(t, err) + require.NoError(err) for i := 1; i < len(proofBytes); i++ { broken := proofBytes[:i] parsed := &Proof{} _, err = Codec.DecodeProof(broken, parsed) - require.ErrorIs(t, err, io.ErrUnexpectedEOF) + require.ErrorIs(err, io.ErrUnexpectedEOF) } // add a child at an invalid index proof.Path[0].Children[255] = ids.Empty _, err = 
Codec.EncodeProof(Version, proof) - require.ErrorIs(t, err, errChildIndexTooLarge) + require.ErrorIs(err, errChildIndexTooLarge) } func verifyPath(t *testing.T, path1, path2 []ProofNode) { - require.Len(t, path2, len(path1)) + require := require.New(t) + + require.Len(path2, len(path1)) for i := range path1 { - require.True(t, bytes.Equal(path1[i].KeyPath.Value, path2[i].KeyPath.Value)) - require.Equal(t, path1[i].KeyPath.hasOddLength(), path2[i].KeyPath.hasOddLength()) - require.True(t, bytes.Equal(path1[i].ValueOrHash.value, path2[i].ValueOrHash.value)) + require.True(bytes.Equal(path1[i].KeyPath.Value, path2[i].KeyPath.Value)) + require.Equal(path1[i].KeyPath.hasOddLength(), path2[i].KeyPath.hasOddLength()) + require.True(bytes.Equal(path1[i].ValueOrHash.value, path2[i].ValueOrHash.value)) for childIndex := range path1[i].Children { - require.Equal(t, path1[i].Children[childIndex], path2[i].Children[childIndex]) + require.Equal(path1[i].Children[childIndex], path2[i].Children[childIndex]) } } } @@ -186,48 +194,54 @@ func Test_Proof_Verify_Bad_Data(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { + require := require.New(t) + db, err := getBasicDB() - require.NoError(t, err) + require.NoError(err) writeBasicBatch(t, db) proof, err := db.GetProof(context.Background(), []byte{2}) - require.NoError(t, err) - require.NotNil(t, proof) + require.NoError(err) + require.NotNil(proof) tt.malform(proof) err = proof.Verify(context.Background(), db.getMerkleRoot()) - require.ErrorIs(t, err, tt.expectedErr) + require.ErrorIs(err, tt.expectedErr) }) } } func Test_Proof_ValueOrHashMatches(t *testing.T) { - require.True(t, valueOrHashMatches(Some([]byte{0}), Some([]byte{0}))) - require.False(t, valueOrHashMatches(Nothing[[]byte](), Some(hashing.ComputeHash256([]byte{0})))) - require.True(t, valueOrHashMatches(Nothing[[]byte](), Nothing[[]byte]())) - - require.False(t, valueOrHashMatches(Some([]byte{0}), Nothing[[]byte]())) - require.False(t, valueOrHashMatches(Nothing[[]byte](), Some([]byte{0}))) - require.False(t, valueOrHashMatches(Nothing[[]byte](), Some(hashing.ComputeHash256([]byte{1})))) - require.False(t, valueOrHashMatches(Some(hashing.ComputeHash256([]byte{0})), Nothing[[]byte]())) + require := require.New(t) + + require.True(valueOrHashMatches(Some([]byte{0}), Some([]byte{0}))) + require.False(valueOrHashMatches(Nothing[[]byte](), Some(hashing.ComputeHash256([]byte{0})))) + require.True(valueOrHashMatches(Nothing[[]byte](), Nothing[[]byte]())) + + require.False(valueOrHashMatches(Some([]byte{0}), Nothing[[]byte]())) + require.False(valueOrHashMatches(Nothing[[]byte](), Some([]byte{0}))) + require.False(valueOrHashMatches(Nothing[[]byte](), Some(hashing.ComputeHash256([]byte{1})))) + require.False(valueOrHashMatches(Some(hashing.ComputeHash256([]byte{0})), Nothing[[]byte]())) } func Test_RangeProof_Extra_Value(t *testing.T) { + require := require.New(t) + db, err := getBasicDB() - require.NoError(t, err) + require.NoError(err) writeBasicBatch(t, db) val, err := db.Get([]byte{2}) - require.NoError(t, err) - require.Equal(t, []byte{2}, val) + require.NoError(err) + require.Equal([]byte{2}, val) proof, err := db.GetRangeProof(context.Background(), []byte{1}, []byte{5, 5}, 10) - require.NoError(t, err) - require.NotNil(t, proof) + require.NoError(err) + require.NotNil(proof) - require.NoError(t, proof.Verify( + require.NoError(proof.Verify( context.Background(), []byte{1}, []byte{5, 5}, @@ -242,7 +256,7 @@ func Test_RangeProof_Extra_Value(t *testing.T) { []byte{5, 5}, 
db.root.id, ) - require.ErrorIs(t, err, ErrInvalidProof) + require.ErrorIs(err, ErrInvalidProof) } func Test_RangeProof_Verify_Bad_Data(t *testing.T) { @@ -290,71 +304,77 @@ func Test_RangeProof_Verify_Bad_Data(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { + require := require.New(t) + db, err := getBasicDB() - require.NoError(t, err) + require.NoError(err) writeBasicBatch(t, db) proof, err := db.GetRangeProof(context.Background(), []byte{2}, []byte{3, 0}, 50) - require.NoError(t, err) - require.NotNil(t, proof) + require.NoError(err) + require.NotNil(proof) tt.malform(proof) err = proof.Verify(context.Background(), []byte{2}, []byte{3, 0}, db.getMerkleRoot()) - require.ErrorIs(t, err, tt.expectedErr) + require.ErrorIs(err, tt.expectedErr) }) } } func Test_RangeProof_MaxLength(t *testing.T) { + require := require.New(t) + dbTrie, err := getBasicDB() - require.NoError(t, err) - require.NotNil(t, dbTrie) + require.NoError(err) + require.NotNil(dbTrie) trie, err := dbTrie.NewView() - require.NoError(t, err) + require.NoError(err) _, err = trie.GetRangeProof(context.Background(), nil, nil, -1) - require.ErrorIs(t, err, ErrInvalidMaxLength) + require.ErrorIs(err, ErrInvalidMaxLength) _, err = trie.GetRangeProof(context.Background(), nil, nil, 0) - require.ErrorIs(t, err, ErrInvalidMaxLength) + require.ErrorIs(err, ErrInvalidMaxLength) } func Test_Proof(t *testing.T) { + require := require.New(t) + dbTrie, err := getBasicDB() - require.NoError(t, err) - require.NotNil(t, dbTrie) + require.NoError(err) + require.NotNil(dbTrie) trie, err := dbTrie.NewView() - require.NoError(t, err) + require.NoError(err) - require.NoError(t, trie.Insert(context.Background(), []byte("key0"), []byte("value0"))) - require.NoError(t, trie.Insert(context.Background(), []byte("key1"), []byte("value1"))) - require.NoError(t, trie.Insert(context.Background(), []byte("key2"), []byte("value2"))) - require.NoError(t, trie.Insert(context.Background(), []byte("key3"), []byte("value3"))) - require.NoError(t, trie.Insert(context.Background(), []byte("key4"), []byte("value4"))) + require.NoError(trie.Insert(context.Background(), []byte("key0"), []byte("value0"))) + require.NoError(trie.Insert(context.Background(), []byte("key1"), []byte("value1"))) + require.NoError(trie.Insert(context.Background(), []byte("key2"), []byte("value2"))) + require.NoError(trie.Insert(context.Background(), []byte("key3"), []byte("value3"))) + require.NoError(trie.Insert(context.Background(), []byte("key4"), []byte("value4"))) _, err = trie.GetMerkleRoot(context.Background()) - require.NoError(t, err) + require.NoError(err) proof, err := trie.GetProof(context.Background(), []byte("key1")) - require.NoError(t, err) - require.NotNil(t, proof) + require.NoError(err) + require.NotNil(proof) - require.Len(t, proof.Path, 3) + require.Len(proof.Path, 3) - require.Equal(t, newPath([]byte("key1")).Serialize(), proof.Path[2].KeyPath) - require.Equal(t, Some([]byte("value1")), proof.Path[2].ValueOrHash) + require.Equal(newPath([]byte("key1")).Serialize(), proof.Path[2].KeyPath) + require.Equal(Some([]byte("value1")), proof.Path[2].ValueOrHash) - require.Equal(t, newPath([]byte{}).Serialize(), proof.Path[0].KeyPath) - require.True(t, proof.Path[0].ValueOrHash.IsNothing()) + require.Equal(newPath([]byte{}).Serialize(), proof.Path[0].KeyPath) + require.True(proof.Path[0].ValueOrHash.IsNothing()) expectedRootID, err := trie.GetMerkleRoot(context.Background()) - require.NoError(t, err) - require.NoError(t, 
proof.Verify(context.Background(), expectedRootID)) + require.NoError(err) + require.NoError(proof.Verify(context.Background(), expectedRootID)) proof.Path[0].ValueOrHash = Some([]byte("value2")) err = proof.Verify(context.Background(), expectedRootID) - require.ErrorIs(t, err, ErrInvalidProof) + require.ErrorIs(err, ErrInvalidProof) } func Test_RangeProof_Syntactic_Verify(t *testing.T) { @@ -520,10 +540,8 @@ func Test_RangeProof_Syntactic_Verify(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - require := require.New(t) - err := tt.proof.Verify(context.Background(), tt.start, tt.end, ids.Empty) - require.ErrorIs(err, tt.expectedErr) + require.ErrorIs(t, err, tt.expectedErr) }) } } @@ -564,46 +582,50 @@ func Test_RangeProof(t *testing.T) { } func Test_RangeProof_BadBounds(t *testing.T) { + require := require.New(t) + db, err := getBasicDB() - require.NoError(t, err) + require.NoError(err) // non-nil start/end proof, err := db.GetRangeProof(context.Background(), []byte{4}, []byte{3}, 50) - require.ErrorIs(t, err, ErrStartAfterEnd) - require.Nil(t, proof) + require.ErrorIs(err, ErrStartAfterEnd) + require.Nil(proof) } func Test_RangeProof_NilStart(t *testing.T) { + require := require.New(t) + db, err := getBasicDB() - require.NoError(t, err) + require.NoError(err) batch := db.NewBatch() - require.NoError(t, batch.Put([]byte("key1"), []byte("value1"))) - require.NoError(t, batch.Put([]byte("key2"), []byte("value2"))) - require.NoError(t, batch.Put([]byte("key3"), []byte("value3"))) - require.NoError(t, batch.Put([]byte("key4"), []byte("value4"))) - require.NoError(t, batch.Write()) + require.NoError(batch.Put([]byte("key1"), []byte("value1"))) + require.NoError(batch.Put([]byte("key2"), []byte("value2"))) + require.NoError(batch.Put([]byte("key3"), []byte("value3"))) + require.NoError(batch.Put([]byte("key4"), []byte("value4"))) + require.NoError(batch.Write()) val, err := db.Get([]byte("key1")) - require.NoError(t, err) - require.Equal(t, []byte("value1"), val) + require.NoError(err) + require.Equal([]byte("value1"), val) proof, err := db.GetRangeProof(context.Background(), nil, []byte("key35"), 2) - require.NoError(t, err) - require.NotNil(t, proof) + require.NoError(err) + require.NotNil(proof) - require.Len(t, proof.KeyValues, 2) + require.Len(proof.KeyValues, 2) - require.Equal(t, []byte("key1"), proof.KeyValues[0].Key) - require.Equal(t, []byte("key2"), proof.KeyValues[1].Key) + require.Equal([]byte("key1"), proof.KeyValues[0].Key) + require.Equal([]byte("key2"), proof.KeyValues[1].Key) - require.Equal(t, []byte("value1"), proof.KeyValues[0].Value) - require.Equal(t, []byte("value2"), proof.KeyValues[1].Value) + require.Equal([]byte("value1"), proof.KeyValues[0].Value) + require.Equal([]byte("value2"), proof.KeyValues[1].Value) - require.Equal(t, newPath([]byte("key2")).Serialize(), proof.EndProof[2].KeyPath) - require.Equal(t, SerializedPath{Value: []uint8{0x6b, 0x65, 0x79, 0x30}, NibbleLength: 7}, proof.EndProof[1].KeyPath) - require.Equal(t, newPath([]byte("")).Serialize(), proof.EndProof[0].KeyPath) + require.Equal(newPath([]byte("key2")).Serialize(), proof.EndProof[2].KeyPath) + require.Equal(SerializedPath{Value: []uint8{0x6b, 0x65, 0x79, 0x30}, NibbleLength: 7}, proof.EndProof[1].KeyPath) + require.Equal(newPath([]byte("")).Serialize(), proof.EndProof[0].KeyPath) - require.NoError(t, proof.Verify( + require.NoError(proof.Verify( context.Background(), nil, []byte("key35"), @@ -612,30 +634,32 @@ func Test_RangeProof_NilStart(t *testing.T) { } func 
Test_RangeProof_NilEnd(t *testing.T) { + require := require.New(t) + db, err := getBasicDB() - require.NoError(t, err) + require.NoError(err) writeBasicBatch(t, db) - require.NoError(t, err) + require.NoError(err) proof, err := db.GetRangeProof(context.Background(), []byte{1}, nil, 2) - require.NoError(t, err) - require.NotNil(t, proof) + require.NoError(err) + require.NotNil(proof) - require.Len(t, proof.KeyValues, 2) + require.Len(proof.KeyValues, 2) - require.Equal(t, []byte{1}, proof.KeyValues[0].Key) - require.Equal(t, []byte{2}, proof.KeyValues[1].Key) + require.Equal([]byte{1}, proof.KeyValues[0].Key) + require.Equal([]byte{2}, proof.KeyValues[1].Key) - require.Equal(t, []byte{1}, proof.KeyValues[0].Value) - require.Equal(t, []byte{2}, proof.KeyValues[1].Value) + require.Equal([]byte{1}, proof.KeyValues[0].Value) + require.Equal([]byte{2}, proof.KeyValues[1].Value) - require.Equal(t, []byte{1}, proof.StartProof[0].KeyPath.Value) + require.Equal([]byte{1}, proof.StartProof[0].KeyPath.Value) - require.Equal(t, []byte{}, proof.EndProof[0].KeyPath.Value) - require.Equal(t, []byte{0}, proof.EndProof[1].KeyPath.Value) - require.Equal(t, []byte{2}, proof.EndProof[2].KeyPath.Value) + require.Equal([]byte{}, proof.EndProof[0].KeyPath.Value) + require.Equal([]byte{0}, proof.EndProof[1].KeyPath.Value) + require.Equal([]byte{2}, proof.EndProof[2].KeyPath.Value) - require.NoError(t, proof.Verify( + require.NoError(proof.Verify( context.Background(), []byte{1}, nil, @@ -644,38 +668,40 @@ func Test_RangeProof_NilEnd(t *testing.T) { } func Test_RangeProof_EmptyValues(t *testing.T) { + require := require.New(t) + db, err := getBasicDB() - require.NoError(t, err) + require.NoError(err) batch := db.NewBatch() - require.NoError(t, batch.Put([]byte("key1"), nil)) - require.NoError(t, batch.Put([]byte("key12"), []byte("value1"))) - require.NoError(t, batch.Put([]byte("key2"), []byte{})) - require.NoError(t, batch.Write()) + require.NoError(batch.Put([]byte("key1"), nil)) + require.NoError(batch.Put([]byte("key12"), []byte("value1"))) + require.NoError(batch.Put([]byte("key2"), []byte{})) + require.NoError(batch.Write()) val, err := db.Get([]byte("key12")) - require.NoError(t, err) - require.Equal(t, []byte("value1"), val) + require.NoError(err) + require.Equal([]byte("value1"), val) proof, err := db.GetRangeProof(context.Background(), []byte("key1"), []byte("key2"), 10) - require.NoError(t, err) - require.NotNil(t, proof) + require.NoError(err) + require.NotNil(proof) - require.Len(t, proof.KeyValues, 3) - require.Equal(t, []byte("key1"), proof.KeyValues[0].Key) - require.Empty(t, proof.KeyValues[0].Value) - require.Equal(t, []byte("key12"), proof.KeyValues[1].Key) - require.Equal(t, []byte("value1"), proof.KeyValues[1].Value) - require.Equal(t, []byte("key2"), proof.KeyValues[2].Key) - require.Empty(t, proof.KeyValues[2].Value) + require.Len(proof.KeyValues, 3) + require.Equal([]byte("key1"), proof.KeyValues[0].Key) + require.Empty(proof.KeyValues[0].Value) + require.Equal([]byte("key12"), proof.KeyValues[1].Key) + require.Equal([]byte("value1"), proof.KeyValues[1].Value) + require.Equal([]byte("key2"), proof.KeyValues[2].Key) + require.Empty(proof.KeyValues[2].Value) - require.Len(t, proof.StartProof, 1) - require.Equal(t, newPath([]byte("key1")).Serialize(), proof.StartProof[0].KeyPath) + require.Len(proof.StartProof, 1) + require.Equal(newPath([]byte("key1")).Serialize(), proof.StartProof[0].KeyPath) - require.Len(t, proof.EndProof, 3) - require.Equal(t, newPath([]byte("key2")).Serialize(), 
proof.EndProof[2].KeyPath) - require.Equal(t, newPath([]byte{}).Serialize(), proof.EndProof[0].KeyPath) + require.Len(proof.EndProof, 3) + require.Equal(newPath([]byte("key2")).Serialize(), proof.EndProof[2].KeyPath) + require.Equal(newPath([]byte{}).Serialize(), proof.EndProof[0].KeyPath) - require.NoError(t, proof.Verify( + require.NoError(proof.Verify( context.Background(), []byte("key1"), []byte("key2"), @@ -684,290 +710,306 @@ func Test_RangeProof_EmptyValues(t *testing.T) { } func Test_RangeProof_Marshal_Nil(t *testing.T) { + require := require.New(t) + db, err := getBasicDB() - require.NoError(t, err) + require.NoError(err) writeBasicBatch(t, db) val, err := db.Get([]byte{1}) - require.NoError(t, err) - require.Equal(t, []byte{1}, val) + require.NoError(err) + require.Equal([]byte{1}, val) proof, err := db.GetRangeProof(context.Background(), []byte("key1"), []byte("key35"), 10) - require.NoError(t, err) - require.NotNil(t, proof) + require.NoError(err) + require.NotNil(proof) proofBytes, err := Codec.EncodeRangeProof(Version, proof) - require.NoError(t, err) + require.NoError(err) parsedProof := &RangeProof{} _, err = Codec.DecodeRangeProof(proofBytes, parsedProof) - require.NoError(t, err) + require.NoError(err) verifyPath(t, proof.StartProof, parsedProof.StartProof) verifyPath(t, proof.EndProof, parsedProof.EndProof) for index, kv := range proof.KeyValues { - require.True(t, bytes.Equal(kv.Key, parsedProof.KeyValues[index].Key)) - require.True(t, bytes.Equal(kv.Value, parsedProof.KeyValues[index].Value)) + require.True(bytes.Equal(kv.Key, parsedProof.KeyValues[index].Key)) + require.True(bytes.Equal(kv.Value, parsedProof.KeyValues[index].Value)) } } func Test_RangeProof_Marshal(t *testing.T) { + require := require.New(t) + db, err := getBasicDB() - require.NoError(t, err) + require.NoError(err) writeBasicBatch(t, db) val, err := db.Get([]byte{1}) - require.NoError(t, err) - require.Equal(t, []byte{1}, val) + require.NoError(err) + require.Equal([]byte{1}, val) proof, err := db.GetRangeProof(context.Background(), nil, nil, 10) - require.NoError(t, err) - require.NotNil(t, proof) + require.NoError(err) + require.NotNil(proof) proofBytes, err := Codec.EncodeRangeProof(Version, proof) - require.NoError(t, err) + require.NoError(err) parsedProof := &RangeProof{} _, err = Codec.DecodeRangeProof(proofBytes, parsedProof) - require.NoError(t, err) + require.NoError(err) verifyPath(t, proof.StartProof, parsedProof.StartProof) verifyPath(t, proof.EndProof, parsedProof.EndProof) for index, state := range proof.KeyValues { - require.True(t, bytes.Equal(state.Key, parsedProof.KeyValues[index].Key)) - require.True(t, bytes.Equal(state.Value, parsedProof.KeyValues[index].Value)) + require.True(bytes.Equal(state.Key, parsedProof.KeyValues[index].Key)) + require.True(bytes.Equal(state.Value, parsedProof.KeyValues[index].Value)) } } func Test_RangeProof_Marshal_Errors(t *testing.T) { + require := require.New(t) + db, err := getBasicDB() - require.NoError(t, err) + require.NoError(err) writeBasicBatch(t, db) proof, err := db.GetRangeProof(context.Background(), nil, nil, 10) - require.NoError(t, err) - require.NotNil(t, proof) + require.NoError(err) + require.NotNil(proof) proofBytes, err := Codec.EncodeRangeProof(Version, proof) - require.NoError(t, err) + require.NoError(err) for i := 1; i < len(proofBytes); i++ { broken := proofBytes[:i] parsedProof := &RangeProof{} _, err = Codec.DecodeRangeProof(broken, parsedProof) - require.ErrorIs(t, err, io.ErrUnexpectedEOF) + require.ErrorIs(err, 
io.ErrUnexpectedEOF) } } func Test_ChangeProof_Marshal(t *testing.T) { + require := require.New(t) + db, err := getBasicDB() - require.NoError(t, err) + require.NoError(err) batch := db.NewBatch() - require.NoError(t, batch.Put([]byte("key0"), []byte("value0"))) - require.NoError(t, batch.Put([]byte("key1"), []byte("value1"))) - require.NoError(t, batch.Put([]byte("key2"), []byte("value2"))) - require.NoError(t, batch.Put([]byte("key3"), []byte("value3"))) - require.NoError(t, batch.Put([]byte("key4"), []byte("value4"))) - require.NoError(t, batch.Write()) + require.NoError(batch.Put([]byte("key0"), []byte("value0"))) + require.NoError(batch.Put([]byte("key1"), []byte("value1"))) + require.NoError(batch.Put([]byte("key2"), []byte("value2"))) + require.NoError(batch.Put([]byte("key3"), []byte("value3"))) + require.NoError(batch.Put([]byte("key4"), []byte("value4"))) + require.NoError(batch.Write()) startRoot, err := db.GetMerkleRoot(context.Background()) - require.NoError(t, err) + require.NoError(err) batch = db.NewBatch() - require.NoError(t, batch.Put([]byte("key4"), []byte("value0"))) - require.NoError(t, batch.Put([]byte("key5"), []byte("value1"))) - require.NoError(t, batch.Put([]byte("key6"), []byte("value2"))) - require.NoError(t, batch.Put([]byte("key7"), []byte("value3"))) - require.NoError(t, batch.Put([]byte("key8"), []byte("value4"))) - require.NoError(t, batch.Write()) + require.NoError(batch.Put([]byte("key4"), []byte("value0"))) + require.NoError(batch.Put([]byte("key5"), []byte("value1"))) + require.NoError(batch.Put([]byte("key6"), []byte("value2"))) + require.NoError(batch.Put([]byte("key7"), []byte("value3"))) + require.NoError(batch.Put([]byte("key8"), []byte("value4"))) + require.NoError(batch.Write()) batch = db.NewBatch() - require.NoError(t, batch.Put([]byte("key9"), []byte("value0"))) - require.NoError(t, batch.Put([]byte("key10"), []byte("value1"))) - require.NoError(t, batch.Put([]byte("key11"), []byte("value2"))) - require.NoError(t, batch.Put([]byte("key12"), []byte("value3"))) - require.NoError(t, batch.Put([]byte("key13"), []byte("value4"))) - require.NoError(t, batch.Write()) + require.NoError(batch.Put([]byte("key9"), []byte("value0"))) + require.NoError(batch.Put([]byte("key10"), []byte("value1"))) + require.NoError(batch.Put([]byte("key11"), []byte("value2"))) + require.NoError(batch.Put([]byte("key12"), []byte("value3"))) + require.NoError(batch.Put([]byte("key13"), []byte("value4"))) + require.NoError(batch.Write()) endroot, err := db.GetMerkleRoot(context.Background()) - require.NoError(t, err) + require.NoError(err) proof, err := db.GetChangeProof(context.Background(), startRoot, endroot, nil, nil, 50) - require.NoError(t, err) - require.NotNil(t, proof) - require.True(t, proof.HadRootsInHistory) + require.NoError(err) + require.NotNil(proof) + require.True(proof.HadRootsInHistory) proofBytes, err := Codec.EncodeChangeProof(Version, proof) - require.NoError(t, err) + require.NoError(err) parsedProof := &ChangeProof{} _, err = Codec.DecodeChangeProof(proofBytes, parsedProof) - require.NoError(t, err) + require.NoError(err) verifyPath(t, proof.StartProof, parsedProof.StartProof) verifyPath(t, proof.EndProof, parsedProof.EndProof) for index, kv := range proof.KeyChanges { - require.True(t, bytes.Equal(kv.Key, parsedProof.KeyChanges[index].Key)) - require.True(t, bytes.Equal(kv.Value.value, parsedProof.KeyChanges[index].Value.value)) + require.True(bytes.Equal(kv.Key, parsedProof.KeyChanges[index].Key)) + require.True(bytes.Equal(kv.Value.value, 
parsedProof.KeyChanges[index].Value.value)) } } func Test_ChangeProof_Marshal_Errors(t *testing.T) { + require := require.New(t) + db, err := getBasicDB() - require.NoError(t, err) + require.NoError(err) writeBasicBatch(t, db) startRoot, err := db.GetMerkleRoot(context.Background()) - require.NoError(t, err) + require.NoError(err) batch := db.NewBatch() - require.NoError(t, batch.Put([]byte{5}, []byte{5})) - require.NoError(t, batch.Put([]byte{6}, []byte{6})) - require.NoError(t, batch.Put([]byte{7}, []byte{7})) - require.NoError(t, batch.Put([]byte{8}, []byte{8})) - require.NoError(t, batch.Delete([]byte{0})) - require.NoError(t, batch.Write()) + require.NoError(batch.Put([]byte{5}, []byte{5})) + require.NoError(batch.Put([]byte{6}, []byte{6})) + require.NoError(batch.Put([]byte{7}, []byte{7})) + require.NoError(batch.Put([]byte{8}, []byte{8})) + require.NoError(batch.Delete([]byte{0})) + require.NoError(batch.Write()) batch = db.NewBatch() - require.NoError(t, batch.Put([]byte{9}, []byte{9})) - require.NoError(t, batch.Put([]byte{10}, []byte{10})) - require.NoError(t, batch.Put([]byte{11}, []byte{11})) - require.NoError(t, batch.Put([]byte{12}, []byte{12})) - require.NoError(t, batch.Delete([]byte{1})) - require.NoError(t, batch.Write()) + require.NoError(batch.Put([]byte{9}, []byte{9})) + require.NoError(batch.Put([]byte{10}, []byte{10})) + require.NoError(batch.Put([]byte{11}, []byte{11})) + require.NoError(batch.Put([]byte{12}, []byte{12})) + require.NoError(batch.Delete([]byte{1})) + require.NoError(batch.Write()) endroot, err := db.GetMerkleRoot(context.Background()) - require.NoError(t, err) + require.NoError(err) proof, err := db.GetChangeProof(context.Background(), startRoot, endroot, nil, nil, 50) - require.NoError(t, err) - require.NotNil(t, proof) - require.True(t, proof.HadRootsInHistory) - require.Len(t, proof.KeyChanges, 10) + require.NoError(err) + require.NotNil(proof) + require.True(proof.HadRootsInHistory) + require.Len(proof.KeyChanges, 10) proofBytes, err := Codec.EncodeChangeProof(Version, proof) - require.NoError(t, err) + require.NoError(err) for i := 1; i < len(proofBytes); i++ { broken := proofBytes[:i] parsedProof := &ChangeProof{} _, err = Codec.DecodeChangeProof(broken, parsedProof) - require.ErrorIs(t, err, io.ErrUnexpectedEOF) + require.ErrorIs(err, io.ErrUnexpectedEOF) } } func Test_ChangeProof_Missing_History_For_EndRoot(t *testing.T) { + require := require.New(t) + db, err := getBasicDB() - require.NoError(t, err) + require.NoError(err) startRoot, err := db.GetMerkleRoot(context.Background()) - require.NoError(t, err) + require.NoError(err) proof, err := db.GetChangeProof(context.Background(), startRoot, ids.Empty, nil, nil, 50) - require.NoError(t, err) - require.NotNil(t, proof) - require.False(t, proof.HadRootsInHistory) + require.NoError(err) + require.NotNil(proof) + require.False(proof.HadRootsInHistory) - require.NoError(t, proof.Verify(context.Background(), db, nil, nil, db.getMerkleRoot())) + require.NoError(proof.Verify(context.Background(), db, nil, nil, db.getMerkleRoot())) } func Test_ChangeProof_BadBounds(t *testing.T) { + require := require.New(t) + db, err := getBasicDB() - require.NoError(t, err) + require.NoError(err) startRoot, err := db.GetMerkleRoot(context.Background()) - require.NoError(t, err) + require.NoError(err) - require.NoError(t, db.Insert(context.Background(), []byte{0}, []byte{0})) + require.NoError(db.Insert(context.Background(), []byte{0}, []byte{0})) endRoot, err := db.GetMerkleRoot(context.Background()) - 
-	require.NoError(t, err)
+	require.NoError(err)
 
 	// non-nil start/end
 	proof, err := db.GetChangeProof(context.Background(), startRoot, endRoot, []byte("key4"), []byte("key3"), 50)
-	require.ErrorIs(t, err, ErrStartAfterEnd)
-	require.Nil(t, proof)
+	require.ErrorIs(err, ErrStartAfterEnd)
+	require.Nil(proof)
 }
 
 func Test_ChangeProof_Verify(t *testing.T) {
+	require := require.New(t)
+
 	db, err := getBasicDB()
-	require.NoError(t, err)
+	require.NoError(err)
 	batch := db.NewBatch()
-	require.NoError(t, batch.Put([]byte("key20"), []byte("value0")))
-	require.NoError(t, batch.Put([]byte("key21"), []byte("value1")))
-	require.NoError(t, batch.Put([]byte("key22"), []byte("value2")))
-	require.NoError(t, batch.Put([]byte("key23"), []byte("value3")))
-	require.NoError(t, batch.Put([]byte("key24"), []byte("value4")))
-	require.NoError(t, batch.Write())
+	require.NoError(batch.Put([]byte("key20"), []byte("value0")))
+	require.NoError(batch.Put([]byte("key21"), []byte("value1")))
+	require.NoError(batch.Put([]byte("key22"), []byte("value2")))
+	require.NoError(batch.Put([]byte("key23"), []byte("value3")))
+	require.NoError(batch.Put([]byte("key24"), []byte("value4")))
+	require.NoError(batch.Write())
 
 	startRoot, err := db.GetMerkleRoot(context.Background())
-	require.NoError(t, err)
+	require.NoError(err)
 
 	// create a second db that has "synced" to the start root
 	dbClone, err := getBasicDB()
-	require.NoError(t, err)
+	require.NoError(err)
 	batch = dbClone.NewBatch()
-	require.NoError(t, batch.Put([]byte("key20"), []byte("value0")))
-	require.NoError(t, batch.Put([]byte("key21"), []byte("value1")))
-	require.NoError(t, batch.Put([]byte("key22"), []byte("value2")))
-	require.NoError(t, batch.Put([]byte("key23"), []byte("value3")))
-	require.NoError(t, batch.Put([]byte("key24"), []byte("value4")))
-	require.NoError(t, batch.Write())
+	require.NoError(batch.Put([]byte("key20"), []byte("value0")))
+	require.NoError(batch.Put([]byte("key21"), []byte("value1")))
+	require.NoError(batch.Put([]byte("key22"), []byte("value2")))
+	require.NoError(batch.Put([]byte("key23"), []byte("value3")))
+	require.NoError(batch.Put([]byte("key24"), []byte("value4")))
+	require.NoError(batch.Write())
 
 	// the second db has started to sync some of the range outside of the range proof
 	batch = dbClone.NewBatch()
-	require.NoError(t, batch.Put([]byte("key31"), []byte("value1")))
-	require.NoError(t, batch.Write())
+	require.NoError(batch.Put([]byte("key31"), []byte("value1")))
+	require.NoError(batch.Write())
 
 	batch = db.NewBatch()
-	require.NoError(t, batch.Put([]byte("key25"), []byte("value0")))
-	require.NoError(t, batch.Put([]byte("key26"), []byte("value1")))
-	require.NoError(t, batch.Put([]byte("key27"), []byte("value2")))
-	require.NoError(t, batch.Put([]byte("key28"), []byte("value3")))
-	require.NoError(t, batch.Put([]byte("key29"), []byte("value4")))
-	require.NoError(t, batch.Write())
+	require.NoError(batch.Put([]byte("key25"), []byte("value0")))
+	require.NoError(batch.Put([]byte("key26"), []byte("value1")))
+	require.NoError(batch.Put([]byte("key27"), []byte("value2")))
+	require.NoError(batch.Put([]byte("key28"), []byte("value3")))
+	require.NoError(batch.Put([]byte("key29"), []byte("value4")))
+	require.NoError(batch.Write())
 
 	batch = db.NewBatch()
-	require.NoError(t, batch.Put([]byte("key30"), []byte("value0")))
-	require.NoError(t, batch.Put([]byte("key31"), []byte("value1")))
-	require.NoError(t, batch.Put([]byte("key32"), []byte("value2")))
-	require.NoError(t, batch.Delete([]byte("key21")))
-	require.NoError(t, batch.Delete([]byte("key22")))
-	require.NoError(t, batch.Write())
+	require.NoError(batch.Put([]byte("key30"), []byte("value0")))
+	require.NoError(batch.Put([]byte("key31"), []byte("value1")))
+	require.NoError(batch.Put([]byte("key32"), []byte("value2")))
+	require.NoError(batch.Delete([]byte("key21")))
+	require.NoError(batch.Delete([]byte("key22")))
+	require.NoError(batch.Write())
 
 	endRoot, err := db.GetMerkleRoot(context.Background())
-	require.NoError(t, err)
+	require.NoError(err)
 
 	// non-nil start/end
 	proof, err := db.GetChangeProof(context.Background(), startRoot, endRoot, []byte("key21"), []byte("key30"), 50)
-	require.NoError(t, err)
-	require.NotNil(t, proof)
+	require.NoError(err)
+	require.NotNil(proof)
 
-	require.NoError(t, proof.Verify(context.Background(), dbClone, []byte("key21"), []byte("key30"), db.getMerkleRoot()))
+	require.NoError(proof.Verify(context.Background(), dbClone, []byte("key21"), []byte("key30"), db.getMerkleRoot()))
 
 	// low maxLength
 	proof, err = db.GetChangeProof(context.Background(), startRoot, endRoot, nil, nil, 5)
-	require.NoError(t, err)
-	require.NotNil(t, proof)
+	require.NoError(err)
+	require.NotNil(proof)
 
-	require.NoError(t, proof.Verify(context.Background(), dbClone, nil, nil, db.getMerkleRoot()))
+	require.NoError(proof.Verify(context.Background(), dbClone, nil, nil, db.getMerkleRoot()))
 
 	// nil start/end
 	proof, err = db.GetChangeProof(context.Background(), startRoot, endRoot, nil, nil, 50)
-	require.NoError(t, err)
-	require.NotNil(t, proof)
+	require.NoError(err)
+	require.NotNil(proof)
 
-	require.NoError(t, proof.Verify(context.Background(), dbClone, nil, nil, endRoot))
+	require.NoError(proof.Verify(context.Background(), dbClone, nil, nil, endRoot))
 
-	require.NoError(t, dbClone.CommitChangeProof(context.Background(), proof))
+	require.NoError(dbClone.CommitChangeProof(context.Background(), proof))
 
 	newRoot, err := dbClone.GetMerkleRoot(context.Background())
-	require.NoError(t, err)
-	require.Equal(t, endRoot, newRoot)
+	require.NoError(err)
+	require.Equal(endRoot, newRoot)
 
 	proof, err = db.GetChangeProof(context.Background(), startRoot, endRoot, []byte("key20"), []byte("key30"), 50)
-	require.NoError(t, err)
-	require.NotNil(t, proof)
+	require.NoError(err)
+	require.NotNil(proof)
 
-	require.NoError(t, proof.Verify(context.Background(), dbClone, []byte("key20"), []byte("key30"), db.getMerkleRoot()))
+	require.NoError(proof.Verify(context.Background(), dbClone, []byte("key20"), []byte("key30"), db.getMerkleRoot()))
 }
 
 func Test_ChangeProof_Verify_Bad_Data(t *testing.T) {
@@ -1008,29 +1050,31 @@ func Test_ChangeProof_Verify_Bad_Data(t *testing.T) {
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
+			require := require.New(t)
+
 			db, err := getBasicDB()
-			require.NoError(t, err)
+			require.NoError(err)
 
 			startRoot, err := db.GetMerkleRoot(context.Background())
-			require.NoError(t, err)
+			require.NoError(err)
 
 			writeBasicBatch(t, db)
 
 			endRoot, err := db.GetMerkleRoot(context.Background())
-			require.NoError(t, err)
+			require.NoError(err)
 
 			// create a second db that will be synced to the first db
 			dbClone, err := getBasicDB()
-			require.NoError(t, err)
+			require.NoError(err)
 
 			proof, err := db.GetChangeProof(context.Background(), startRoot, endRoot, []byte{2}, []byte{3, 0}, 50)
-			require.NoError(t, err)
-			require.NotNil(t, proof)
+			require.NoError(err)
+			require.NotNil(proof)
 
 			tt.malform(proof)
 
 			err = proof.Verify(context.Background(), dbClone, []byte{2}, []byte{3, 0}, db.getMerkleRoot())
-			require.ErrorIs(t, err, tt.expectedErr)
+			require.ErrorIs(err, tt.expectedErr)
 		})
 	}
 }
@@ -1244,10 +1288,12 @@ func Test_ChangeProof_Syntactic_Verify(t *testing.T) {
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
+			require := require.New(t)
+
 			db, err := getBasicDB()
-			require.NoError(t, err)
+			require.NoError(err)
 
 			err = tt.proof.Verify(context.Background(), db, tt.start, tt.end, ids.Empty)
-			require.ErrorIs(t, err, tt.expectedErr)
+			require.ErrorIs(err, tt.expectedErr)
 		})
 	}
 }
diff --git a/x/merkledb/trie_test.go b/x/merkledb/trie_test.go
index 889798cd716..203cd64b4e4 100644
--- a/x/merkledb/trie_test.go
+++ b/x/merkledb/trie_test.go
@@ -5,6 +5,7 @@ package merkledb
 
 import (
 	"context"
+	"errors"
 	"math/rand"
 	"strconv"
 	"sync"
@@ -181,60 +182,64 @@ func TestTrieViewGetPathTo(t *testing.T) {
 }
 
 func Test_Trie_ViewOnCommitedView(t *testing.T) {
+	require := require.New(t)
+
 	dbTrie, err := getBasicDB()
-	require.NoError(t, err)
-	require.NotNil(t, dbTrie)
+	require.NoError(err)
+	require.NotNil(dbTrie)
 
 	committedTrie, err := dbTrie.NewView()
-	require.NoError(t, err)
-	require.NoError(t, committedTrie.Insert(context.Background(), []byte{0}, []byte{0}))
+	require.NoError(err)
+	require.NoError(committedTrie.Insert(context.Background(), []byte{0}, []byte{0}))
 
-	require.NoError(t, committedTrie.CommitToDB(context.Background()))
+	require.NoError(committedTrie.CommitToDB(context.Background()))
 
 	newView, err := committedTrie.NewView()
-	require.NoError(t, err)
+	require.NoError(err)
 
-	require.NoError(t, newView.Insert(context.Background(), []byte{1}, []byte{1}))
-	require.NoError(t, newView.CommitToDB(context.Background()))
+	require.NoError(newView.Insert(context.Background(), []byte{1}, []byte{1}))
+	require.NoError(newView.CommitToDB(context.Background()))
 
 	val0, err := dbTrie.GetValue(context.Background(), []byte{0})
-	require.NoError(t, err)
-	require.Equal(t, []byte{0}, val0)
+	require.NoError(err)
+	require.Equal([]byte{0}, val0)
 
 	val1, err := dbTrie.GetValue(context.Background(), []byte{1})
-	require.NoError(t, err)
-	require.Equal(t, []byte{1}, val1)
+	require.NoError(err)
+	require.Equal([]byte{1}, val1)
 }
 
 func Test_Trie_Partial_Commit_Leaves_Valid_Tries(t *testing.T) {
+	require := require.New(t)
+
 	dbTrie, err := getBasicDB()
-	require.NoError(t, err)
-	require.NotNil(t, dbTrie)
+	require.NoError(err)
+	require.NotNil(dbTrie)
 
 	trie2, err := dbTrie.NewView()
-	require.NoError(t, err)
-	require.NoError(t, trie2.Insert(context.Background(), []byte("key"), []byte("value")))
+	require.NoError(err)
+	require.NoError(trie2.Insert(context.Background(), []byte("key"), []byte("value")))
 
 	trie3, err := trie2.NewView()
-	require.NoError(t, err)
-	require.NoError(t, trie3.Insert(context.Background(), []byte("key1"), []byte("value1")))
+	require.NoError(err)
+	require.NoError(trie3.Insert(context.Background(), []byte("key1"), []byte("value1")))
 
 	trie4, err := trie3.NewView()
-	require.NoError(t, err)
-	require.NoError(t, trie4.Insert(context.Background(), []byte("key2"), []byte("value2")))
+	require.NoError(err)
+	require.NoError(trie4.Insert(context.Background(), []byte("key2"), []byte("value2")))
 
 	trie5, err := trie4.NewView()
-	require.NoError(t, err)
-	require.NoError(t, trie5.Insert(context.Background(), []byte("key3"), []byte("value3")))
+	require.NoError(err)
+	require.NoError(trie5.Insert(context.Background(), []byte("key3"), []byte("value3")))
 
-	require.NoError(t, trie3.CommitToDB(context.Background()))
+	require.NoError(trie3.CommitToDB(context.Background()))
 
 	root, err := trie3.GetMerkleRoot(context.Background())
-	require.NoError(t, err)
+	require.NoError(err)
 
 	dbRoot, err := dbTrie.GetMerkleRoot(context.Background())
-	require.NoError(t, err)
+	require.NoError(err)
 
-	require.Equal(t, root, dbRoot)
+	require.Equal(root, dbRoot)
 }
 
 func Test_Trie_WriteToDB(t *testing.T) {
@@ -291,210 +296,228 @@ func Test_Trie_InsertAndRetrieve(t *testing.T) {
 }
 
 func Test_Trie_Overwrite(t *testing.T) {
+	require := require.New(t)
+
 	dbTrie, err := getBasicDB()
-	require.NoError(t, err)
-	require.NotNil(t, dbTrie)
+	require.NoError(err)
+	require.NotNil(dbTrie)
 	trie := Trie(dbTrie)
 
-	require.NoError(t, trie.Insert(context.Background(), []byte("key"), []byte("value0")))
+	require.NoError(trie.Insert(context.Background(), []byte("key"), []byte("value0")))
 
 	value, err := getNodeValue(trie, "key")
-	require.NoError(t, err)
-	require.Equal(t, []byte("value0"), value)
+	require.NoError(err)
+	require.Equal([]byte("value0"), value)
 
-	require.NoError(t, trie.Insert(context.Background(), []byte("key"), []byte("value1")))
+	require.NoError(trie.Insert(context.Background(), []byte("key"), []byte("value1")))
 
 	value, err = getNodeValue(trie, "key")
-	require.NoError(t, err)
-	require.Equal(t, []byte("value1"), value)
+	require.NoError(err)
+	require.Equal([]byte("value1"), value)
 }
 
 func Test_Trie_Delete(t *testing.T) {
+	require := require.New(t)
+
 	dbTrie, err := getBasicDB()
-	require.NoError(t, err)
-	require.NotNil(t, dbTrie)
+	require.NoError(err)
+	require.NotNil(dbTrie)
 	trie := Trie(dbTrie)
 
-	require.NoError(t, trie.Insert(context.Background(), []byte("key"), []byte("value0")))
+	require.NoError(trie.Insert(context.Background(), []byte("key"), []byte("value0")))
 
 	value, err := getNodeValue(trie, "key")
-	require.NoError(t, err)
-	require.Equal(t, []byte("value0"), value)
+	require.NoError(err)
+	require.Equal([]byte("value0"), value)
 
-	require.NoError(t, trie.Remove(context.Background(), []byte("key")))
+	require.NoError(trie.Remove(context.Background(), []byte("key")))
 
 	value, err = getNodeValue(trie, "key")
-	require.ErrorIs(t, err, database.ErrNotFound)
-	require.Nil(t, value)
+	require.ErrorIs(err, database.ErrNotFound)
+	require.Nil(value)
 }
 
 func Test_Trie_DeleteMissingKey(t *testing.T) {
+	require := require.New(t)
+
 	trie, err := getBasicDB()
-	require.NoError(t, err)
-	require.NotNil(t, trie)
+	require.NoError(err)
+	require.NotNil(trie)
 
-	require.NoError(t, trie.Remove(context.Background(), []byte("key")))
+	require.NoError(trie.Remove(context.Background(), []byte("key")))
 }
 
 func Test_Trie_ExpandOnKeyPath(t *testing.T) {
+	require := require.New(t)
+
 	dbTrie, err := getBasicDB()
-	require.NoError(t, err)
-	require.NotNil(t, dbTrie)
+	require.NoError(err)
+	require.NotNil(dbTrie)
 	trie := Trie(dbTrie)
 
-	require.NoError(t, trie.Insert(context.Background(), []byte("key"), []byte("value0")))
+	require.NoError(trie.Insert(context.Background(), []byte("key"), []byte("value0")))
 
 	value, err := getNodeValue(trie, "key")
-	require.NoError(t, err)
-	require.Equal(t, []byte("value0"), value)
+	require.NoError(err)
+	require.Equal([]byte("value0"), value)
 
-	require.NoError(t, trie.Insert(context.Background(), []byte("key1"), []byte("value1")))
+	require.NoError(trie.Insert(context.Background(), []byte("key1"), []byte("value1")))
 
 	value, err = getNodeValue(trie, "key")
-	require.NoError(t, err)
-	require.Equal(t, []byte("value0"), value)
+	require.NoError(err)
+	require.Equal([]byte("value0"), value)
 
 	value, err = getNodeValue(trie, "key1")
-	require.NoError(t, err)
-	require.Equal(t, []byte("value1"), value)
+	require.NoError(err)
+	require.Equal([]byte("value1"), value)
 
-	require.NoError(t, trie.Insert(context.Background(), []byte("key12"), []byte("value12")))
+	require.NoError(trie.Insert(context.Background(), []byte("key12"), []byte("value12")))
 
 	value, err = getNodeValue(trie, "key")
-	require.NoError(t, err)
-	require.Equal(t, []byte("value0"), value)
+	require.NoError(err)
+	require.Equal([]byte("value0"), value)
 
 	value, err = getNodeValue(trie, "key1")
-	require.NoError(t, err)
-	require.Equal(t, []byte("value1"), value)
+	require.NoError(err)
+	require.Equal([]byte("value1"), value)
 
 	value, err = getNodeValue(trie, "key12")
-	require.NoError(t, err)
-	require.Equal(t, []byte("value12"), value)
+	require.NoError(err)
+	require.Equal([]byte("value12"), value)
 }
 
 func Test_Trie_CompressedPaths(t *testing.T) {
+	require := require.New(t)
+
 	dbTrie, err := getBasicDB()
-	require.NoError(t, err)
-	require.NotNil(t, dbTrie)
+	require.NoError(err)
+	require.NotNil(dbTrie)
 	trie := Trie(dbTrie)
 
-	require.NoError(t, trie.Insert(context.Background(), []byte("key12"), []byte("value12")))
+	require.NoError(trie.Insert(context.Background(), []byte("key12"), []byte("value12")))
 
 	value, err := getNodeValue(trie, "key12")
-	require.NoError(t, err)
-	require.Equal(t, []byte("value12"), value)
+	require.NoError(err)
+	require.Equal([]byte("value12"), value)
 
-	require.NoError(t, trie.Insert(context.Background(), []byte("key1"), []byte("value1")))
+	require.NoError(trie.Insert(context.Background(), []byte("key1"), []byte("value1")))
 
 	value, err = getNodeValue(trie, "key12")
-	require.NoError(t, err)
-	require.Equal(t, []byte("value12"), value)
+	require.NoError(err)
+	require.Equal([]byte("value12"), value)
 
 	value, err = getNodeValue(trie, "key1")
-	require.NoError(t, err)
-	require.Equal(t, []byte("value1"), value)
+	require.NoError(err)
+	require.Equal([]byte("value1"), value)
 
-	require.NoError(t, trie.Insert(context.Background(), []byte("key"), []byte("value")))
+	require.NoError(trie.Insert(context.Background(), []byte("key"), []byte("value")))
 
 	value, err = getNodeValue(trie, "key12")
-	require.NoError(t, err)
-	require.Equal(t, []byte("value12"), value)
+	require.NoError(err)
+	require.Equal([]byte("value12"), value)
 
 	value, err = getNodeValue(trie, "key1")
-	require.NoError(t, err)
-	require.Equal(t, []byte("value1"), value)
+	require.NoError(err)
+	require.Equal([]byte("value1"), value)
 
 	value, err = getNodeValue(trie, "key")
-	require.NoError(t, err)
-	require.Equal(t, []byte("value"), value)
+	require.NoError(err)
+	require.Equal([]byte("value"), value)
 }
 
 func Test_Trie_SplitBranch(t *testing.T) {
+	require := require.New(t)
+
 	dbTrie, err := getBasicDB()
-	require.NoError(t, err)
-	require.NotNil(t, dbTrie)
+	require.NoError(err)
+	require.NotNil(dbTrie)
 	trie := Trie(dbTrie)
 
 	// force a new node to generate with common prefix "key1" and have these two nodes as children
-	require.NoError(t, trie.Insert(context.Background(), []byte("key12"), []byte("value12")))
-	require.NoError(t, trie.Insert(context.Background(), []byte("key134"), []byte("value134")))
+	require.NoError(trie.Insert(context.Background(), []byte("key12"), []byte("value12")))
+	require.NoError(trie.Insert(context.Background(), []byte("key134"), []byte("value134")))
 
 	value, err := getNodeValue(trie, "key12")
-	require.NoError(t, err)
-	require.Equal(t, []byte("value12"), value)
+	require.NoError(err)
+	require.Equal([]byte("value12"), value)
 
 	value, err = getNodeValue(trie, "key134")
-	require.NoError(t, err)
-	require.Equal(t, []byte("value134"), value)
+	require.NoError(err)
+	require.Equal([]byte("value134"), value)
 }
 
 func Test_Trie_HashCountOnBranch(t *testing.T) {
+	require := require.New(t)
+
 	dbTrie, err := getBasicDB()
-	require.NoError(t, err)
-	require.NotNil(t, dbTrie)
+	require.NoError(err)
+	require.NotNil(dbTrie)
 	trie := Trie(dbTrie)
 
 	// force a new node to generate with common prefix "key1" and have these two nodes as children
-	require.NoError(t, trie.Insert(context.Background(), []byte("key12"), []byte("value12")))
+	require.NoError(trie.Insert(context.Background(), []byte("key12"), []byte("value12")))
 	oldCount := dbTrie.metrics.(*mockMetrics).hashCount
-	require.NoError(t, trie.Insert(context.Background(), []byte("key134"), []byte("value134")))
+	require.NoError(trie.Insert(context.Background(), []byte("key134"), []byte("value134")))
 
 	// only hashes the new branch node, the new child node, and root
 	// shouldn't hash the existing node
-	require.Equal(t, oldCount+3, dbTrie.metrics.(*mockMetrics).hashCount)
+	require.Equal(oldCount+3, dbTrie.metrics.(*mockMetrics).hashCount)
}
 
 func Test_Trie_HashCountOnDelete(t *testing.T) {
+	require := require.New(t)
+
 	trie, err := getBasicDB()
-	require.NoError(t, err)
-	require.NotNil(t, trie)
+	require.NoError(err)
+	require.NotNil(trie)
 
-	require.NoError(t, trie.Insert(context.Background(), []byte("k"), []byte("value0")))
-	require.NoError(t, trie.Insert(context.Background(), []byte("ke"), []byte("value1")))
-	require.NoError(t, trie.Insert(context.Background(), []byte("key"), []byte("value2")))
-	require.NoError(t, trie.Insert(context.Background(), []byte("key1"), []byte("value3")))
-	require.NoError(t, trie.Insert(context.Background(), []byte("key2"), []byte("value4")))
+	require.NoError(trie.Insert(context.Background(), []byte("k"), []byte("value0")))
+	require.NoError(trie.Insert(context.Background(), []byte("ke"), []byte("value1")))
+	require.NoError(trie.Insert(context.Background(), []byte("key"), []byte("value2")))
+	require.NoError(trie.Insert(context.Background(), []byte("key1"), []byte("value3")))
+	require.NoError(trie.Insert(context.Background(), []byte("key2"), []byte("value4")))
 
 	oldCount := trie.metrics.(*mockMetrics).hashCount
 
 	// delete the middle values
 	view, err := trie.NewView()
-	require.NoError(t, err)
-	require.NoError(t, view.Remove(context.Background(), []byte("k")))
-	require.NoError(t, view.Remove(context.Background(), []byte("ke")))
-	require.NoError(t, view.Remove(context.Background(), []byte("key")))
-	require.NoError(t, view.CommitToDB(context.Background()))
+	require.NoError(err)
+	require.NoError(view.Remove(context.Background(), []byte("k")))
+	require.NoError(view.Remove(context.Background(), []byte("ke")))
+	require.NoError(view.Remove(context.Background(), []byte("key")))
+	require.NoError(view.CommitToDB(context.Background()))
 
 	// the root is the only updated node so only one new hash
-	require.Equal(t, oldCount+1, trie.metrics.(*mockMetrics).hashCount)
+	require.Equal(oldCount+1, trie.metrics.(*mockMetrics).hashCount)
 }
 
 func Test_Trie_NoExistingResidual(t *testing.T) {
+	require := require.New(t)
+
 	dbTrie, err := getBasicDB()
-	require.NoError(t, err)
-	require.NotNil(t, dbTrie)
+	require.NoError(err)
+	require.NotNil(dbTrie)
 	trie := Trie(dbTrie)
 
-	require.NoError(t, trie.Insert(context.Background(), []byte("k"), []byte("1")))
-	require.NoError(t, trie.Insert(context.Background(), []byte("ke"), []byte("2")))
-	require.NoError(t, trie.Insert(context.Background(), []byte("key1"), []byte("3")))
-	require.NoError(t, trie.Insert(context.Background(), []byte("key123"), []byte("4")))
[]byte("key123"), []byte("4"))) + require.NoError(trie.Insert(context.Background(), []byte("k"), []byte("1"))) + require.NoError(trie.Insert(context.Background(), []byte("ke"), []byte("2"))) + require.NoError(trie.Insert(context.Background(), []byte("key1"), []byte("3"))) + require.NoError(trie.Insert(context.Background(), []byte("key123"), []byte("4"))) value, err := getNodeValue(trie, "k") - require.NoError(t, err) - require.Equal(t, []byte("1"), value) + require.NoError(err) + require.Equal([]byte("1"), value) value, err = getNodeValue(trie, "ke") - require.NoError(t, err) - require.Equal(t, []byte("2"), value) + require.NoError(err) + require.Equal([]byte("2"), value) value, err = getNodeValue(trie, "key1") - require.NoError(t, err) - require.Equal(t, []byte("3"), value) + require.NoError(err) + require.Equal([]byte("3"), value) value, err = getNodeValue(trie, "key123") - require.NoError(t, err) - require.Equal(t, []byte("4"), value) + require.NoError(err) + require.Equal([]byte("4"), value) } func Test_Trie_CommitChanges(t *testing.T) { @@ -621,131 +644,141 @@ func Test_Trie_BatchApply(t *testing.T) { } func Test_Trie_ChainDeletion(t *testing.T) { + require := require.New(t) + trie, err := getBasicDB() - require.NoError(t, err) - require.NotNil(t, trie) + require.NoError(err) + require.NotNil(trie) newTrie, err := trie.NewView() - require.NoError(t, err) + require.NoError(err) - require.NoError(t, newTrie.Insert(context.Background(), []byte("k"), []byte("value0"))) - require.NoError(t, newTrie.Insert(context.Background(), []byte("ke"), []byte("value1"))) - require.NoError(t, newTrie.Insert(context.Background(), []byte("key"), []byte("value2"))) - require.NoError(t, newTrie.Insert(context.Background(), []byte("key1"), []byte("value3"))) - require.NoError(t, newTrie.(*trieView).calculateNodeIDs(context.Background())) + require.NoError(newTrie.Insert(context.Background(), []byte("k"), []byte("value0"))) + require.NoError(newTrie.Insert(context.Background(), []byte("ke"), []byte("value1"))) + require.NoError(newTrie.Insert(context.Background(), []byte("key"), []byte("value2"))) + require.NoError(newTrie.Insert(context.Background(), []byte("key1"), []byte("value3"))) + require.NoError(newTrie.(*trieView).calculateNodeIDs(context.Background())) root, err := newTrie.getEditableNode(EmptyPath) - require.NoError(t, err) - require.Len(t, root.children, 1) - - require.NoError(t, newTrie.Remove(context.Background(), []byte("k"))) - require.NoError(t, newTrie.Remove(context.Background(), []byte("ke"))) - require.NoError(t, newTrie.Remove(context.Background(), []byte("key"))) - require.NoError(t, newTrie.Remove(context.Background(), []byte("key1"))) - require.NoError(t, newTrie.(*trieView).calculateNodeIDs(context.Background())) + require.NoError(err) + require.Len(root.children, 1) + + require.NoError(newTrie.Remove(context.Background(), []byte("k"))) + require.NoError(newTrie.Remove(context.Background(), []byte("ke"))) + require.NoError(newTrie.Remove(context.Background(), []byte("key"))) + require.NoError(newTrie.Remove(context.Background(), []byte("key1"))) + require.NoError(newTrie.(*trieView).calculateNodeIDs(context.Background())) root, err = newTrie.getEditableNode(EmptyPath) - require.NoError(t, err) + require.NoError(err) // since all values have been deleted, the nodes should have been cleaned up - require.Empty(t, root.children) + require.Empty(root.children) } func Test_Trie_Invalidate_Children_On_Edits(t *testing.T) { + require := require.New(t) + dbTrie, err := getBasicDB() - 
-	require.NoError(t, err)
-	require.NotNil(t, dbTrie)
+	require.NoError(err)
+	require.NotNil(dbTrie)
 
 	trie, err := dbTrie.NewView()
-	require.NoError(t, err)
+	require.NoError(err)
 
 	childTrie1, err := trie.NewView()
-	require.NoError(t, err)
+	require.NoError(err)
 	childTrie2, err := trie.NewView()
-	require.NoError(t, err)
+	require.NoError(err)
 	childTrie3, err := trie.NewView()
-	require.NoError(t, err)
+	require.NoError(err)
 
-	require.False(t, childTrie1.(*trieView).isInvalid())
-	require.False(t, childTrie2.(*trieView).isInvalid())
-	require.False(t, childTrie3.(*trieView).isInvalid())
+	require.False(childTrie1.(*trieView).isInvalid())
+	require.False(childTrie2.(*trieView).isInvalid())
+	require.False(childTrie3.(*trieView).isInvalid())
 
-	require.NoError(t, trie.Insert(context.Background(), []byte{0}, []byte{0}))
+	require.NoError(trie.Insert(context.Background(), []byte{0}, []byte{0}))
 
-	require.True(t, childTrie1.(*trieView).isInvalid())
-	require.True(t, childTrie2.(*trieView).isInvalid())
-	require.True(t, childTrie3.(*trieView).isInvalid())
+	require.True(childTrie1.(*trieView).isInvalid())
+	require.True(childTrie2.(*trieView).isInvalid())
+	require.True(childTrie3.(*trieView).isInvalid())
 }
 
 func Test_Trie_Invalidate_Siblings_On_Commit(t *testing.T) {
+	require := require.New(t)
+
 	dbTrie, err := getBasicDB()
-	require.NoError(t, err)
-	require.NotNil(t, dbTrie)
+	require.NoError(err)
+	require.NotNil(dbTrie)
 
 	baseView, err := dbTrie.NewView()
-	require.NoError(t, err)
+	require.NoError(err)
 
 	viewToCommit, err := baseView.NewView()
-	require.NoError(t, err)
+	require.NoError(err)
 
 	sibling1, err := baseView.NewView()
-	require.NoError(t, err)
+	require.NoError(err)
 	sibling2, err := baseView.NewView()
-	require.NoError(t, err)
+	require.NoError(err)
 
-	require.False(t, sibling1.(*trieView).isInvalid())
-	require.False(t, sibling2.(*trieView).isInvalid())
+	require.False(sibling1.(*trieView).isInvalid())
+	require.False(sibling2.(*trieView).isInvalid())
 
-	require.NoError(t, viewToCommit.Insert(context.Background(), []byte{0}, []byte{0}))
-	require.NoError(t, viewToCommit.CommitToDB(context.Background()))
+	require.NoError(viewToCommit.Insert(context.Background(), []byte{0}, []byte{0}))
+	require.NoError(viewToCommit.CommitToDB(context.Background()))
 
-	require.True(t, sibling1.(*trieView).isInvalid())
-	require.True(t, sibling2.(*trieView).isInvalid())
-	require.False(t, viewToCommit.(*trieView).isInvalid())
+	require.True(sibling1.(*trieView).isInvalid())
+	require.True(sibling2.(*trieView).isInvalid())
+	require.False(viewToCommit.(*trieView).isInvalid())
 }
 
 func Test_Trie_NodeCollapse(t *testing.T) {
+	require := require.New(t)
+
 	dbTrie, err := getBasicDB()
-	require.NoError(t, err)
-	require.NotNil(t, dbTrie)
+	require.NoError(err)
+	require.NotNil(dbTrie)
 
 	trie, err := dbTrie.NewView()
-	require.NoError(t, err)
+	require.NoError(err)
 
-	require.NoError(t, trie.Insert(context.Background(), []byte("k"), []byte("value0")))
-	require.NoError(t, trie.Insert(context.Background(), []byte("ke"), []byte("value1")))
-	require.NoError(t, trie.Insert(context.Background(), []byte("key"), []byte("value2")))
-	require.NoError(t, trie.Insert(context.Background(), []byte("key1"), []byte("value3")))
-	require.NoError(t, trie.Insert(context.Background(), []byte("key2"), []byte("value4")))
+	require.NoError(trie.Insert(context.Background(), []byte("k"), []byte("value0")))
+	require.NoError(trie.Insert(context.Background(), []byte("ke"), []byte("value1")))
+	require.NoError(trie.Insert(context.Background(), []byte("key"), []byte("value2")))
[]byte("key"), []byte("value2"))) + require.NoError(trie.Insert(context.Background(), []byte("key1"), []byte("value3"))) + require.NoError(trie.Insert(context.Background(), []byte("key2"), []byte("value4"))) - require.NoError(t, trie.(*trieView).calculateNodeIDs(context.Background())) + require.NoError(trie.(*trieView).calculateNodeIDs(context.Background())) root, err := trie.getEditableNode(EmptyPath) - require.NoError(t, err) - require.Len(t, root.children, 1) + require.NoError(err) + require.Len(root.children, 1) root, err = trie.getEditableNode(EmptyPath) - require.NoError(t, err) - require.Len(t, root.children, 1) + require.NoError(err) + require.Len(root.children, 1) firstNode, err := trie.getEditableNode(root.getSingleChildPath()) - require.NoError(t, err) - require.Len(t, firstNode.children, 1) + require.NoError(err) + require.Len(firstNode.children, 1) // delete the middle values - require.NoError(t, trie.Remove(context.Background(), []byte("k"))) - require.NoError(t, trie.Remove(context.Background(), []byte("ke"))) - require.NoError(t, trie.Remove(context.Background(), []byte("key"))) + require.NoError(trie.Remove(context.Background(), []byte("k"))) + require.NoError(trie.Remove(context.Background(), []byte("ke"))) + require.NoError(trie.Remove(context.Background(), []byte("key"))) - require.NoError(t, trie.(*trieView).calculateNodeIDs(context.Background())) + require.NoError(trie.(*trieView).calculateNodeIDs(context.Background())) root, err = trie.getEditableNode(EmptyPath) - require.NoError(t, err) - require.Len(t, root.children, 1) + require.NoError(err) + require.Len(root.children, 1) firstNode, err = trie.getEditableNode(root.getSingleChildPath()) - require.NoError(t, err) - require.Len(t, firstNode.children, 2) + require.NoError(err) + require.Len(firstNode.children, 2) } func Test_Trie_MultipleStates(t *testing.T) { randCount := int64(0) for _, commitApproach := range []string{"never", "before", "after"} { t.Run(commitApproach, func(t *testing.T) { + require := require.New(t) + r := rand.New(rand.NewSource(randCount)) // #nosec G404 randCount++ rdb := memdb.New() @@ -759,38 +792,38 @@ func Test_Trie_MultipleStates(t *testing.T) { NodeCacheSize: 100, }, ) - require.NoError(t, err) + require.NoError(err) defer db.Close() initialSet := 1000 // Populate initial set of keys root, err := db.NewView() - require.NoError(t, err) + require.NoError(err) kv := [][]byte{} for i := 0; i < initialSet; i++ { k := []byte(strconv.Itoa(i)) kv = append(kv, k) - require.NoError(t, root.Insert(context.Background(), k, hashing.ComputeHash256(k))) + require.NoError(root.Insert(context.Background(), k, hashing.ComputeHash256(k))) } // Get initial root _, err = root.GetMerkleRoot(context.Background()) - require.NoError(t, err) + require.NoError(err) if commitApproach == "before" { - require.NoError(t, root.CommitToDB(context.Background())) + require.NoError(root.CommitToDB(context.Background())) } // Populate additional states concurrentStates := []Trie{} for i := 0; i < 5; i++ { newState, err := root.NewView() - require.NoError(t, err) + require.NoError(err) concurrentStates = append(concurrentStates, newState) } if commitApproach == "after" { - require.NoError(t, root.CommitToDB(context.Background())) + require.NoError(root.CommitToDB(context.Background())) } // Process ops @@ -800,7 +833,7 @@ func Test_Trie_MultipleStates(t *testing.T) { // New Key for _, state := range concurrentStates { k := []byte(strconv.Itoa(newStart)) - require.NoError(t, state.Insert(context.Background(), k, 
+						require.NoError(state.Insert(context.Background(), k, hashing.ComputeHash256(k)))
 					}
 					newStart++
 				} else {
@@ -809,13 +842,13 @@ func Test_Trie_MultipleStates(t *testing.T) {
 					var pastV []byte
 					for _, state := range concurrentStates {
 						v, err := state.GetValue(context.Background(), selectedKey)
-						require.NoError(t, err)
+						require.NoError(err)
 						if pastV == nil {
 							pastV = v
 						} else {
-							require.Equal(t, pastV, v, "lookup mismatch")
+							require.Equal(pastV, v)
 						}
-						require.NoError(t, state.Insert(context.Background(), selectedKey, hashing.ComputeHash256(v)))
+						require.NoError(state.Insert(context.Background(), selectedKey, hashing.ComputeHash256(v)))
 					}
 				}
 			}
@@ -824,11 +857,11 @@ func Test_Trie_MultipleStates(t *testing.T) {
 			var pastRoot ids.ID
 			for _, state := range concurrentStates {
 				mroot, err := state.GetMerkleRoot(context.Background())
-				require.NoError(t, err)
+				require.NoError(err)
 				if pastRoot == ids.Empty {
 					pastRoot = mroot
 				} else {
-					require.Equal(t, pastRoot, mroot, "root mismatch")
+					require.Equal(pastRoot, mroot, "root mismatch")
 				}
 			}
 		})
@@ -1111,103 +1144,107 @@ func TestTrieViewInvalidChildrenExcept(t *testing.T) {
 }
 
 func Test_Trie_CommitToParentView_Concurrent(t *testing.T) {
+	require := require.New(t)
+
 	for i := 0; i < 5000; i++ {
 		dbTrie, err := getBasicDB()
-		require.NoError(t, err)
-		require.NotNil(t, dbTrie)
+		require.NoError(err)
+		require.NotNil(dbTrie)
 
 		baseView, err := dbTrie.NewView()
-		require.NoError(t, err)
+		require.NoError(err)
 
 		parentView, err := baseView.NewView()
-		require.NoError(t, err)
-		require.NoError(t, parentView.Insert(context.Background(), []byte{0}, []byte{0}))
+		require.NoError(err)
+		require.NoError(parentView.Insert(context.Background(), []byte{0}, []byte{0}))
 
 		childView1, err := parentView.NewView()
-		require.NoError(t, err)
-		require.NoError(t, childView1.Insert(context.Background(), []byte{1}, []byte{1}))
+		require.NoError(err)
+		require.NoError(childView1.Insert(context.Background(), []byte{1}, []byte{1}))
 
 		childView2, err := childView1.NewView()
-		require.NoError(t, err)
-		require.NoError(t, childView2.Insert(context.Background(), []byte{2}, []byte{2}))
+		require.NoError(err)
+		require.NoError(childView2.Insert(context.Background(), []byte{2}, []byte{2}))
 
 		var wg sync.WaitGroup
 		wg.Add(3)
 		go func() {
 			defer wg.Done()
-			require.NoError(t, parentView.CommitToParent(context.Background()))
+			require.NoError(parentView.CommitToParent(context.Background()))
 		}()
 
 		go func() {
 			defer wg.Done()
-			require.NoError(t, childView1.CommitToParent(context.Background()))
+			require.NoError(childView1.CommitToParent(context.Background()))
 		}()
 
 		go func() {
 			defer wg.Done()
-			require.NoError(t, childView2.CommitToParent(context.Background()))
+			require.NoError(childView2.CommitToParent(context.Background()))
 		}()
 
 		wg.Wait()
 
 		val0, err := baseView.GetValue(context.Background(), []byte{0})
-		require.NoError(t, err)
-		require.Equal(t, []byte{0}, val0)
+		require.NoError(err)
+		require.Equal([]byte{0}, val0)
 
 		val1, err := baseView.GetValue(context.Background(), []byte{1})
-		require.NoError(t, err)
-		require.Equal(t, []byte{1}, val1)
+		require.NoError(err)
+		require.Equal([]byte{1}, val1)
 
 		val2, err := baseView.GetValue(context.Background(), []byte{2})
-		require.NoError(t, err)
-		require.Equal(t, []byte{2}, val2)
+		require.NoError(err)
+		require.Equal([]byte{2}, val2)
 	}
 }
 
 func Test_Trie_CommitToParentDB_Concurrent(t *testing.T) {
+	require := require.New(t)
+
 	for i := 0; i < 5000; i++ {
 		dbTrie, err := getBasicDB()
-		require.NoError(t, err)
-		require.NotNil(t, dbTrie)
+		require.NoError(err)
+		require.NotNil(dbTrie)
 
 		parentView, err := dbTrie.NewView()
-		require.NoError(t, err)
-		require.NoError(t, parentView.Insert(context.Background(), []byte{0}, []byte{0}))
+		require.NoError(err)
+		require.NoError(parentView.Insert(context.Background(), []byte{0}, []byte{0}))
 
 		childView1, err := parentView.NewView()
-		require.NoError(t, err)
-		require.NoError(t, childView1.Insert(context.Background(), []byte{1}, []byte{1}))
+		require.NoError(err)
+		require.NoError(childView1.Insert(context.Background(), []byte{1}, []byte{1}))
 
 		childView2, err := childView1.NewView()
-		require.NoError(t, err)
-		require.NoError(t, childView2.Insert(context.Background(), []byte{2}, []byte{2}))
+		require.NoError(err)
+		require.NoError(childView2.Insert(context.Background(), []byte{2}, []byte{2}))
 
 		var wg sync.WaitGroup
 		wg.Add(3)
 		go func() {
 			defer wg.Done()
-			require.NoError(t, parentView.CommitToParent(context.Background()))
+			require.NoError(parentView.CommitToParent(context.Background()))
 		}()
 
 		go func() {
 			defer wg.Done()
-			require.NoError(t, childView1.CommitToParent(context.Background()))
+			require.NoError(childView1.CommitToParent(context.Background()))
 		}()
 
 		go func() {
 			defer wg.Done()
-			require.NoError(t, childView2.CommitToParent(context.Background()))
+			require.NoError(childView2.CommitToParent(context.Background()))
 		}()
 
 		wg.Wait()
 
 		val0, err := dbTrie.GetValue(context.Background(), []byte{0})
-		require.NoError(t, err)
-		require.Equal(t, []byte{0}, val0)
+		require.NoError(err)
+		require.Equal([]byte{0}, val0)
 
 		val1, err := dbTrie.GetValue(context.Background(), []byte{1})
-		require.NoError(t, err)
-		require.Equal(t, []byte{1}, val1)
+		require.NoError(err)
+		require.Equal([]byte{1}, val1)
 
 		val2, err := dbTrie.GetValue(context.Background(), []byte{2})
-		require.NoError(t, err)
-		require.Equal(t, []byte{2}, val2)
+		require.NoError(err)
+		require.Equal([]byte{2}, val2)
 	}
 }
 
@@ -1233,7 +1270,7 @@ func Test_Trie_ConcurrentReadWrite(t *testing.T) {
 		func() bool {
 			value, err := newTrie.GetValue(context.Background(), []byte("key"))
 
-			if err == database.ErrNotFound {
+			if errors.Is(err, database.ErrNotFound) {
 				return false
 			}
diff --git a/x/sync/client_test.go b/x/sync/client_test.go
index db6e2f590b6..d2f132021d6 100644
--- a/x/sync/client_test.go
+++ b/x/sync/client_test.go
@@ -523,11 +523,10 @@ func TestGetChangeProof(t *testing.T) {
 			require := require.New(t)
 
 			proof, err := sendChangeRequest(t, trieDB, verificationDB, test.request, 1, test.modifyResponse)
+			require.ErrorIs(err, test.expectedErr)
 			if test.expectedErr != nil {
-				require.ErrorIs(err, test.expectedErr)
 				return
 			}
-			require.NoError(err)
 			if test.expectedResponseLen > 0 {
 				require.LessOrEqual(len(proof.KeyChanges), test.expectedResponseLen)
 			}
diff --git a/x/sync/network_server_test.go b/x/sync/network_server_test.go
index 085e8ed1d35..29dcfafe2e1 100644
--- a/x/sync/network_server_test.go
+++ b/x/sync/network_server_test.go
@@ -115,11 +115,10 @@ func Test_Server_GetRangeProof(t *testing.T) {
 			).AnyTimes()
 			handler := NewNetworkServer(sender, smallTrieDB, logging.NoLog{})
 			err := handler.HandleRangeProofRequest(context.Background(), test.nodeID, 0, test.request)
+			require.ErrorIs(err, test.expectedErr)
 			if test.expectedErr != nil {
-				require.ErrorIs(err, test.expectedErr)
 				return
 			}
-			require.NoError(err)
 			if test.proofNil {
 				require.Nil(proofResult)
 				return
@@ -262,11 +261,10 @@ func Test_Server_GetChangeProof(t *testing.T) {
 			).AnyTimes()
 			handler := NewNetworkServer(sender, trieDB, logging.NoLog{})
 			err := handler.HandleChangeProofRequest(context.Background(), test.nodeID, 0, test.request)
+			require.ErrorIs(err, test.expectedErr)
 			if test.expectedErr != nil {
-				require.ErrorIs(err, test.expectedErr)
 				return
 			}
-			require.NoError(err)
 			if test.proofNil {
 				require.Nil(proofResult)
 				return
diff --git a/x/sync/syncworkheap_test.go b/x/sync/syncworkheap_test.go
index 03c0fbb9e30..7a68ca35fa7 100644
--- a/x/sync/syncworkheap_test.go
+++ b/x/sync/syncworkheap_test.go
@@ -78,11 +78,8 @@ func Test_SyncWorkHeap_Heap_Methods(t *testing.T) {
 	item1.workItem.priority = lowPriority
 	require.True(h.Less(0, 1))
 
-	gotItem = h.Pop().(*heapItem)
-	require.Equal(item1, gotItem)
-
-	gotItem = h.Pop().(*heapItem)
-	require.Equal(item2, gotItem)
+	require.Equal(item1, h.Pop().(*heapItem))
+	require.Equal(item2, h.Pop().(*heapItem))
 
 	require.Zero(h.Len())
 	require.Empty(h.priorityHeap)
@@ -128,14 +125,10 @@ func Test_SyncWorkHeap_Insert_GetWork(t *testing.T) {
 	require.Equal([]*syncWorkItem{item1, item2, item3}, got)
 
 	// Ensure priorities are in right order.
-	gotItem := h.GetWork()
-	require.Equal(item3, gotItem)
-	gotItem = h.GetWork()
-	require.Equal(item2, gotItem)
-	gotItem = h.GetWork()
-	require.Equal(item1, gotItem)
-	gotItem = h.GetWork()
-	require.Nil(gotItem)
+	require.Equal(item3, h.GetWork())
+	require.Equal(item2, h.GetWork())
+	require.Equal(item1, h.GetWork())
+	require.Nil(h.GetWork())
 
 	require.Zero(h.Len())
 }

From 13082797be1bc53abcdb20d3fd27d9e4de8aadb3 Mon Sep 17 00:00:00 2001
From: dhrubabasu <7675102+dhrubabasu@users.noreply.github.com>
Date: Sun, 14 May 2023 13:05:21 -0400
Subject: [PATCH 44/79] Improve `vm/` tests with `require`

---
 vms/avm/blocks/builder/builder_test.go        |   3 +-
 vms/avm/fx_test.go                            |  12 +-
 vms/avm/index_test.go                         | 194 ++---
 vms/avm/service_test.go                       | 707 +++++++-----------
 vms/avm/state_test.go                         |  60 +-
 vms/avm/states/state_test.go                  |  16 +-
 vms/avm/static_service_test.go                |  17 +-
 .../txs/executor/syntactic_verifier_test.go   |  20 +-
 vms/avm/txs/initial_state_test.go             |  91 +--
 vms/avm/txs/mempool/mempool_test.go           |   6 +-
 vms/avm/txs/operation_test.go                 |  48 +-
 vms/avm/vm_benchmark_test.go                  |  35 +-
 vms/avm/vm_test.go                            | 632 ++++++----------
 vms/avm/wallet_service_test.go                |  50 +-
 vms/components/avax/addresses.go              |  10 +-
 vms/components/avax/asset_test.go             |  39 +-
 vms/components/avax/metadata_test.go          |  16 +-
 vms/components/avax/transferables_test.go     | 141 ++--
 vms/components/avax/utxo_fetching_test.go     |  34 +-
 vms/components/avax/utxo_id_test.go           |  47 +-
 vms/components/avax/utxo_state_test.go        |  17 +-
 vms/components/avax/utxo_test.go              |  47 +-
 vms/components/chain/state_test.go            | 391 ++++------
 vms/components/verify/verification_test.go    |  13 +-
 vms/nftfx/credential_test.go                  |   7 +-
 vms/nftfx/factory_test.go                     |  12 +-
 vms/nftfx/fx_test.go                          | 174 ++---
 vms/nftfx/mint_operation_test.go              |  26 +-
 vms/nftfx/mint_output_test.go                 |   7 +-
 vms/nftfx/transfer_operation_test.go          |  21 +-
 vms/nftfx/transfer_output_test.go             |  22 +-
 vms/platformvm/api/static_service_test.go     |   9 +-
 vms/platformvm/blocks/builder/builder_test.go |   7 +-
 vms/platformvm/blocks/builder/helpers_test.go |  90 +--
 vms/platformvm/blocks/executor/block_test.go  |   2 +-
 vms/platformvm/health_test.go                 |   8 +-
 vms/platformvm/reward/calculator_test.go      |  12 +-
 vms/platformvm/service_test.go                |  30 +-
 vms/platformvm/state/diff_test.go             |  25 +-
 vms/platformvm/state/stakers_test.go          |   8 +-
 vms/platformvm/state/state_test.go            |   5 +-
 .../state/validator_metadata_test.go          |   5 +-
 vms/platformvm/txs/base_tx_test.go            |  12 +-
 .../txs/executor/advance_time_test.go         |  24 +-
 .../txs/executor/create_chain_test.go         |  10 +-
 .../txs/executor/create_subnet_test.go        |   2 +-
 vms/platformvm/txs/executor/export_test.go    |   2 +-
 vms/platformvm/txs/executor/helpers_test.go   |  27 +-
 vms/platformvm/txs/executor/import_test.go    |   2 +-
 .../txs/executor/proposal_tx_executor_test.go |   8 +-
 .../txs/executor/reward_validator_test.go     |  12 +-
 .../txs/executor/standard_tx_executor_test.go |  18 +-
 .../txs/remove_subnet_validator_tx_test.go    |   5 +-
 vms/platformvm/utxo/handler_test.go           |   3 +-
 vms/platformvm/vm_regression_test.go          |  20 +-
 vms/platformvm/vm_test.go                     | 152 ++--
 vms/platformvm/warp/gwarp/signer_test.go      |   4 +-
 vms/platformvm/warp/validator_test.go         |  10 +-
 vms/propertyfx/burn_operation_test.go         |  16 +-
 vms/propertyfx/credential_test.go             |   7 +-
 vms/propertyfx/factory_test.go                |  12 +-
 vms/propertyfx/fx_test.go                     | 141 ++--
 vms/propertyfx/mint_operation_test.go         |  21 +-
 vms/propertyfx/mint_output_test.go            |   7 +-
 vms/propertyfx/owned_output_test.go           |   7 +-
 vms/proposervm/batched_vm_test.go             |  87 +--
 vms/proposervm/indexer/block_server_test.go   |   6 +-
 vms/proposervm/post_fork_block_test.go        | 368 +++------
 vms/proposervm/post_fork_option_test.go       | 213 ++----
 vms/proposervm/pre_fork_block_test.go         | 222 ++----
 vms/proposervm/scheduler/scheduler_test.go    |  10 +-
 vms/proposervm/vm_byzantine_test.go           | 197 ++---
 vms/proposervm/vm_test.go                     | 585 +++++----------
 vms/registry/vm_registry_test.go              |  56 +-
 vms/rpcchainvm/vm_test.go                     |   4 +-
 vms/secp256k1fx/credential_test.go            |   9 +-
 vms/secp256k1fx/fx_test.go                    |   6 +-
 77 files changed, 1985 insertions(+), 3416 deletions(-)

diff --git a/vms/avm/blocks/builder/builder_test.go b/vms/avm/blocks/builder/builder_test.go
index 2a504cd9255..8321f51e1bd 100644
--- a/vms/avm/blocks/builder/builder_test.go
+++ b/vms/avm/blocks/builder/builder_test.go
@@ -487,13 +487,12 @@ func TestBuilderBuildBlock(t *testing.T) {
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
-			require := require.New(t)
 			ctrl := gomock.NewController(t)
 			defer ctrl.Finish()
 
 			builder := tt.builderFunc(ctrl)
 			_, err := builder.BuildBlock(context.Background())
-			require.ErrorIs(err, tt.expectedErr)
+			require.ErrorIs(t, err, tt.expectedErr)
 		})
 	}
 }
diff --git a/vms/avm/fx_test.go b/vms/avm/fx_test.go
index ebb525a2ccc..ee0cdbfd815 100644
--- a/vms/avm/fx_test.go
+++ b/vms/avm/fx_test.go
@@ -6,6 +6,8 @@ package avm
 import (
 	"errors"
 	"testing"
+
+	"github.com/stretchr/testify/require"
 )
 
 var (
@@ -48,7 +50,7 @@ func (fx *FxTest) Initialize(vm interface{}) error {
 		return nil
 	}
 	if fx.T != nil {
-		fx.T.Fatal(errCalledInitialize)
+		require.FailNow(fx.T, errCalledInitialize.Error())
 	}
 	return errCalledInitialize
 }
@@ -61,7 +63,7 @@ func (fx *FxTest) Bootstrapping() error {
 		return nil
 	}
 	if fx.T != nil {
-		fx.T.Fatal(errCalledBootstrapping)
+		require.FailNow(fx.T, errCalledBootstrapping.Error())
 	}
 	return errCalledBootstrapping
 }
@@ -74,7 +76,7 @@ func (fx *FxTest) Bootstrapped() error {
 		return nil
 	}
 	if fx.T != nil {
-		fx.T.Fatal(errCalledBootstrapped)
+		require.FailNow(fx.T, errCalledBootstrapped.Error())
 	}
 	return errCalledBootstrapped
 }
@@ -87,7 +89,7 @@ func (fx *FxTest) VerifyTransfer(tx, in, cred, utxo interface{}) error {
 		return nil
 	}
 	if fx.T != nil {
-		fx.T.Fatal(errCalledVerifyTransfer)
+		require.FailNow(fx.T, errCalledVerifyTransfer.Error())
 	}
 	return errCalledVerifyTransfer
 }
@@ -100,7 +102,7 @@ func (fx *FxTest) VerifyOperation(tx, op, cred interface{}, utxos []interface{})
 		return nil
 	}
 	if fx.T != nil {
-		fx.T.Fatal(errCalledVerifyOperation)
+		require.FailNow(fx.T, errCalledVerifyOperation.Error())
 	}
 	return errCalledVerifyOperation
 }
diff --git a/vms/avm/index_test.go b/vms/avm/index_test.go
index 22d4d13fc9f..4f2682c3d6d 100644
--- a/vms/avm/index_test.go
+++ b/vms/avm/index_test.go
@@ -38,6 +38,8 @@ var indexEnabledAvmConfig = Config{
 }
 
 func TestIndexTransaction_Ordered(t *testing.T) {
+	require := require.New(t)
+
 	genesisBytes := BuildGenesisTest(t)
 	issuer := make(chan common.Message, 1)
 	baseDBManager := manager.NewMemDB(version.Semantic1_0_0)
@@ -46,9 +48,7 @@ func TestIndexTransaction_Ordered(t *testing.T) {
 	avaxID := genesisTx.ID()
 	vm := setupTestVM(t, ctx, baseDBManager, genesisBytes, issuer, indexEnabledAvmConfig)
 	defer func() {
-		if err := vm.Shutdown(context.Background()); err != nil {
-			t.Fatal(err)
-		}
+		require.NoError(vm.Shutdown(context.Background()))
 		ctx.Lock.Unlock()
 	}()
 
@@ -69,9 +69,7 @@ func TestIndexTransaction_Ordered(t *testing.T) {
 		tx := buildTX(utxoID, txAssetID, addr)
 
 		// sign the transaction
-		if err := signTX(vm.parser.Codec(), tx, key); err != nil {
-			t.Fatal(err)
-		}
+		require.NoError(signTX(vm.parser.Codec(), tx, key))
 
 		// Provide the platform UTXO
 		utxo := buildPlatformUTXO(utxoID, txAssetID, addr)
@@ -80,25 +78,16 @@ func TestIndexTransaction_Ordered(t *testing.T) {
 		vm.state.AddUTXO(utxo)
 
 		// issue transaction
-		if _, err := vm.IssueTx(tx.Bytes()); err != nil {
-			t.Fatalf("should have issued the transaction correctly but erred: %s", err)
-		}
+		_, err := vm.IssueTx(tx.Bytes())
+		require.NoError(err)
 
 		ctx.Lock.Unlock()
-
-		msg := <-issuer
-		if msg != common.PendingTxs {
-			t.Fatalf("Wrong message")
-		}
-
+		require.Equal(common.PendingTxs, <-issuer)
 		ctx.Lock.Lock()
 
 		// get pending transactions
 		txs := vm.PendingTxs(context.Background())
-		if len(txs) != 1 {
-			t.Fatalf("Should have returned %d tx(s)", 1)
-		}
-
+		require.Len(txs, 1)
 		parsedTx := txs[0]
 		uniqueParsedTX := parsedTx.(*UniqueTx)
 		uniqueTxs = append(uniqueTxs, uniqueParsedTX)
 
 		var inputUTXOs []*avax.UTXO
 		for _, utxoID := range uniqueParsedTX.InputUTXOs() {
 			utxo, err := vm.dagState.GetUTXOFromID(utxoID)
-			if err != nil {
-				t.Fatal(err)
-			}
+			require.NoError(err)
 
 			inputUTXOs = append(inputUTXOs, utxo)
 		}
 
 		// index the transaction
-		require.NoError(t, vm.addressTxsIndexer.Accept(uniqueParsedTX.ID(), inputUTXOs, uniqueParsedTX.UTXOs()))
+		require.NoError(vm.addressTxsIndexer.Accept(uniqueParsedTX.ID(), inputUTXOs, uniqueParsedTX.UTXOs()))
 	}
 
 	// ensure length is 5
-	require.Len(t, uniqueTxs, 5)
+	require.Len(uniqueTxs, 5)
 	// for each *UniqueTx check its indexed at right index
 	for i, tx := range uniqueTxs {
 		assertIndexedTX(t, vm.db, uint64(i), addr, txAssetID.ID, tx.ID())
@@ -128,6 +115,8 @@ func TestIndexTransaction_Ordered(t *testing.T) {
 }
 
 func TestIndexTransaction_MultipleTransactions(t *testing.T) {
+	require := require.New(t)
+
 	genesisBytes := BuildGenesisTest(t)
 	issuer := make(chan common.Message, 1)
 	baseDBManager := manager.NewMemDB(version.Semantic1_0_0)
@@ -137,9 +126,7 @@ func TestIndexTransaction_MultipleTransactions(t *testing.T) {
 	avaxID := genesisTx.ID()
 	vm := setupTestVM(t, ctx, baseDBManager, genesisBytes, issuer, indexEnabledAvmConfig)
 	defer func() {
-		if err := vm.Shutdown(context.Background()); err != nil {
-			t.Fatal(err)
-		}
+		require.NoError(vm.Shutdown(context.Background()))
 		ctx.Lock.Unlock()
 	}()
 
@@ -158,9 +145,7 @@ func TestIndexTransaction_MultipleTransactions(t *testing.T) {
 		tx := buildTX(utxoID, txAssetID, addr)
 
 		// sign the transaction
-		if err := signTX(vm.parser.Codec(), tx, key); err != nil {
-			t.Fatal(err)
-		}
+		require.NoError(signTX(vm.parser.Codec(), tx, key))
 
 		// Provide the platform UTXO
 		utxo := buildPlatformUTXO(utxoID, txAssetID, addr)
@@ -169,25 +154,16 @@ func TestIndexTransaction_MultipleTransactions(t *testing.T) {
 		vm.state.AddUTXO(utxo)
 
 		// issue transaction
-		if _, err := vm.IssueTx(tx.Bytes()); err != nil {
-			t.Fatalf("should have issued the transaction correctly but erred: %s", err)
-		}
+		_, err := vm.IssueTx(tx.Bytes())
+		require.NoError(err)
 
 		ctx.Lock.Unlock()
-
-		msg := <-issuer
-		if msg != common.PendingTxs {
-			t.Fatalf("Wrong message")
-		}
-
+		require.Equal(common.PendingTxs, <-issuer)
 		ctx.Lock.Lock()
 
 		// get pending transactions
 		txs := vm.PendingTxs(context.Background())
-		if len(txs) != 1 {
-			t.Fatalf("Should have returned %d tx(s)", 1)
-		}
-
+		require.Len(txs, 1)
 		parsedTx := txs[0]
 		uniqueParsedTX := parsedTx.(*UniqueTx)
 		addressTxMap[addr] = uniqueParsedTX
 
 		var inputUTXOs []*avax.UTXO
 		for _, utxoID := range uniqueParsedTX.InputUTXOs() {
 			utxo, err := vm.dagState.GetUTXOFromID(utxoID)
-			if err != nil {
-				t.Fatal(err)
-			}
+			require.NoError(err)
 
 			inputUTXOs = append(inputUTXOs, utxo)
 		}
 
 		// index the transaction
-		require.NoError(t, vm.addressTxsIndexer.Accept(uniqueParsedTX.ID(), inputUTXOs, uniqueParsedTX.UTXOs()))
+		require.NoError(vm.addressTxsIndexer.Accept(uniqueParsedTX.ID(), inputUTXOs, uniqueParsedTX.UTXOs()))
 	}
 
 	// ensure length is same as keys length
-	require.Len(t, addressTxMap, len(keys))
+	require.Len(addressTxMap, len(keys))
 
 	// for each *UniqueTx check its indexed at right index for the right address
 	for key, tx := range addressTxMap {
@@ -217,6 +191,8 @@ func TestIndexTransaction_MultipleTransactions(t *testing.T) {
 }
 
 func TestIndexTransaction_MultipleAddresses(t *testing.T) {
+	require := require.New(t)
+
 	genesisBytes := BuildGenesisTest(t)
 	issuer := make(chan common.Message, 1)
 	baseDBManager := manager.NewMemDB(version.Semantic1_0_0)
@@ -226,9 +202,7 @@ func TestIndexTransaction_MultipleAddresses(t *testing.T) {
 	avaxID := genesisTx.ID()
 	vm := setupTestVM(t, ctx, baseDBManager, genesisBytes, issuer, indexEnabledAvmConfig)
 	defer func() {
-		if err := vm.Shutdown(context.Background()); err != nil {
-			t.Fatal(err)
-		}
+		require.NoError(vm.Shutdown(context.Background()))
 		ctx.Lock.Unlock()
 	}()
 
@@ -251,9 +225,7 @@ func TestIndexTransaction_MultipleAddresses(t *testing.T) {
 	tx := buildTX(utxoID, txAssetID, addrs...)
 
 	// sign the transaction
-	if err := signTX(vm.parser.Codec(), tx, key); err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(signTX(vm.parser.Codec(), tx, key))
 
 	// Provide the platform UTXO
 	utxo := buildPlatformUTXO(utxoID, txAssetID, addr)
@@ -264,21 +236,21 @@ func TestIndexTransaction_MultipleAddresses(t *testing.T) {
 
 	var inputUTXOs []*avax.UTXO //nolint:prealloc
 	for _, utxoID := range tx.Unsigned.InputUTXOs() {
 		utxo, err := vm.dagState.GetUTXOFromID(utxoID)
-		if err != nil {
-			t.Fatal(err)
-		}
+		require.NoError(err)
 
 		inputUTXOs = append(inputUTXOs, utxo)
 	}
 
 	// index the transaction
-	require.NoError(t, vm.addressTxsIndexer.Accept(tx.ID(), inputUTXOs, tx.UTXOs()))
+	require.NoError(vm.addressTxsIndexer.Accept(tx.ID(), inputUTXOs, tx.UTXOs()))
 
 	assertIndexedTX(t, vm.db, uint64(0), addr, txAssetID.ID, tx.ID())
 	assertLatestIdx(t, vm.db, addr, txAssetID.ID, 1)
 }
 
 func TestIndexTransaction_UnorderedWrites(t *testing.T) {
+	require := require.New(t)
+
 	genesisBytes := BuildGenesisTest(t)
 	issuer := make(chan common.Message, 1)
 	baseDBManager := manager.NewMemDB(version.Semantic1_0_0)
@@ -287,9 +259,7 @@ func TestIndexTransaction_UnorderedWrites(t *testing.T) {
 	avaxID := genesisTx.ID()
 	vm := setupTestVM(t, ctx, baseDBManager, genesisBytes, issuer, indexEnabledAvmConfig)
 	defer func() {
-		if err := vm.Shutdown(context.Background()); err != nil {
-			t.Fatal(err)
-		}
+		require.NoError(vm.Shutdown(context.Background()))
 		ctx.Lock.Unlock()
 	}()
 
@@ -308,9 +278,7 @@ func TestIndexTransaction_UnorderedWrites(t *testing.T) {
 		tx := buildTX(utxoID, txAssetID, addr)
 
 		// sign the transaction
-		if err := signTX(vm.parser.Codec(), tx, key); err != nil {
-			t.Fatal(err)
-		}
+		require.NoError(signTX(vm.parser.Codec(), tx, key))
 
 		// Provide the platform UTXO
 		utxo := buildPlatformUTXO(utxoID, txAssetID, addr)
@@ -319,25 +287,16 @@ func TestIndexTransaction_UnorderedWrites(t *testing.T) {
 		vm.state.AddUTXO(utxo)
 
 		// issue transaction
-		if _, err := vm.IssueTx(tx.Bytes()); err != nil {
-			t.Fatalf("should have issued the transaction correctly but erred: %s", err)
-		}
+		_, err := vm.IssueTx(tx.Bytes())
+		require.NoError(err)
 
 		ctx.Lock.Unlock()
-
-		msg := <-issuer
-		if msg != common.PendingTxs {
-			t.Fatalf("Wrong message")
-		}
-
+		require.Equal(common.PendingTxs, <-issuer)
 		ctx.Lock.Lock()
 
 		// get pending transactions
 		txs := vm.PendingTxs(context.Background())
-		if len(txs) != 1 {
-			t.Fatalf("Should have returned %d tx(s)", 1)
-		}
-
+		require.Len(txs, 1)
 		parsedTx := txs[0]
 		uniqueParsedTX := parsedTx.(*UniqueTx)
 		addressTxMap[addr] = uniqueParsedTX
 
 		var inputUTXOs []*avax.UTXO
 		for _, utxoID := range uniqueParsedTX.InputUTXOs() {
 			utxo, err := vm.dagState.GetUTXOFromID(utxoID)
-			if err != nil {
-				t.Fatal(err)
-			}
+			require.NoError(err)
 
 			inputUTXOs = append(inputUTXOs, utxo)
 		}
 
 		// index the transaction, NOT calling Accept(ids.ID) method
-		require.NoError(t, vm.addressTxsIndexer.Accept(uniqueParsedTX.ID(), inputUTXOs, uniqueParsedTX.UTXOs()))
+		require.NoError(vm.addressTxsIndexer.Accept(uniqueParsedTX.ID(), inputUTXOs, uniqueParsedTX.UTXOs()))
 	}
 
 	// ensure length is same as keys length
-	require.Len(t, addressTxMap, len(keys))
+	require.Len(addressTxMap, len(keys))
 
 	// for each *UniqueTx check its indexed at right index for the right address
 	for key, tx := range addressTxMap {
@@ -367,13 +324,13 @@ func TestIndexTransaction_UnorderedWrites(t *testing.T) {
 }
 
 func TestIndexer_Read(t *testing.T) {
+	require := require.New(t)
+
 	// setup vm, db etc
 	_, vm, _, _, _ := setup(t, true)
 
 	defer func() {
-		if err := vm.Shutdown(context.Background()); err != nil {
-			t.Fatal(err)
-		}
+		require.NoError(vm.Shutdown(context.Background()))
 		vm.ctx.Lock.Unlock()
 	}()
 
@@ -384,21 +341,23 @@ func TestIndexer_Read(t *testing.T) {
 	// setup some fake txs under the above generated address and asset IDs
 	testTxCount := 25
 	testTxs := setupTestTxsInDB(t, vm.db, addr, assetID, testTxCount)
-	require.Len(t, testTxs, 25)
+	require.Len(testTxs, 25)
 
 	// read the pages, 5 items at a time
 	var cursor uint64
 	var pageSize uint64 = 5
 	for cursor < 25 {
 		txIDs, err := vm.addressTxsIndexer.Read(addr[:], assetID, cursor, pageSize)
-		require.NoError(t, err)
-		require.Len(t, txIDs, 5)
-		require.Equal(t, txIDs, testTxs[cursor:cursor+pageSize])
+		require.NoError(err)
+		require.Len(txIDs, 5)
+		require.Equal(txIDs, testTxs[cursor:cursor+pageSize])
 		cursor += pageSize
 	}
 }
 
 func TestIndexingNewInitWithIndexingEnabled(t *testing.T) {
+	require := require.New(t)
+
 	baseDBManager := manager.NewMemDB(version.Semantic1_0_0)
 	ctx := NewContext(t)
 
@@ -406,43 +365,47 @@ func TestIndexingNewInitWithIndexingEnabled(t *testing.T) {
 
 	// start with indexing enabled
 	_, err := index.NewIndexer(db, ctx.Log, "", prometheus.NewRegistry(), true)
-	require.NoError(t, err)
+	require.NoError(err)
 
 	// now disable indexing with allow-incomplete set to false
 	_, err = index.NewNoIndexer(db, false)
-	require.ErrorIs(t, err, index.ErrCausesIncompleteIndex)
+	require.ErrorIs(err, index.ErrCausesIncompleteIndex)
 
 	// now disable indexing with allow-incomplete set to true
 	_, err = index.NewNoIndexer(db, true)
-	require.NoError(t, err)
+	require.NoError(err)
 }
 
 func TestIndexingNewInitWithIndexingDisabled(t *testing.T) {
+	require := require.New(t)
+
 	ctx := NewContext(t)
 	db := memdb.New()
 
 	// disable indexing with allow-incomplete set to false
 	_, err := index.NewNoIndexer(db, false)
-	require.NoError(t, err)
+	require.NoError(err)
 
 	// It's not OK to have an incomplete index when allowIncompleteIndices is false
 	_, err = index.NewIndexer(db, ctx.Log, "", prometheus.NewRegistry(), false)
-	require.ErrorIs(t, err, index.ErrIndexingRequiredFromGenesis)
+	require.ErrorIs(err, index.ErrIndexingRequiredFromGenesis)
 
 	// It's OK to have an incomplete index when allowIncompleteIndices is true
 	_, err = index.NewIndexer(db, ctx.Log, "", prometheus.NewRegistry(), true)
-	require.NoError(t, err)
+	require.NoError(err)
 
 	// It's OK to have an incomplete index when indexing currently disabled
 	_, err = index.NewNoIndexer(db, false)
-	require.NoError(t, err)
+	require.NoError(err)
 
 	// It's OK to have an incomplete index when allowIncompleteIndices is true
 	_, err = index.NewNoIndexer(db, true)
-	require.NoError(t, err)
+	require.NoError(err)
 }
 
 func TestIndexingAllowIncomplete(t *testing.T) {
+	require := require.New(t)
+
 	baseDBManager := manager.NewMemDB(version.Semantic1_0_0)
 	ctx := NewContext(t)
 
@@ -450,14 +413,14 @@ func TestIndexingAllowIncomplete(t *testing.T) {
 	db := versiondb.New(prefixDB)
 	// disabled indexer will persist idxEnabled as false
 	_, err := index.NewNoIndexer(db, false)
-	require.NoError(t, err)
+	require.NoError(err)
 
 	// we initialize with indexing enabled now and allow incomplete indexing as false
 	_, err = index.NewIndexer(db, ctx.Log, "", prometheus.NewRegistry(), false)
 	// we should get error because:
 	// - indexing was disabled previously
 	// - node now is asked to enable indexing with allow incomplete set to false
-	require.ErrorIs(t, err, index.ErrIndexingRequiredFromGenesis)
+	require.ErrorIs(err, index.ErrIndexingRequiredFromGenesis)
 }
buildPlatformUTXO(utxoID avax.UTXOID, txAssetID avax.Asset, addr ids.ShortID) *avax.UTXO { @@ -506,12 +469,14 @@ func buildTX(utxoID avax.UTXOID, txAssetID avax.Asset, address ...ids.ShortID) * } func setupTestVM(t *testing.T, ctx *snow.Context, baseDBManager manager.Manager, genesisBytes []byte, issuer chan common.Message, config Config) *VM { + require := require.New(t) + vm := &VM{} avmConfigBytes, err := json.Marshal(config) - require.NoError(t, err) + require.NoError(err) appSender := &common.SenderTest{T: t} - err = vm.Initialize( + require.NoError(vm.Initialize( context.Background(), ctx, baseDBManager.NewPrefixDBManager([]byte{1}), @@ -524,20 +489,13 @@ func setupTestVM(t *testing.T, ctx *snow.Context, baseDBManager manager.Manager, Fx: &secp256k1fx.Fx{}, }}, appSender, - ) - if err != nil { - t.Fatal(err) - } + )) vm.batchTimeout = 0 - if err := vm.SetState(context.Background(), snow.Bootstrapping); err != nil { - t.Fatal(err) - } + require.NoError(vm.SetState(context.Background(), snow.Bootstrapping)) - if err := vm.SetState(context.Background(), snow.NormalOp); err != nil { - t.Fatal(err) - } + require.NoError(vm.SetState(context.Background(), snow.NormalOp)) return vm } @@ -577,9 +535,7 @@ func checkIndexedTX(db database.Database, index uint64, sourceAddress ids.ShortI } func assertIndexedTX(t *testing.T, db database.Database, index uint64, sourceAddress ids.ShortID, assetID ids.ID, transactionID ids.ID) { - if err := checkIndexedTX(db, index, sourceAddress, assetID, transactionID); err != nil { - t.Fatal(err) - } + require.NoError(t, checkIndexedTX(db, index, sourceAddress, assetID, transactionID)) } // Sets up test tx IDs in DB in the following structure for the indexer to pick @@ -591,6 +547,8 @@ func assertIndexedTX(t *testing.T, db database.Database, index uint64, sourceAdd // - 0: txID1 // - 1: txID1 func setupTestTxsInDB(t *testing.T, db *versiondb.Database, address ids.ShortID, assetID ids.ID, txCount int) []ids.ID { + require := require.New(t) + var testTxs []ids.ID for i := 0; i < txCount; i++ { testTxs = append(testTxs, ids.GenerateTestID()) @@ -603,14 +561,14 @@ func setupTestTxsInDB(t *testing.T, db *versiondb.Database, address ids.ShortID, binary.BigEndian.PutUint64(idxBytes, idx) for _, txID := range testTxs { txID := txID - require.NoError(t, assetPrefixDB.Put(idxBytes, txID[:])) + require.NoError(assetPrefixDB.Put(idxBytes, txID[:])) idx++ binary.BigEndian.PutUint64(idxBytes, idx) } _, err := db.CommitBatch() - require.NoError(t, err) + require.NoError(err) - require.NoError(t, assetPrefixDB.Put([]byte("idx"), idxBytes)) - require.NoError(t, db.Commit()) + require.NoError(assetPrefixDB.Put([]byte("idx"), idxBytes)) + require.NoError(db.Commit()) return testTxs } diff --git a/vms/avm/service_test.go b/vms/avm/service_test.go index 2c9e54bd3b5..17966c324df 100644 --- a/vms/avm/service_test.go +++ b/vms/avm/service_test.go @@ -4,7 +4,6 @@ package avm import ( - "bytes" "context" "fmt" "math/rand" @@ -13,6 +12,8 @@ import ( stdjson "encoding/json" + "github.com/btcsuite/btcd/btcutil/bech32" + "github.com/golang/mock/gomock" "github.com/prometheus/client_golang/prometheus" @@ -21,6 +22,7 @@ import ( "github.com/ava-labs/avalanchego/api" "github.com/ava-labs/avalanchego/chains/atomic" + "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/database/manager" "github.com/ava-labs/avalanchego/database/prefixdb" @@ -105,21 +107,17 @@ func setupWithIssuer(t *testing.T, isAVAXAsset bool) ([]byte, *VM, *Service, 
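setupTestVM and setupTestTxsInDB show the helper variant of the same migration: because the helper receives *testing.T, require calls inside it abort the calling test directly. A sketch with a hypothetical mustOpen helper, adding t.Helper() so failures report the caller's line:

package example

import (
	"testing"

	"github.com/stretchr/testify/require"
)

// mustOpen is a hypothetical helper in the style of setupTestVM:
// it receives *testing.T, so a require failure aborts the caller.
func mustOpen(t *testing.T, name string) string {
	t.Helper() // failures report the caller's line, not this one
	require.NotEmpty(t, name)
	return name
}

func TestWithHelper(t *testing.T) {
	db := mustOpen(t, "memdb")
	require.Equal(t, "memdb", db)
}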
cha // 3) The service that wraps the VM // 4) atomic memory to use in tests func setupWithKeys(t *testing.T, isAVAXAsset bool) ([]byte, *VM, *Service, *atomic.Memory, *txs.Tx) { + require := require.New(t) + genesisBytes, vm, s, m, tx := setup(t, isAVAXAsset) // Import the initially funded private keys user, err := keystore.NewUserFromKeystore(s.vm.ctx.Keystore, username, password) - if err != nil { - t.Fatal(err) - } + require.NoError(err) - if err := user.PutKeys(keys...); err != nil { - t.Fatalf("Failed to set key for user: %s", err) - } + require.NoError(user.PutKeys(keys...)) - if err := user.Close(); err != nil { - t.Fatal(err) - } + require.NoError(user.Close()) return genesisBytes, vm, s, m, tx } @@ -127,6 +125,8 @@ func setupWithKeys(t *testing.T, isAVAXAsset bool) ([]byte, *VM, *Service, *atom // The size of the sample is between 1 and len(addrs) // If len(addrs) == 0, returns nil func sampleAddrs(t *testing.T, vm *VM, addrs []ids.ShortID) ([]ids.ShortID, []string) { + require := require.New(t) + sampledAddrs := []ids.ShortID{} sampledAddrsStr := []string{} @@ -135,15 +135,11 @@ func sampleAddrs(t *testing.T, vm *VM, addrs []ids.ShortID) ([]ids.ShortID, []st numAddrs := 1 + rand.Intn(len(addrs)) // #nosec G404 indices, err := sampler.Sample(numAddrs) - if err != nil { - t.Fatal(err) - } + require.NoError(err) for _, index := range indices { addr := addrs[index] addrStr, err := vm.FormatLocalAddress(addr) - if err != nil { - t.Fatal(err) - } + require.NoError(err) sampledAddrs = append(sampledAddrs, addr) sampledAddrsStr = append(sampledAddrsStr, addrStr) @@ -163,9 +159,7 @@ func verifyTxFeeDeducted(t *testing.T, s *Service, fromAddrs []ids.ShortID, numT for _, addr := range addrs { // get balances for all addresses addrStr, err := s.vm.FormatLocalAddress(addr) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) reply := &GetBalanceReply{} err = s.GetBalance(nil, &GetBalanceArgs{ @@ -195,33 +189,25 @@ func verifyTxFeeDeducted(t *testing.T, s *Service, fromAddrs []ids.ShortID, numT } func TestServiceIssueTx(t *testing.T) { + require := require.New(t) + genesisBytes, vm, s, _, _ := setup(t, true) defer func() { - if err := vm.Shutdown(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(vm.Shutdown(context.Background())) vm.ctx.Lock.Unlock() }() txArgs := &api.FormattedTx{} txReply := &api.JSONTxID{} err := s.IssueTx(nil, txArgs, txReply) - if err == nil { - t.Fatal("Expected empty transaction to return an error") - } + require.ErrorIs(err, codec.ErrCantUnpackVersion) tx := NewTx(t, genesisBytes, vm) txArgs.Tx, err = formatting.Encode(formatting.Hex, tx.Bytes()) - if err != nil { - t.Fatal(err) - } + require.NoError(err) txArgs.Encoding = formatting.Hex txReply = &api.JSONTxID{} - if err := s.IssueTx(nil, txArgs, txReply); err != nil { - t.Fatal(err) - } - if txReply.TxID != tx.ID() { - t.Fatalf("Expected %q, got %q", txReply.TxID, tx.ID()) - } + require.NoError(s.IssueTx(nil, txArgs, txReply)) + require.Equal(tx.ID(), txReply.TxID) } func TestServiceGetTxStatus(t *testing.T) { @@ -268,20 +254,18 @@ func TestServiceGetTxStatus(t *testing.T) { // Test the GetBalance method when argument Strict is true func TestServiceGetBalanceStrict(t *testing.T) { + require := require.New(t) + _, vm, s, _, _ := setup(t, true) defer func() { - if err := vm.Shutdown(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(vm.Shutdown(context.Background())) vm.ctx.Lock.Unlock() }() assetID := ids.GenerateTestID() addr := ids.GenerateTestShortID() addrStr, 
err := vm.FormatLocalAddress(addr) - if err != nil { - t.Fatal(err) - } + require.NoError(err) // A UTXO with a 2 out of 2 multisig // where one of the addresses is [addr] @@ -301,7 +285,7 @@ func TestServiceGetBalanceStrict(t *testing.T) { } // Insert the UTXO vm.state.AddUTXO(twoOfTwoUTXO) - require.NoError(t, vm.state.Commit()) + require.NoError(vm.state.Commit()) // Check the balance with IncludePartial set to true balanceArgs := &GetBalanceArgs{ @@ -310,10 +294,10 @@ func TestServiceGetBalanceStrict(t *testing.T) { IncludePartial: true, } balanceReply := &GetBalanceReply{} - require.NoError(t, s.GetBalance(nil, balanceArgs, balanceReply)) + require.NoError(s.GetBalance(nil, balanceArgs, balanceReply)) // The balance should include the UTXO since it is partly owned by [addr] - require.Equal(t, uint64(1337), uint64(balanceReply.Balance)) - require.Len(t, balanceReply.UTXOIDs, 1, "should have only returned 1 utxoID") + require.Equal(uint64(1337), uint64(balanceReply.Balance)) + require.Len(balanceReply.UTXOIDs, 1) // Check the balance with IncludePartial set to false balanceArgs = &GetBalanceArgs{ @@ -321,10 +305,10 @@ func TestServiceGetBalanceStrict(t *testing.T) { AssetID: assetID.String(), } balanceReply = &GetBalanceReply{} - require.NoError(t, s.GetBalance(nil, balanceArgs, balanceReply)) + require.NoError(s.GetBalance(nil, balanceArgs, balanceReply)) // The balance should not include the UTXO since it is only partly owned by [addr] - require.Zero(t, balanceReply.Balance) - require.Empty(t, balanceReply.UTXOIDs, "should have returned 0 utxoIDs") + require.Zero(balanceReply.Balance) + require.Empty(balanceReply.UTXOIDs) // A UTXO with a 1 out of 2 multisig // where one of the addresses is [addr] @@ -344,7 +328,7 @@ func TestServiceGetBalanceStrict(t *testing.T) { } // Insert the UTXO vm.state.AddUTXO(oneOfTwoUTXO) - require.NoError(t, vm.state.Commit()) + require.NoError(vm.state.Commit()) // Check the balance with IncludePartial set to true balanceArgs = &GetBalanceArgs{ @@ -353,10 +337,10 @@ func TestServiceGetBalanceStrict(t *testing.T) { IncludePartial: true, } balanceReply = &GetBalanceReply{} - require.NoError(t, s.GetBalance(nil, balanceArgs, balanceReply)) + require.NoError(s.GetBalance(nil, balanceArgs, balanceReply)) // The balance should include the UTXO since it is partly owned by [addr] - require.Equal(t, uint64(1337+1337), uint64(balanceReply.Balance)) - require.Len(t, balanceReply.UTXOIDs, 2, "should have only returned 2 utxoIDs") + require.Equal(uint64(1337+1337), uint64(balanceReply.Balance)) + require.Len(balanceReply.UTXOIDs, 2) // Check the balance with IncludePartial set to false balanceArgs = &GetBalanceArgs{ @@ -364,10 +348,10 @@ func TestServiceGetBalanceStrict(t *testing.T) { AssetID: assetID.String(), } balanceReply = &GetBalanceReply{} - require.NoError(t, s.GetBalance(nil, balanceArgs, balanceReply)) + require.NoError(s.GetBalance(nil, balanceArgs, balanceReply)) // The balance should not include the UTXO since it is only partly owned by [addr] - require.Zero(t, balanceReply.Balance) - require.Empty(t, balanceReply.UTXOIDs, "should have returned 0 utxoIDs") + require.Zero(balanceReply.Balance) + require.Empty(balanceReply.UTXOIDs) // A UTXO with a 1 out of 1 multisig // but with a locktime in the future @@ -389,7 +373,7 @@ func TestServiceGetBalanceStrict(t *testing.T) { } // Insert the UTXO vm.state.AddUTXO(futureUTXO) - require.NoError(t, vm.state.Commit()) + require.NoError(vm.state.Commit()) // Check the balance with IncludePartial set to true 
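The balance checks here use require.Zero for numeric zero values and require.Empty for zero-length collections, avoiding typed constants like uint64(0). A sketch of the distinction, with names standing in for the reply fields:

package example

import (
	"testing"

	"github.com/stretchr/testify/require"
)

func TestZeroAndEmpty(t *testing.T) {
	require := require.New(t)

	var balance uint64   // zero value, as after an IncludePartial=false query
	var utxoIDs []string // nil slice standing in for reply.UTXOIDs

	require.Zero(balance)  // passes for the zero value of any type, no cast needed
	require.Empty(utxoIDs) // passes for nil or zero-length collections
}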
balanceArgs = &GetBalanceArgs{ @@ -398,10 +382,10 @@ func TestServiceGetBalanceStrict(t *testing.T) { IncludePartial: true, } balanceReply = &GetBalanceReply{} - require.NoError(t, s.GetBalance(nil, balanceArgs, balanceReply)) + require.NoError(s.GetBalance(nil, balanceArgs, balanceReply)) // The balance should include the UTXO since it is partly owned by [addr] - require.Equal(t, uint64(1337*3), uint64(balanceReply.Balance)) - require.Len(t, balanceReply.UTXOIDs, 3, "should have returned 3 utxoIDs") + require.Equal(uint64(1337*3), uint64(balanceReply.Balance)) + require.Len(balanceReply.UTXOIDs, 3) // Check the balance with IncludePartial set to false balanceArgs = &GetBalanceArgs{ @@ -409,30 +393,27 @@ func TestServiceGetBalanceStrict(t *testing.T) { AssetID: assetID.String(), } balanceReply = &GetBalanceReply{} - require.NoError(t, s.GetBalance(nil, balanceArgs, balanceReply)) + require.NoError(s.GetBalance(nil, balanceArgs, balanceReply)) // The balance should not include the UTXO since it is only partly owned by [addr] - require.Zero(t, balanceReply.Balance) - require.Empty(t, balanceReply.UTXOIDs, "should have returned 0 utxoIDs") + require.Zero(balanceReply.Balance) + require.Empty(balanceReply.UTXOIDs) } func TestServiceGetTxs(t *testing.T) { + require := require.New(t) _, vm, s, _, _ := setup(t, true) var err error vm.addressTxsIndexer, err = index.NewIndexer(vm.db, vm.ctx.Log, "", prometheus.NewRegistry(), false) - require.NoError(t, err) + require.NoError(err) defer func() { - if err := vm.Shutdown(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(vm.Shutdown(context.Background())) vm.ctx.Lock.Unlock() }() assetID := ids.GenerateTestID() addr := ids.GenerateTestShortID() addrStr, err := vm.FormatLocalAddress(addr) - if err != nil { - t.Fatal(err) - } + require.NoError(err) testTxCount := 25 testTxs := setupTestTxsInDB(t, vm.db, addr, assetID, testTxCount) @@ -444,33 +425,31 @@ func TestServiceGetTxs(t *testing.T) { AssetID: assetID.String(), } getTxsReply := &GetAddressTxsReply{} - require.NoError(t, s.GetAddressTxs(nil, getTxsArgs, getTxsReply)) - require.Len(t, getTxsReply.TxIDs, 10) - require.Equal(t, getTxsReply.TxIDs, testTxs[:10]) + require.NoError(s.GetAddressTxs(nil, getTxsArgs, getTxsReply)) + require.Len(getTxsReply.TxIDs, 10) + require.Equal(getTxsReply.TxIDs, testTxs[:10]) // get the second page getTxsArgs.Cursor = getTxsReply.Cursor getTxsReply = &GetAddressTxsReply{} - require.NoError(t, s.GetAddressTxs(nil, getTxsArgs, getTxsReply)) - require.Len(t, getTxsReply.TxIDs, 10) - require.Equal(t, getTxsReply.TxIDs, testTxs[10:20]) + require.NoError(s.GetAddressTxs(nil, getTxsArgs, getTxsReply)) + require.Len(getTxsReply.TxIDs, 10) + require.Equal(getTxsReply.TxIDs, testTxs[10:20]) } func TestServiceGetAllBalances(t *testing.T) { + require := require.New(t) + _, vm, s, _, _ := setup(t, true) defer func() { - if err := vm.Shutdown(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(vm.Shutdown(context.Background())) vm.ctx.Lock.Unlock() }() assetID := ids.GenerateTestID() addr := ids.GenerateTestShortID() addrStr, err := vm.FormatLocalAddress(addr) - if err != nil { - t.Fatal(err) - } + require.NoError(err) // A UTXO with a 2 out of 2 multisig // where one of the addresses is [addr] twoOfTwoUTXO := &avax.UTXO{ @@ -489,7 +468,7 @@ func TestServiceGetAllBalances(t *testing.T) { } // Insert the UTXO vm.state.AddUTXO(twoOfTwoUTXO) - require.NoError(t, vm.state.Commit()) + require.NoError(vm.state.Commit()) // Check the balance with 
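TestServiceGetTxs reads the index back page by page, advancing a cursor and comparing each page against the expected slice window. A sketch of the same loop shape with a hypothetical readPage function in place of GetAddressTxs:

package example

import (
	"testing"

	"github.com/stretchr/testify/require"
)

// readPage is a hypothetical stand-in for a paged query: it returns
// up to pageSize items starting at cursor.
func readPage(all []int, cursor, pageSize uint64) []int {
	end := cursor + pageSize
	if end > uint64(len(all)) {
		end = uint64(len(all))
	}
	return all[cursor:end]
}

func TestPaging(t *testing.T) {
	require := require.New(t)

	all := make([]int, 25)
	for i := range all {
		all[i] = i
	}

	var cursor, pageSize uint64 = 0, 5
	for cursor < uint64(len(all)) {
		page := readPage(all, cursor, pageSize)
		require.Len(page, int(pageSize))
		require.Equal(all[cursor:cursor+pageSize], page)
		cursor += pageSize
	}
}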
IncludePartial set to true balanceArgs := &GetAllBalancesArgs{ @@ -497,19 +476,19 @@ func TestServiceGetAllBalances(t *testing.T) { IncludePartial: true, } reply := &GetAllBalancesReply{} - require.NoError(t, s.GetAllBalances(nil, balanceArgs, reply)) + require.NoError(s.GetAllBalances(nil, balanceArgs, reply)) // The balance should include the UTXO since it is partly owned by [addr] - require.Len(t, reply.Balances, 1) - require.Equal(t, assetID.String(), reply.Balances[0].AssetID) - require.Equal(t, uint64(1337), uint64(reply.Balances[0].Balance)) + require.Len(reply.Balances, 1) + require.Equal(assetID.String(), reply.Balances[0].AssetID) + require.Equal(uint64(1337), uint64(reply.Balances[0].Balance)) // Check the balance with IncludePartial set to false balanceArgs = &GetAllBalancesArgs{ JSONAddress: api.JSONAddress{Address: addrStr}, } reply = &GetAllBalancesReply{} - require.NoError(t, s.GetAllBalances(nil, balanceArgs, reply)) - require.Empty(t, reply.Balances) + require.NoError(s.GetAllBalances(nil, balanceArgs, reply)) + require.Empty(reply.Balances) // A UTXO with a 1 out of 2 multisig // where one of the addresses is [addr] @@ -529,7 +508,7 @@ func TestServiceGetAllBalances(t *testing.T) { } // Insert the UTXO vm.state.AddUTXO(oneOfTwoUTXO) - require.NoError(t, vm.state.Commit()) + require.NoError(vm.state.Commit()) // Check the balance with IncludePartial set to true balanceArgs = &GetAllBalancesArgs{ @@ -537,20 +516,20 @@ func TestServiceGetAllBalances(t *testing.T) { IncludePartial: true, } reply = &GetAllBalancesReply{} - require.NoError(t, s.GetAllBalances(nil, balanceArgs, reply)) + require.NoError(s.GetAllBalances(nil, balanceArgs, reply)) // The balance should include the UTXO since it is partly owned by [addr] - require.Len(t, reply.Balances, 1) - require.Equal(t, assetID.String(), reply.Balances[0].AssetID) - require.Equal(t, uint64(1337*2), uint64(reply.Balances[0].Balance)) + require.Len(reply.Balances, 1) + require.Equal(assetID.String(), reply.Balances[0].AssetID) + require.Equal(uint64(1337*2), uint64(reply.Balances[0].Balance)) // Check the balance with IncludePartial set to false balanceArgs = &GetAllBalancesArgs{ JSONAddress: api.JSONAddress{Address: addrStr}, } reply = &GetAllBalancesReply{} - require.NoError(t, s.GetAllBalances(nil, balanceArgs, reply)) + require.NoError(s.GetAllBalances(nil, balanceArgs, reply)) // The balance should not include the UTXO since it is only partly owned by [addr] - require.Empty(t, reply.Balances) + require.Empty(reply.Balances) // A UTXO with a 1 out of 1 multisig // but with a locktime in the future @@ -572,7 +551,7 @@ func TestServiceGetAllBalances(t *testing.T) { } // Insert the UTXO vm.state.AddUTXO(futureUTXO) - require.NoError(t, vm.state.Commit()) + require.NoError(vm.state.Commit()) // Check the balance with IncludePartial set to true balanceArgs = &GetAllBalancesArgs{ @@ -580,20 +559,20 @@ func TestServiceGetAllBalances(t *testing.T) { IncludePartial: true, } reply = &GetAllBalancesReply{} - require.NoError(t, s.GetAllBalances(nil, balanceArgs, reply)) + require.NoError(s.GetAllBalances(nil, balanceArgs, reply)) // The balance should include the UTXO since it is partly owned by [addr] // The balance should include the UTXO since it is partly owned by [addr] - require.Len(t, reply.Balances, 1) - require.Equal(t, assetID.String(), reply.Balances[0].AssetID) - require.Equal(t, uint64(1337*3), uint64(reply.Balances[0].Balance)) + require.Len(reply.Balances, 1) + require.Equal(assetID.String(), reply.Balances[0].AssetID) + 
require.Equal(uint64(1337*3), uint64(reply.Balances[0].Balance)) // Check the balance with IncludePartial set to false balanceArgs = &GetAllBalancesArgs{ JSONAddress: api.JSONAddress{Address: addrStr}, } reply = &GetAllBalancesReply{} - require.NoError(t, s.GetAllBalances(nil, balanceArgs, reply)) + require.NoError(s.GetAllBalances(nil, balanceArgs, reply)) // The balance should not include the UTXO since it is only partly owned by [addr] - require.Empty(t, reply.Balances) + require.Empty(reply.Balances) // A UTXO for a different asset otherAssetID := ids.GenerateTestID() @@ -613,7 +592,7 @@ func TestServiceGetAllBalances(t *testing.T) { } // Insert the UTXO vm.state.AddUTXO(otherAssetUTXO) - require.NoError(t, vm.state.Commit()) + require.NoError(vm.state.Commit()) // Check the balance with IncludePartial set to true balanceArgs = &GetAllBalancesArgs{ @@ -621,24 +600,24 @@ func TestServiceGetAllBalances(t *testing.T) { IncludePartial: true, } reply = &GetAllBalancesReply{} - require.NoError(t, s.GetAllBalances(nil, balanceArgs, reply)) + require.NoError(s.GetAllBalances(nil, balanceArgs, reply)) // The balance should include the UTXO since it is partly owned by [addr] - require.Len(t, reply.Balances, 2) + require.Len(reply.Balances, 2) gotAssetIDs := []string{reply.Balances[0].AssetID, reply.Balances[1].AssetID} - require.Contains(t, gotAssetIDs, assetID.String()) - require.Contains(t, gotAssetIDs, otherAssetID.String()) + require.Contains(gotAssetIDs, assetID.String()) + require.Contains(gotAssetIDs, otherAssetID.String()) gotBalances := []uint64{uint64(reply.Balances[0].Balance), uint64(reply.Balances[1].Balance)} - require.Contains(t, gotBalances, uint64(1337)) - require.Contains(t, gotBalances, uint64(1337*3)) + require.Contains(gotBalances, uint64(1337)) + require.Contains(gotBalances, uint64(1337*3)) // Check the balance with IncludePartial set to false balanceArgs = &GetAllBalancesArgs{ JSONAddress: api.JSONAddress{Address: addrStr}, } reply = &GetAllBalancesReply{} - require.NoError(t, s.GetAllBalances(nil, balanceArgs, reply)) + require.NoError(s.GetAllBalances(nil, balanceArgs, reply)) // The balance should include the UTXO since it is partly owned by [addr] - require.Empty(t, reply.Balances) + require.Empty(reply.Balances) } func TestServiceGetTx(t *testing.T) { @@ -1366,9 +1345,7 @@ func newAvaxBaseTxWithOutputs(t *testing.T, genesisBytes []byte, vm *VM) *txs.Tx avaxTx := GetAVAXTxFromGenesisTest(genesisBytes, t) key := keys[0] tx := buildBaseTx(avaxTx, vm, key) - if err := tx.SignSECP256K1Fx(vm.parser.Codec(), [][]*secp256k1.PrivateKey{{key}}); err != nil { - t.Fatal(err) - } + require.NoError(t, tx.SignSECP256K1Fx(vm.parser.Codec(), [][]*secp256k1.PrivateKey{{key}})) return tx } @@ -1376,18 +1353,14 @@ func newAvaxExportTxWithOutputs(t *testing.T, genesisBytes []byte, vm *VM) *txs. 
avaxTx := GetAVAXTxFromGenesisTest(genesisBytes, t) key := keys[0] tx := buildExportTx(avaxTx, vm, key) - if err := tx.SignSECP256K1Fx(vm.parser.Codec(), [][]*secp256k1.PrivateKey{{key}}); err != nil { - t.Fatal(err) - } + require.NoError(t, tx.SignSECP256K1Fx(vm.parser.Codec(), [][]*secp256k1.PrivateKey{{key}})) return tx } func newAvaxCreateAssetTxWithOutputs(t *testing.T, vm *VM) *txs.Tx { key := keys[0] tx := buildCreateAssetTx(key) - if err := vm.parser.InitializeTx(tx); err != nil { - t.Fatal(err) - } + require.NoError(t, vm.parser.InitializeTx(tx)) return tx } @@ -1609,39 +1582,37 @@ func buildOperationTxWithOp(op ...*txs.Operation) *txs.Tx { } func TestServiceGetNilTx(t *testing.T) { + require := require.New(t) + _, vm, s, _, _ := setup(t, true) defer func() { - if err := vm.Shutdown(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(vm.Shutdown(context.Background())) vm.ctx.Lock.Unlock() }() reply := api.GetTxReply{} err := s.GetTx(nil, &api.GetTxArgs{}, &reply) - require.ErrorIs(t, err, errNilTxID) + require.ErrorIs(err, errNilTxID) } func TestServiceGetUnknownTx(t *testing.T) { + require := require.New(t) + _, vm, s, _, _ := setup(t, true) defer func() { - if err := vm.Shutdown(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(vm.Shutdown(context.Background())) vm.ctx.Lock.Unlock() }() reply := api.GetTxReply{} err := s.GetTx(nil, &api.GetTxArgs{TxID: ids.GenerateTestID()}, &reply) - require.ErrorIs(t, err, database.ErrNotFound) + require.ErrorIs(err, database.ErrNotFound) } func TestServiceGetUTXOs(t *testing.T) { _, vm, s, m, _ := setup(t, true) defer func() { - if err := vm.Shutdown(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(t, vm.Shutdown(context.Background())) vm.ctx.Lock.Unlock() }() @@ -1688,9 +1659,7 @@ func TestServiceGetUTXOs(t *testing.T) { } utxoBytes, err := codec.Marshal(txs.CodecVersion, utxo) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) utxoID := utxo.InputID() elems[i] = &atomic.Element{ Key: utxoID[:], @@ -1701,87 +1670,77 @@ func TestServiceGetUTXOs(t *testing.T) { } } - if err := sm.Apply(map[ids.ID]*atomic.Requests{vm.ctx.ChainID: {PutRequests: elems}}); err != nil { - t.Fatal(err) - } + require.NoError(t, sm.Apply(map[ids.ID]*atomic.Requests{vm.ctx.ChainID: {PutRequests: elems}})) hrp := constants.GetHRP(vm.ctx.NetworkID) xAddr, err := vm.FormatLocalAddress(rawAddr) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) pAddr, err := vm.FormatAddress(constants.PlatformChainID, rawAddr) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) unknownChainAddr, err := address.Format("R", hrp, rawAddr.Bytes()) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) xEmptyAddr, err := vm.FormatLocalAddress(rawEmptyAddr) - if err != nil { - t.Fatal(err) - } + require.NoError(t, err) tests := []struct { - label string - count int - shouldErr bool - args *api.GetUTXOsArgs + label string + count int + expectedErr error + args *api.GetUTXOsArgs }{ { - label: "invalid address: ''", - shouldErr: true, + label: "invalid address: ''", + expectedErr: address.ErrNoSeparator, args: &api.GetUTXOsArgs{ Addresses: []string{""}, }, }, { - label: "invalid address: '-'", - shouldErr: true, + label: "invalid address: '-'", + expectedErr: bech32.ErrInvalidLength(0), args: &api.GetUTXOsArgs{ Addresses: []string{"-"}, }, }, { - label: "invalid address: 'foo'", - shouldErr: true, + label: "invalid address: 'foo'", + expectedErr: address.ErrNoSeparator, args: 
&api.GetUTXOsArgs{ Addresses: []string{"foo"}, }, }, { - label: "invalid address: 'foo-bar'", - shouldErr: true, + label: "invalid address: 'foo-bar'", + expectedErr: bech32.ErrInvalidLength(3), args: &api.GetUTXOsArgs{ Addresses: []string{"foo-bar"}, }, }, { - label: "invalid address: ''", - shouldErr: true, + label: "invalid address: ''", + expectedErr: address.ErrNoSeparator, args: &api.GetUTXOsArgs{ Addresses: []string{vm.ctx.ChainID.String()}, }, }, { - label: "invalid address: '-'", - shouldErr: true, + label: "invalid address: '-'", + expectedErr: bech32.ErrInvalidLength(0), args: &api.GetUTXOsArgs{ Addresses: []string{fmt.Sprintf("%s-", vm.ctx.ChainID.String())}, }, }, { - label: "invalid address: '-'", - shouldErr: true, + label: "invalid address: '-'", + expectedErr: ids.ErrNoIDWithAlias, args: &api.GetUTXOsArgs{ Addresses: []string{unknownChainAddr}, }, }, { - label: "no addresses", - shouldErr: true, - args: &api.GetUTXOsArgs{}, + label: "no addresses", + expectedErr: errNoAddresses, + args: &api.GetUTXOsArgs{}, }, { label: "get all X-chain UTXOs", @@ -1842,9 +1801,9 @@ func TestServiceGetUTXOs(t *testing.T) { }, }, { - label: "invalid source chain ID", - shouldErr: true, - count: numUTXOs, + label: "invalid source chain ID", + expectedErr: ids.ErrNoIDWithAlias, + count: numUTXOs, args: &api.GetUTXOsArgs{ Addresses: []string{ xAddr, @@ -1863,8 +1822,8 @@ func TestServiceGetUTXOs(t *testing.T) { }, }, { - label: "get UTXOs from multiple chains", - shouldErr: true, + label: "get UTXOs from multiple chains", + expectedErr: avax.ErrMismatchedChainIDs, args: &api.GetUTXOsArgs{ Addresses: []string{ xAddr, @@ -1873,8 +1832,8 @@ func TestServiceGetUTXOs(t *testing.T) { }, }, { - label: "get UTXOs for an address on a different chain", - shouldErr: true, + label: "get UTXOs for an address on a different chain", + expectedErr: avax.ErrMismatchedChainIDs, args: &api.GetUTXOsArgs{ Addresses: []string{ pAddr, @@ -1884,57 +1843,44 @@ func TestServiceGetUTXOs(t *testing.T) { } for _, test := range tests { t.Run(test.label, func(t *testing.T) { + require := require.New(t) reply := &api.GetUTXOsReply{} err := s.GetUTXOs(nil, test.args, reply) - if err != nil { - if !test.shouldErr { - t.Fatal(err) - } + require.ErrorIs(err, test.expectedErr) + if test.expectedErr != nil { return } - if test.shouldErr { - t.Fatal("should have erred") - } - if test.count != len(reply.UTXOs) { - t.Fatalf("Expected %d utxos, got %d", test.count, len(reply.UTXOs)) - } + require.Len(reply.UTXOs, test.count) }) } } func TestGetAssetDescription(t *testing.T) { + require := require.New(t) + _, vm, s, _, genesisTx := setup(t, true) defer func() { - if err := vm.Shutdown(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(vm.Shutdown(context.Background())) vm.ctx.Lock.Unlock() }() avaxAssetID := genesisTx.ID() reply := GetAssetDescriptionReply{} - err := s.GetAssetDescription(nil, &GetAssetDescriptionArgs{ + require.NoError(s.GetAssetDescription(nil, &GetAssetDescriptionArgs{ AssetID: avaxAssetID.String(), - }, &reply) - if err != nil { - t.Fatal(err) - } + }, &reply)) - if reply.Name != "AVAX" { - t.Fatalf("Wrong name returned from GetAssetDescription %s", reply.Name) - } - if reply.Symbol != "SYMB" { - t.Fatalf("Wrong name returned from GetAssetDescription %s", reply.Symbol) - } + require.Equal("AVAX", reply.Name) + require.Equal("SYMB", reply.Symbol) } func TestGetBalance(t *testing.T) { + require := require.New(t) + _, vm, s, _, genesisTx := setup(t, true) defer func() { - if err := 
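TestServiceGetUTXOs replaces the shouldErr bool in its table with an expectedErr error, asserts it with require.ErrorIs, and returns early on the failure rows. Since require.ErrorIs(err, nil) passes exactly when err is nil, the success rows need no special casing. A sketch with a hypothetical double function and errBadInput sentinel:

package example

import (
	"errors"
	"testing"

	"github.com/stretchr/testify/require"
)

// errBadInput is a hypothetical sentinel; double stands in for the service call.
var errBadInput = errors.New("bad input")

func double(n int) (int, error) {
	if n < 0 {
		return 0, errBadInput
	}
	return 2 * n, nil
}

func TestDouble(t *testing.T) {
	tests := []struct {
		label       string
		in, want    int
		expectedErr error
	}{
		{label: "ok", in: 2, want: 4},
		{label: "negative", in: -1, expectedErr: errBadInput},
	}
	for _, test := range tests {
		t.Run(test.label, func(t *testing.T) {
			require := require.New(t)

			got, err := double(test.in)
			require.ErrorIs(err, test.expectedErr)
			if test.expectedErr != nil {
				return // error rows have no result to check
			}
			require.Equal(test.want, got)
		})
	}
}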
vm.Shutdown(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(vm.Shutdown(context.Background())) vm.ctx.Lock.Unlock() }() @@ -1942,46 +1888,34 @@ func TestGetBalance(t *testing.T) { reply := GetBalanceReply{} addrStr, err := vm.FormatLocalAddress(keys[0].PublicKey().Address()) - if err != nil { - t.Fatal(err) - } - err = s.GetBalance(nil, &GetBalanceArgs{ + require.NoError(err) + require.NoError(s.GetBalance(nil, &GetBalanceArgs{ Address: addrStr, AssetID: avaxAssetID.String(), - }, &reply) - if err != nil { - t.Fatal(err) - } + }, &reply)) - if uint64(reply.Balance) != startBalance { - t.Fatalf("Wrong balance returned from GetBalance %d", reply.Balance) - } + require.Equal(startBalance, uint64(reply.Balance)) } func TestCreateFixedCapAsset(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { + require := require.New(t) _, vm, s, _, _ := setupWithKeys(t, tc.avaxAsset) defer func() { - if err := vm.Shutdown(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(vm.Shutdown(context.Background())) vm.ctx.Lock.Unlock() }() reply := AssetIDChangeAddr{} addrStr, err := vm.FormatLocalAddress(keys[0].PublicKey().Address()) - if err != nil { - t.Fatal(err) - } + require.NoError(err) changeAddrStr, err := vm.FormatLocalAddress(testChangeAddr) - if err != nil { - t.Fatal(err) - } + require.NoError(err) _, fromAddrsStr := sampleAddrs(t, vm, addrs) - err = s.CreateFixedCapAsset(nil, &CreateAssetArgs{ + require.NoError(s.CreateFixedCapAsset(nil, &CreateAssetArgs{ JSONSpendHeader: api.JSONSpendHeader{ UserPass: api.UserPass{ Username: username, @@ -1997,12 +1931,8 @@ func TestCreateFixedCapAsset(t *testing.T) { Amount: 123456789, Address: addrStr, }}, - }, &reply) - if err != nil { - t.Fatal(err) - } else if reply.ChangeAddr != changeAddrStr { - t.Fatalf("expected change address %s but got %s", changeAddrStr, reply.ChangeAddr) - } + }, &reply)) + require.Equal(changeAddrStr, reply.ChangeAddr) }) } } @@ -2010,26 +1940,21 @@ func TestCreateFixedCapAsset(t *testing.T) { func TestCreateVariableCapAsset(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { + require := require.New(t) _, vm, s, _, _ := setupWithKeys(t, tc.avaxAsset) defer func() { - if err := vm.Shutdown(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(vm.Shutdown(context.Background())) vm.ctx.Lock.Unlock() }() reply := AssetIDChangeAddr{} minterAddrStr, err := vm.FormatLocalAddress(keys[0].PublicKey().Address()) - if err != nil { - t.Fatal(err) - } + require.NoError(err) _, fromAddrsStr := sampleAddrs(t, vm, addrs) changeAddrStr := fromAddrsStr[0] - if err != nil { - t.Fatal(err) - } + require.NoError(err) - err = s.CreateVariableCapAsset(nil, &CreateAssetArgs{ + require.NoError(s.CreateVariableCapAsset(nil, &CreateAssetArgs{ JSONSpendHeader: api.JSONSpendHeader{ UserPass: api.UserPass{ Username: username, @@ -2048,23 +1973,15 @@ func TestCreateVariableCapAsset(t *testing.T) { }, }, }, - }, &reply) - if err != nil { - t.Fatal(err) - } else if reply.ChangeAddr != changeAddrStr { - t.Fatalf("expected change address %s but got %s", changeAddrStr, reply.ChangeAddr) - } + }, &reply)) + require.Equal(changeAddrStr, reply.ChangeAddr) createAssetTx := UniqueTx{ vm: vm, txID: reply.AssetID, } - if status := createAssetTx.Status(); status != choices.Processing { - t.Fatalf("CreateVariableCapAssetTx status should have been Processing, but was %s", status) - } - if err := createAssetTx.Accept(context.Background()); err != nil { - 
t.Fatalf("Failed to accept CreateVariableCapAssetTx due to: %s", err) - } + require.Equal(choices.Processing, createAssetTx.Status()) + require.NoError(createAssetTx.Accept(context.Background())) createdAssetID := reply.AssetID.String() // Test minting of the created variable cap asset @@ -2081,23 +1998,16 @@ func TestCreateVariableCapAsset(t *testing.T) { To: minterAddrStr, // Send newly minted tokens to this address } mintReply := &api.JSONTxIDChangeAddr{} - if err := s.Mint(nil, mintArgs, mintReply); err != nil { - t.Fatalf("Failed to mint variable cap asset due to: %s", err) - } else if mintReply.ChangeAddr != changeAddrStr { - t.Fatalf("expected change address %s but got %s", changeAddrStr, mintReply.ChangeAddr) - } + require.NoError(s.Mint(nil, mintArgs, mintReply)) + require.Equal(changeAddrStr, mintReply.ChangeAddr) mintTx := UniqueTx{ vm: vm, txID: mintReply.TxID, } - if status := mintTx.Status(); status != choices.Processing { - t.Fatalf("MintTx status should have been Processing, but was %s", status) - } - if err := mintTx.Accept(context.Background()); err != nil { - t.Fatalf("Failed to accept MintTx due to: %s", err) - } + require.Equal(choices.Processing, mintTx.Status()) + require.NoError(mintTx.Accept(context.Background())) sendArgs := &SendArgs{ JSONSpendHeader: api.JSONSpendHeader{ @@ -2115,11 +2025,8 @@ func TestCreateVariableCapAsset(t *testing.T) { }, } sendReply := &api.JSONTxIDChangeAddr{} - if err := s.Send(nil, sendArgs, sendReply); err != nil { - t.Fatalf("Failed to send newly minted variable cap asset due to: %s", err) - } else if sendReply.ChangeAddr != changeAddrStr { - t.Fatalf("expected change address to be %s but got %s", changeAddrStr, sendReply.ChangeAddr) - } + require.NoError(s.Send(nil, sendArgs, sendReply)) + require.Equal(changeAddrStr, sendReply.ChangeAddr) }) } } @@ -2127,11 +2034,10 @@ func TestCreateVariableCapAsset(t *testing.T) { func TestNFTWorkflow(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { + require := require.New(t) _, vm, s, _, _ := setupWithKeys(t, tc.avaxAsset) defer func() { - if err := vm.Shutdown(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(vm.Shutdown(context.Background())) vm.ctx.Lock.Unlock() }() @@ -2139,9 +2045,7 @@ func TestNFTWorkflow(t *testing.T) { // Test minting of the created variable cap asset addrStr, err := vm.FormatLocalAddress(keys[0].PublicKey().Address()) - if err != nil { - t.Fatal(err) - } + require.NoError(err) createArgs := &CreateNFTAssetArgs{ JSONSpendHeader: api.JSONSpendHeader{ @@ -2164,11 +2068,8 @@ func TestNFTWorkflow(t *testing.T) { }, } createReply := &AssetIDChangeAddr{} - if err := s.CreateNFTAsset(nil, createArgs, createReply); err != nil { - t.Fatalf("Failed to mint variable cap asset due to: %s", err) - } else if createReply.ChangeAddr != fromAddrsStr[0] { - t.Fatalf("expected change address to be %s but got %s", fromAddrsStr[0], createReply.ChangeAddr) - } + require.NoError(s.CreateNFTAsset(nil, createArgs, createReply)) + require.Equal(fromAddrsStr[0], createReply.ChangeAddr) assetID := createReply.AssetID createNFTTx := UniqueTx{ @@ -2176,19 +2077,12 @@ func TestNFTWorkflow(t *testing.T) { txID: createReply.AssetID, } // Accept the transaction so that we can Mint NFTs for the test - if createNFTTx.Status() != choices.Processing { - t.Fatalf("CreateNFTTx should have been processing after creating the NFT") - } - if err := createNFTTx.Accept(context.Background()); err != nil { - t.Fatalf("Failed to accept CreateNFT transaction: %s", 
err) - } else if err := verifyTxFeeDeducted(t, s, fromAddrs, 1); err != nil { - t.Fatal(err) - } + require.Equal(choices.Processing, createNFTTx.Status()) + require.NoError(createNFTTx.Accept(context.Background())) + require.NoError(verifyTxFeeDeducted(t, s, fromAddrs, 1)) payload, err := formatting.Encode(formatting.Hex, []byte{1, 2, 3, 4, 5}) - if err != nil { - t.Fatal(err) - } + require.NoError(err) mintArgs := &MintNFTArgs{ JSONSpendHeader: api.JSONSpendHeader{ UserPass: api.UserPass{ @@ -2205,24 +2099,17 @@ func TestNFTWorkflow(t *testing.T) { } mintReply := &api.JSONTxIDChangeAddr{} - if err := s.MintNFT(nil, mintArgs, mintReply); err != nil { - t.Fatalf("MintNFT returned an error: %s", err) - } else if createReply.ChangeAddr != fromAddrsStr[0] { - t.Fatalf("expected change address to be %s but got %s", fromAddrsStr[0], mintReply.ChangeAddr) - } + require.NoError(s.MintNFT(nil, mintArgs, mintReply)) + require.Equal(fromAddrsStr[0], createReply.ChangeAddr) mintNFTTx := UniqueTx{ vm: vm, txID: mintReply.TxID, } - if mintNFTTx.Status() != choices.Processing { - t.Fatal("MintNFTTx should have been processing after minting the NFT") - } + require.Equal(choices.Processing, mintNFTTx.Status()) // Accept the transaction so that we can send the newly minted NFT - if err := mintNFTTx.Accept(context.Background()); err != nil { - t.Fatalf("Failed to accept MintNFTTx: %s", err) - } + require.NoError(mintNFTTx.Accept(context.Background())) sendArgs := &SendNFTArgs{ JSONSpendHeader: api.JSONSpendHeader{ @@ -2238,29 +2125,24 @@ func TestNFTWorkflow(t *testing.T) { To: addrStr, } sendReply := &api.JSONTxIDChangeAddr{} - if err := s.SendNFT(nil, sendArgs, sendReply); err != nil { - t.Fatalf("Failed to send NFT due to: %s", err) - } else if sendReply.ChangeAddr != fromAddrsStr[0] { - t.Fatalf("expected change address to be %s but got %s", fromAddrsStr[0], sendReply.ChangeAddr) - } + require.NoError(s.SendNFT(nil, sendArgs, sendReply)) + require.Equal(fromAddrsStr[0], sendReply.ChangeAddr) }) } } func TestImportExportKey(t *testing.T) { + require := require.New(t) + _, vm, s, _, _ := setup(t, true) defer func() { - if err := vm.Shutdown(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(vm.Shutdown(context.Background())) vm.ctx.Lock.Unlock() }() factory := secp256k1.Factory{} sk, err := factory.NewPrivateKey() - if err != nil { - t.Fatalf("problem generating private key: %s", err) - } + require.NoError(err) importArgs := &ImportKeyArgs{ UserPass: api.UserPass{ @@ -2270,14 +2152,10 @@ func TestImportExportKey(t *testing.T) { PrivateKey: sk, } importReply := &api.JSONAddress{} - if err := s.ImportKey(nil, importArgs, importReply); err != nil { - t.Fatal(err) - } + require.NoError(s.ImportKey(nil, importArgs, importReply)) addrStr, err := vm.FormatLocalAddress(sk.PublicKey().Address()) - if err != nil { - t.Fatal(err) - } + require.NoError(err) exportArgs := &ExportKeyArgs{ UserPass: api.UserPass{ Username: username, @@ -2286,30 +2164,23 @@ func TestImportExportKey(t *testing.T) { Address: addrStr, } exportReply := &ExportKeyReply{} - if err := s.ExportKey(nil, exportArgs, exportReply); err != nil { - t.Fatal(err) - } - - if !bytes.Equal(sk.Bytes(), exportReply.PrivateKey.Bytes()) { - t.Fatal("Unexpected key was found in ExportKeyReply") - } + require.NoError(s.ExportKey(nil, exportArgs, exportReply)) + require.Equal(sk.Bytes(), exportReply.PrivateKey.Bytes()) } func TestImportAVMKeyNoDuplicates(t *testing.T) { + require := require.New(t) + _, vm, s, _, _ := setup(t, true) ctx := 
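TestImportExportKey drops the manual bytes.Equal comparison: require.Equal special-cases []byte and compares the slices byte-for-byte, so the separate import of the bytes package goes away. A sketch with hypothetical key bytes:

package example

import (
	"testing"

	"github.com/stretchr/testify/require"
)

func TestKeyRoundTrip(t *testing.T) {
	require := require.New(t)

	exported := []byte{0x01, 0x02, 0x03} // hypothetical key bytes
	imported := []byte{0x01, 0x02, 0x03}

	// testify compares []byte values with bytes.Equal internally.
	require.Equal(exported, imported)
}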
vm.ctx defer func() { - if err := vm.Shutdown(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(vm.Shutdown(context.Background())) ctx.Lock.Unlock() }() factory := secp256k1.Factory{} sk, err := factory.NewPrivateKey() - if err != nil { - t.Fatalf("problem generating private key: %s", err) - } + require.NoError(err) args := ImportKeyArgs{ UserPass: api.UserPass{ Username: username, @@ -2318,52 +2189,35 @@ func TestImportAVMKeyNoDuplicates(t *testing.T) { PrivateKey: sk, } reply := api.JSONAddress{} - if err := s.ImportKey(nil, &args, &reply); err != nil { - t.Fatal(err) - } + require.NoError(s.ImportKey(nil, &args, &reply)) expectedAddress, err := vm.FormatLocalAddress(sk.PublicKey().Address()) - if err != nil { - t.Fatal(err) - } + require.NoError(err) - if reply.Address != expectedAddress { - t.Fatalf("Reply address: %s did not match expected address: %s", reply.Address, expectedAddress) - } + require.Equal(expectedAddress, reply.Address) reply2 := api.JSONAddress{} - if err := s.ImportKey(nil, &args, &reply2); err != nil { - t.Fatal(err) - } + require.NoError(s.ImportKey(nil, &args, &reply2)) - if reply2.Address != expectedAddress { - t.Fatalf("Reply address: %s did not match expected address: %s", reply2.Address, expectedAddress) - } + require.Equal(expectedAddress, reply2.Address) addrsArgs := api.UserPass{ Username: username, Password: password, } addrsReply := api.JSONAddresses{} - if err := s.ListAddresses(nil, &addrsArgs, &addrsReply); err != nil { - t.Fatal(err) - } + require.NoError(s.ListAddresses(nil, &addrsArgs, &addrsReply)) - if len(addrsReply.Addresses) != 1 { - t.Fatal("Importing the same key twice created duplicate addresses") - } - - if addrsReply.Addresses[0] != expectedAddress { - t.Fatal("List addresses returned an incorrect address") - } + require.Len(addrsReply.Addresses, 1) + require.Equal(expectedAddress, addrsReply.Addresses[0]) } func TestSend(t *testing.T) { + require := require.New(t) + _, vm, s, _, genesisTx := setupWithKeys(t, true) defer func() { - if err := vm.Shutdown(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(vm.Shutdown(context.Background())) vm.ctx.Lock.Unlock() }() @@ -2371,13 +2225,9 @@ func TestSend(t *testing.T) { addr := keys[0].PublicKey().Address() addrStr, err := vm.FormatLocalAddress(addr) - if err != nil { - t.Fatal(err) - } + require.NoError(err) changeAddrStr, err := vm.FormatLocalAddress(testChangeAddr) - if err != nil { - t.Fatal(err) - } + require.NoError(err) _, fromAddrsStr := sampleAddrs(t, vm, addrs) args := &SendArgs{ @@ -2397,30 +2247,21 @@ func TestSend(t *testing.T) { } reply := &api.JSONTxIDChangeAddr{} vm.timer.Cancel() - if err := s.Send(nil, args, reply); err != nil { - t.Fatalf("Failed to send transaction: %s", err) - } else if reply.ChangeAddr != changeAddrStr { - t.Fatalf("expected change address to be %s but got %s", changeAddrStr, reply.ChangeAddr) - } + require.NoError(s.Send(nil, args, reply)) + require.Equal(changeAddrStr, reply.ChangeAddr) pendingTxs := vm.txs - if len(pendingTxs) != 1 { - t.Fatalf("Expected to find 1 pending tx after send, but found %d", len(pendingTxs)) - } - - if reply.TxID != pendingTxs[0].ID() { - t.Fatal("Transaction ID returned by Send does not match the transaction found in vm's pending transactions") - } + require.Len(pendingTxs, 1) + require.Equal(pendingTxs[0].ID(), reply.TxID) } func TestSendMultiple(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { + require := require.New(t) _, vm, s, _, genesisTx := 
setupWithKeys(t, tc.avaxAsset) defer func() { - if err := vm.Shutdown(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(vm.Shutdown(context.Background())) vm.ctx.Lock.Unlock() }() @@ -2428,13 +2269,9 @@ func TestSendMultiple(t *testing.T) { addr := keys[0].PublicKey().Address() addrStr, err := vm.FormatLocalAddress(addr) - if err != nil { - t.Fatal(err) - } + require.NoError(err) changeAddrStr, err := vm.FormatLocalAddress(testChangeAddr) - if err != nil { - t.Fatal(err) - } + require.NoError(err) _, fromAddrsStr := sampleAddrs(t, vm, addrs) args := &SendMultipleArgs{ @@ -2461,34 +2298,24 @@ func TestSendMultiple(t *testing.T) { } reply := &api.JSONTxIDChangeAddr{} vm.timer.Cancel() - if err := s.SendMultiple(nil, args, reply); err != nil { - t.Fatalf("Failed to send transaction: %s", err) - } else if reply.ChangeAddr != changeAddrStr { - t.Fatalf("expected change address to be %s but got %s", changeAddrStr, reply.ChangeAddr) - } + require.NoError(s.SendMultiple(nil, args, reply)) + require.Equal(changeAddrStr, reply.ChangeAddr) pendingTxs := vm.txs - if len(pendingTxs) != 1 { - t.Fatalf("Expected to find 1 pending tx after send, but found %d", len(pendingTxs)) - } - - if reply.TxID != pendingTxs[0].ID() { - t.Fatal("Transaction ID returned by SendMultiple does not match the transaction found in vm's pending transactions") - } - - if _, err := vm.GetTx(context.Background(), reply.TxID); err != nil { - t.Fatalf("Failed to retrieve created transaction: %s", err) - } + require.Len(pendingTxs, 1) + require.Equal(pendingTxs[0].ID(), reply.TxID) + _, err = vm.GetTx(context.Background(), reply.TxID) + require.NoError(err) }) } } func TestCreateAndListAddresses(t *testing.T) { + require := require.New(t) + _, vm, s, _, _ := setup(t, true) defer func() { - if err := vm.Shutdown(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(vm.Shutdown(context.Background())) vm.ctx.Lock.Unlock() }() @@ -2498,9 +2325,7 @@ func TestCreateAndListAddresses(t *testing.T) { } createReply := &api.JSONAddress{} - if err := s.CreateAddress(nil, createArgs, createReply); err != nil { - t.Fatalf("Failed to create address: %s", err) - } + require.NoError(s.CreateAddress(nil, createArgs, createReply)) newAddr := createReply.Address @@ -2510,26 +2335,18 @@ func TestCreateAndListAddresses(t *testing.T) { } listReply := &api.JSONAddresses{} - if err := s.ListAddresses(nil, listArgs, listReply); err != nil { - t.Fatalf("Failed to list addresses: %s", err) - } + require.NoError(s.ListAddresses(nil, listArgs, listReply)) - for _, addr := range listReply.Addresses { - if addr == newAddr { - return - } - } - t.Fatalf("Failed to find newly created address among %d addresses", len(listReply.Addresses)) + require.Contains(listReply.Addresses, newAddr) } func TestImport(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { + require := require.New(t) _, vm, s, m, genesisTx := setupWithKeys(t, tc.avaxAsset) defer func() { - if err := vm.Shutdown(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(vm.Shutdown(context.Background())) vm.ctx.Lock.Unlock() }() assetID := genesisTx.ID() @@ -2547,26 +2364,20 @@ func TestImport(t *testing.T) { }, } utxoBytes, err := vm.parser.Codec().Marshal(txs.CodecVersion, utxo) - if err != nil { - t.Fatal(err) - } + require.NoError(err) peerSharedMemory := m.NewSharedMemory(constants.PlatformChainID) utxoID := utxo.InputID() - if err := peerSharedMemory.Apply(map[ids.ID]*atomic.Requests{vm.ctx.ChainID: 
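TestCreateAndListAddresses replaces the find-or-t.Fatalf loop with require.Contains, which asserts membership directly on strings, slices, and maps. A sketch:

package example

import (
	"testing"

	"github.com/stretchr/testify/require"
)

func TestContains(t *testing.T) {
	require := require.New(t)

	addresses := []string{"X-addr1", "X-addr2", "X-addr3"} // hypothetical reply
	require.Contains(addresses, "X-addr2")                 // replaces the manual scan
}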
{PutRequests: []*atomic.Element{{ + require.NoError(peerSharedMemory.Apply(map[ids.ID]*atomic.Requests{vm.ctx.ChainID: {PutRequests: []*atomic.Element{{ Key: utxoID[:], Value: utxoBytes, Traits: [][]byte{ addr0.Bytes(), }, - }}}}); err != nil { - t.Fatal(err) - } + }}}})) addrStr, err := vm.FormatLocalAddress(keys[0].PublicKey().Address()) - if err != nil { - t.Fatal(err) - } + require.NoError(err) args := &ImportArgs{ UserPass: api.UserPass{ Username: username, @@ -2576,15 +2387,12 @@ func TestImport(t *testing.T) { To: addrStr, } reply := &api.JSONTxID{} - if err := s.Import(nil, args, reply); err != nil { - t.Fatalf("Failed to import AVAX due to %s", err) - } + require.NoError(s.Import(nil, args, reply)) }) } } func TestServiceGetBlock(t *testing.T) { - require := require.New(t) ctrl := gomock.NewController(t) defer ctrl.Finish() @@ -2658,7 +2466,7 @@ func TestServiceGetBlock(t *testing.T) { block.EXPECT().Bytes().Return(blockBytes) expected, err := formatting.Encode(formatting.Hex, blockBytes) - require.NoError(err) + require.NoError(t, err) manager := executor.NewMockManager(ctrl) manager.EXPECT().GetStatelessBlock(blockID).Return(block, nil) @@ -2682,7 +2490,7 @@ func TestServiceGetBlock(t *testing.T) { block.EXPECT().Bytes().Return(blockBytes) expected, err := formatting.Encode(formatting.HexC, blockBytes) - require.NoError(err) + require.NoError(t, err) manager := executor.NewMockManager(ctrl) manager.EXPECT().GetStatelessBlock(blockID).Return(block, nil) @@ -2706,7 +2514,7 @@ func TestServiceGetBlock(t *testing.T) { block.EXPECT().Bytes().Return(blockBytes) expected, err := formatting.Encode(formatting.HexNC, blockBytes) - require.NoError(err) + require.NoError(t, err) manager := executor.NewMockManager(ctrl) manager.EXPECT().GetStatelessBlock(blockID).Return(block, nil) @@ -2726,6 +2534,8 @@ func TestServiceGetBlock(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { + require := require.New(t) + service, expected := tt.serviceAndExpectedBlockFunc(ctrl) args := &api.GetBlockArgs{ @@ -2735,16 +2545,16 @@ func TestServiceGetBlock(t *testing.T) { reply := &api.GetBlockResponse{} err := service.GetBlock(nil, args, reply) require.ErrorIs(err, tt.expectedErr) - if err == nil { - require.Equal(tt.encoding, reply.Encoding) - require.Equal(expected, reply.Block) + if tt.expectedErr != nil { + return } + require.Equal(tt.encoding, reply.Encoding) + require.Equal(expected, reply.Block) }) } } func TestServiceGetBlockByHeight(t *testing.T) { - require := require.New(t) ctrl := gomock.NewController(t) defer ctrl.Finish() @@ -2850,7 +2660,7 @@ func TestServiceGetBlockByHeight(t *testing.T) { state.EXPECT().GetBlockID(blockHeight).Return(blockID, nil) expected, err := formatting.Encode(formatting.Hex, blockBytes) - require.NoError(err) + require.NoError(t, err) manager := executor.NewMockManager(ctrl) manager.EXPECT().GetStatelessBlock(blockID).Return(block, nil) @@ -2878,7 +2688,7 @@ func TestServiceGetBlockByHeight(t *testing.T) { state.EXPECT().GetBlockID(blockHeight).Return(blockID, nil) expected, err := formatting.Encode(formatting.HexC, blockBytes) - require.NoError(err) + require.NoError(t, err) manager := executor.NewMockManager(ctrl) manager.EXPECT().GetStatelessBlock(blockID).Return(block, nil) @@ -2906,7 +2716,7 @@ func TestServiceGetBlockByHeight(t *testing.T) { state.EXPECT().GetBlockID(blockHeight).Return(blockID, nil) expected, err := formatting.Encode(formatting.HexNC, blockBytes) - require.NoError(err) + require.NoError(t, err) manager := 
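In the mocked block-service tests, require := require.New(t) moves inside each t.Run closure so the assertions bind to the subtest's *testing.T and a failure aborts only that subtest, not the whole parent. A sketch:

package example

import (
	"fmt"
	"testing"

	"github.com/stretchr/testify/require"
)

func TestPerSubtest(t *testing.T) {
	for _, n := range []int{1, 2, 3} {
		n := n // capture for the closure
		t.Run(fmt.Sprintf("n=%d", n), func(t *testing.T) {
			require := require.New(t) // bound to the subtest's t

			require.Positive(n) // a failure aborts only this subtest
		})
	}
}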
executor.NewMockManager(ctrl) manager.EXPECT().GetStatelessBlock(blockID).Return(block, nil) @@ -2927,6 +2737,8 @@ func TestServiceGetBlockByHeight(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { + require := require.New(t) + service, expected := tt.serviceAndExpectedBlockFunc(ctrl) args := &api.GetBlockByHeightArgs{ @@ -2945,7 +2757,6 @@ func TestServiceGetBlockByHeight(t *testing.T) { } func TestServiceGetHeight(t *testing.T) { - require := require.New(t) ctrl := gomock.NewController(t) defer ctrl.Finish() @@ -3019,14 +2830,16 @@ func TestServiceGetHeight(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { + require := require.New(t) service := tt.serviceFunc(ctrl) reply := &api.GetHeightResponse{} err := service.GetHeight(nil, nil, reply) require.ErrorIs(err, tt.expectedErr) - if err == nil { - require.Equal(json.Uint64(blockHeight), reply.Height) + if tt.expectedErr != nil { + return } + require.Equal(json.Uint64(blockHeight), reply.Height) }) } } diff --git a/vms/avm/state_test.go b/vms/avm/state_test.go index 32a3874f004..65ba2821262 100644 --- a/vms/avm/state_test.go +++ b/vms/avm/state_test.go @@ -22,6 +22,8 @@ import ( ) func TestSetsAndGets(t *testing.T) { + require := require.New(t) + _, _, vm, _ := GenesisVMWithArgs( t, []*common.Fx{{ @@ -37,9 +39,7 @@ func TestSetsAndGets(t *testing.T) { ) ctx := vm.ctx defer func() { - if err := vm.Shutdown(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(vm.Shutdown(context.Background())) ctx.Lock.Unlock() }() @@ -74,9 +74,7 @@ func TestSetsAndGets(t *testing.T) { }, }}, }}} - if err := tx.SignSECP256K1Fx(vm.parser.Codec(), [][]*secp256k1.PrivateKey{{keys[0]}}); err != nil { - t.Fatal(err) - } + require.NoError(tx.SignSECP256K1Fx(vm.parser.Codec(), [][]*secp256k1.PrivateKey{{keys[0]}})) txID := tx.ID() @@ -85,30 +83,20 @@ func TestSetsAndGets(t *testing.T) { state.AddStatus(txID, choices.Accepted) resultUTXO, err := state.GetUTXO(utxoID) - if err != nil { - t.Fatal(err) - } + require.NoError(err) resultTx, err := state.GetTx(txID) - if err != nil { - t.Fatal(err) - } + require.NoError(err) resultStatus, err := state.GetStatus(txID) - if err != nil { - t.Fatal(err) - } + require.NoError(err) - if resultUTXO.OutputIndex != 1 { - t.Fatalf("Wrong UTXO returned") - } - if resultTx.ID() != tx.ID() { - t.Fatalf("Wrong Tx returned") - } - if resultStatus != choices.Accepted { - t.Fatalf("Wrong Status returned") - } + require.Equal(uint32(1), resultUTXO.OutputIndex) + require.Equal(tx.ID(), resultTx.ID()) + require.Equal(choices.Accepted, resultStatus) } func TestFundingNoAddresses(t *testing.T) { + require := require.New(t) + _, _, vm, _ := GenesisVMWithArgs( t, []*common.Fx{{ @@ -124,9 +112,7 @@ func TestFundingNoAddresses(t *testing.T) { ) ctx := vm.ctx defer func() { - if err := vm.Shutdown(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(vm.Shutdown(context.Background())) ctx.Lock.Unlock() }() @@ -146,6 +132,8 @@ func TestFundingNoAddresses(t *testing.T) { } func TestFundingAddresses(t *testing.T) { + require := require.New(t) + _, _, vm, _ := GenesisVMWithArgs( t, []*common.Fx{{ @@ -161,9 +149,7 @@ func TestFundingAddresses(t *testing.T) { ) ctx := vm.ctx defer func() { - if err := vm.Shutdown(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(vm.Shutdown(context.Background())) ctx.Lock.Unlock() }() @@ -181,17 +167,17 @@ func TestFundingAddresses(t *testing.T) { } state.AddUTXO(utxo) - require.NoError(t, 
state.Commit()) + require.NoError(state.Commit()) utxos, err := state.UTXOIDs([]byte{0}, ids.Empty, math.MaxInt32) - require.NoError(t, err) - require.Len(t, utxos, 1) - require.Equal(t, utxo.InputID(), utxos[0]) + require.NoError(err) + require.Len(utxos, 1) + require.Equal(utxo.InputID(), utxos[0]) state.DeleteUTXO(utxo.InputID()) - require.NoError(t, state.Commit()) + require.NoError(state.Commit()) utxos, err = state.UTXOIDs([]byte{0}, ids.Empty, math.MaxInt32) - require.NoError(t, err) - require.Empty(t, utxos) + require.NoError(err) + require.Empty(utxos) } diff --git a/vms/avm/states/state_test.go b/vms/avm/states/state_test.go index 149ad0927f6..2c79750bb12 100644 --- a/vms/avm/states/state_test.go +++ b/vms/avm/states/state_test.go @@ -95,18 +95,20 @@ func (v *versions) GetState(blkID ids.ID) (Chain, bool) { } func TestState(t *testing.T) { + require := require.New(t) + db := memdb.New() vdb := versiondb.New(db) s, err := New(vdb, parser, prometheus.NewRegistry()) - require.NoError(t, err) + require.NoError(err) s.AddUTXO(populatedUTXO) s.AddTx(populatedTx) s.AddBlock(populatedBlk) - require.NoError(t, s.Commit()) + require.NoError(s.Commit()) s, err = New(vdb, parser, prometheus.NewRegistry()) - require.NoError(t, err) + require.NoError(err) ChainUTXOTest(t, s) ChainTxTest(t, s) @@ -114,15 +116,17 @@ func TestState(t *testing.T) { } func TestDiff(t *testing.T) { + require := require.New(t) + db := memdb.New() vdb := versiondb.New(db) s, err := New(vdb, parser, prometheus.NewRegistry()) - require.NoError(t, err) + require.NoError(err) s.AddUTXO(populatedUTXO) s.AddTx(populatedTx) s.AddBlock(populatedBlk) - require.NoError(t, s.Commit()) + require.NoError(s.Commit()) parentID := ids.GenerateTestID() d, err := NewDiff(parentID, &versions{ @@ -130,7 +134,7 @@ func TestDiff(t *testing.T) { parentID: s, }, }) - require.NoError(t, err) + require.NoError(err) ChainUTXOTest(t, d) ChainTxTest(t, d) diff --git a/vms/avm/static_service_test.go b/vms/avm/static_service_test.go index ed2b7e43512..be68ca8ab38 100644 --- a/vms/avm/static_service_test.go +++ b/vms/avm/static_service_test.go @@ -6,6 +6,8 @@ package avm import ( "testing" + "github.com/stretchr/testify/require" + "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/formatting" @@ -23,17 +25,15 @@ var addrStrArray = []string{ var testHRP = constants.NetworkIDToHRP[constants.UnitTestID] func TestBuildGenesis(t *testing.T) { + require := require.New(t) + ss := CreateStaticService() addrMap := map[string]string{} for _, addrStr := range addrStrArray { addr, err := ids.ShortFromString(addrStr) - if err != nil { - t.Fatal(err) - } + require.NoError(err) addrMap[addrStr], err = address.FormatBech32(testHRP, addr[:]) - if err != nil { - t.Fatal(err) - } + require.NoError(err) } args := BuildGenesisArgs{ Encoding: formatting.Hex, @@ -101,8 +101,5 @@ func TestBuildGenesis(t *testing.T) { }, } reply := BuildGenesisReply{} - err := ss.BuildGenesis(nil, &args, &reply) - if err != nil { - t.Fatal(err) - } + require.NoError(ss.BuildGenesis(nil, &args, &reply)) } diff --git a/vms/avm/txs/executor/syntactic_verifier_test.go b/vms/avm/txs/executor/syntactic_verifier_test.go index 37dd48bf47e..e201eafd295 100644 --- a/vms/avm/txs/executor/syntactic_verifier_test.go +++ b/vms/avm/txs/executor/syntactic_verifier_test.go @@ -408,15 +408,13 @@ func TestSyntacticVerifierBaseTx(t *testing.T) { } for _, test := range tests { t.Run(test.name, func(t *testing.T) { - require := 
require.New(t) - tx := test.txFunc() verifier := &SyntacticVerifier{ Backend: backend, Tx: tx, } err := tx.Unsigned.Visit(verifier) - require.ErrorIs(err, test.err) + require.ErrorIs(t, err, test.err) }) } } @@ -1017,15 +1015,13 @@ func TestSyntacticVerifierCreateAssetTx(t *testing.T) { } for _, test := range tests { t.Run(test.name, func(t *testing.T) { - require := require.New(t) - tx := test.txFunc() verifier := &SyntacticVerifier{ Backend: backend, Tx: tx, } err := tx.Unsigned.Visit(verifier) - require.ErrorIs(err, test.err) + require.ErrorIs(t, err, test.err) }) } } @@ -1506,15 +1502,13 @@ func TestSyntacticVerifierOperationTx(t *testing.T) { } for _, test := range tests { t.Run(test.name, func(t *testing.T) { - require := require.New(t) - tx := test.txFunc() verifier := &SyntacticVerifier{ Backend: backend, Tx: tx, } err := tx.Unsigned.Visit(verifier) - require.ErrorIs(err, test.err) + require.ErrorIs(t, err, test.err) }) } } @@ -1906,15 +1900,13 @@ func TestSyntacticVerifierImportTx(t *testing.T) { } for _, test := range tests { t.Run(test.name, func(t *testing.T) { - require := require.New(t) - tx := test.txFunc() verifier := &SyntacticVerifier{ Backend: backend, Tx: tx, } err := tx.Unsigned.Visit(verifier) - require.ErrorIs(err, test.err) + require.ErrorIs(t, err, test.err) }) } } @@ -2318,15 +2310,13 @@ func TestSyntacticVerifierExportTx(t *testing.T) { } for _, test := range tests { t.Run(test.name, func(t *testing.T) { - require := require.New(t) - tx := test.txFunc() verifier := &SyntacticVerifier{ Backend: backend, Tx: tx, } err := tx.Unsigned.Visit(verifier) - require.ErrorIs(err, test.err) + require.ErrorIs(t, err, test.err) }) } } diff --git a/vms/avm/txs/initial_state_test.go b/vms/avm/txs/initial_state_test.go index 1d294870d3a..48d17affa0c 100644 --- a/vms/avm/txs/initial_state_test.go +++ b/vms/avm/txs/initial_state_test.go @@ -4,7 +4,6 @@ package txs import ( - "bytes" "errors" "testing" @@ -21,14 +20,12 @@ import ( var errTest = errors.New("non-nil error") func TestInitialStateVerifySerialization(t *testing.T) { + require := require.New(t) + c := linearcodec.NewDefault() - if err := c.RegisterType(&secp256k1fx.TransferOutput{}); err != nil { - t.Fatal(err) - } + require.NoError(c.RegisterType(&secp256k1fx.TransferOutput{})) m := codec.NewDefaultManager() - if err := m.RegisterCodec(CodecVersion, c); err != nil { - t.Fatal(err) - } + require.NoError(m.RegisterCodec(CodecVersion, c)) expected := []byte{ // Codec version: @@ -75,93 +72,78 @@ func TestInitialStateVerifySerialization(t *testing.T) { } isBytes, err := m.Marshal(CodecVersion, is) - if err != nil { - t.Fatal(err) - } - if !bytes.Equal(isBytes, expected) { - t.Fatalf("Expected:\n0x%x\nResult:\n0x%x", - expected, - isBytes, - ) - } + require.NoError(err) + require.Equal(expected, isBytes) } func TestInitialStateVerifyNil(t *testing.T) { + require := require.New(t) + c := linearcodec.NewDefault() m := codec.NewDefaultManager() - if err := m.RegisterCodec(CodecVersion, c); err != nil { - t.Fatal(err) - } + require.NoError(m.RegisterCodec(CodecVersion, c)) numFxs := 1 is := (*InitialState)(nil) - if err := is.Verify(m, numFxs); err == nil { - t.Fatalf("Should have erred due to nil initial state") - } + err := is.Verify(m, numFxs) + require.ErrorIs(err, ErrNilInitialState) } func TestInitialStateVerifyUnknownFxID(t *testing.T) { + require := require.New(t) + c := linearcodec.NewDefault() m := codec.NewDefaultManager() - if err := m.RegisterCodec(CodecVersion, c); err != nil { - t.Fatal(err) - } + 
require.NoError(m.RegisterCodec(CodecVersion, c)) numFxs := 1 is := InitialState{ FxIndex: 1, } - if err := is.Verify(m, numFxs); err == nil { - t.Fatalf("Should have erred due to unknown FxIndex") - } + err := is.Verify(m, numFxs) + require.ErrorIs(err, ErrUnknownFx) } func TestInitialStateVerifyNilOutput(t *testing.T) { + require := require.New(t) + c := linearcodec.NewDefault() m := codec.NewDefaultManager() - if err := m.RegisterCodec(CodecVersion, c); err != nil { - t.Fatal(err) - } + require.NoError(m.RegisterCodec(CodecVersion, c)) numFxs := 1 is := InitialState{ FxIndex: 0, Outs: []verify.State{nil}, } - if err := is.Verify(m, numFxs); err == nil { - t.Fatalf("Should have erred due to a nil output") - } + err := is.Verify(m, numFxs) + require.ErrorIs(err, ErrNilFxOutput) } func TestInitialStateVerifyInvalidOutput(t *testing.T) { + require := require.New(t) + c := linearcodec.NewDefault() - if err := c.RegisterType(&avax.TestVerifiable{}); err != nil { - t.Fatal(err) - } + require.NoError(c.RegisterType(&avax.TestVerifiable{})) m := codec.NewDefaultManager() - if err := m.RegisterCodec(CodecVersion, c); err != nil { - t.Fatal(err) - } + require.NoError(m.RegisterCodec(CodecVersion, c)) numFxs := 1 is := InitialState{ FxIndex: 0, Outs: []verify.State{&avax.TestVerifiable{Err: errTest}}, } - if err := is.Verify(m, numFxs); err == nil { - t.Fatalf("Should have erred due to an invalid output") - } + err := is.Verify(m, numFxs) + require.ErrorIs(err, errTest) } func TestInitialStateVerifyUnsortedOutputs(t *testing.T) { + require := require.New(t) + c := linearcodec.NewDefault() - if err := c.RegisterType(&avax.TestTransferable{}); err != nil { - t.Fatal(err) - } + require.NoError(c.RegisterType(&avax.TestTransferable{})) m := codec.NewDefaultManager() - if err := m.RegisterCodec(CodecVersion, c); err != nil { - t.Fatal(err) - } + require.NoError(m.RegisterCodec(CodecVersion, c)) numFxs := 1 is := InitialState{ @@ -171,15 +153,10 @@ func TestInitialStateVerifyUnsortedOutputs(t *testing.T) { &avax.TestTransferable{Val: 0}, }, } - if err := is.Verify(m, numFxs); err == nil { - t.Fatalf("Should have erred due to unsorted outputs") - } - + err := is.Verify(m, numFxs) + require.ErrorIs(err, ErrOutputsNotSorted) is.Sort(m) - - if err := is.Verify(m, numFxs); err != nil { - t.Fatal(err) - } + require.NoError(is.Verify(m, numFxs)) } func TestInitialStateLess(t *testing.T) { diff --git a/vms/avm/txs/mempool/mempool_test.go b/vms/avm/txs/mempool/mempool_test.go index 5e69304fe4a..4e1396ac3d4 100644 --- a/vms/avm/txs/mempool/mempool_test.go +++ b/vms/avm/txs/mempool/mempool_test.go @@ -66,7 +66,7 @@ func TestTxsInMempool(t *testing.T) { mempool.RequestBuildBlock() select { case <-toEngine: - t.Fatalf("should not have sent message to engine") + require.FailNow("should not have sent message to engine") default: } @@ -102,7 +102,7 @@ func TestTxsInMempool(t *testing.T) { select { case <-toEngine: default: - t.Fatalf("should have sent message to engine") + require.FailNow("should have sent message to engine") } mempool.Remove(testTxs) @@ -110,7 +110,7 @@ func TestTxsInMempool(t *testing.T) { mempool.RequestBuildBlock() select { case <-toEngine: - t.Fatalf("should not have sent message to engine") + require.FailNow("should not have sent message to engine") default: } } diff --git a/vms/avm/txs/operation_test.go b/vms/avm/txs/operation_test.go index 55a0626892e..f0dc3ec3c74 100644 --- a/vms/avm/txs/operation_test.go +++ b/vms/avm/txs/operation_test.go @@ -6,6 +6,8 @@ package txs import ( "testing" + 
"github.com/stretchr/testify/require" + "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/codec/linearcodec" "github.com/ava-labs/avalanchego/ids" @@ -28,18 +30,16 @@ func (o *testOperable) Outs() []verify.State { func TestOperationVerifyNil(t *testing.T) { op := (*Operation)(nil) - if err := op.Verify(); err == nil { - t.Fatalf("Should have erred due to nil operation") - } + err := op.Verify() + require.ErrorIs(t, err, ErrNilOperation) } func TestOperationVerifyEmpty(t *testing.T) { op := &Operation{ Asset: avax.Asset{ID: ids.Empty}, } - if err := op.Verify(); err == nil { - t.Fatalf("Should have erred due to empty operation") - } + err := op.Verify() + require.ErrorIs(t, err, ErrNilFxOperation) } func TestOperationVerifyUTXOIDsNotSorted(t *testing.T) { @@ -57,9 +57,8 @@ func TestOperationVerifyUTXOIDsNotSorted(t *testing.T) { }, Op: &testOperable{}, } - if err := op.Verify(); err == nil { - t.Fatalf("Should have erred due to unsorted utxoIDs") - } + err := op.Verify() + require.ErrorIs(t, err, ErrNotSortedAndUniqueUTXOIDs) } func TestOperationVerify(t *testing.T) { @@ -74,21 +73,17 @@ func TestOperationVerify(t *testing.T) { }, Op: &testOperable{}, } - if err := op.Verify(); err != nil { - t.Fatal(err) - } + require.NoError(t, op.Verify()) } func TestOperationSorting(t *testing.T) { + require := require.New(t) + c := linearcodec.NewDefault() - if err := c.RegisterType(&testOperable{}); err != nil { - t.Fatal(err) - } + require.NoError(c.RegisterType(&testOperable{})) m := codec.NewDefaultManager() - if err := m.RegisterCodec(CodecVersion, c); err != nil { - t.Fatal(err) - } + require.NoError(m.RegisterCodec(CodecVersion, c)) ops := []*Operation{ { @@ -112,13 +107,9 @@ func TestOperationSorting(t *testing.T) { Op: &testOperable{}, }, } - if IsSortedAndUniqueOperations(ops, m) { - t.Fatalf("Shouldn't be sorted") - } + require.False(IsSortedAndUniqueOperations(ops, m)) SortOperations(ops, m) - if !IsSortedAndUniqueOperations(ops, m) { - t.Fatalf("Should be sorted") - } + require.True(IsSortedAndUniqueOperations(ops, m)) ops = append(ops, &Operation{ Asset: avax.Asset{ID: ids.Empty}, UTXOIDs: []*avax.UTXOID{ @@ -129,14 +120,11 @@ func TestOperationSorting(t *testing.T) { }, Op: &testOperable{}, }) - if IsSortedAndUniqueOperations(ops, m) { - t.Fatalf("Shouldn't be unique") - } + require.False(IsSortedAndUniqueOperations(ops, m)) } func TestOperationTxNotState(t *testing.T) { intf := interface{}(&OperationTx{}) - if _, ok := intf.(verify.State); ok { - t.Fatalf("shouldn't be marked as state") - } + _, ok := intf.(verify.State) + require.False(t, ok) } diff --git a/vms/avm/vm_benchmark_test.go b/vms/avm/vm_benchmark_test.go index 09dfff611d9..0bb6fae1369 100644 --- a/vms/avm/vm_benchmark_test.go +++ b/vms/avm/vm_benchmark_test.go @@ -20,25 +20,21 @@ import ( func BenchmarkLoadUser(b *testing.B) { runLoadUserBenchmark := func(b *testing.B, numKeys int) { + require := require.New(b) + // This will segfault instead of failing gracefully if there's an error _, _, vm, _ := GenesisVM(nil) ctx := vm.ctx defer func() { - if err := vm.Shutdown(context.Background()); err != nil { - b.Fatal(err) - } + require.NoError(vm.Shutdown(context.Background())) ctx.Lock.Unlock() }() user, err := keystore.NewUserFromKeystore(vm.ctx.Keystore, username, password) - if err != nil { - b.Fatalf("Failed to get user keystore db: %s", err) - } + require.NoError(err) keys, err := keystore.NewKeys(user, numKeys) - if err != nil { - b.Fatalf("problem generating private key: %s", err) - } + 
require.NoError(err) b.ResetTimer() @@ -47,16 +43,13 @@ func BenchmarkLoadUser(b *testing.B) { addrIndex := n % numKeys fromAddrs.Clear() fromAddrs.Add(keys[addrIndex].PublicKey().Address()) - if _, _, err := vm.LoadUser(username, password, fromAddrs); err != nil { - b.Fatalf("Failed to load user: %s", err) - } + _, _, err := vm.LoadUser(username, password, fromAddrs) + require.NoError(err) } b.StopTimer() - if err := user.Close(); err != nil { - b.Fatal(err) - } + require.NoError(user.Close()) } benchmarkSize := []int{10, 100, 1000, 10000} @@ -69,12 +62,12 @@ func BenchmarkLoadUser(b *testing.B) { // GetAllUTXOsBenchmark is a helper func to benchmark the GetAllUTXOs depending on the size func GetAllUTXOsBenchmark(b *testing.B, utxoCount int) { + require := require.New(b) + _, _, vm, _ := GenesisVM(b) ctx := vm.ctx defer func() { - if err := vm.Shutdown(context.Background()); err != nil { - b.Fatal(err) - } + require.NoError(vm.Shutdown(context.Background())) ctx.Lock.Unlock() }() @@ -100,7 +93,7 @@ func GetAllUTXOsBenchmark(b *testing.B, utxoCount int) { vm.state.AddUTXO(utxo) } - require.NoError(b, vm.state.Commit()) + require.NoError(vm.state.Commit()) addrsSet := set.Set[ids.ShortID]{} addrsSet.Add(addr) @@ -110,8 +103,8 @@ func GetAllUTXOsBenchmark(b *testing.B, utxoCount int) { for i := 0; i < b.N; i++ { // Fetch all UTXOs older version notPaginatedUTXOs, err := avax.GetAllUTXOs(vm.state, addrsSet) - require.NoError(b, err) - require.Len(b, notPaginatedUTXOs, utxoCount) + require.NoError(err) + require.Len(notPaginatedUTXOs, utxoCount) } } diff --git a/vms/avm/vm_test.go b/vms/avm/vm_test.go index 177bb1e7b7e..b431c82dcd0 100644 --- a/vms/avm/vm_test.go +++ b/vms/avm/vm_test.go @@ -4,7 +4,6 @@ package avm import ( - "bytes" "context" "errors" "math" @@ -18,6 +17,7 @@ import ( "github.com/ava-labs/avalanchego/api/keystore" "github.com/ava-labs/avalanchego/chains/atomic" + "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/database/manager" "github.com/ava-labs/avalanchego/database/memdb" @@ -33,7 +33,6 @@ import ( "github.com/ava-labs/avalanchego/utils/formatting" "github.com/ava-labs/avalanchego/utils/formatting/address" "github.com/ava-labs/avalanchego/utils/json" - "github.com/ava-labs/avalanchego/utils/wrappers" "github.com/ava-labs/avalanchego/version" "github.com/ava-labs/avalanchego/vms/avm/config" "github.com/ava-labs/avalanchego/vms/avm/fxs" @@ -80,6 +79,8 @@ func init() { } func NewContext(tb testing.TB) *snow.Context { + require := require.New(tb) + genesisBytes := BuildGenesisTest(tb) tx := GetAVAXTxFromGenesisTest(genesisBytes, tb) @@ -92,16 +93,10 @@ func NewContext(tb testing.TB) *snow.Context { ctx.CChainID = ids.Empty.Prefix(1) aliaser := ctx.BCLookup.(ids.Aliaser) - errs := wrappers.Errs{} - errs.Add( - aliaser.Alias(chainID, "X"), - aliaser.Alias(chainID, chainID.String()), - aliaser.Alias(constants.PlatformChainID, "P"), - aliaser.Alias(constants.PlatformChainID, constants.PlatformChainID.String()), - ) - if errs.Errored() { - tb.Fatal(errs.Err) - } + require.NoError(aliaser.Alias(chainID, "X")) + require.NoError(aliaser.Alias(chainID, chainID.String())) + require.NoError(aliaser.Alias(constants.PlatformChainID, "P")) + require.NoError(aliaser.Alias(constants.PlatformChainID, constants.PlatformChainID.String())) ctx.ValidatorState = &validators.TestState{ GetSubnetIDF: func(_ context.Context, chainID ids.ID) (ids.ID, error) { @@ -123,22 +118,18 @@ func NewContext(tb testing.TB) *snow.Context { // 1. 
tx in genesis that creates asset // 2. the index of the output func GetCreateTxFromGenesisTest(tb testing.TB, genesisBytes []byte, assetName string) *txs.Tx { + require := require.New(tb) parser, err := txs.NewParser([]fxs.Fx{ &secp256k1fx.Fx{}, }) - if err != nil { - tb.Fatal(err) - } + require.NoError(err) cm := parser.GenesisCodec() genesis := Genesis{} - if _, err := cm.Unmarshal(genesisBytes, &genesis); err != nil { - tb.Fatal(err) - } + _, err = cm.Unmarshal(genesisBytes, &genesis) + require.NoError(err) - if len(genesis.Txs) == 0 { - tb.Fatal("genesis tx didn't have any txs") - } + require.NotEmpty(genesis.Txs) var assetTx *GenesisAsset for _, tx := range genesis.Txs { @@ -147,17 +138,12 @@ func GetCreateTxFromGenesisTest(tb testing.TB, genesisBytes []byte, assetName st break } } - if assetTx == nil { - tb.Fatal("there is no create tx") - return nil - } + require.NotNil(assetTx) tx := &txs.Tx{ Unsigned: &assetTx.CreateAssetTx, } - if err := parser.InitializeGenesisTx(tx); err != nil { - tb.Fatal(err) - } + require.NoError(parser.InitializeGenesisTx(tx)) return tx } @@ -253,19 +239,15 @@ func BuildGenesisTest(tb testing.TB) []byte { // BuildGenesisTestWithArgs allows building the genesis while injecting different starting points (args) func BuildGenesisTestWithArgs(tb testing.TB, args *BuildGenesisArgs) []byte { + require := require.New(tb) + ss := CreateStaticService() reply := BuildGenesisReply{} - err := ss.BuildGenesis(nil, args, &reply) - if err != nil { - tb.Fatal(err) - } + require.NoError(ss.BuildGenesis(nil, args, &reply)) b, err := formatting.Decode(reply.Encoding, reply.Bytes) - if err != nil { - tb.Fatal(err) - } - + require.NoError(err) return b } @@ -274,6 +256,8 @@ func GenesisVM(tb testing.TB) ([]byte, chan common.Message, *VM, *atomic.Memory) } func GenesisVMWithArgs(tb testing.TB, additionalFxs []*common.Fx, args *BuildGenesisArgs) ([]byte, chan common.Message, *VM, *atomic.Memory) { + require := require.New(tb) + var genesisBytes []byte if args != nil { @@ -294,12 +278,8 @@ func GenesisVMWithArgs(tb testing.TB, additionalFxs []*common.Fx, args *BuildGen ctx.Lock.Lock() userKeystore, err := keystore.CreateTestKeystore() - if err != nil { - tb.Fatal(err) - } - if err := userKeystore.CreateUser(username, password); err != nil { - tb.Fatal(err) - } + require.NoError(err) + require.NoError(userKeystore.CreateUser(username, password)) ctx.Keystore = userKeystore.NewBlockchainKeyStore(ctx.ChainID) issuer := make(chan common.Message, 1) @@ -308,10 +288,8 @@ func GenesisVMWithArgs(tb testing.TB, additionalFxs []*common.Fx, args *BuildGen CreateAssetTxFee: testTxFee, }} configBytes, err := stdjson.Marshal(Config{IndexTransactions: true}) - if err != nil { - tb.Fatal("should not have caused error in creating avm config bytes") - } - err = vm.Initialize( + require.NoError(err) + require.NoError(vm.Initialize( context.Background(), ctx, baseDBManager.NewPrefixDBManager([]byte{1}), @@ -333,19 +311,11 @@ func GenesisVMWithArgs(tb testing.TB, additionalFxs []*common.Fx, args *BuildGen additionalFxs..., ), nil, - ) - if err != nil { - tb.Fatal(err) - } + )) vm.batchTimeout = 0 - if err := vm.SetState(context.Background(), snow.Bootstrapping); err != nil { - tb.Fatal(err) - } - - if err := vm.SetState(context.Background(), snow.NormalOp); err != nil { - tb.Fatal(err) - } + require.NoError(vm.SetState(context.Background(), snow.Bootstrapping)) + require.NoError(vm.SetState(context.Background(), snow.NormalOp)) return genesisBytes, issuer, vm, m } @@ -355,6 +325,8 @@ func NewTx(t 
*testing.T, genesisBytes []byte, vm *VM) *txs.Tx { } func NewTxWithAsset(t *testing.T, genesisBytes []byte, vm *VM, assetName string) *txs.Tx { + require := require.New(t) + createTx := GetCreateTxFromGenesisTest(t, genesisBytes, assetName) newTx := &txs.Tx{Unsigned: &txs.BaseTx{ @@ -378,13 +350,13 @@ func NewTxWithAsset(t *testing.T, genesisBytes []byte, vm *VM, assetName string) }}, }, }} - if err := newTx.SignSECP256K1Fx(vm.parser.Codec(), [][]*secp256k1.PrivateKey{{keys[0]}}); err != nil { - t.Fatal(err) - } + require.NoError(newTx.SignSECP256K1Fx(vm.parser.Codec(), [][]*secp256k1.PrivateKey{{keys[0]}})) return newTx } func setupIssueTx(t testing.TB) (chan common.Message, *VM, *snow.Context, []*txs.Tx) { + require := require.New(t) + genesisBytes, issuer, vm, _ := GenesisVM(t) ctx := vm.ctx @@ -421,9 +393,7 @@ func setupIssueTx(t testing.TB) (chan common.Message, *VM, *snow.Context, []*txs }}, }, }} - if err := firstTx.SignSECP256K1Fx(vm.parser.Codec(), [][]*secp256k1.PrivateKey{{key}}); err != nil { - t.Fatal(err) - } + require.NoError(firstTx.SignSECP256K1Fx(vm.parser.Codec(), [][]*secp256k1.PrivateKey{{key}})) secondTx := &txs.Tx{Unsigned: &txs.BaseTx{ BaseTx: avax.BaseTx{ @@ -456,20 +426,18 @@ func setupIssueTx(t testing.TB) (chan common.Message, *VM, *snow.Context, []*txs }}, }, }} - if err := secondTx.SignSECP256K1Fx(vm.parser.Codec(), [][]*secp256k1.PrivateKey{{key}}); err != nil { - t.Fatal(err) - } + require.NoError(secondTx.SignSECP256K1Fx(vm.parser.Codec(), [][]*secp256k1.PrivateKey{{key}})) return issuer, vm, ctx, []*txs.Tx{avaxTx, firstTx, secondTx} } func TestInvalidGenesis(t *testing.T) { + require := require.New(t) + vm := &VM{} ctx := NewContext(t) ctx.Lock.Lock() defer func() { - if err := vm.Shutdown(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(vm.Shutdown(context.Background())) ctx.Lock.Unlock() }() @@ -484,19 +452,17 @@ func TestInvalidGenesis(t *testing.T) { nil, // fxs nil, // AppSender ) - if err == nil { - t.Fatalf("Should have erred due to an invalid genesis") - } + require.ErrorIs(err, codec.ErrCantUnpackVersion) } func TestInvalidFx(t *testing.T) { + require := require.New(t) + vm := &VM{} ctx := NewContext(t) ctx.Lock.Lock() defer func() { - if err := vm.Shutdown(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(vm.Shutdown(context.Background())) ctx.Lock.Unlock() }() @@ -514,19 +480,17 @@ func TestInvalidFx(t *testing.T) { }, nil, ) - if err == nil { - t.Fatalf("Should have erred due to an invalid interface") - } + require.ErrorIs(err, errIncompatibleFx) } func TestFxInitializationFailure(t *testing.T) { + require := require.New(t) + vm := &VM{} ctx := NewContext(t) ctx.Lock.Lock() defer func() { - if err := vm.Shutdown(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(vm.Shutdown(context.Background())) ctx.Lock.Unlock() }() @@ -549,94 +513,74 @@ func TestFxInitializationFailure(t *testing.T) { }}, nil, ) - if err == nil { - t.Fatalf("Should have erred due to an invalid fx initialization") - } + require.ErrorIs(err, errUnknownFx) } func TestIssueTx(t *testing.T) { + require := require.New(t) + genesisBytes, issuer, vm, _ := GenesisVM(t) ctx := vm.ctx defer func() { - if err := vm.Shutdown(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(vm.Shutdown(context.Background())) ctx.Lock.Unlock() }() newTx := NewTx(t, genesisBytes, vm) txID, err := vm.IssueTx(newTx.Bytes()) - if err != nil { - t.Fatal(err) - } - if txID != newTx.ID() { - t.Fatalf("Issue Tx returned 
wrong TxID") - } + require.NoError(err) + require.Equal(newTx.ID(), txID) ctx.Lock.Unlock() - msg := <-issuer - if msg != common.PendingTxs { - t.Fatalf("Wrong message") - } + require.Equal(common.PendingTxs, <-issuer) ctx.Lock.Lock() - txs := vm.PendingTxs(context.Background()) - if len(txs) != 1 { - t.Fatalf("Should have returned %d tx(s)", 1) - } + require.Len(vm.PendingTxs(context.Background()), 1) } // Test issuing a transaction that consumes a currently pending UTXO. The // transaction should be issued successfully. func TestIssueDependentTx(t *testing.T) { + require := require.New(t) + issuer, vm, ctx, txs := setupIssueTx(t) defer func() { - if err := vm.Shutdown(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(vm.Shutdown(context.Background())) ctx.Lock.Unlock() }() firstTx := txs[1] secondTx := txs[2] - if _, err := vm.IssueTx(firstTx.Bytes()); err != nil { - t.Fatal(err) - } + _, err := vm.IssueTx(firstTx.Bytes()) + require.NoError(err) - if _, err := vm.IssueTx(secondTx.Bytes()); err != nil { - t.Fatal(err) - } + _, err = vm.IssueTx(secondTx.Bytes()) + require.NoError(err) ctx.Lock.Unlock() - msg := <-issuer - if msg != common.PendingTxs { - t.Fatalf("Wrong message") - } + require.Equal(common.PendingTxs, <-issuer) ctx.Lock.Lock() - pendingTxs := vm.PendingTxs(context.Background()) - if len(pendingTxs) != 2 { - t.Fatalf("Should have returned %d tx(s)", 2) - } + require.Len(vm.PendingTxs(context.Background()), 2) } // Test issuing a transaction that creates an NFT family func TestIssueNFT(t *testing.T) { + require := require.New(t) + vm := &VM{} ctx := NewContext(t) ctx.Lock.Lock() defer func() { - if err := vm.Shutdown(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(vm.Shutdown(context.Background())) ctx.Lock.Unlock() }() genesisBytes := BuildGenesisTest(t) issuer := make(chan common.Message, 1) - err := vm.Initialize( + require.NoError(vm.Initialize( context.Background(), ctx, manager.NewMemDB(version.Semantic1_0_0), @@ -655,21 +599,12 @@ func TestIssueNFT(t *testing.T) { }, }, nil, - ) - if err != nil { - t.Fatal(err) - } + )) vm.batchTimeout = 0 - err = vm.SetState(context.Background(), snow.Bootstrapping) - if err != nil { - t.Fatal(err) - } + require.NoError(vm.SetState(context.Background(), snow.Bootstrapping)) - err = vm.SetState(context.Background(), snow.NormalOp) - if err != nil { - t.Fatal(err) - } + require.NoError(vm.SetState(context.Background(), snow.NormalOp)) createAssetTx := &txs.Tx{Unsigned: &txs.CreateAssetTx{ BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ @@ -699,13 +634,10 @@ func TestIssueNFT(t *testing.T) { }, }}, }} - if err := vm.parser.InitializeTx(createAssetTx); err != nil { - t.Fatal(err) - } + require.NoError(vm.parser.InitializeTx(createAssetTx)) - if _, err := vm.IssueTx(createAssetTx.Bytes()); err != nil { - t.Fatal(err) - } + _, err := vm.IssueTx(createAssetTx.Bytes()) + require.NoError(err) mintNFTTx := &txs.Tx{Unsigned: &txs.OperationTx{ BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ @@ -728,13 +660,10 @@ func TestIssueNFT(t *testing.T) { }, }}, }} - if err := mintNFTTx.SignNFTFx(vm.parser.Codec(), [][]*secp256k1.PrivateKey{{keys[0]}}); err != nil { - t.Fatal(err) - } + require.NoError(mintNFTTx.SignNFTFx(vm.parser.Codec(), [][]*secp256k1.PrivateKey{{keys[0]}})) - if _, err := vm.IssueTx(mintNFTTx.Bytes()); err != nil { - t.Fatal(err) - } + _, err = vm.IssueTx(mintNFTTx.Bytes()) + require.NoError(err) transferNFTTx := &txs.Tx{ Unsigned: &txs.OperationTx{ @@ -762,13 +691,10 @@ func TestIssueNFT(t *testing.T) 
{ {Verifiable: &nftfx.Credential{}}, }, } - if err := vm.parser.InitializeTx(transferNFTTx); err != nil { - t.Fatal(err) - } + require.NoError(vm.parser.InitializeTx(transferNFTTx)) - if _, err := vm.IssueTx(transferNFTTx.Bytes()); err != nil { - t.Fatal(err) - } + _, err = vm.IssueTx(transferNFTTx.Bytes()) + require.NoError(err) } // Test issuing a transaction that creates an Property family @@ -896,6 +822,8 @@ func TestIssueProperty(t *testing.T) { } func setupTxFeeAssets(t *testing.T) ([]byte, chan common.Message, *VM, *atomic.Memory) { + require := require.New(t) + addr0Str, _ := address.FormatBech32(testHRP, addrs[0].Bytes()) addr1Str, _ := address.FormatBech32(testHRP, addrs[1].Bytes()) addr2Str, _ := address.FormatBech32(testHRP, addrs[2].Bytes()) @@ -947,41 +875,45 @@ func setupTxFeeAssets(t *testing.T) ([]byte, chan common.Message, *VM, *atomic.M } genesisBytes, issuer, vm, m := GenesisVMWithArgs(t, nil, customArgs) expectedID, err := vm.Aliaser.Lookup(assetAlias) - require.NoError(t, err) - require.Equal(t, expectedID, vm.feeAssetID) + require.NoError(err) + require.Equal(expectedID, vm.feeAssetID) return genesisBytes, issuer, vm, m } func TestIssueTxWithFeeAsset(t *testing.T) { + require := require.New(t) + genesisBytes, issuer, vm, _ := setupTxFeeAssets(t) ctx := vm.ctx defer func() { - require.NoError(t, vm.Shutdown(context.Background())) + require.NoError(vm.Shutdown(context.Background())) ctx.Lock.Unlock() }() // send first asset newTx := NewTxWithAsset(t, genesisBytes, vm, feeAssetName) txID, err := vm.IssueTx(newTx.Bytes()) - require.NoError(t, err) - require.Equal(t, txID, newTx.ID()) + require.NoError(err) + require.Equal(txID, newTx.ID()) ctx.Lock.Unlock() msg := <-issuer - require.Equal(t, msg, common.PendingTxs) + require.Equal(msg, common.PendingTxs) ctx.Lock.Lock() txs := vm.PendingTxs(context.Background()) - require.Len(t, txs, 1) + require.Len(txs, 1) t.Log(txs) } func TestIssueTxWithAnotherAsset(t *testing.T) { + require := require.New(t) + genesisBytes, issuer, vm, _ := setupTxFeeAssets(t) ctx := vm.ctx defer func() { - require.NoError(t, vm.Shutdown(context.Background())) + require.NoError(vm.Shutdown(context.Background())) ctx.Lock.Unlock() }() @@ -1029,30 +961,26 @@ func TestIssueTxWithAnotherAsset(t *testing.T) { }, }, }} - if err := newTx.SignSECP256K1Fx(vm.parser.Codec(), [][]*secp256k1.PrivateKey{{keys[0]}, {keys[0]}}); err != nil { - t.Fatal(err) - } + require.NoError(newTx.SignSECP256K1Fx(vm.parser.Codec(), [][]*secp256k1.PrivateKey{{keys[0]}, {keys[0]}})) txID, err := vm.IssueTx(newTx.Bytes()) - require.NoError(t, err) - require.Equal(t, txID, newTx.ID()) + require.NoError(err) + require.Equal(txID, newTx.ID()) ctx.Lock.Unlock() msg := <-issuer - require.Equal(t, msg, common.PendingTxs) + require.Equal(msg, common.PendingTxs) ctx.Lock.Lock() txs := vm.PendingTxs(context.Background()) - require.Len(t, txs, 1) + require.Len(txs, 1) } func TestVMFormat(t *testing.T) { _, _, vm, _ := GenesisVM(t) defer func() { - if err := vm.Shutdown(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(t, vm.Shutdown(context.Background())) vm.ctx.Lock.Unlock() }() @@ -1064,24 +992,21 @@ func TestVMFormat(t *testing.T) { } for _, test := range tests { t.Run(test.in.String(), func(t *testing.T) { + require := require.New(t) addrStr, err := vm.FormatLocalAddress(test.in) - if err != nil { - t.Error(err) - } - if test.expected != addrStr { - t.Errorf("Expected %q, got %q", test.expected, addrStr) - } + require.NoError(err) + require.Equal(test.expected, 
addrStr) }) } } func TestTxCached(t *testing.T) { + require := require.New(t) + genesisBytes, _, vm, _ := GenesisVM(t) ctx := vm.ctx defer func() { - if err := vm.Shutdown(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(vm.Shutdown(context.Background())) ctx.Lock.Unlock() }() @@ -1089,33 +1014,33 @@ func TestTxCached(t *testing.T) { txBytes := newTx.Bytes() _, err := vm.ParseTx(context.Background(), txBytes) - require.NoError(t, err) + require.NoError(err) registerer := prometheus.NewRegistry() vm.metrics, err = metrics.New("", registerer) - require.NoError(t, err) + require.NoError(err) db := memdb.New() vdb := versiondb.New(db) vm.state, err = states.New(vdb, vm.parser, registerer) - require.NoError(t, err) + require.NoError(err) _, err = vm.ParseTx(context.Background(), txBytes) - require.NoError(t, err) + require.NoError(err) count, err := database.Count(vdb) - require.NoError(t, err) - require.Zero(t, count) + require.NoError(err) + require.Zero(count) } func TestTxNotCached(t *testing.T) { + require := require.New(t) + genesisBytes, _, vm, _ := GenesisVM(t) ctx := vm.ctx defer func() { - if err := vm.Shutdown(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(vm.Shutdown(context.Background())) ctx.Lock.Unlock() }() @@ -1123,110 +1048,87 @@ func TestTxNotCached(t *testing.T) { txBytes := newTx.Bytes() _, err := vm.ParseTx(context.Background(), txBytes) - require.NoError(t, err) + require.NoError(err) registerer := prometheus.NewRegistry() - require.NoError(t, err) + require.NoError(err) vm.metrics, err = metrics.New("", registerer) - require.NoError(t, err) + require.NoError(err) db := memdb.New() vdb := versiondb.New(db) vm.state, err = states.New(vdb, vm.parser, registerer) - require.NoError(t, err) + require.NoError(err) vm.uniqueTxs.Flush() _, err = vm.ParseTx(context.Background(), txBytes) - require.NoError(t, err) + require.NoError(err) count, err := database.Count(vdb) - require.NoError(t, err) - require.NotZero(t, count) + require.NoError(err) + require.NotZero(count) } func TestTxVerifyAfterIssueTx(t *testing.T) { + require := require.New(t) + issuer, vm, ctx, issueTxs := setupIssueTx(t) defer func() { - if err := vm.Shutdown(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(vm.Shutdown(context.Background())) ctx.Lock.Unlock() }() firstTx := issueTxs[1] secondTx := issueTxs[2] parsedSecondTx, err := vm.ParseTx(context.Background(), secondTx.Bytes()) - if err != nil { - t.Fatal(err) - } - if err := parsedSecondTx.Verify(context.Background()); err != nil { - t.Fatal(err) - } - if _, err := vm.IssueTx(firstTx.Bytes()); err != nil { - t.Fatal(err) - } - if err := parsedSecondTx.Accept(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(err) + require.NoError(parsedSecondTx.Verify(context.Background())) + _, err = vm.IssueTx(firstTx.Bytes()) + require.NoError(err) + require.NoError(parsedSecondTx.Accept(context.Background())) ctx.Lock.Unlock() - msg := <-issuer - if msg != common.PendingTxs { - t.Fatalf("Wrong message") - } + require.Equal(common.PendingTxs, <-issuer) ctx.Lock.Lock() txs := vm.PendingTxs(context.Background()) - if len(txs) != 1 { - t.Fatalf("Should have returned %d tx(s)", 1) - } - parsedFirstTx := txs[0] + require.Len(txs, 1) - if err := parsedFirstTx.Verify(context.Background()); err == nil { - t.Fatalf("Should have erred due to a missing UTXO") - } + parsedFirstTx := txs[0] + err = parsedFirstTx.Verify(context.Background()) + require.ErrorIs(err, database.ErrNotFound) } 
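
As a condensed illustration of the assertion conventions the hunks above and below apply, here is a sketch only, outside the diff itself; `lookup` and `errNotFound` are hypothetical stand-ins, not avalanchego APIs. The conventions: bind `require := require.New(t)` once per test and drop the `t` argument thereafter; replace `require.Equal` against zero values with `require.Zero`/`require.Empty` (the ban in this commit's title); and assert concrete error identity with `require.ErrorIs` against an exported, `%w`-wrapped sentinel rather than a bare `err == nil` check plus `t.Fatalf`.

package example

import (
	"errors"
	"fmt"
	"testing"

	"github.com/stretchr/testify/require"
)

// errNotFound is a hypothetical sentinel, analogous to database.ErrNotFound
// in the surrounding hunks.
var errNotFound = errors.New("not found")

// lookup fails with a wrapped sentinel so callers can match it via errors.Is.
func lookup(key string) (int, error) {
	if key == "" {
		return 0, fmt.Errorf("%w: empty key", errNotFound)
	}
	return len(key), nil
}

func TestLookup(t *testing.T) {
	require := require.New(t) // bind once; subsequent assertions omit t

	n, err := lookup("")
	require.ErrorIs(err, errNotFound) // matches through the %w wrapping
	require.Zero(n)                   // instead of the banned require.Equal(0, n)

	n, err = lookup("abc")
	require.NoError(err)
	require.Equal(3, n) // Equal remains fine for non-zero expectations
}

Asserting a sentinel with ErrorIs rather than a nil-check is what lets hunks such as TestTxVerifyAfterIssueTx pin the expected failure to database.ErrNotFound instead of accepting any error.
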
func TestTxVerifyAfterGet(t *testing.T) { + require := require.New(t) + _, vm, ctx, issueTxs := setupIssueTx(t) defer func() { - if err := vm.Shutdown(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(vm.Shutdown(context.Background())) ctx.Lock.Unlock() }() firstTx := issueTxs[1] secondTx := issueTxs[2] parsedSecondTx, err := vm.ParseTx(context.Background(), secondTx.Bytes()) - if err != nil { - t.Fatal(err) - } - if err := parsedSecondTx.Verify(context.Background()); err != nil { - t.Fatal(err) - } - if _, err := vm.IssueTx(firstTx.Bytes()); err != nil { - t.Fatal(err) - } + require.NoError(err) + require.NoError(parsedSecondTx.Verify(context.Background())) + _, err = vm.IssueTx(firstTx.Bytes()) + require.NoError(err) parsedFirstTx, err := vm.GetTx(context.Background(), firstTx.ID()) - if err != nil { - t.Fatal(err) - } - if err := parsedSecondTx.Accept(context.Background()); err != nil { - t.Fatal(err) - } - if err := parsedFirstTx.Verify(context.Background()); err == nil { - t.Fatalf("Should have erred due to a missing UTXO") - } + require.NoError(err) + require.NoError(parsedSecondTx.Accept(context.Background())) + err = parsedFirstTx.Verify(context.Background()) + require.ErrorIs(err, database.ErrNotFound) } func TestTxVerifyAfterVerifyAncestorTx(t *testing.T) { + require := require.New(t) + _, vm, ctx, issueTxs := setupIssueTx(t) defer func() { - if err := vm.Shutdown(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(vm.Shutdown(context.Background())) ctx.Lock.Unlock() }() avaxTx := issueTxs[0] @@ -1262,36 +1164,25 @@ func TestTxVerifyAfterVerifyAncestorTx(t *testing.T) { }, }}, }}} - if err := firstTxDescendant.SignSECP256K1Fx(vm.parser.Codec(), [][]*secp256k1.PrivateKey{{key}}); err != nil { - t.Fatal(err) - } + require.NoError(firstTxDescendant.SignSECP256K1Fx(vm.parser.Codec(), [][]*secp256k1.PrivateKey{{key}})) parsedSecondTx, err := vm.ParseTx(context.Background(), secondTx.Bytes()) - if err != nil { - t.Fatal(err) - } - if err := parsedSecondTx.Verify(context.Background()); err != nil { - t.Fatal(err) - } - if _, err := vm.IssueTx(firstTx.Bytes()); err != nil { - t.Fatal(err) - } - if _, err := vm.IssueTx(firstTxDescendant.Bytes()); err != nil { - t.Fatal(err) - } + require.NoError(err) + require.NoError(parsedSecondTx.Verify(context.Background())) + _, err = vm.IssueTx(firstTx.Bytes()) + require.NoError(err) + _, err = vm.IssueTx(firstTxDescendant.Bytes()) + require.NoError(err) parsedFirstTx, err := vm.GetTx(context.Background(), firstTx.ID()) - if err != nil { - t.Fatal(err) - } - if err := parsedSecondTx.Accept(context.Background()); err != nil { - t.Fatal(err) - } - if err := parsedFirstTx.Verify(context.Background()); err == nil { - t.Fatalf("Should have erred due to a missing UTXO") - } + require.NoError(err) + require.NoError(parsedSecondTx.Accept(context.Background())) + err = parsedFirstTx.Verify(context.Background()) + require.ErrorIs(err, database.ErrNotFound) } func TestImportTxSerialization(t *testing.T) { + require := require.New(t) + _, vm, _, _ := setupIssueTx(t) expected := []byte{ // Codec version @@ -1382,14 +1273,9 @@ func TestImportTxSerialization(t *testing.T) { }}, }} - if err := vm.parser.InitializeTx(tx); err != nil { - t.Fatal(err) - } - require.Equal(t, tx.ID().String(), "9wdPb5rsThXYLX4WxkNeyYrNMfDE5cuWLgifSjxKiA2dCmgCZ") - result := tx.Bytes() - if !bytes.Equal(expected, result) { - t.Fatalf("\nExpected: 0x%x\nResult: 0x%x", expected, result) - } + require.NoError(vm.parser.InitializeTx(tx)) + 
require.Equal(tx.ID().String(), "9wdPb5rsThXYLX4WxkNeyYrNMfDE5cuWLgifSjxKiA2dCmgCZ") + require.Equal(expected, tx.Bytes()) credBytes := []byte{ // type id @@ -1440,22 +1326,19 @@ func TestImportTxSerialization(t *testing.T) { 0x1f, 0x49, 0x9b, 0x0a, 0x4f, 0xbf, 0x95, 0xfc, 0x31, 0x39, 0x46, 0x4e, 0xa1, 0xaf, 0x00, } - if err := tx.SignSECP256K1Fx(vm.parser.Codec(), [][]*secp256k1.PrivateKey{{keys[0], keys[0]}, {keys[0], keys[0]}}); err != nil { - t.Fatal(err) - } - require.Equal(t, tx.ID().String(), "pCW7sVBytzdZ1WrqzGY1DvA2S9UaMr72xpUMxVyx1QHBARNYx") - result = tx.Bytes() + require.NoError(tx.SignSECP256K1Fx(vm.parser.Codec(), [][]*secp256k1.PrivateKey{{keys[0], keys[0]}, {keys[0], keys[0]}})) + require.Equal(tx.ID().String(), "pCW7sVBytzdZ1WrqzGY1DvA2S9UaMr72xpUMxVyx1QHBARNYx") // there are two credentials expected[len(expected)-1] = 0x02 expected = append(expected, credBytes...) - if !bytes.Equal(expected, result) { - t.Fatalf("\nExpected: 0x%x\nResult: 0x%x", expected, result) - } + require.Equal(expected, tx.Bytes()) } // Test issuing an import transaction. func TestIssueImportTx(t *testing.T) { + require := require.New(t) + genesisBytes := BuildGenesisTest(t) issuer := make(chan common.Message, 1) @@ -1479,9 +1362,9 @@ func TestIssueImportTx(t *testing.T) { } avmConfigBytes, err := stdjson.Marshal(avmConfig) - require.NoError(t, err) + require.NoError(err) vm := &VM{} - err = vm.Initialize( + require.NoError(vm.Initialize( context.Background(), ctx, baseDBManager.NewPrefixDBManager([]byte{1}), @@ -1494,20 +1377,12 @@ func TestIssueImportTx(t *testing.T) { Fx: &secp256k1fx.Fx{}, }}, nil, - ) - if err != nil { - t.Fatal(err) - } + )) vm.batchTimeout = 0 - if err := vm.SetState(context.Background(), snow.Bootstrapping); err != nil { - t.Fatal(err) - } + require.NoError(vm.SetState(context.Background(), snow.Bootstrapping)) - err = vm.SetState(context.Background(), snow.NormalOp) - if err != nil { - t.Fatal(err) - } + require.NoError(vm.SetState(context.Background(), snow.NormalOp)) key := keys[0] @@ -1548,13 +1423,10 @@ func TestIssueImportTx(t *testing.T) { }, }}, }} - if err := tx.SignSECP256K1Fx(vm.parser.Codec(), [][]*secp256k1.PrivateKey{{key}}); err != nil { - t.Fatal(err) - } + require.NoError(tx.SignSECP256K1Fx(vm.parser.Codec(), [][]*secp256k1.PrivateKey{{key}})) - if _, err := vm.IssueTx(tx.Bytes()); err == nil { - t.Fatal(err) - } + _, err = vm.IssueTx(tx.Bytes()) + require.ErrorIs(err, database.ErrNotFound) // Provide the platform UTXO: @@ -1571,65 +1443,49 @@ func TestIssueImportTx(t *testing.T) { } utxoBytes, err := vm.parser.Codec().Marshal(txs.CodecVersion, utxo) - if err != nil { - t.Fatal(err) - } + require.NoError(err) inputID := utxo.InputID() - if err := peerSharedMemory.Apply(map[ids.ID]*atomic.Requests{vm.ctx.ChainID: {PutRequests: []*atomic.Element{{ + require.NoError(peerSharedMemory.Apply(map[ids.ID]*atomic.Requests{vm.ctx.ChainID: {PutRequests: []*atomic.Element{{ Key: inputID[:], Value: utxoBytes, Traits: [][]byte{ key.PublicKey().Address().Bytes(), }, - }}}}); err != nil { - t.Fatal(err) - } + }}}})) - if _, err := vm.IssueTx(tx.Bytes()); err != nil { - t.Fatalf("should have issued the transaction correctly but erred: %s", err) - } + _, err = vm.IssueTx(tx.Bytes()) + require.NoError(err) ctx.Lock.Unlock() - msg := <-issuer - if msg != common.PendingTxs { - t.Fatalf("Wrong message") - } + require.Equal(common.PendingTxs, <-issuer) ctx.Lock.Lock() defer func() { - if err := vm.Shutdown(context.Background()); err != nil { - t.Fatal(err) - } + 
require.NoError(vm.Shutdown(context.Background())) ctx.Lock.Unlock() }() txs := vm.PendingTxs(context.Background()) - if len(txs) != 1 { - t.Fatalf("Should have returned %d tx(s)", 1) - } + require.Len(txs, 1) parsedTx := txs[0] - if err := parsedTx.Verify(context.Background()); err != nil { - t.Fatal("Failed verify", err) - } - - if err := parsedTx.Accept(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(parsedTx.Verify(context.Background())) + require.NoError(parsedTx.Accept(context.Background())) assertIndexedTX(t, vm.db, 0, key.PublicKey().Address(), txAssetID.AssetID(), parsedTx.ID()) assertLatestIdx(t, vm.db, key.PublicKey().Address(), avaxID, 1) id := utxoID.InputID() - if _, err := vm.ctx.SharedMemory.Get(platformID, [][]byte{id[:]}); err == nil { - t.Fatalf("shouldn't have been able to read the utxo") - } + _, err = vm.ctx.SharedMemory.Get(platformID, [][]byte{id[:]}) + require.ErrorIs(err, database.ErrNotFound) } // Test force accepting an import transaction. func TestForceAcceptImportTx(t *testing.T) { + require := require.New(t) + genesisBytes := BuildGenesisTest(t) issuer := make(chan common.Message, 1) @@ -1645,12 +1501,10 @@ func TestForceAcceptImportTx(t *testing.T) { vm := &VM{} ctx.Lock.Lock() defer func() { - if err := vm.Shutdown(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(vm.Shutdown(context.Background())) ctx.Lock.Unlock() }() - err := vm.Initialize( + require.NoError(vm.Initialize( context.Background(), ctx, baseDBManager.NewPrefixDBManager([]byte{1}), @@ -1663,20 +1517,12 @@ func TestForceAcceptImportTx(t *testing.T) { Fx: &secp256k1fx.Fx{}, }}, nil, - ) - if err != nil { - t.Fatal(err) - } + )) vm.batchTimeout = 0 - if err := vm.SetState(context.Background(), snow.Bootstrapping); err != nil { - t.Fatal(err) - } + require.NoError(vm.SetState(context.Background(), snow.Bootstrapping)) - err = vm.SetState(context.Background(), snow.NormalOp) - if err != nil { - t.Fatal(err) - } + require.NoError(vm.SetState(context.Background(), snow.NormalOp)) key := keys[0] @@ -1707,34 +1553,27 @@ func TestForceAcceptImportTx(t *testing.T) { }}, }} - if err := tx.SignSECP256K1Fx(vm.parser.Codec(), [][]*secp256k1.PrivateKey{{key}}); err != nil { - t.Fatal(err) - } + require.NoError(tx.SignSECP256K1Fx(vm.parser.Codec(), [][]*secp256k1.PrivateKey{{key}})) parsedTx, err := vm.ParseTx(context.Background(), tx.Bytes()) - if err != nil { - t.Fatal(err) - } + require.NoError(err) - if err := parsedTx.Verify(context.Background()); err == nil { - t.Fatalf("Should have failed verification") - } + err = parsedTx.Verify(context.Background()) + require.ErrorIs(err, database.ErrNotFound) - if err := parsedTx.Accept(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(parsedTx.Accept(context.Background())) id := utxoID.InputID() - if _, err := vm.ctx.SharedMemory.Get(platformID, [][]byte{id[:]}); err == nil { - t.Fatalf("shouldn't have been able to read the utxo") - } + _, err = vm.ctx.SharedMemory.Get(platformID, [][]byte{id[:]}) + require.ErrorIs(err, database.ErrNotFound) } func TestImportTxNotState(t *testing.T) { + require := require.New(t) + intf := interface{}(&txs.ImportTx{}) - if _, ok := intf.(verify.State); ok { - t.Fatalf("shouldn't be marked as state") - } + _, ok := intf.(verify.State) + require.False(ok) } // Test issuing an import transaction. 
@@ -1841,6 +1680,8 @@ func TestIssueExportTx(t *testing.T) { } func TestClearForceAcceptedExportTx(t *testing.T) { + require := require.New(t) + genesisBytes := BuildGenesisTest(t) issuer := make(chan common.Message, 1) @@ -1862,9 +1703,9 @@ func TestClearForceAcceptedExportTx(t *testing.T) { IndexTransactions: true, } avmConfigBytes, err := stdjson.Marshal(avmConfig) - require.NoError(t, err) + require.NoError(err) vm := &VM{} - err = vm.Initialize( + require.NoError(vm.Initialize( context.Background(), ctx, baseDBManager.NewPrefixDBManager([]byte{1}), @@ -1877,20 +1718,12 @@ func TestClearForceAcceptedExportTx(t *testing.T) { Fx: &secp256k1fx.Fx{}, }}, nil, - ) - if err != nil { - t.Fatal(err) - } + )) vm.batchTimeout = 0 - if err := vm.SetState(context.Background(), snow.Bootstrapping); err != nil { - t.Fatal(err) - } + require.NoError(vm.SetState(context.Background(), snow.Bootstrapping)) - err = vm.SetState(context.Background(), snow.NormalOp) - if err != nil { - t.Fatal(err) - } + require.NoError(vm.SetState(context.Background(), snow.NormalOp)) key := keys[0] @@ -1923,38 +1756,26 @@ func TestClearForceAcceptedExportTx(t *testing.T) { }, }}, }} - if err := tx.SignSECP256K1Fx(vm.parser.Codec(), [][]*secp256k1.PrivateKey{{key}}); err != nil { - t.Fatal(err) - } + require.NoError(tx.SignSECP256K1Fx(vm.parser.Codec(), [][]*secp256k1.PrivateKey{{key}})) - if _, err := vm.IssueTx(tx.Bytes()); err != nil { - t.Fatal(err) - } + _, err = vm.IssueTx(tx.Bytes()) + require.NoError(err) ctx.Lock.Unlock() - msg := <-issuer - if msg != common.PendingTxs { - t.Fatalf("Wrong message") - } + require.Equal(common.PendingTxs, <-issuer) ctx.Lock.Lock() defer func() { - if err := vm.Shutdown(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(vm.Shutdown(context.Background())) ctx.Lock.Unlock() }() txs := vm.PendingTxs(context.Background()) - if len(txs) != 1 { - t.Fatalf("Should have returned %d tx(s)", 1) - } + require.Len(txs, 1) parsedTx := txs[0] - if err := parsedTx.Verify(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(parsedTx.Verify(context.Background())) utxo := avax.UTXOID{ TxID: tx.ID(), @@ -1963,18 +1784,13 @@ func TestClearForceAcceptedExportTx(t *testing.T) { utxoID := utxo.InputID() peerSharedMemory := m.NewSharedMemory(platformID) - if err := peerSharedMemory.Apply(map[ids.ID]*atomic.Requests{vm.ctx.ChainID: {RemoveRequests: [][]byte{utxoID[:]}}}); err != nil { - t.Fatal(err) - } + require.NoError(peerSharedMemory.Apply(map[ids.ID]*atomic.Requests{vm.ctx.ChainID: {RemoveRequests: [][]byte{utxoID[:]}}})) - if err := parsedTx.Accept(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(parsedTx.Accept(context.Background())) assertIndexedTX(t, vm.db, 0, key.PublicKey().Address(), assetID.AssetID(), parsedTx.ID()) assertLatestIdx(t, vm.db, key.PublicKey().Address(), assetID.AssetID(), 1) - if _, err := peerSharedMemory.Get(vm.ctx.ChainID, [][]byte{utxoID[:]}); err == nil { - t.Fatalf("should have failed to read the utxo") - } + _, err = peerSharedMemory.Get(vm.ctx.ChainID, [][]byte{utxoID[:]}) + require.ErrorIs(err, database.ErrNotFound) } diff --git a/vms/avm/wallet_service_test.go b/vms/avm/wallet_service_test.go index 23713ff965d..8602a4f3e2b 100644 --- a/vms/avm/wallet_service_test.go +++ b/vms/avm/wallet_service_test.go @@ -7,6 +7,8 @@ import ( "context" "testing" + "github.com/stretchr/testify/require" + "github.com/ava-labs/avalanchego/api" "github.com/ava-labs/avalanchego/chains/atomic" 
"github.com/ava-labs/avalanchego/ids" @@ -46,32 +48,28 @@ func setupWS(t *testing.T, isAVAXAsset bool) ([]byte, *VM, *WalletService, *atom // 3) The wallet service that wraps the VM // 4) atomic memory to use in tests func setupWSWithKeys(t *testing.T, isAVAXAsset bool) ([]byte, *VM, *WalletService, *atomic.Memory, *txs.Tx) { + require := require.New(t) + genesisBytes, vm, ws, m, tx := setupWS(t, isAVAXAsset) // Import the initially funded private keys user, err := keystore.NewUserFromKeystore(ws.vm.ctx.Keystore, username, password) - if err != nil { - t.Fatal(err) - } + require.NoError(err) - if err := user.PutKeys(keys...); err != nil { - t.Fatalf("Failed to set key for user: %s", err) - } + require.NoError(user.PutKeys(keys...)) - if err := user.Close(); err != nil { - t.Fatal(err) - } + require.NoError(user.Close()) return genesisBytes, vm, ws, m, tx } func TestWalletService_SendMultiple(t *testing.T) { + require := require.New(t) + for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { _, vm, ws, _, genesisTx := setupWSWithKeys(t, tc.avaxAsset) defer func() { - if err := vm.Shutdown(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(vm.Shutdown(context.Background())) vm.ctx.Lock.Unlock() }() @@ -79,13 +77,9 @@ func TestWalletService_SendMultiple(t *testing.T) { addr := keys[0].PublicKey().Address() addrStr, err := vm.FormatLocalAddress(addr) - if err != nil { - t.Fatal(err) - } + require.NoError(err) changeAddrStr, err := vm.FormatLocalAddress(testChangeAddr) - if err != nil { - t.Fatal(err) - } + require.NoError(err) _, fromAddrsStr := sampleAddrs(t, vm, addrs) args := &SendMultipleArgs{ @@ -112,24 +106,16 @@ func TestWalletService_SendMultiple(t *testing.T) { } reply := &api.JSONTxIDChangeAddr{} vm.timer.Cancel() - if err := ws.SendMultiple(nil, args, reply); err != nil { - t.Fatalf("Failed to send transaction: %s", err) - } else if reply.ChangeAddr != changeAddrStr { - t.Fatalf("expected change address to be %s but got %s", changeAddrStr, reply.ChangeAddr) - } + require.NoError(ws.SendMultiple(nil, args, reply)) + require.Equal(changeAddrStr, reply.ChangeAddr) pendingTxs := vm.txs - if len(pendingTxs) != 1 { - t.Fatalf("Expected to find 1 pending tx after send, but found %d", len(pendingTxs)) - } + require.Len(pendingTxs, 1) - if reply.TxID != pendingTxs[0].ID() { - t.Fatal("Transaction ID returned by SendMultiple does not match the transaction found in vm's pending transactions") - } + require.Equal(pendingTxs[0].ID(), reply.TxID) - if _, err := vm.GetTx(context.Background(), reply.TxID); err != nil { - t.Fatalf("Failed to retrieve created transaction: %s", err) - } + _, err = vm.GetTx(context.Background(), reply.TxID) + require.NoError(err) }) } } diff --git a/vms/components/avax/addresses.go b/vms/components/avax/addresses.go index 400000f25c4..40929e22f89 100644 --- a/vms/components/avax/addresses.go +++ b/vms/components/avax/addresses.go @@ -4,6 +4,7 @@ package avax import ( + "errors" "fmt" "github.com/ava-labs/avalanchego/ids" @@ -13,7 +14,11 @@ import ( "github.com/ava-labs/avalanchego/utils/set" ) -var _ AddressManager = (*addressManager)(nil) +var ( + _ AddressManager = (*addressManager)(nil) + + ErrMismatchedChainIDs = errors.New("mismatched chainIDs") +) type AddressManager interface { // ParseLocalAddress takes in an address for this chain and produces the ID @@ -49,7 +54,8 @@ func (a *addressManager) ParseLocalAddress(addrStr string) (ids.ShortID, error) } if chainID != a.ctx.ChainID { return ids.ShortID{}, fmt.Errorf( - "expected 
chainID to be %q but was %q", + "%w: expected %q but got %q", + ErrMismatchedChainIDs, a.ctx.ChainID, chainID, ) diff --git a/vms/components/avax/asset_test.go b/vms/components/avax/asset_test.go index b1744f627a4..68c371ae1b0 100644 --- a/vms/components/avax/asset_test.go +++ b/vms/components/avax/asset_test.go @@ -6,6 +6,8 @@ package avax import ( "testing" + "github.com/stretchr/testify/require" + "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/codec/linearcodec" "github.com/ava-labs/avalanchego/ids" @@ -13,24 +15,22 @@ import ( func TestAssetVerifyNil(t *testing.T) { id := (*Asset)(nil) - if err := id.Verify(); err == nil { - t.Fatalf("Should have errored due to nil AssetID") - } + err := id.Verify() + require.ErrorIs(t, err, errNilAssetID) } func TestAssetVerifyEmpty(t *testing.T) { id := Asset{} - if err := id.Verify(); err == nil { - t.Fatalf("Should have errored due to empty AssetID") - } + err := id.Verify() + require.ErrorIs(t, err, errEmptyAssetID) } func TestAssetID(t *testing.T) { + require := require.New(t) + c := linearcodec.NewDefault() manager := codec.NewDefaultManager() - if err := manager.RegisterCodec(codecVersion, c); err != nil { - t.Fatal(err) - } + require.NoError(manager.RegisterCodec(codecVersion, c)) id := Asset{ ID: ids.ID{ @@ -41,25 +41,16 @@ func TestAssetID(t *testing.T) { }, } - if err := id.Verify(); err != nil { - t.Fatal(err) - } + require.NoError(id.Verify()) bytes, err := manager.Marshal(codecVersion, &id) - if err != nil { - t.Fatal(err) - } + require.NoError(err) newID := Asset{} - if _, err := manager.Unmarshal(bytes, &newID); err != nil { - t.Fatal(err) - } + _, err = manager.Unmarshal(bytes, &newID) + require.NoError(err) - if err := newID.Verify(); err != nil { - t.Fatal(err) - } + require.NoError(newID.Verify()) - if id.AssetID() != newID.AssetID() { - t.Fatalf("Parsing returned the wrong Asset ID") - } + require.Equal(id.AssetID(), newID.AssetID()) } diff --git a/vms/components/avax/metadata_test.go b/vms/components/avax/metadata_test.go index c8b9d7284f3..01dada2feda 100644 --- a/vms/components/avax/metadata_test.go +++ b/vms/components/avax/metadata_test.go @@ -3,18 +3,20 @@ package avax -import "testing" +import ( + "testing" + + "github.com/stretchr/testify/require" +) func TestMetaDataVerifyNil(t *testing.T) { md := (*Metadata)(nil) - if err := md.Verify(); err == nil { - t.Fatalf("Should have errored due to nil metadata") - } + err := md.Verify() + require.ErrorIs(t, err, errNilMetadata) } func TestMetaDataVerifyUninitialized(t *testing.T) { md := &Metadata{} - if err := md.Verify(); err == nil { - t.Fatalf("Should have errored due to uninitialized metadata") - } + err := md.Verify() + require.ErrorIs(t, err, errMetadataNotInitialize) } diff --git a/vms/components/avax/transferables_test.go b/vms/components/avax/transferables_test.go index 589fb0e03f4..686294c6100 100644 --- a/vms/components/avax/transferables_test.go +++ b/vms/components/avax/transferables_test.go @@ -4,9 +4,10 @@ package avax import ( - "bytes" "testing" + "github.com/stretchr/testify/require" + "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/codec/linearcodec" "github.com/ava-labs/avalanchego/ids" @@ -16,41 +17,35 @@ import ( func TestTransferableOutputVerifyNil(t *testing.T) { to := (*TransferableOutput)(nil) - if err := to.Verify(); err == nil { - t.Fatalf("Should have errored due to nil transferable output") - } + err := to.Verify() + require.ErrorIs(t, err, ErrNilTransferableOutput) } func 
TestTransferableOutputVerifyNilFx(t *testing.T) { to := &TransferableOutput{Asset: Asset{ID: ids.Empty}} - if err := to.Verify(); err == nil { - t.Fatalf("Should have errored due to nil transferable fx output") - } + err := to.Verify() + require.ErrorIs(t, err, ErrNilTransferableFxOutput) } func TestTransferableOutputVerify(t *testing.T) { + require := require.New(t) + assetID := ids.GenerateTestID() to := &TransferableOutput{ Asset: Asset{ID: assetID}, Out: &TestTransferable{Val: 1}, } - if err := to.Verify(); err != nil { - t.Fatal(err) - } - if to.Output() != to.Out { - t.Fatalf("Should have returned the fx output") - } + require.NoError(to.Verify()) + require.Equal(to.Out, to.Output()) } func TestTransferableOutputSorting(t *testing.T) { + require := require.New(t) + c := linearcodec.NewDefault() - if err := c.RegisterType(&TestTransferable{}); err != nil { - t.Fatal(err) - } + require.NoError(c.RegisterType(&TestTransferable{})) manager := codec.NewDefaultManager() - if err := manager.RegisterCodec(codecVersion, c); err != nil { - t.Fatal(err) - } + require.NoError(manager.RegisterCodec(codecVersion, c)) assetID1 := ids.ID{1} outs := []*TransferableOutput{ @@ -76,39 +71,23 @@ func TestTransferableOutputSorting(t *testing.T) { }, } - if IsSortedTransferableOutputs(outs, manager) { - t.Fatalf("Shouldn't be sorted") - } + require.False(IsSortedTransferableOutputs(outs, manager)) SortTransferableOutputs(outs, manager) - if !IsSortedTransferableOutputs(outs, manager) { - t.Fatalf("Should be sorted") - } - if result := outs[0].Out.(*TestTransferable).Val; result != 0 { - t.Fatalf("Val expected: %d ; result: %d", 0, result) - } - if result := outs[1].Out.(*TestTransferable).Val; result != 0 { - t.Fatalf("Val expected: %d ; result: %d", 0, result) - } - if result := outs[2].Out.(*TestTransferable).Val; result != 1 { - t.Fatalf("Val expected: %d ; result: %d", 0, result) - } - if result := outs[3].AssetID(); result != assetID1 { - t.Fatalf("Val expected: %s ; result: %s", assetID1, result) - } - if result := outs[4].AssetID(); result != assetID1 { - t.Fatalf("Val expected: %s ; result: %s", assetID1, result) - } + require.True(IsSortedTransferableOutputs(outs, manager)) + require.Zero(outs[0].Out.(*TestTransferable).Val) + require.Zero(outs[1].Out.(*TestTransferable).Val) + require.Equal(uint64(1), outs[2].Out.(*TestTransferable).Val) + require.Equal(assetID1, outs[3].AssetID()) + require.Equal(assetID1, outs[4].AssetID()) } func TestTransferableOutputSerialization(t *testing.T) { + require := require.New(t) + c := linearcodec.NewDefault() - if err := c.RegisterType(&secp256k1fx.TransferOutput{}); err != nil { - t.Fatal(err) - } + require.NoError(c.RegisterType(&secp256k1fx.TransferOutput{})) manager := codec.NewDefaultManager() - if err := manager.RegisterCodec(codecVersion, c); err != nil { - t.Fatal(err) - } + require.NoError(manager.RegisterCodec(codecVersion, c)) expected := []byte{ // Codec version @@ -161,22 +140,14 @@ func TestTransferableOutputSerialization(t *testing.T) { } outBytes, err := manager.Marshal(codecVersion, out) - if err != nil { - t.Fatal(err) - } - if !bytes.Equal(outBytes, expected) { - t.Fatalf("Expected:\n0x%x\nResult:\n0x%x", - expected, - outBytes, - ) - } + require.NoError(err) + require.Equal(expected, outBytes) } func TestTransferableInputVerifyNil(t *testing.T) { ti := (*TransferableInput)(nil) - if err := ti.Verify(); err == nil { - t.Fatalf("Should have errored due to nil transferable input") - } + err := ti.Verify() + require.ErrorIs(t, err, 
ErrNilTransferableInput) } func TestTransferableInputVerifyNilFx(t *testing.T) { @@ -184,31 +155,28 @@ func TestTransferableInputVerifyNilFx(t *testing.T) { UTXOID: UTXOID{TxID: ids.Empty}, Asset: Asset{ID: ids.Empty}, } - if err := ti.Verify(); err == nil { - t.Fatalf("Should have errored due to nil transferable fx input") - } + err := ti.Verify() + require.ErrorIs(t, err, ErrNilTransferableFxInput) } func TestTransferableInputVerify(t *testing.T) { + require := require.New(t) + assetID := ids.GenerateTestID() ti := &TransferableInput{ UTXOID: UTXOID{TxID: assetID}, Asset: Asset{ID: assetID}, In: &TestTransferable{}, } - if err := ti.Verify(); err != nil { - t.Fatal(err) - } - if ti.Input() != ti.In { - t.Fatalf("Should have returned the fx input") - } + require.NoError(ti.Verify()) + require.Equal(ti.In, ti.Input()) } func TestTransferableInputSorting(t *testing.T) { + require := require.New(t) + c := linearcodec.NewDefault() - if err := c.RegisterType(&TestTransferable{}); err != nil { - t.Fatal(err) - } + require.NoError(c.RegisterType(&TestTransferable{})) ins := []*TransferableInput{ { @@ -245,13 +213,9 @@ func TestTransferableInputSorting(t *testing.T) { }, } - if utils.IsSortedAndUniqueSortable(ins) { - t.Fatalf("Shouldn't be sorted") - } + require.False(utils.IsSortedAndUniqueSortable(ins)) utils.Sort(ins) - if !utils.IsSortedAndUniqueSortable(ins) { - t.Fatalf("Should be sorted") - } + require.True(utils.IsSortedAndUniqueSortable(ins)) ins = append(ins, &TransferableInput{ UTXOID: UTXOID{ @@ -262,20 +226,16 @@ func TestTransferableInputSorting(t *testing.T) { In: &TestTransferable{}, }) - if utils.IsSortedAndUniqueSortable(ins) { - t.Fatalf("Shouldn't be unique") - } + require.False(utils.IsSortedAndUniqueSortable(ins)) } func TestTransferableInputSerialization(t *testing.T) { + require := require.New(t) + c := linearcodec.NewDefault() - if err := c.RegisterType(&secp256k1fx.TransferInput{}); err != nil { - t.Fatal(err) - } + require.NoError(c.RegisterType(&secp256k1fx.TransferInput{})) manager := codec.NewDefaultManager() - if err := manager.RegisterCodec(codecVersion, c); err != nil { - t.Fatal(err) - } + require.NoError(manager.RegisterCodec(codecVersion, c)) expected := []byte{ // Codec version @@ -325,13 +285,6 @@ func TestTransferableInputSerialization(t *testing.T) { } inBytes, err := manager.Marshal(codecVersion, in) - if err != nil { - t.Fatal(err) - } - if !bytes.Equal(inBytes, expected) { - t.Fatalf("Expected:\n0x%x\nResult:\n0x%x", - expected, - inBytes, - ) - } + require.NoError(err) + require.Equal(expected, inBytes) } diff --git a/vms/components/avax/utxo_fetching_test.go b/vms/components/avax/utxo_fetching_test.go index e483508ef66..c7280099f23 100644 --- a/vms/components/avax/utxo_fetching_test.go +++ b/vms/components/avax/utxo_fetching_test.go @@ -13,7 +13,6 @@ import ( "github.com/ava-labs/avalanchego/database/memdb" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/set" - "github.com/ava-labs/avalanchego/utils/wrappers" "github.com/ava-labs/avalanchego/vms/secp256k1fx" ) @@ -44,12 +43,8 @@ func TestFetchUTXOs(t *testing.T) { c := linearcodec.NewDefault() manager := codec.NewDefaultManager() - errs := wrappers.Errs{} - errs.Add( - c.RegisterType(&secp256k1fx.TransferOutput{}), - manager.RegisterCodec(codecVersion, c), - ) - require.NoError(errs.Err) + require.NoError(c.RegisterType(&secp256k1fx.TransferOutput{})) + require.NoError(manager.RegisterCodec(codecVersion, c)) db := memdb.New() s := NewUTXOState(db, manager) @@ -81,12 +76,8 
@@ func TestGetPaginatedUTXOs(t *testing.T) { c := linearcodec.NewDefault() manager := codec.NewDefaultManager() - errs := wrappers.Errs{} - errs.Add( - c.RegisterType(&secp256k1fx.TransferOutput{}), - manager.RegisterCodec(codecVersion, c), - ) - require.NoError(errs.Err) + require.NoError(c.RegisterType(&secp256k1fx.TransferOutput{})) + require.NoError(manager.RegisterCodec(codecVersion, c)) db := memdb.New() s := NewUTXOState(db, manager) @@ -158,24 +149,15 @@ func TestGetPaginatedUTXOs(t *testing.T) { var totalUTXOs []*UTXO for i := 0; i <= 10; i++ { fetchedUTXOs, lastAddr, lastIdx, err = GetPaginatedUTXOs(s, addrs, lastAddr, lastIdx, 512) - if err != nil { - t.Fatal(err) - } + require.NoError(err) totalUTXOs = append(totalUTXOs, fetchedUTXOs...) } - if len(totalUTXOs) != 2000 { - t.Fatalf("Wrong number of utxos. Should have paginated through all. Expected (%d) returned (%d)", 2000, len(totalUTXOs)) - } + require.Len(totalUTXOs, 2000) // Fetch all UTXOs notPaginatedUTXOs, err := GetAllUTXOs(s, addrs) - if err != nil { - t.Fatal(err) - } - - if len(notPaginatedUTXOs) != len(totalUTXOs) { - t.Fatalf("Wrong number of utxos. Expected (%d) returned (%d)", len(totalUTXOs), len(notPaginatedUTXOs)) - } + require.NoError(err) + require.Len(notPaginatedUTXOs, len(totalUTXOs)) } diff --git a/vms/components/avax/utxo_id_test.go b/vms/components/avax/utxo_id_test.go index a35ac023693..391f8701423 100644 --- a/vms/components/avax/utxo_id_test.go +++ b/vms/components/avax/utxo_id_test.go @@ -16,18 +16,16 @@ import ( func TestUTXOIDVerifyNil(t *testing.T) { utxoID := (*UTXOID)(nil) - - if err := utxoID.Verify(); err == nil { - t.Fatalf("Should have errored due to a nil utxo ID") - } + err := utxoID.Verify() + require.ErrorIs(t, err, errNilUTXOID) } func TestUTXOID(t *testing.T) { + require := require.New(t) + c := linearcodec.NewDefault() manager := codec.NewDefaultManager() - if err := manager.RegisterCodec(codecVersion, c); err != nil { - t.Fatal(err) - } + require.NoError(manager.RegisterCodec(codecVersion, c)) utxoID := UTXOID{ TxID: ids.ID{ @@ -39,27 +37,17 @@ func TestUTXOID(t *testing.T) { OutputIndex: 0x20212223, } - if err := utxoID.Verify(); err != nil { - t.Fatal(err) - } + require.NoError(utxoID.Verify()) bytes, err := manager.Marshal(codecVersion, &utxoID) - if err != nil { - t.Fatal(err) - } + require.NoError(err) newUTXOID := UTXOID{} - if _, err := manager.Unmarshal(bytes, &newUTXOID); err != nil { - t.Fatal(err) - } - - if err := newUTXOID.Verify(); err != nil { - t.Fatal(err) - } + _, err = manager.Unmarshal(bytes, &newUTXOID) + require.NoError(err) - if utxoID.InputID() != newUTXOID.InputID() { - t.Fatalf("Parsing returned the wrong UTXO ID") - } + require.NoError(newUTXOID.Verify()) + require.Equal(utxoID.InputID(), newUTXOID.InputID()) } func TestUTXOIDLess(t *testing.T) { @@ -111,8 +99,7 @@ func TestUTXOIDLess(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - require := require.New(t) - require.Equal(tt.expected, tt.id1.Less(&tt.id2)) + require.Equal(t, tt.expected, tt.id1.Less(&tt.id2)) }) } } @@ -204,12 +191,12 @@ func TestUTXOIDFromString(t *testing.T) { retrievedUTXOID, err := UTXOIDFromString(test.expectedStr) require.ErrorIs(err, test.parseErr) - - if err == nil { - require.Equal(test.utxoID.InputID(), retrievedUTXOID.InputID()) - require.Equal(test.utxoID, retrievedUTXOID) - require.Equal(test.utxoID.String(), retrievedUTXOID.String()) + if test.parseErr != nil { + return } + require.Equal(test.utxoID.InputID(), retrievedUTXOID.InputID()) + 
require.Equal(test.utxoID, retrievedUTXOID) + require.Equal(test.utxoID.String(), retrievedUTXOID.String()) }) } } diff --git a/vms/components/avax/utxo_state_test.go b/vms/components/avax/utxo_state_test.go index 0444285607f..fa6b63bcb8f 100644 --- a/vms/components/avax/utxo_state_test.go +++ b/vms/components/avax/utxo_state_test.go @@ -13,7 +13,6 @@ import ( "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/database/memdb" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/wrappers" "github.com/ava-labs/avalanchego/vms/secp256k1fx" ) @@ -43,16 +42,12 @@ func TestUTXOState(t *testing.T) { c := linearcodec.NewDefault() manager := codec.NewDefaultManager() - errs := wrappers.Errs{} - errs.Add( - c.RegisterType(&secp256k1fx.MintOutput{}), - c.RegisterType(&secp256k1fx.TransferOutput{}), - c.RegisterType(&secp256k1fx.Input{}), - c.RegisterType(&secp256k1fx.TransferInput{}), - c.RegisterType(&secp256k1fx.Credential{}), - manager.RegisterCodec(codecVersion, c), - ) - require.NoError(errs.Err) + require.NoError(c.RegisterType(&secp256k1fx.MintOutput{})) + require.NoError(c.RegisterType(&secp256k1fx.TransferOutput{})) + require.NoError(c.RegisterType(&secp256k1fx.Input{})) + require.NoError(c.RegisterType(&secp256k1fx.TransferInput{})) + require.NoError(c.RegisterType(&secp256k1fx.Credential{})) + require.NoError(manager.RegisterCodec(codecVersion, c)) db := memdb.New() s := NewUTXOState(db, manager) diff --git a/vms/components/avax/utxo_test.go b/vms/components/avax/utxo_test.go index a872e673d24..7561f85da2c 100644 --- a/vms/components/avax/utxo_test.go +++ b/vms/components/avax/utxo_test.go @@ -4,22 +4,20 @@ package avax import ( - "bytes" "testing" + "github.com/stretchr/testify/require" + "github.com/ava-labs/avalanchego/codec" "github.com/ava-labs/avalanchego/codec/linearcodec" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/wrappers" "github.com/ava-labs/avalanchego/vms/secp256k1fx" ) func TestUTXOVerifyNil(t *testing.T) { utxo := (*UTXO)(nil) - - if err := utxo.Verify(); err == nil { - t.Fatalf("Should have errored due to a nil utxo") - } + err := utxo.Verify() + require.ErrorIs(t, err, errNilUTXO) } func TestUTXOVerifyEmpty(t *testing.T) { @@ -27,28 +25,22 @@ func TestUTXOVerifyEmpty(t *testing.T) { UTXOID: UTXOID{TxID: ids.Empty}, Asset: Asset{ID: ids.Empty}, } - - if err := utxo.Verify(); err == nil { - t.Fatalf("Should have errored due to an empty utxo") - } + err := utxo.Verify() + require.ErrorIs(t, err, errEmptyUTXO) } func TestUTXOSerialize(t *testing.T) { + require := require.New(t) + c := linearcodec.NewDefault() manager := codec.NewDefaultManager() - errs := wrappers.Errs{} - errs.Add( - c.RegisterType(&secp256k1fx.MintOutput{}), - c.RegisterType(&secp256k1fx.TransferOutput{}), - c.RegisterType(&secp256k1fx.Input{}), - c.RegisterType(&secp256k1fx.TransferInput{}), - c.RegisterType(&secp256k1fx.Credential{}), - manager.RegisterCodec(codecVersion, c), - ) - if errs.Errored() { - t.Fatal(errs.Err) - } + require.NoError(c.RegisterType(&secp256k1fx.MintOutput{})) + require.NoError(c.RegisterType(&secp256k1fx.TransferOutput{})) + require.NoError(c.RegisterType(&secp256k1fx.Input{})) + require.NoError(c.RegisterType(&secp256k1fx.TransferInput{})) + require.NoError(c.RegisterType(&secp256k1fx.Credential{})) + require.NoError(manager.RegisterCodec(codecVersion, c)) expected := []byte{ // Codec version @@ -116,13 +108,6 @@ func TestUTXOSerialize(t *testing.T) { } utxoBytes, err := 
manager.Marshal(codecVersion, utxo) - if err != nil { - t.Fatal(err) - } - if !bytes.Equal(utxoBytes, expected) { - t.Fatalf("Expected:\n0x%x\nResult:\n0x%x", - expected, - utxoBytes, - ) - } + require.NoError(err) + require.Equal(expected, utxoBytes) } diff --git a/vms/components/chain/state_test.go b/vms/components/chain/state_test.go index 28b83f1f822..f572738bf87 100644 --- a/vms/components/chain/state_test.go +++ b/vms/components/chain/state_test.go @@ -4,7 +4,6 @@ package chain import ( - "bytes" "context" "errors" "fmt" @@ -19,15 +18,17 @@ import ( "github.com/ava-labs/avalanchego/snow/choices" "github.com/ava-labs/avalanchego/snow/consensus/snowman" "github.com/ava-labs/avalanchego/utils/hashing" + "github.com/ava-labs/avalanchego/utils/metric" ) var ( _ Block = (*TestBlock)(nil) - errCantBuildBlock = errors.New("can't build new block") - errVerify = errors.New("verify failed") - errAccept = errors.New("accept failed") - errReject = errors.New("reject failed") + errCantBuildBlock = errors.New("can't build new block") + errVerify = errors.New("verify failed") + errAccept = errors.New("accept failed") + errReject = errors.New("reject failed") + errUnexpectedBlockBytes = errors.New("unexpected block bytes") ) type TestBlock struct { @@ -80,9 +81,7 @@ func createInternalBlockFuncs(t *testing.T, blks []*TestBlock) ( for _, blk := range blks { blkMap[blk.ID()] = blk blkBytes := blk.Bytes() - if len(blkBytes) != 1 { - t.Fatalf("Expected block bytes to be length 1, but found %d", len(blkBytes)) - } + require.Len(t, blkBytes, 1) blkByteMap[blkBytes[0]] = blk } @@ -102,7 +101,7 @@ func createInternalBlockFuncs(t *testing.T, blks []*TestBlock) ( blk, ok := blkByteMap[b[0]] if !ok { - return nil, fmt.Errorf("parsed unexpected block with bytes %x", b) + return nil, fmt.Errorf("%w: %x", errUnexpectedBlockBytes, b) } if blk.Status() == choices.Unknown { blk.SetStatus(choices.Processing) @@ -140,29 +139,15 @@ func checkProcessingBlock(t *testing.T, s *State, blk snowman.Block) { require.IsType(&BlockWrapper{}, blk) parsedBlk, err := s.ParseBlock(context.Background(), blk.Bytes()) - if err != nil { - t.Fatalf("Failed to parse verified block due to %s", err) - } - if parsedBlk.ID() != blk.ID() { - t.Fatalf("Expected parsed block to have the same ID as the requested block") - } - if !bytes.Equal(parsedBlk.Bytes(), blk.Bytes()) { - t.Fatalf("Expected parsed block to have the same bytes as the requested block") - } - if status := parsedBlk.Status(); status != choices.Processing { - t.Fatalf("Expected parsed block to have status Processing, but found %s", status) - } - if parsedBlk != blk { - t.Fatalf("Expected parsed block to return a uniquified block") - } + require.NoError(err) + require.Equal(blk.ID(), parsedBlk.ID()) + require.Equal(blk.Bytes(), parsedBlk.Bytes()) + require.Equal(choices.Processing, parsedBlk.Status()) + require.Equal(blk, parsedBlk) getBlk, err := s.GetBlock(context.Background(), blk.ID()) - if err != nil { - t.Fatalf("Unexpected error during GetBlock for processing block %s", err) - } - if getBlk != parsedBlk { - t.Fatalf("Expected GetBlock to return the same unique block as ParseBlock") - } + require.NoError(err) + require.Equal(parsedBlk, getBlk) } // checkDecidedBlock asserts that [blk] is returned with the correct status by ParseBlock @@ -173,42 +158,25 @@ func checkDecidedBlock(t *testing.T, s *State, blk snowman.Block, expectedStatus require.IsType(&BlockWrapper{}, blk) parsedBlk, err := s.ParseBlock(context.Background(), blk.Bytes()) - if err != nil { - 
t.Fatalf("Unexpected error parsing decided block %s", err) - } - if parsedBlk.ID() != blk.ID() { - t.Fatalf("ParseBlock returned block with unexpected ID %s, expected %s", parsedBlk.ID(), blk.ID()) - } - if !bytes.Equal(parsedBlk.Bytes(), blk.Bytes()) { - t.Fatalf("Expected parsed block to have the same bytes as the requested block") - } - if status := parsedBlk.Status(); status != expectedStatus { - t.Fatalf("Expected parsed block to have status %s, but found %s", expectedStatus, status) - } + require.NoError(err) + require.Equal(blk.ID(), parsedBlk.ID()) + require.Equal(blk.Bytes(), parsedBlk.Bytes()) + require.Equal(expectedStatus, parsedBlk.Status()) + // If the block should be in the cache, assert that the returned block is identical to [blk] - if cached && parsedBlk != blk { - t.Fatalf("Expected parsed block to have been cached, but retrieved non-unique decided block") + if cached { + require.Equal(blk, parsedBlk) } getBlk, err := s.GetBlock(context.Background(), blk.ID()) - if err != nil { - t.Fatalf("Unexpected error during GetBlock for decided block %s", err) - } - if getBlk.ID() != blk.ID() { - t.Fatalf("GetBlock returned block with unexpected ID %s, expected %s", getBlk.ID(), blk.ID()) - } - if !bytes.Equal(getBlk.Bytes(), blk.Bytes()) { - t.Fatalf("Expected block from GetBlock to have the same bytes as the requested block") - } - if status := getBlk.Status(); status != expectedStatus { - t.Fatalf("Expected block from GetBlock to have status %s, but found %s", expectedStatus, status) - } + require.NoError(err) + require.Equal(blk.ID(), getBlk.ID()) + require.Equal(blk.Bytes(), getBlk.Bytes()) + require.Equal(expectedStatus, getBlk.Status()) // Since ParseBlock should have triggered a cache hit, assert that the block is identical // to the parsed block. 
- if getBlk != parsedBlk { - t.Fatalf("Expected block returned by GetBlock to have been cached, but retrieved non-unique decided block") - } + require.Equal(parsedBlk, getBlk) } func checkAcceptedBlock(t *testing.T, s *State, blk snowman.Block, cached bool) { @@ -220,6 +188,8 @@ func checkRejectedBlock(t *testing.T, s *State, blk snowman.Block, cached bool) } func TestState(t *testing.T) { + require := require.New(t) + testBlks := NewTestBlocks(3) genesisBlock := testBlks[0] genesisBlock.SetStatus(choices.Accepted) @@ -256,97 +226,54 @@ func TestState(t *testing.T) { }) lastAccepted, err := chainState.LastAccepted(context.Background()) - if err != nil { - t.Fatal(err) - } - if lastAccepted != genesisBlock.ID() { - t.Fatal("Expected last accepted block to be the genesis block") - } + require.NoError(err) + require.Equal(genesisBlock.ID(), lastAccepted) wrappedGenesisBlk, err := chainState.GetBlock(context.Background(), genesisBlock.ID()) - if err != nil { - t.Fatalf("Failed to get genesis block due to: %s", err) - } + require.NoError(err) // Check that a cache miss on a block is handled correctly - if _, err := chainState.GetBlock(context.Background(), blk1.ID()); err == nil { - t.Fatal("expected GetBlock to return an error for blk1 before it's been parsed") - } - if _, err := chainState.GetBlock(context.Background(), blk1.ID()); err == nil { - t.Fatal("expected GetBlock to return an error for blk1 before it's been parsed") - } + _, err = chainState.GetBlock(context.Background(), blk1.ID()) + require.ErrorIs(err, database.ErrNotFound) // Parse and verify blk1 and blk2 parsedBlk1, err := chainState.ParseBlock(context.Background(), blk1.Bytes()) - if err != nil { - t.Fatal("Failed to parse blk1 due to: %w", err) - } - if err := parsedBlk1.Verify(context.Background()); err != nil { - t.Fatal("Parsed blk1 failed verification unexpectedly due to %w", err) - } + require.NoError(err) + require.NoError(parsedBlk1.Verify(context.Background())) + parsedBlk2, err := chainState.ParseBlock(context.Background(), blk2.Bytes()) - if err != nil { - t.Fatalf("Failed to parse blk2 due to: %s", err) - } - if err := parsedBlk2.Verify(context.Background()); err != nil { - t.Fatalf("Parsed blk2 failed verification unexpectedly due to %s", err) - } + require.NoError(err) + require.NoError(parsedBlk2.Verify(context.Background())) // Check that the verified blocks have been placed in the processing map - if numProcessing := len(chainState.verifiedBlocks); numProcessing != 2 { - t.Fatalf("Expected chain state to have 2 processing blocks, but found: %d", numProcessing) - } + require.Len(chainState.verifiedBlocks, 2) parsedBlk3, err := chainState.ParseBlock(context.Background(), blk3.Bytes()) - if err != nil { - t.Fatalf("Failed to parse blk3 due to %s", err) - } + require.NoError(err) getBlk3, err := chainState.GetBlock(context.Background(), blk3.ID()) - if err != nil { - t.Fatalf("Failed to get blk3 due to %s", err) - } - require.Equal(t, parsedBlk3.ID(), getBlk3.ID(), "State GetBlock returned the wrong block") + require.NoError(err) + require.Equal(parsedBlk3.ID(), getBlk3.ID(), "State GetBlock returned the wrong block") // Check that parsing blk3 does not add it to processing blocks since it has // not been verified. 
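
// The length checks on the processing map below follow the rule this patch
// series introduces: assert emptiness and length directly instead of
// comparing against 0 with require.Equal. A minimal sketch of the preferred
// forms (the map is a stand-in for chainState.verifiedBlocks):

package example

import (
	"testing"

	"github.com/stretchr/testify/require"
)

func TestZeroValueAssertions(t *testing.T) {
	var verifiedBlocks map[string]struct{} // nil: nothing is processing

	require.Empty(t, verifiedBlocks)     // not require.Equal(t, 0, len(verifiedBlocks))
	require.Zero(t, len(verifiedBlocks)) // not require.Equal(t, 0, ...)

	verifiedBlocks = map[string]struct{}{"blk1": {}}
	require.Len(t, verifiedBlocks, 1) // not require.Equal(t, 1, len(verifiedBlocks))
}
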
- if numProcessing := len(chainState.verifiedBlocks); numProcessing != 2 { - t.Fatalf("Expected State to have 2 processing blocks, but found: %d", numProcessing) - } + require.Len(chainState.verifiedBlocks, 2) - if err := parsedBlk3.Verify(context.Background()); err != nil { - t.Fatalf("Parsed blk3 failed verification unexpectedly due to %s", err) - } + require.NoError(parsedBlk3.Verify(context.Background())) // Check that blk3 has been added to processing blocks. - if numProcessing := len(chainState.verifiedBlocks); numProcessing != 3 { - t.Fatalf("Expected chain state to have 3 processing blocks, but found: %d", numProcessing) - } + require.Len(chainState.verifiedBlocks, 3) // Decide the blocks and ensure they are removed from the processing blocks map - if err := parsedBlk1.Accept(context.Background()); err != nil { - t.Fatal(err) - } - if err := parsedBlk2.Accept(context.Background()); err != nil { - t.Fatal(err) - } - if err := parsedBlk3.Reject(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(parsedBlk1.Accept(context.Background())) + require.NoError(parsedBlk2.Accept(context.Background())) + require.NoError(parsedBlk3.Reject(context.Background())) - if numProcessing := len(chainState.verifiedBlocks); numProcessing != 0 { - t.Fatalf("Expected chain state to have 0 processing blocks, but found: %d", numProcessing) - } + require.Empty(chainState.verifiedBlocks) // Check that the last accepted block was updated correctly lastAcceptedID, err := chainState.LastAccepted(context.Background()) - if err != nil { - t.Fatal(err) - } - if lastAcceptedID != blk2.ID() { - t.Fatal("Expected last accepted block to be blk2") - } - if lastAcceptedID := chainState.LastAcceptedBlock().ID(); lastAcceptedID != blk2.ID() { - t.Fatal("Expected last accepted block to be blk2") - } + require.NoError(err) + require.Equal(blk2.ID(), lastAcceptedID) + require.Equal(blk2.ID(), chainState.LastAcceptedBlock().ID()) // Flush the caches to ensure decided blocks are handled correctly on cache misses. 
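
// TestState above hoists `require := require.New(t)` once at the top, which
// is why the assertions in these hunks omit the leading *testing.T argument.
// require.New returns a *require.Assertions bound to t; a minimal sketch:

package example

import (
	"testing"

	"github.com/stretchr/testify/require"
)

func TestBoundAssertions(t *testing.T) {
	// The local `require` shadows the package name, binding t once.
	require := require.New(t)

	verified := map[string]struct{}{"blk1": {}}
	require.Len(verified, 1) // no leading t, unlike require.Len(t, ...)
	require.Contains(verified, "blk1")
}
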
chainState.Flush() @@ -357,6 +284,8 @@ func TestState(t *testing.T) { } func TestBuildBlock(t *testing.T) { + require := require.New(t) + testBlks := NewTestBlocks(2) genesisBlock := testBlks[0] genesisBlock.SetStatus(choices.Accepted) @@ -382,26 +311,22 @@ func TestBuildBlock(t *testing.T) { }) builtBlk, err := chainState.BuildBlock(context.Background()) - if err != nil { - t.Fatal(err) - } - require.Empty(t, chainState.verifiedBlocks) + require.NoError(err) + require.Empty(chainState.verifiedBlocks) - if err := builtBlk.Verify(context.Background()); err != nil { - t.Fatalf("Built block failed verification due to %s", err) - } - require.Len(t, chainState.verifiedBlocks, 1) + require.NoError(builtBlk.Verify(context.Background())) + require.Len(chainState.verifiedBlocks, 1) checkProcessingBlock(t, chainState, builtBlk) - if err := builtBlk.Accept(context.Background()); err != nil { - t.Fatalf("Unexpected error while accepting built block %s", err) - } + require.NoError(builtBlk.Accept(context.Background())) checkAcceptedBlock(t, chainState, builtBlk, true) } func TestStateDecideBlock(t *testing.T) { + require := require.New(t) + testBlks := NewTestBlocks(4) genesisBlock := testBlks[0] genesisBlock.SetStatus(choices.Accepted) @@ -426,50 +351,38 @@ func TestStateDecideBlock(t *testing.T) { // Parse badVerifyBlk (which should fail verification) badBlk, err := chainState.ParseBlock(context.Background(), badVerifyBlk.Bytes()) - if err != nil { - t.Fatal(err) - } - if err := badBlk.Verify(context.Background()); err == nil { - t.Fatal("Bad block should have failed verification") - } + require.NoError(err) + err = badBlk.Verify(context.Background()) + require.ErrorIs(err, errVerify) // Ensure a block that fails verification is not marked as processing - require.Empty(t, chainState.verifiedBlocks) + require.Empty(chainState.verifiedBlocks) // Ensure that an error during block acceptance is propagated correctly badBlk, err = chainState.ParseBlock(context.Background(), badAcceptBlk.Bytes()) - if err != nil { - t.Fatal(err) - } - if err := badBlk.Verify(context.Background()); err != nil { - t.Fatal(err) - } - require.Len(t, chainState.verifiedBlocks, 1) + require.NoError(err) + require.NoError(badBlk.Verify(context.Background())) + require.Len(chainState.verifiedBlocks, 1) - if err := badBlk.Accept(context.Background()); err == nil { - t.Fatal("Block should have errored on Accept") - } + err = badBlk.Accept(context.Background()) + require.ErrorIs(err, errAccept) // Ensure that an error during block reject is propagated correctly badBlk, err = chainState.ParseBlock(context.Background(), badRejectBlk.Bytes()) - if err != nil { - t.Fatal(err) - } - if err := badBlk.Verify(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(err) + require.NoError(badBlk.Verify(context.Background())) // Note: an error during block Accept/Reject is fatal, so it is undefined whether // the block that failed on Accept should be removed from processing or not. We allow // either case here to make this test more flexible. 
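
// The membership assertion just below implements that flexibility: asserting
// that numProcessing is in the allowed set replaces the hand-rolled
// `numProcessing > 2 || numProcessing == 0` range check. A standalone sketch:

package example

import (
	"testing"

	"github.com/stretchr/testify/require"
)

func TestEitherCountAllowed(t *testing.T) {
	numProcessing := 1 // hypothetical; 2 would also satisfy the assertion

	require.Contains(t, []int{1, 2}, numProcessing)
}
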
- if numProcessing := len(chainState.verifiedBlocks); numProcessing > 2 || numProcessing == 0 { - t.Fatalf("Expected number of processing blocks to be either 1 or 2, but found %d", numProcessing) - } + numProcessing := len(chainState.verifiedBlocks) + require.Contains([]int{1, 2}, numProcessing) - if err := badBlk.Reject(context.Background()); err == nil { - t.Fatal("Block should have errored on Reject") - } + err = badBlk.Reject(context.Background()) + require.ErrorIs(err, errReject) } func TestStateParent(t *testing.T) { + require := require.New(t) + testBlks := NewTestBlocks(3) genesisBlock := testBlks[0] genesisBlock.SetStatus(choices.Accepted) @@ -490,33 +403,24 @@ func TestStateParent(t *testing.T) { }) parsedBlk2, err := chainState.ParseBlock(context.Background(), blk2.Bytes()) - if err != nil { - t.Fatal(err) - } + require.NoError(err) missingBlk1ID := parsedBlk2.Parent() - if _, err := chainState.GetBlock(context.Background(), missingBlk1ID); err == nil { - t.Fatalf("Expected parent of blk2 to be not found") - } + _, err = chainState.GetBlock(context.Background(), missingBlk1ID) + require.ErrorIs(err, database.ErrNotFound) parsedBlk1, err := chainState.ParseBlock(context.Background(), blk1.Bytes()) - if err != nil { - t.Fatal(err) - } + require.NoError(err) genesisBlkParentID := parsedBlk1.Parent() genesisBlkParent, err := chainState.GetBlock(context.Background(), genesisBlkParentID) - if err != nil { - t.Fatal(err) - } + require.NoError(err) checkAcceptedBlock(t, chainState, genesisBlkParent, true) parentBlk1ID := parsedBlk2.Parent() parentBlk1, err := chainState.GetBlock(context.Background(), parentBlk1ID) - if err != nil { - t.Fatal(err) - } + require.NoError(err) checkProcessingBlock(t, chainState, parentBlk1) } @@ -541,22 +445,18 @@ func TestGetBlockInternal(t *testing.T) { genesisBlockInternal := chainState.LastAcceptedBlockInternal() require.IsType(&TestBlock{}, genesisBlockInternal) - if genesisBlockInternal.ID() != genesisBlock.ID() { - t.Fatalf("Expected LastAcceptedBlockInternal to be blk %s, but found %s", genesisBlock.ID(), genesisBlockInternal.ID()) - } + require.Equal(genesisBlock.ID(), genesisBlockInternal.ID()) blk, err := chainState.GetBlockInternal(context.Background(), genesisBlock.ID()) - if err != nil { - t.Fatal(err) - } + require.NoError(err) require.IsType(&TestBlock{}, blk) - if blk.ID() != genesisBlock.ID() { - t.Fatalf("Expected GetBlock to be blk %s, but found %s", genesisBlock.ID(), blk.ID()) - } + require.Equal(genesisBlock.ID(), blk.ID()) } func TestGetBlockError(t *testing.T) { + require := require.New(t) + testBlks := NewTestBlocks(2) genesisBlock := testBlks[0] genesisBlock.SetStatus(choices.Accepted) @@ -583,20 +483,14 @@ func TestGetBlockError(t *testing.T) { }) _, err := chainState.GetBlock(context.Background(), blk1.ID()) - if err == nil { - t.Fatal("Expected GetBlock to return an error for unknown block") - } + require.ErrorIs(err, database.ErrNotFound) // Update the status to Processing, so that it will be returned by the internal get block // function. 
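
// The conversions above replace bare `err == nil` / `err != nil` fatals with
// require.ErrorIs against exported sentinels (database.ErrNotFound,
// errUnexpectedBlockBytes, ...). Because ErrorIs uses errors.Is, a sentinel
// wrapped with %w still matches. A minimal sketch with a hypothetical
// errNotFound sentinel:

package example

import (
	"errors"
	"fmt"
	"testing"

	"github.com/stretchr/testify/require"
)

var errNotFound = errors.New("not found")

func getBlock(id string) error {
	// Wrapping with %w keeps the sentinel reachable by errors.Is.
	return fmt.Errorf("getting block %q: %w", id, errNotFound)
}

func TestSentinelErrors(t *testing.T) {
	err := getBlock("blk1")
	require.ErrorIs(t, err, errNotFound) // matches through the wrapping
}
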
blk1.SetStatus(choices.Processing) blk, err := chainState.GetBlock(context.Background(), blk1.ID()) - if err != nil { - t.Fatal(err) - } - if blk.ID() != blk1.ID() { - t.Fatalf("Expected GetBlock to retrieve %s, but found %s", blk1.ID(), blk.ID()) - } + require.NoError(err) + require.Equal(blk1.ID(), blk.ID()) checkProcessingBlock(t, chainState, blk) } @@ -618,10 +512,8 @@ func TestParseBlockError(t *testing.T) { GetBlockIDAtHeight: getCanonicalBlockID, }) - blk, err := chainState.ParseBlock(context.Background(), []byte{255}) - if err == nil { - t.Fatalf("Expected ParseBlock to return an error parsing an invalid block but found block of type %T", blk) - } + _, err := chainState.ParseBlock(context.Background(), []byte{255}) + require.ErrorIs(t, err, errUnexpectedBlockBytes) } func TestBuildBlockError(t *testing.T) { @@ -642,13 +534,13 @@ func TestBuildBlockError(t *testing.T) { GetBlockIDAtHeight: getCanonicalBlockID, }) - blk, err := chainState.BuildBlock(context.Background()) - if err == nil { - t.Fatalf("Expected BuildBlock to return an error but found block of type %T", blk) - } + _, err := chainState.BuildBlock(context.Background()) + require.ErrorIs(t, err, errCantBuildBlock) } func TestMeteredCache(t *testing.T) { + require := require.New(t) + registry := prometheus.NewRegistry() testBlks := NewTestBlocks(1) @@ -668,13 +560,9 @@ func TestMeteredCache(t *testing.T) { GetBlockIDAtHeight: getCanonicalBlockID, } _, err := NewMeteredState(registry, config) - if err != nil { - t.Fatal(err) - } + require.NoError(err) _, err = NewMeteredState(registry, config) - if err == nil { - t.Fatal("Expected creating a second NewMeteredState to error due to a registry conflict") - } + require.ErrorIs(err, metric.ErrFailedRegistering) } // Test the bytesToIDCache @@ -689,7 +577,7 @@ func TestStateBytesToIDCache(t *testing.T) { getBlock, parseBlock, getCanonicalBlockID := createInternalBlockFuncs(t, testBlks) buildBlock := func(context.Context) (snowman.Block, error) { - t.Fatal("shouldn't have been called") + require.FailNow("shouldn't have been called") return nil, nil } @@ -733,6 +621,8 @@ func TestStateBytesToIDCache(t *testing.T) { // TestSetLastAcceptedBlock ensures chainState's last accepted block // can be updated by calling [SetLastAcceptedBlock]. 
func TestSetLastAcceptedBlock(t *testing.T) { + require := require.New(t) + testBlks := NewTestBlocks(1) genesisBlock := testBlks[0] genesisBlock.SetStatus(choices.Accepted) @@ -774,49 +664,25 @@ func TestSetLastAcceptedBlock(t *testing.T) { GetBlockIDAtHeight: getCanonicalBlockID, }) lastAcceptedID, err := chainState.LastAccepted(context.Background()) - if err != nil { - t.Fatal(err) - } - if lastAcceptedID != genesisBlock.ID() { - t.Fatal("Expected last accepted block to be the genesis block") - } + require.NoError(err) + require.Equal(genesisBlock.ID(), lastAcceptedID) // call SetLastAcceptedBlock for postSetBlk1 - if err := chainState.SetLastAcceptedBlock(postSetBlk1); err != nil { - t.Fatal(err) - } + require.NoError(chainState.SetLastAcceptedBlock(postSetBlk1)) lastAcceptedID, err = chainState.LastAccepted(context.Background()) - if err != nil { - t.Fatal(err) - } - if lastAcceptedID != postSetBlk1.ID() { - t.Fatal("Expected last accepted block to be postSetBlk1") - } - if lastAcceptedID := chainState.LastAcceptedBlock().ID(); lastAcceptedID != postSetBlk1.ID() { - t.Fatal("Expected last accepted block to be postSetBlk1") - } + require.NoError(err) + require.Equal(postSetBlk1.ID(), lastAcceptedID) + require.Equal(postSetBlk1.ID(), chainState.LastAcceptedBlock().ID()) // ensure further blocks can be accepted parsedpostSetBlk2, err := chainState.ParseBlock(context.Background(), postSetBlk2.Bytes()) - if err != nil { - t.Fatal("Failed to parse postSetBlk2 due to: %w", err) - } - if err := parsedpostSetBlk2.Verify(context.Background()); err != nil { - t.Fatal("Parsed postSetBlk2 failed verification unexpectedly due to %w", err) - } - if err := parsedpostSetBlk2.Accept(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(err) + require.NoError(parsedpostSetBlk2.Verify(context.Background())) + require.NoError(parsedpostSetBlk2.Accept(context.Background())) lastAcceptedID, err = chainState.LastAccepted(context.Background()) - if err != nil { - t.Fatal(err) - } - if lastAcceptedID != postSetBlk2.ID() { - t.Fatal("Expected last accepted block to be postSetBlk2") - } - if lastAcceptedID := chainState.LastAcceptedBlock().ID(); lastAcceptedID != postSetBlk2.ID() { - t.Fatal("Expected last accepted block to be postSetBlk2") - } + require.NoError(err) + require.Equal(postSetBlk2.ID(), lastAcceptedID) + require.Equal(postSetBlk2.ID(), chainState.LastAcceptedBlock().ID()) checkAcceptedBlock(t, chainState, parsedpostSetBlk2, false) } @@ -863,6 +729,8 @@ func TestSetLastAcceptedBlockWithProcessingBlocksErrors(t *testing.T) { } func TestStateParseTransitivelyAcceptedBlock(t *testing.T) { + require := require.New(t) + testBlks := NewTestBlocks(3) genesisBlock := testBlks[0] genesisBlock.SetStatus(choices.Accepted) @@ -884,16 +752,13 @@ func TestStateParseTransitivelyAcceptedBlock(t *testing.T) { }) parsedBlk1, err := chainState.ParseBlock(context.Background(), blk1.Bytes()) - if err != nil { - t.Fatalf("Failed to parse blk1 due to: %s", err) - } - - if blk1.Height() != parsedBlk1.Height() { - t.Fatalf("Parsed blk1 reported incorrect height. 
Expected %d got %d", blk1.Height(), parsedBlk1.Height()) - } + require.NoError(err) + require.Equal(blk1.Height(), parsedBlk1.Height()) } func TestIsProcessing(t *testing.T) { + require := require.New(t) + testBlks := NewTestBlocks(2) genesisBlock := testBlks[0] genesisBlock.SetStatus(choices.Accepted) @@ -914,20 +779,20 @@ func TestIsProcessing(t *testing.T) { // Parse blk1 parsedBlk1, err := chainState.ParseBlock(context.Background(), blk1.Bytes()) - require.NoError(t, err) + require.NoError(err) // Check that it is not processing in consensus - require.False(t, chainState.IsProcessing(parsedBlk1.ID())) + require.False(chainState.IsProcessing(parsedBlk1.ID())) // Verify blk1 - require.NoError(t, parsedBlk1.Verify(context.Background())) + require.NoError(parsedBlk1.Verify(context.Background())) // Check that it is processing in consensus - require.True(t, chainState.IsProcessing(parsedBlk1.ID())) + require.True(chainState.IsProcessing(parsedBlk1.ID())) // Accept blk1 - require.NoError(t, parsedBlk1.Accept(context.Background())) + require.NoError(parsedBlk1.Accept(context.Background())) // Check that it is no longer processing in consensus - require.False(t, chainState.IsProcessing(parsedBlk1.ID())) + require.False(chainState.IsProcessing(parsedBlk1.ID())) } diff --git a/vms/components/verify/verification_test.go b/vms/components/verify/verification_test.go index fe854e16dba..408fc2e9473 100644 --- a/vms/components/verify/verification_test.go +++ b/vms/components/verify/verification_test.go @@ -6,6 +6,8 @@ package verify import ( "errors" "testing" + + "github.com/stretchr/testify/require" ) var errTest = errors.New("non-nil error") @@ -17,13 +19,10 @@ func (v testVerifiable) Verify() error { } func TestAllNil(t *testing.T) { - err := All( + require.NoError(t, All( testVerifiable{}, testVerifiable{}, - ) - if err != nil { - t.Fatal(err) - } + )) } func TestAllError(t *testing.T) { @@ -31,7 +30,5 @@ func TestAllError(t *testing.T) { testVerifiable{}, testVerifiable{err: errTest}, ) - if err == nil { - t.Fatalf("Should have returned an error") - } + require.ErrorIs(t, err, errTest) } diff --git a/vms/nftfx/credential_test.go b/vms/nftfx/credential_test.go index e27d441b4aa..c1e83ccab00 100644 --- a/vms/nftfx/credential_test.go +++ b/vms/nftfx/credential_test.go @@ -6,12 +6,13 @@ package nftfx import ( "testing" + "github.com/stretchr/testify/require" + "github.com/ava-labs/avalanchego/vms/components/verify" ) func TestCredentialState(t *testing.T) { intf := interface{}(&Credential{}) - if _, ok := intf.(verify.State); ok { - t.Fatalf("shouldn't be marked as state") - } + _, ok := intf.(verify.State) + require.False(t, ok) } diff --git a/vms/nftfx/factory_test.go b/vms/nftfx/factory_test.go index 83aa31d1786..10581c8db92 100644 --- a/vms/nftfx/factory_test.go +++ b/vms/nftfx/factory_test.go @@ -6,14 +6,16 @@ package nftfx import ( "testing" + "github.com/stretchr/testify/require" + "github.com/ava-labs/avalanchego/utils/logging" ) func TestFactory(t *testing.T) { + require := require.New(t) + factory := Factory{} - if fx, err := factory.New(logging.NoLog{}); err != nil { - t.Fatal(err) - } else if fx == nil { - t.Fatalf("Factory.New returned nil") - } + fx, err := factory.New(logging.NoLog{}) + require.NoError(err) + require.NotNil(fx) } diff --git a/vms/nftfx/fx_test.go b/vms/nftfx/fx_test.go index cb464dd1027..d054d680c30 100644 --- a/vms/nftfx/fx_test.go +++ b/vms/nftfx/fx_test.go @@ -7,6 +7,8 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + 
"github.com/ava-labs/avalanchego/codec/linearcodec" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" @@ -41,21 +43,18 @@ func TestFxInitialize(t *testing.T) { Log: logging.NoLog{}, } fx := Fx{} - err := fx.Initialize(&vm) - if err != nil { - t.Fatal(err) - } + require.NoError(t, fx.Initialize(&vm)) } func TestFxInitializeInvalid(t *testing.T) { fx := Fx{} err := fx.Initialize(nil) - if err == nil { - t.Fatalf("Should have returned an error") - } + require.ErrorIs(t, err, secp256k1fx.ErrWrongVMType) } func TestFxVerifyMintOperation(t *testing.T) { + require := require.New(t) + vm := secp256k1fx.TestVM{ Codec: linearcodec.NewDefault(), Log: logging.NoLog{}, @@ -64,9 +63,7 @@ func TestFxVerifyMintOperation(t *testing.T) { vm.Clk.Set(date) fx := Fx{} - if err := fx.Initialize(&vm); err != nil { - t.Fatal(err) - } + require.NoError(fx.Initialize(&vm)) tx := &secp256k1fx.TestTx{ UnsignedBytes: txBytes, } @@ -88,12 +85,12 @@ func TestFxVerifyMintOperation(t *testing.T) { } utxos := []interface{}{utxo} - if err := fx.VerifyOperation(tx, op, cred, utxos); err != nil { - t.Fatal(err) - } + require.NoError(fx.VerifyOperation(tx, op, cred, utxos)) } func TestFxVerifyMintOperationWrongTx(t *testing.T) { + require := require.New(t) + vm := secp256k1fx.TestVM{ Codec: linearcodec.NewDefault(), Log: logging.NoLog{}, @@ -102,9 +99,7 @@ func TestFxVerifyMintOperationWrongTx(t *testing.T) { vm.Clk.Set(date) fx := Fx{} - if err := fx.Initialize(&vm); err != nil { - t.Fatal(err) - } + require.NoError(fx.Initialize(&vm)) cred := &Credential{Credential: secp256k1fx.Credential{ Sigs: [][secp256k1.SignatureLen]byte{ sigBytes, @@ -123,12 +118,13 @@ func TestFxVerifyMintOperationWrongTx(t *testing.T) { } utxos := []interface{}{utxo} - if err := fx.VerifyOperation(nil, op, cred, utxos); err == nil { - t.Fatalf("VerifyOperation should have errored due to an invalid tx") - } + err := fx.VerifyOperation(nil, op, cred, utxos) + require.ErrorIs(err, errWrongTxType) } func TestFxVerifyMintOperationWrongNumberUTXOs(t *testing.T) { + require := require.New(t) + vm := secp256k1fx.TestVM{ Codec: linearcodec.NewDefault(), Log: logging.NoLog{}, @@ -137,9 +133,7 @@ func TestFxVerifyMintOperationWrongNumberUTXOs(t *testing.T) { vm.Clk.Set(date) fx := Fx{} - if err := fx.Initialize(&vm); err != nil { - t.Fatal(err) - } + require.NoError(fx.Initialize(&vm)) tx := &secp256k1fx.TestTx{ UnsignedBytes: txBytes, } @@ -155,12 +149,13 @@ func TestFxVerifyMintOperationWrongNumberUTXOs(t *testing.T) { } utxos := []interface{}{} - if err := fx.VerifyOperation(tx, op, cred, utxos); err == nil { - t.Fatalf("VerifyOperation should have errored due to not enough utxos") - } + err := fx.VerifyOperation(tx, op, cred, utxos) + require.ErrorIs(err, errWrongNumberOfUTXOs) } func TestFxVerifyMintOperationWrongCredential(t *testing.T) { + require := require.New(t) + vm := secp256k1fx.TestVM{ Codec: linearcodec.NewDefault(), Log: logging.NoLog{}, @@ -169,9 +164,7 @@ func TestFxVerifyMintOperationWrongCredential(t *testing.T) { vm.Clk.Set(date) fx := Fx{} - if err := fx.Initialize(&vm); err != nil { - t.Fatal(err) - } + require.NoError(fx.Initialize(&vm)) tx := &secp256k1fx.TestTx{ UnsignedBytes: txBytes, } @@ -188,12 +181,13 @@ func TestFxVerifyMintOperationWrongCredential(t *testing.T) { } utxos := []interface{}{utxo} - if err := fx.VerifyOperation(tx, op, nil, utxos); err == nil { - t.Fatalf("VerifyOperation should have errored due to a bad credential") - } + err := fx.VerifyOperation(tx, op, nil, utxos) + 
require.ErrorIs(err, errWrongCredentialType) } func TestFxVerifyMintOperationInvalidUTXO(t *testing.T) { + require := require.New(t) + vm := secp256k1fx.TestVM{ Codec: linearcodec.NewDefault(), Log: logging.NoLog{}, @@ -202,9 +196,7 @@ func TestFxVerifyMintOperationInvalidUTXO(t *testing.T) { vm.Clk.Set(date) fx := Fx{} - if err := fx.Initialize(&vm); err != nil { - t.Fatal(err) - } + require.NoError(fx.Initialize(&vm)) tx := &secp256k1fx.TestTx{ UnsignedBytes: txBytes, } @@ -220,12 +212,13 @@ func TestFxVerifyMintOperationInvalidUTXO(t *testing.T) { } utxos := []interface{}{nil} - if err := fx.VerifyOperation(tx, op, cred, utxos); err == nil { - t.Fatalf("VerifyOperation should have errored due to an invalid utxo") - } + err := fx.VerifyOperation(tx, op, cred, utxos) + require.ErrorIs(err, errWrongUTXOType) } func TestFxVerifyMintOperationFailingVerification(t *testing.T) { + require := require.New(t) + vm := secp256k1fx.TestVM{ Codec: linearcodec.NewDefault(), Log: logging.NoLog{}, @@ -234,9 +227,7 @@ func TestFxVerifyMintOperationFailingVerification(t *testing.T) { vm.Clk.Set(date) fx := Fx{} - if err := fx.Initialize(&vm); err != nil { - t.Fatal(err) - } + require.NoError(fx.Initialize(&vm)) tx := &secp256k1fx.TestTx{ UnsignedBytes: txBytes, } @@ -259,12 +250,13 @@ func TestFxVerifyMintOperationFailingVerification(t *testing.T) { } utxos := []interface{}{utxo} - if err := fx.VerifyOperation(tx, op, cred, utxos); err == nil { - t.Fatalf("VerifyOperation should have errored due to an invalid utxo output") - } + err := fx.VerifyOperation(tx, op, cred, utxos) + require.ErrorIs(err, secp256k1fx.ErrAddrsNotSortedUnique) } func TestFxVerifyMintOperationInvalidGroupID(t *testing.T) { + require := require.New(t) + vm := secp256k1fx.TestVM{ Codec: linearcodec.NewDefault(), Log: logging.NoLog{}, @@ -273,9 +265,7 @@ func TestFxVerifyMintOperationInvalidGroupID(t *testing.T) { vm.Clk.Set(date) fx := Fx{} - if err := fx.Initialize(&vm); err != nil { - t.Fatal(err) - } + require.NoError(fx.Initialize(&vm)) tx := &secp256k1fx.TestTx{ UnsignedBytes: txBytes, } @@ -298,12 +288,13 @@ func TestFxVerifyMintOperationInvalidGroupID(t *testing.T) { } utxos := []interface{}{utxo} - if err := fx.VerifyOperation(tx, op, cred, utxos); err == nil { - t.Fatalf("VerifyOperation should have errored due to an invalid Group ID") - } + err := fx.VerifyOperation(tx, op, cred, utxos) + require.ErrorIs(err, errWrongUniqueID) } func TestFxVerifyTransferOperation(t *testing.T) { + require := require.New(t) + vm := secp256k1fx.TestVM{ Codec: linearcodec.NewDefault(), Log: logging.NoLog{}, @@ -312,9 +303,7 @@ func TestFxVerifyTransferOperation(t *testing.T) { vm.Clk.Set(date) fx := Fx{} - if err := fx.Initialize(&vm); err != nil { - t.Fatal(err) - } + require.NoError(fx.Initialize(&vm)) tx := &secp256k1fx.TestTx{ UnsignedBytes: txBytes, } @@ -350,12 +339,12 @@ func TestFxVerifyTransferOperation(t *testing.T) { } utxos := []interface{}{utxo} - if err := fx.VerifyOperation(tx, op, cred, utxos); err != nil { - t.Fatal(err) - } + require.NoError(fx.VerifyOperation(tx, op, cred, utxos)) } func TestFxVerifyTransferOperationWrongUTXO(t *testing.T) { + require := require.New(t) + vm := secp256k1fx.TestVM{ Codec: linearcodec.NewDefault(), Log: logging.NoLog{}, @@ -364,9 +353,7 @@ func TestFxVerifyTransferOperationWrongUTXO(t *testing.T) { vm.Clk.Set(date) fx := Fx{} - if err := fx.Initialize(&vm); err != nil { - t.Fatal(err) - } + require.NoError(fx.Initialize(&vm)) tx := &secp256k1fx.TestTx{ UnsignedBytes: txBytes, } @@ -392,12 
+379,13 @@ func TestFxVerifyTransferOperationWrongUTXO(t *testing.T) { } utxos := []interface{}{nil} - if err := fx.VerifyOperation(tx, op, cred, utxos); err == nil { - t.Fatalf("VerifyOperation should have errored due to an invalid utxo") - } + err := fx.VerifyOperation(tx, op, cred, utxos) + require.ErrorIs(err, errWrongUTXOType) } func TestFxVerifyTransferOperationFailedVerify(t *testing.T) { + require := require.New(t) + vm := secp256k1fx.TestVM{ Codec: linearcodec.NewDefault(), Log: logging.NoLog{}, @@ -406,9 +394,7 @@ func TestFxVerifyTransferOperationFailedVerify(t *testing.T) { vm.Clk.Set(date) fx := Fx{} - if err := fx.Initialize(&vm); err != nil { - t.Fatal(err) - } + require.NoError(fx.Initialize(&vm)) tx := &secp256k1fx.TestTx{ UnsignedBytes: txBytes, } @@ -441,12 +427,13 @@ func TestFxVerifyTransferOperationFailedVerify(t *testing.T) { } utxos := []interface{}{utxo} - if err := fx.VerifyOperation(tx, op, cred, utxos); err == nil { - t.Fatalf("VerifyOperation should have errored due to an invalid utxo output") - } + err := fx.VerifyOperation(tx, op, cred, utxos) + require.ErrorIs(err, secp256k1fx.ErrOutputUnspendable) } func TestFxVerifyTransferOperationWrongGroupID(t *testing.T) { + require := require.New(t) + vm := secp256k1fx.TestVM{ Codec: linearcodec.NewDefault(), Log: logging.NoLog{}, @@ -455,9 +442,7 @@ func TestFxVerifyTransferOperationWrongGroupID(t *testing.T) { vm.Clk.Set(date) fx := Fx{} - if err := fx.Initialize(&vm); err != nil { - t.Fatal(err) - } + require.NoError(fx.Initialize(&vm)) tx := &secp256k1fx.TestTx{ UnsignedBytes: txBytes, } @@ -493,12 +478,13 @@ func TestFxVerifyTransferOperationWrongGroupID(t *testing.T) { } utxos := []interface{}{utxo} - if err := fx.VerifyOperation(tx, op, cred, utxos); err == nil { - t.Fatalf("VerifyOperation should have errored due to a wrong unique id") - } + err := fx.VerifyOperation(tx, op, cred, utxos) + require.ErrorIs(err, errWrongUniqueID) } func TestFxVerifyTransferOperationWrongBytes(t *testing.T) { + require := require.New(t) + vm := secp256k1fx.TestVM{ Codec: linearcodec.NewDefault(), Log: logging.NoLog{}, @@ -507,9 +493,7 @@ func TestFxVerifyTransferOperationWrongBytes(t *testing.T) { vm.Clk.Set(date) fx := Fx{} - if err := fx.Initialize(&vm); err != nil { - t.Fatal(err) - } + require.NoError(fx.Initialize(&vm)) tx := &secp256k1fx.TestTx{ UnsignedBytes: txBytes, } @@ -545,12 +529,13 @@ func TestFxVerifyTransferOperationWrongBytes(t *testing.T) { } utxos := []interface{}{utxo} - if err := fx.VerifyOperation(tx, op, cred, utxos); err == nil { - t.Fatalf("VerifyOperation should have errored due to the wrong hash being produced") - } + err := fx.VerifyOperation(tx, op, cred, utxos) + require.ErrorIs(err, errWrongBytes) } func TestFxVerifyTransferOperationTooSoon(t *testing.T) { + require := require.New(t) + vm := secp256k1fx.TestVM{ Codec: linearcodec.NewDefault(), Log: logging.NoLog{}, @@ -559,9 +544,7 @@ func TestFxVerifyTransferOperationTooSoon(t *testing.T) { vm.Clk.Set(date) fx := Fx{} - if err := fx.Initialize(&vm); err != nil { - t.Fatal(err) - } + require.NoError(fx.Initialize(&vm)) tx := &secp256k1fx.TestTx{ UnsignedBytes: txBytes, } @@ -598,12 +581,13 @@ func TestFxVerifyTransferOperationTooSoon(t *testing.T) { } utxos := []interface{}{utxo} - if err := fx.VerifyOperation(tx, op, cred, utxos); err == nil { - t.Fatalf("Should have errored due to locktime") - } + err := fx.VerifyOperation(tx, op, cred, utxos) + require.ErrorIs(err, secp256k1fx.ErrTimelocked) } func TestFxVerifyOperationUnknownOperation(t 
*testing.T) { + require := require.New(t) + vm := secp256k1fx.TestVM{ Codec: linearcodec.NewDefault(), Log: logging.NoLog{}, @@ -612,9 +596,7 @@ func TestFxVerifyOperationUnknownOperation(t *testing.T) { vm.Clk.Set(date) fx := Fx{} - if err := fx.Initialize(&vm); err != nil { - t.Fatal(err) - } + require.NoError(fx.Initialize(&vm)) tx := &secp256k1fx.TestTx{ UnsignedBytes: txBytes, } @@ -635,12 +617,13 @@ func TestFxVerifyOperationUnknownOperation(t *testing.T) { } utxos := []interface{}{utxo} - if err := fx.VerifyOperation(tx, nil, cred, utxos); err == nil { - t.Fatalf("VerifyOperation should have errored due to an unknown operation") - } + err := fx.VerifyOperation(tx, nil, cred, utxos) + require.ErrorIs(err, errWrongOperationType) } func TestFxVerifyTransfer(t *testing.T) { + require := require.New(t) + vm := secp256k1fx.TestVM{ Codec: linearcodec.NewDefault(), Log: logging.NoLog{}, @@ -649,10 +632,7 @@ func TestFxVerifyTransfer(t *testing.T) { vm.Clk.Set(date) fx := Fx{} - if err := fx.Initialize(&vm); err != nil { - t.Fatal(err) - } - if err := fx.VerifyTransfer(nil, nil, nil, nil); err == nil { - t.Fatalf("this Fx doesn't support transfers") - } + require.NoError(fx.Initialize(&vm)) + err := fx.VerifyTransfer(nil, nil, nil, nil) + require.ErrorIs(err, errCantTransfer) } diff --git a/vms/nftfx/mint_operation_test.go b/vms/nftfx/mint_operation_test.go index d462885e9d0..4dc1ce2a3eb 100644 --- a/vms/nftfx/mint_operation_test.go +++ b/vms/nftfx/mint_operation_test.go @@ -6,24 +6,24 @@ package nftfx import ( "testing" + "github.com/stretchr/testify/require" + "github.com/ava-labs/avalanchego/vms/components/verify" "github.com/ava-labs/avalanchego/vms/secp256k1fx" ) func TestMintOperationVerifyNil(t *testing.T) { op := (*MintOperation)(nil) - if err := op.Verify(); err == nil { - t.Fatalf("nil operation should have failed verification") - } + err := op.Verify() + require.ErrorIs(t, err, errNilMintOperation) } func TestMintOperationVerifyTooLargePayload(t *testing.T) { op := MintOperation{ Payload: make([]byte, MaxPayloadSize+1), } - if err := op.Verify(); err == nil { - t.Fatalf("operation should have failed verification") - } + err := op.Verify() + require.ErrorIs(t, err, errPayloadTooLarge) } func TestMintOperationVerifyInvalidOutput(t *testing.T) { @@ -32,23 +32,19 @@ func TestMintOperationVerifyInvalidOutput(t *testing.T) { Threshold: 1, }}, } - if err := op.Verify(); err == nil { - t.Fatalf("operation should have failed verification") - } + err := op.Verify() + require.ErrorIs(t, err, secp256k1fx.ErrOutputUnspendable) } func TestMintOperationOuts(t *testing.T) { op := MintOperation{ Outputs: []*secp256k1fx.OutputOwners{{}}, } - if outs := op.Outs(); len(outs) != 1 { - t.Fatalf("Wrong number of outputs returned") - } + require.Len(t, op.Outs(), 1) } func TestMintOperationState(t *testing.T) { intf := interface{}(&MintOperation{}) - if _, ok := intf.(verify.State); ok { - t.Fatalf("shouldn't be marked as state") - } + _, ok := intf.(verify.State) + require.False(t, ok) } diff --git a/vms/nftfx/mint_output_test.go b/vms/nftfx/mint_output_test.go index c33ede0e4ba..583f211f972 100644 --- a/vms/nftfx/mint_output_test.go +++ b/vms/nftfx/mint_output_test.go @@ -6,12 +6,13 @@ package nftfx import ( "testing" + "github.com/stretchr/testify/require" + "github.com/ava-labs/avalanchego/vms/components/verify" ) func TestMintOutputState(t *testing.T) { intf := interface{}(&MintOutput{}) - if _, ok := intf.(verify.State); !ok { - t.Fatalf("should be marked as state") - } + _, ok := 
intf.(verify.State) + require.True(t, ok) } diff --git a/vms/nftfx/transfer_operation_test.go b/vms/nftfx/transfer_operation_test.go index 15d395016cd..ad896b14ee7 100644 --- a/vms/nftfx/transfer_operation_test.go +++ b/vms/nftfx/transfer_operation_test.go @@ -6,38 +6,35 @@ package nftfx import ( "testing" + "github.com/stretchr/testify/require" + "github.com/ava-labs/avalanchego/vms/components/verify" "github.com/ava-labs/avalanchego/vms/secp256k1fx" ) func TestTransferOperationVerifyNil(t *testing.T) { op := (*TransferOperation)(nil) - if err := op.Verify(); err == nil { - t.Fatalf("nil operation should have failed verification") - } + err := op.Verify() + require.ErrorIs(t, err, errNilTransferOperation) } func TestTransferOperationInvalid(t *testing.T) { op := TransferOperation{Input: secp256k1fx.Input{ SigIndices: []uint32{1, 0}, }} - if err := op.Verify(); err == nil { - t.Fatalf("operation should have failed verification") - } + err := op.Verify() + require.ErrorIs(t, err, secp256k1fx.ErrInputIndicesNotSortedUnique) } func TestTransferOperationOuts(t *testing.T) { op := TransferOperation{ Output: TransferOutput{}, } - if outs := op.Outs(); len(outs) != 1 { - t.Fatalf("Wrong number of outputs returned") - } + require.Len(t, op.Outs(), 1) } func TestTransferOperationState(t *testing.T) { intf := interface{}(&TransferOperation{}) - if _, ok := intf.(verify.State); ok { - t.Fatalf("shouldn't be marked as state") - } + _, ok := intf.(verify.State) + require.False(t, ok) } diff --git a/vms/nftfx/transfer_output_test.go b/vms/nftfx/transfer_output_test.go index a95a746753a..33072314410 100644 --- a/vms/nftfx/transfer_output_test.go +++ b/vms/nftfx/transfer_output_test.go @@ -6,6 +6,8 @@ package nftfx import ( "testing" + "github.com/stretchr/testify/require" + "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/vms/components/verify" "github.com/ava-labs/avalanchego/vms/secp256k1fx" @@ -13,18 +15,16 @@ import ( func TestTransferOutputVerifyNil(t *testing.T) { to := (*TransferOutput)(nil) - if err := to.Verify(); err == nil { - t.Fatalf("TransferOutput.Verify should have errored on nil") - } + err := to.Verify() + require.ErrorIs(t, err, errNilTransferOutput) } func TestTransferOutputLargePayload(t *testing.T) { to := TransferOutput{ Payload: make([]byte, MaxPayloadSize+1), } - if err := to.Verify(); err == nil { - t.Fatalf("TransferOutput.Verify should have errored on too large of a payload") - } + err := to.Verify() + require.ErrorIs(t, err, errPayloadTooLarge) } func TestTransferOutputInvalidSecp256k1Output(t *testing.T) { @@ -36,14 +36,12 @@ func TestTransferOutputInvalidSecp256k1Output(t *testing.T) { }, }, } - if err := to.Verify(); err == nil { - t.Fatalf("TransferOutput.Verify should have errored on too large of a payload") - } + err := to.Verify() + require.ErrorIs(t, err, secp256k1fx.ErrOutputUnoptimized) } func TestTransferOutputState(t *testing.T) { intf := interface{}(&TransferOutput{}) - if _, ok := intf.(verify.State); !ok { - t.Fatalf("should be marked as state") - } + _, ok := intf.(verify.State) + require.True(t, ok) } diff --git a/vms/platformvm/api/static_service_test.go b/vms/platformvm/api/static_service_test.go index 7006d7e345a..49822d9679d 100644 --- a/vms/platformvm/api/static_service_test.go +++ b/vms/platformvm/api/static_service_test.go @@ -243,13 +243,10 @@ func TestUTXOLess(t *testing.T) { largerAddr = ids.ShortID{1} ) smallerAddrStr, err := address.FormatBech32("avax", smallerAddr[:]) - if err != nil { - panic(err) - } + require.NoError(t, 
err) largerAddrStr, err := address.FormatBech32("avax", largerAddr[:]) - if err != nil { - panic(err) - } + require.NoError(t, err) + type test struct { name string utxo1 UTXO diff --git a/vms/platformvm/blocks/builder/builder_test.go b/vms/platformvm/blocks/builder/builder_test.go index eea7e161293..882122c2b9b 100644 --- a/vms/platformvm/blocks/builder/builder_test.go +++ b/vms/platformvm/blocks/builder/builder_test.go @@ -52,9 +52,7 @@ func TestBlockBuilderAddLocalTx(t *testing.T) { return nil } require.NoError(env.Builder.AddUnverifiedTx(tx)) - - has := env.mempool.Has(txID) - require.True(has) + require.True(env.mempool.Has(txID)) // show that build block include that tx and removes it from mempool blkIntf, err := env.Builder.BuildBlock(context.Background()) @@ -65,8 +63,7 @@ func TestBlockBuilderAddLocalTx(t *testing.T) { require.Len(blk.Txs(), 1) require.Equal(txID, blk.Txs()[0].ID()) - has = env.mempool.Has(txID) - require.False(has) + require.False(env.mempool.Has(txID)) } func TestPreviouslyDroppedTxsCanBeReAddedToMempool(t *testing.T) { diff --git a/vms/platformvm/blocks/builder/helpers_test.go b/vms/platformvm/blocks/builder/helpers_test.go index bd645dbcb31..9d80ed4a9f2 100644 --- a/vms/platformvm/blocks/builder/helpers_test.go +++ b/vms/platformvm/blocks/builder/helpers_test.go @@ -6,12 +6,13 @@ package builder import ( "context" "errors" - "fmt" "testing" "time" "github.com/prometheus/client_golang/prometheus" + "github.com/stretchr/testify/require" + "github.com/ava-labs/avalanchego/chains" "github.com/ava-labs/avalanchego/chains/atomic" "github.com/ava-labs/avalanchego/codec" @@ -109,6 +110,8 @@ type environment struct { } func newEnvironment(t *testing.T) *environment { + require := require.New(t) + res := &environment{ isBootstrapped: &utils.Atomic[bool]{}, config: defaultConfig(), @@ -123,10 +126,10 @@ func newEnvironment(t *testing.T) *environment { res.ctx.Lock.Lock() defer res.ctx.Lock.Unlock() - res.fx = defaultFx(res.clk, res.ctx.Log, res.isBootstrapped.Get()) + res.fx = defaultFx(t, res.clk, res.ctx.Log, res.isBootstrapped.Get()) rewardsCalc := reward.NewCalculator(res.config.RewardConfig) - res.state = defaultState(res.config, res.ctx, res.baseDB, rewardsCalc) + res.state = defaultState(t, res.config, res.ctx, res.baseDB, rewardsCalc) res.atomicUTXOs = avax.NewAtomicUTXOManager(res.ctx.SharedMemory, txs.Codec) res.uptimes = uptime.NewManager(res.state) @@ -165,14 +168,11 @@ func newEnvironment(t *testing.T) *environment { res.sender = &common.SenderTest{T: t} metrics, err := metrics.New("", registerer, res.config.TrackedSubnets) - if err != nil { - panic(fmt.Errorf("failed to create metrics: %w", err)) - } + require.NoError(err) res.mempool, err = mempool.NewMempool("mempool", registerer, res) - if err != nil { - panic(fmt.Errorf("failed to create mempool: %w", err)) - } + require.NoError(err) + res.blkManager = blockexecutor.NewManager( res.mempool, metrics, @@ -191,12 +191,14 @@ func newEnvironment(t *testing.T) *environment { ) res.Builder.SetPreference(genesisID) - addSubnet(res) + addSubnet(t, res) return res } -func addSubnet(env *environment) { +func addSubnet(t *testing.T, env *environment) { + require := require.New(t) + // Create a subnet var err error testSubnet1, err = env.txBuilder.NewCreateSubnetTx( @@ -209,40 +211,34 @@ func addSubnet(env *environment) { []*secp256k1.PrivateKey{preFundedKeys[0]}, preFundedKeys[0].PublicKey().Address(), ) - if err != nil { - panic(err) - } + require.NoError(err) // store it genesisID := env.state.GetLastAccepted() 
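
// The helper rewrites in this file thread *testing.T through newEnvironment,
// addSubnet, defaultState, defaultFx, and buildGenesisTest so that setup
// failures are reported through the test framework instead of panicking.
// A minimal sketch of the shape (newTestEnv and loadGenesis are hypothetical):

package example

import (
	"testing"

	"github.com/stretchr/testify/require"
)

func loadGenesis() ([]byte, error) { return []byte{0x00}, nil }

func newTestEnv(t *testing.T) []byte {
	t.Helper()
	require := require.New(t)

	genesis, err := loadGenesis()
	require.NoError(err) // was: if err != nil { panic(err) }
	return genesis
}

func TestHelperSetup(t *testing.T) {
	genesis := newTestEnv(t)
	require.Len(t, genesis, 1)
}
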
stateDiff, err := state.NewDiff(genesisID, env.blkManager) - if err != nil { - panic(err) - } + require.NoError(err) executor := txexecutor.StandardTxExecutor{ Backend: &env.backend, State: stateDiff, Tx: testSubnet1, } - err = testSubnet1.Unsigned.Visit(&executor) - if err != nil { - panic(err) - } + require.NoError(testSubnet1.Unsigned.Visit(&executor)) stateDiff.AddTx(testSubnet1, status.Committed) - if err := stateDiff.Apply(env.state); err != nil { - panic(err) - } + require.NoError(stateDiff.Apply(env.state)) } func defaultState( + t *testing.T, cfg *config.Config, ctx *snow.Context, db database.Database, rewards reward.Calculator, ) state.State { - genesisBytes := buildGenesisTest(ctx) + require := require.New(t) + + genesisBytes := buildGenesisTest(t, ctx) state, err := state.New( db, genesisBytes, @@ -253,19 +249,13 @@ func defaultState( rewards, &utils.Atomic[bool]{}, ) - if err != nil { - panic(err) - } + require.NoError(err) // persist and reload to init a bunch of in-memory stuff state.SetHeight(0) - if err := state.Commit(); err != nil { - panic(err) - } + require.NoError(state.Commit()) state.SetHeight( /*height*/ 0) - if err := state.Commit(); err != nil { - panic(err) - } + require.NoError(state.Commit()) return state } @@ -355,33 +345,31 @@ func (fvi *fxVMInt) Logger() logging.Logger { return fvi.log } -func defaultFx(clk *mockable.Clock, log logging.Logger, isBootstrapped bool) fx.Fx { +func defaultFx(t *testing.T, clk *mockable.Clock, log logging.Logger, isBootstrapped bool) fx.Fx { + require := require.New(t) + fxVMInt := &fxVMInt{ registry: linearcodec.NewDefault(), clk: clk, log: log, } res := &secp256k1fx.Fx{} - if err := res.Initialize(fxVMInt); err != nil { - panic(err) - } + require.NoError(res.Initialize(fxVMInt)) if isBootstrapped { - if err := res.Bootstrapped(); err != nil { - panic(err) - } + require.NoError(res.Bootstrapped()) } return res } -func buildGenesisTest(ctx *snow.Context) []byte { +func buildGenesisTest(t *testing.T, ctx *snow.Context) []byte { + require := require.New(t) + genesisUTXOs := make([]api.UTXO, len(preFundedKeys)) hrp := constants.NetworkIDToHRP[testNetworkID] for i, key := range preFundedKeys { id := key.PublicKey().Address() addr, err := address.FormatBech32(hrp, id.Bytes()) - if err != nil { - panic(err) - } + require.NoError(err) genesisUTXOs[i] = api.UTXO{ Amount: json.Uint64(defaultBalance), Address: addr, @@ -392,9 +380,7 @@ func buildGenesisTest(ctx *snow.Context) []byte { for i, key := range preFundedKeys { nodeID := ids.NodeID(key.PublicKey().Address()) addr, err := address.FormatBech32(hrp, nodeID.Bytes()) - if err != nil { - panic(err) - } + require.NoError(err) genesisValidators[i] = api.PermissionlessValidator{ Staker: api.Staker{ StartTime: json.Uint64(defaultValidateStartTime.Unix()), @@ -426,14 +412,10 @@ func buildGenesisTest(ctx *snow.Context) []byte { buildGenesisResponse := api.BuildGenesisReply{} platformvmSS := api.StaticService{} - if err := platformvmSS.BuildGenesis(nil, &buildGenesisArgs, &buildGenesisResponse); err != nil { - panic(fmt.Errorf("problem while building platform chain's genesis state: %w", err)) - } + require.NoError(platformvmSS.BuildGenesis(nil, &buildGenesisArgs, &buildGenesisResponse)) genesisBytes, err := formatting.Decode(buildGenesisResponse.Encoding, buildGenesisResponse.Bytes) - if err != nil { - panic(err) - } + require.NoError(err) return genesisBytes } diff --git a/vms/platformvm/blocks/executor/block_test.go b/vms/platformvm/blocks/executor/block_test.go index 
d27e7e3d04e..b8cc6eb40fe 100644 --- a/vms/platformvm/blocks/executor/block_test.go +++ b/vms/platformvm/blocks/executor/block_test.go @@ -246,8 +246,8 @@ func TestBlockOptions(t *testing.T) { blk := tt.blkF() options, err := blk.Options(context.Background()) + require.ErrorIs(err, tt.expectedErr) if tt.expectedErr != nil { - require.ErrorIs(err, tt.expectedErr) return } require.IsType(tt.expectedPreferenceType, options[0].(*Block).Block) diff --git a/vms/platformvm/health_test.go b/vms/platformvm/health_test.go index 40ef4142ccc..2e638ee6414 100644 --- a/vms/platformvm/health_test.go +++ b/vms/platformvm/health_test.go @@ -20,14 +20,14 @@ const defaultMinConnectedStake = 0.8 func TestHealthCheckPrimaryNetwork(t *testing.T) { require := require.New(t) - vm, _, _ := defaultVM() + vm, _, _ := defaultVM(t) vm.ctx.Lock.Lock() defer func() { require.NoError(vm.Shutdown(context.Background())) vm.ctx.Lock.Unlock() }() - genesisState, _ := defaultGenesis() + genesisState, _ := defaultGenesis(t) for index, validator := range genesisState.Validators { require.NoError(vm.Connected(context.Background(), validator.NodeID, version.CurrentApp)) details, err := vm.HealthCheck(context.Background()) @@ -58,7 +58,7 @@ func TestHealthCheckSubnet(t *testing.T) { t.Run(name, func(t *testing.T) { require := require.New(t) - vm, _, _ := defaultVM() + vm, _, _ := defaultVM(t) vm.ctx.Lock.Lock() defer func() { require.NoError(vm.Shutdown(context.Background())) @@ -77,7 +77,7 @@ func TestHealthCheckSubnet(t *testing.T) { require.True(ok) // connect to all primary network validators first - genesisState, _ := defaultGenesis() + genesisState, _ := defaultGenesis(t) for _, validator := range genesisState.Validators { require.NoError(vm.Connected(context.Background(), validator.NodeID, version.CurrentApp)) } diff --git a/vms/platformvm/reward/calculator_test.go b/vms/platformvm/reward/calculator_test.go index cb16435af9c..47e1d934927 100644 --- a/vms/platformvm/reward/calculator_test.go +++ b/vms/platformvm/reward/calculator_test.go @@ -29,6 +29,8 @@ var defaultConfig = Config{ } func TestLongerDurationBonus(t *testing.T) { + require := require.New(t) + c := NewCalculator(defaultConfig) shortDuration := 24 * time.Hour totalDuration := 365 * 24 * time.Hour @@ -42,7 +44,7 @@ func TestLongerDurationBonus(t *testing.T) { longBalance := units.KiloAvax longBalance += c.Calculate(totalDuration, longBalance, 359*units.MegaAvax+longBalance) - require.Less(t, shortBalance, longBalance, "should promote stakers to stake longer") + require.Less(shortBalance, longBalance, "should promote stakers to stake longer") } func TestRewards(t *testing.T) { @@ -134,6 +136,8 @@ func TestRewards(t *testing.T) { } func TestRewardsOverflow(t *testing.T) { + require := require.New(t) + var ( maxSupply uint64 = math.MaxUint64 initialSupply uint64 = 1 @@ -149,10 +153,12 @@ func TestRewardsOverflow(t *testing.T) { maxSupply, // The staked amount is larger than the current supply initialSupply, ) - require.Equal(t, maxSupply-initialSupply, reward) + require.Equal(maxSupply-initialSupply, reward) } func TestRewardsMint(t *testing.T) { + require := require.New(t) + var ( maxSupply uint64 = 1000 initialSupply uint64 = 1 @@ -168,5 +174,5 @@ func TestRewardsMint(t *testing.T) { maxSupply, // The staked amount is larger than the current supply initialSupply, ) - require.Equal(t, maxSupply-initialSupply, rewards) + require.Equal(maxSupply-initialSupply, rewards) } diff --git a/vms/platformvm/service_test.go b/vms/platformvm/service_test.go index 
bed97713562..6812b20dfa5 100644 --- a/vms/platformvm/service_test.go +++ b/vms/platformvm/service_test.go @@ -68,7 +68,7 @@ var ( ) func defaultService(t *testing.T) (*Service, *mutableSharedMemory) { - vm, _, mutableSharedMemory := defaultVM() + vm, _, mutableSharedMemory := defaultVM(t) vm.ctx.Lock.Lock() defer vm.ctx.Lock.Unlock() ks := keystore.New(logging.NoLog{}, manager.NewMemDB(version.Semantic1_0_0)) @@ -86,32 +86,38 @@ func defaultService(t *testing.T) (*Service, *mutableSharedMemory) { // Give user [testUsername] control of [testPrivateKey] and keys[0] (which is funded) func defaultAddress(t *testing.T, service *Service) { + require := require.New(t) + service.vm.ctx.Lock.Lock() defer service.vm.ctx.Lock.Unlock() user, err := vmkeystore.NewUserFromKeystore(service.vm.ctx.Keystore, testUsername, testPassword) - require.NoError(t, err) + require.NoError(err) pk, err := testKeyFactory.ToPrivateKey(testPrivateKey) - require.NoError(t, err) + require.NoError(err) - require.NoError(t, user.PutKeys(pk, keys[0])) + require.NoError(user.PutKeys(pk, keys[0])) } func TestAddValidator(t *testing.T) { + require := require.New(t) + expectedJSONString := `{"username":"","password":"","from":null,"changeAddr":"","txID":"11111111111111111111111111111111LpoYY","startTime":"0","endTime":"0","weight":"0","nodeID":"NodeID-111111111111111111116DBWJs","rewardAddress":"","delegationFeeRate":"0.0000"}` args := AddValidatorArgs{} bytes, err := stdjson.Marshal(&args) - require.NoError(t, err) - require.Equal(t, expectedJSONString, string(bytes)) + require.NoError(err) + require.Equal(expectedJSONString, string(bytes)) } func TestCreateBlockchainArgsParsing(t *testing.T) { + require := require.New(t) + jsonString := `{"vmID":"lol","fxIDs":["secp256k1"], "name":"awesome", "username":"bob loblaw", "password":"yeet", "genesisData":"SkB92YpWm4Q2iPnLGCuDPZPgUQMxajqQQuz91oi3xD984f8r"}` args := CreateBlockchainArgs{} - require.NoError(t, stdjson.Unmarshal([]byte(jsonString), &args)) + require.NoError(stdjson.Unmarshal([]byte(jsonString), &args)) _, err := stdjson.Marshal(args.GenesisData) - require.NoError(t, err) + require.NoError(err) } func TestExportKey(t *testing.T) { @@ -334,9 +340,7 @@ func TestGetTx(t *testing.T) { commit := options[0].(*blockexecutor.Block) require.IsType(&blocks.BanffCommitBlock{}, commit.Block) - require.NoError(commit.Verify(context.Background())) - require.NoError(commit.Accept(context.Background())) } } @@ -373,7 +377,7 @@ func TestGetBalance(t *testing.T) { }() // Ensure GetStake is correct for each of the genesis validators - genesis, _ := defaultGenesis() + genesis, _ := defaultGenesis(t) for _, utxo := range genesis.UTXOs { request := GetBalanceRequest{ Addresses: []string{ @@ -402,7 +406,7 @@ func TestGetStake(t *testing.T) { }() // Ensure GetStake is correct for each of the genesis validators - genesis, _ := defaultGenesis() + genesis, _ := defaultGenesis(t) addrsStrs := []string{} for i, validator := range genesis.Validators { addr := fmt.Sprintf("P-%s", validator.RewardOwner.Addresses[0]) @@ -567,7 +571,7 @@ func TestGetCurrentValidators(t *testing.T) { service.vm.ctx.Lock.Unlock() }() - genesis, _ := defaultGenesis() + genesis, _ := defaultGenesis(t) // Call getValidators args := GetCurrentValidatorsArgs{SubnetID: constants.PrimaryNetworkID} diff --git a/vms/platformvm/state/diff_test.go b/vms/platformvm/state/diff_test.go index c6668ffa92c..0c6509c2fe5 100644 --- a/vms/platformvm/state/diff_test.go +++ b/vms/platformvm/state/diff_test.go @@ -472,47 +472,50 @@ func 
TestDiffUTXO(t *testing.T) { } func assertChainsEqual(t *testing.T, expected, actual Chain) { + require := require.New(t) + t.Helper() expectedCurrentStakerIterator, expectedErr := expected.GetCurrentStakerIterator() actualCurrentStakerIterator, actualErr := actual.GetCurrentStakerIterator() - require.ErrorIs(t, actualErr, expectedErr) + require.ErrorIs(actualErr, expectedErr) if expectedErr == nil { assertIteratorsEqual(t, expectedCurrentStakerIterator, actualCurrentStakerIterator) } expectedPendingStakerIterator, expectedErr := expected.GetPendingStakerIterator() actualPendingStakerIterator, actualErr := actual.GetPendingStakerIterator() - require.ErrorIs(t, actualErr, expectedErr) + require.ErrorIs(actualErr, expectedErr) if expectedErr == nil { assertIteratorsEqual(t, expectedPendingStakerIterator, actualPendingStakerIterator) } - require.Equal(t, expected.GetTimestamp(), actual.GetTimestamp()) + require.Equal(expected.GetTimestamp(), actual.GetTimestamp()) expectedCurrentSupply, err := expected.GetCurrentSupply(constants.PrimaryNetworkID) - require.NoError(t, err) + require.NoError(err) actualCurrentSupply, err := actual.GetCurrentSupply(constants.PrimaryNetworkID) - require.NoError(t, err) + require.NoError(err) - require.Equal(t, expectedCurrentSupply, actualCurrentSupply) + require.Equal(expectedCurrentSupply, actualCurrentSupply) expectedSubnets, expectedErr := expected.GetSubnets() actualSubnets, actualErr := actual.GetSubnets() - require.ErrorIs(t, actualErr, expectedErr) + require.ErrorIs(actualErr, expectedErr) if expectedErr == nil { - require.Equal(t, expectedSubnets, actualSubnets) + require.Equal(expectedSubnets, actualSubnets) for _, subnet := range expectedSubnets { subnetID := subnet.ID() expectedChains, expectedErr := expected.GetChains(subnetID) actualChains, actualErr := actual.GetChains(subnetID) - require.ErrorIs(t, actualErr, expectedErr) - if expectedErr == nil { - require.Equal(t, expectedChains, actualChains) + require.ErrorIs(actualErr, expectedErr) + if expectedErr != nil { + continue } + require.Equal(expectedChains, actualChains) } } } diff --git a/vms/platformvm/state/stakers_test.go b/vms/platformvm/state/stakers_test.go index 6b8c85eaa5d..9894d947965 100644 --- a/vms/platformvm/state/stakers_test.go +++ b/vms/platformvm/state/stakers_test.go @@ -235,17 +235,19 @@ func newTestStaker() *Staker { } func assertIteratorsEqual(t *testing.T, expected, actual StakerIterator) { + require := require.New(t) + t.Helper() for expected.Next() { - require.True(t, actual.Next()) + require.True(actual.Next()) expectedStaker := expected.Value() actualStaker := actual.Value() - require.Equal(t, expectedStaker, actualStaker) + require.Equal(expectedStaker, actualStaker) } - require.False(t, actual.Next()) + require.False(actual.Next()) expected.Release() actual.Release() diff --git a/vms/platformvm/state/state_test.go b/vms/platformvm/state/state_test.go index b54ed41c5f3..9e3a4dc0d09 100644 --- a/vms/platformvm/state/state_test.go +++ b/vms/platformvm/state/state_test.go @@ -649,9 +649,10 @@ func TestValidatorWeightDiff(t *testing.T) { errs.Add(op(diff)) } require.ErrorIs(errs.Err, tt.expectedErr) - if tt.expectedErr == nil { - require.Equal(tt.expected, diff) + if tt.expectedErr != nil { + return } + require.Equal(tt.expected, diff) }) } } diff --git a/vms/platformvm/state/validator_metadata_test.go b/vms/platformvm/state/validator_metadata_test.go index 15fa1964952..894083146a5 100644 --- a/vms/platformvm/state/validator_metadata_test.go +++ 
b/vms/platformvm/state/validator_metadata_test.go @@ -289,9 +289,10 @@ func TestParseValidatorMetadata(t *testing.T) { var metadata validatorMetadata err := parseValidatorMetadata(tt.bytes, &metadata) require.ErrorIs(err, tt.expectedErr) - if err == nil { - require.Equal(tt.expected, &metadata) + if tt.expectedErr != nil { + return } + require.Equal(tt.expected, &metadata) }) } } diff --git a/vms/platformvm/txs/base_tx_test.go b/vms/platformvm/txs/base_tx_test.go index 6e5b5ad75b7..a2c616b7cc4 100644 --- a/vms/platformvm/txs/base_tx_test.go +++ b/vms/platformvm/txs/base_tx_test.go @@ -14,6 +14,8 @@ import ( ) func TestBaseTxMarshalJSON(t *testing.T) { + require := require.New(t) + blockchainID := ids.ID{1} utxoTxID := ids.ID{2} assetID := ids.ID{3} @@ -40,12 +42,12 @@ func TestBaseTxMarshalJSON(t *testing.T) { }} txBytes, err := json.Marshal(tx) - require.NoError(t, err) + require.NoError(err) asString := string(txBytes) - require.Contains(t, asString, `"networkID":4`) - require.Contains(t, asString, `"blockchainID":"SYXsAycDPUu4z2ZksJD5fh5nTDcH3vCFHnpcVye5XuJ2jArg"`) - require.Contains(t, asString, `"inputs":[{"txID":"t64jLxDRmxo8y48WjbRALPAZuSDZ6qPVaaeDzxHA4oSojhLt","outputIndex":5,"assetID":"2KdbbWvpeAShCx5hGbtdF15FMMepq9kajsNTqVvvEbhiCRSxU","fxID":"2mB8TguRrYvbGw7G2UBqKfmL8osS7CfmzAAHSzuZK8bwpRKdY","input":{"Err":null,"Val":100}}]`) - require.Contains(t, asString, `"outputs":[{"assetID":"2KdbbWvpeAShCx5hGbtdF15FMMepq9kajsNTqVvvEbhiCRSxU","fxID":"2mB8TguRrYvbGw7G2UBqKfmL8osS7CfmzAAHSzuZK8bwpRKdY","output":{"Err":null,"Val":100}}]`) + require.Contains(asString, `"networkID":4`) + require.Contains(asString, `"blockchainID":"SYXsAycDPUu4z2ZksJD5fh5nTDcH3vCFHnpcVye5XuJ2jArg"`) + require.Contains(asString, `"inputs":[{"txID":"t64jLxDRmxo8y48WjbRALPAZuSDZ6qPVaaeDzxHA4oSojhLt","outputIndex":5,"assetID":"2KdbbWvpeAShCx5hGbtdF15FMMepq9kajsNTqVvvEbhiCRSxU","fxID":"2mB8TguRrYvbGw7G2UBqKfmL8osS7CfmzAAHSzuZK8bwpRKdY","input":{"Err":null,"Val":100}}]`) + require.Contains(asString, `"outputs":[{"assetID":"2KdbbWvpeAShCx5hGbtdF15FMMepq9kajsNTqVvvEbhiCRSxU","fxID":"2mB8TguRrYvbGw7G2UBqKfmL8osS7CfmzAAHSzuZK8bwpRKdY","output":{"Err":null,"Val":100}}]`) } diff --git a/vms/platformvm/txs/executor/advance_time_test.go b/vms/platformvm/txs/executor/advance_time_test.go index 4e658a623dc..bfe6d58fa08 100644 --- a/vms/platformvm/txs/executor/advance_time_test.go +++ b/vms/platformvm/txs/executor/advance_time_test.go @@ -25,7 +25,7 @@ import ( // for the primary network func TestAdvanceTimeTxUpdatePrimaryNetworkStakers(t *testing.T) { require := require.New(t) - env := newEnvironment(false /*=postBanff*/, false /*=postCortina*/) + env := newEnvironment(t, false /*=postBanff*/, false /*=postCortina*/) env.ctx.Lock.Lock() defer func() { require.NoError(shutdownEnvironment(env)) @@ -83,7 +83,7 @@ func TestAdvanceTimeTxUpdatePrimaryNetworkStakers(t *testing.T) { // Ensure semantic verification fails when proposed timestamp is at or before current timestamp func TestAdvanceTimeTxTimestampTooEarly(t *testing.T) { require := require.New(t) - env := newEnvironment(false /*=postBanff*/, false /*=postCortina*/) + env := newEnvironment(t, false /*=postBanff*/, false /*=postCortina*/) defer func() { require.NoError(shutdownEnvironment(env)) }() @@ -110,7 +110,7 @@ func TestAdvanceTimeTxTimestampTooEarly(t *testing.T) { // Ensure semantic verification fails when proposed timestamp is after next validator set change time func TestAdvanceTimeTxTimestampTooLate(t *testing.T) { require := require.New(t) - env := 
newEnvironment(false /*=postBanff*/, false /*=postCortina*/) + env := newEnvironment(t, false /*=postBanff*/, false /*=postCortina*/) env.ctx.Lock.Lock() // Case: Timestamp is after next validator start time @@ -144,7 +144,7 @@ func TestAdvanceTimeTxTimestampTooLate(t *testing.T) { require.NoError(shutdownEnvironment(env)) // Case: Timestamp is after next validator end time - env = newEnvironment(false /*=postBanff*/, false /*=postCortina*/) + env = newEnvironment(t, false /*=postBanff*/, false /*=postCortina*/) env.ctx.Lock.Lock() defer func() { require.NoError(shutdownEnvironment(env)) @@ -346,7 +346,7 @@ func TestAdvanceTimeTxUpdateStakers(t *testing.T) { for _, test := range tests { t.Run(test.description, func(t *testing.T) { require := require.New(t) - env := newEnvironment(false /*=postBanff*/, false /*=postCortina*/) + env := newEnvironment(t, false /*=postBanff*/, false /*=postCortina*/) env.ctx.Lock.Lock() defer func() { require.NoError(shutdownEnvironment(env)) @@ -448,7 +448,7 @@ func TestAdvanceTimeTxUpdateStakers(t *testing.T) { // is after the new timestamp func TestAdvanceTimeTxRemoveSubnetValidator(t *testing.T) { require := require.New(t) - env := newEnvironment(false /*=postBanff*/, false /*=postCortina*/) + env := newEnvironment(t, false /*=postBanff*/, false /*=postCortina*/) env.ctx.Lock.Lock() defer func() { require.NoError(shutdownEnvironment(env)) @@ -550,7 +550,7 @@ func TestTrackedSubnet(t *testing.T) { for _, tracked := range []bool{true, false} { t.Run(fmt.Sprintf("tracked %t", tracked), func(t *testing.T) { require := require.New(t) - env := newEnvironment(false /*=postBanff*/, false /*=postCortina*/) + env := newEnvironment(t, false /*=postBanff*/, false /*=postCortina*/) env.ctx.Lock.Lock() defer func() { require.NoError(shutdownEnvironment(env)) @@ -620,7 +620,7 @@ func TestTrackedSubnet(t *testing.T) { func TestAdvanceTimeTxDelegatorStakerWeight(t *testing.T) { require := require.New(t) - env := newEnvironment(false /*=postBanff*/, false /*=postCortina*/) + env := newEnvironment(t, false /*=postBanff*/, false /*=postCortina*/) env.ctx.Lock.Lock() defer func() { require.NoError(shutdownEnvironment(env)) @@ -729,7 +729,7 @@ func TestAdvanceTimeTxDelegatorStakerWeight(t *testing.T) { func TestAdvanceTimeTxDelegatorStakers(t *testing.T) { require := require.New(t) - env := newEnvironment(false /*=postBanff*/, false /*=postCortina*/) + env := newEnvironment(t, false /*=postBanff*/, false /*=postCortina*/) env.ctx.Lock.Lock() defer func() { require.NoError(shutdownEnvironment(env)) @@ -828,7 +828,7 @@ func TestAdvanceTimeTxDelegatorStakers(t *testing.T) { // Test method InitiallyPrefersCommit func TestAdvanceTimeTxInitiallyPrefersCommit(t *testing.T) { require := require.New(t) - env := newEnvironment(false /*=postBanff*/, false /*=postCortina*/) + env := newEnvironment(t, false /*=postBanff*/, false /*=postCortina*/) env.ctx.Lock.Lock() defer func() { require.NoError(shutdownEnvironment(env)) @@ -858,7 +858,7 @@ func TestAdvanceTimeTxInitiallyPrefersCommit(t *testing.T) { func TestAdvanceTimeTxAfterBanff(t *testing.T) { require := require.New(t) - env := newEnvironment(false /*=postBanff*/, false /*=postCortina*/) + env := newEnvironment(t, false /*=postBanff*/, false /*=postCortina*/) env.ctx.Lock.Lock() defer func() { require.NoError(shutdownEnvironment(env)) @@ -889,7 +889,7 @@ func TestAdvanceTimeTxAfterBanff(t *testing.T) { // Ensure marshaling/unmarshaling works func TestAdvanceTimeTxUnmarshal(t *testing.T) { require := require.New(t) - env := 
newEnvironment(false /*=postBanff*/, false /*=postCortina*/) + env := newEnvironment(t, false /*=postBanff*/, false /*=postCortina*/) env.ctx.Lock.Lock() defer func() { require.NoError(shutdownEnvironment(env)) diff --git a/vms/platformvm/txs/executor/create_chain_test.go b/vms/platformvm/txs/executor/create_chain_test.go index 97ccb8d6013..e2a3b8e0e3c 100644 --- a/vms/platformvm/txs/executor/create_chain_test.go +++ b/vms/platformvm/txs/executor/create_chain_test.go @@ -24,7 +24,7 @@ import ( // Ensure Execute fails when there are not enough control sigs func TestCreateChainTxInsufficientControlSigs(t *testing.T) { require := require.New(t) - env := newEnvironment(true /*=postBanff*/, false /*=postCortina*/) + env := newEnvironment(t, true /*=postBanff*/, false /*=postCortina*/) env.ctx.Lock.Lock() defer func() { require.NoError(shutdownEnvironment(env)) @@ -59,7 +59,7 @@ func TestCreateChainTxInsufficientControlSigs(t *testing.T) { // Ensure Execute fails when an incorrect control signature is given func TestCreateChainTxWrongControlSig(t *testing.T) { require := require.New(t) - env := newEnvironment(true /*=postBanff*/, false /*=postCortina*/) + env := newEnvironment(t, true /*=postBanff*/, false /*=postCortina*/) env.ctx.Lock.Lock() defer func() { require.NoError(shutdownEnvironment(env)) @@ -102,7 +102,7 @@ func TestCreateChainTxWrongControlSig(t *testing.T) { // its validator set doesn't exist func TestCreateChainTxNoSuchSubnet(t *testing.T) { require := require.New(t) - env := newEnvironment(true /*=postBanff*/, false /*=postCortina*/) + env := newEnvironment(t, true /*=postBanff*/, false /*=postCortina*/) env.ctx.Lock.Lock() defer func() { require.NoError(shutdownEnvironment(env)) @@ -136,7 +136,7 @@ func TestCreateChainTxNoSuchSubnet(t *testing.T) { // Ensure valid tx passes semanticVerify func TestCreateChainTxValid(t *testing.T) { require := require.New(t) - env := newEnvironment(true /*=postBanff*/, false /*=postCortina*/) + env := newEnvironment(t, true /*=postBanff*/, false /*=postCortina*/) env.ctx.Lock.Lock() defer func() { require.NoError(shutdownEnvironment(env)) @@ -195,7 +195,7 @@ func TestCreateChainTxAP3FeeChange(t *testing.T) { t.Run(test.name, func(t *testing.T) { require := require.New(t) - env := newEnvironment(true /*=postBanff*/, false /*=postCortina*/) + env := newEnvironment(t, true /*=postBanff*/, false /*=postCortina*/) env.config.ApricotPhase3Time = ap3Time defer func() { diff --git a/vms/platformvm/txs/executor/create_subnet_test.go b/vms/platformvm/txs/executor/create_subnet_test.go index f348d7624b3..182e28ae83c 100644 --- a/vms/platformvm/txs/executor/create_subnet_test.go +++ b/vms/platformvm/txs/executor/create_subnet_test.go @@ -49,7 +49,7 @@ func TestCreateSubnetTxAP3FeeChange(t *testing.T) { t.Run(test.name, func(t *testing.T) { require := require.New(t) - env := newEnvironment(false /*=postBanff*/, false /*=postCortina*/) + env := newEnvironment(t, false /*=postBanff*/, false /*=postCortina*/) env.config.ApricotPhase3Time = ap3Time env.ctx.Lock.Lock() defer func() { diff --git a/vms/platformvm/txs/executor/export_test.go b/vms/platformvm/txs/executor/export_test.go index 15f8b87fc37..af3f0618bda 100644 --- a/vms/platformvm/txs/executor/export_test.go +++ b/vms/platformvm/txs/executor/export_test.go @@ -17,7 +17,7 @@ import ( ) func TestNewExportTx(t *testing.T) { - env := newEnvironment(true /*=postBanff*/, false /*=postCortina*/) + env := newEnvironment(t, true /*=postBanff*/, false /*=postCortina*/) env.ctx.Lock.Lock() defer func() { 
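// Teardown in these tests is asserted, not ignored: the deferred
// function routes shutdownEnvironment's error through require, so a
// failing cleanup fails the test rather than panicking or being
// silently dropped. A minimal sketch of the shape (env is whatever
// fixture the test built):
//
//	defer func() {
//		require.NoError(t, shutdownEnvironment(env))
//	}()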
require.NoError(t, shutdownEnvironment(env)) diff --git a/vms/platformvm/txs/executor/helpers_test.go b/vms/platformvm/txs/executor/helpers_test.go index 1342c9e2630..d2bfe03f1f7 100644 --- a/vms/platformvm/txs/executor/helpers_test.go +++ b/vms/platformvm/txs/executor/helpers_test.go @@ -8,10 +8,13 @@ import ( "errors" "fmt" "math" + "testing" "time" "github.com/prometheus/client_golang/prometheus" + "github.com/stretchr/testify/require" + "github.com/ava-labs/avalanchego/chains" "github.com/ava-labs/avalanchego/chains/atomic" "github.com/ava-labs/avalanchego/codec" @@ -112,7 +115,7 @@ func (e *environment) SetState(blkID ids.ID, chainState state.Chain) { e.states[blkID] = chainState } -func newEnvironment(postBanff, postCortina bool) *environment { +func newEnvironment(t *testing.T, postBanff, postCortina bool) *environment { var isBootstrapped utils.Atomic[bool] isBootstrapped.Set(true) @@ -170,15 +173,18 @@ func newEnvironment(postBanff, postCortina bool) *environment { backend: backend, } - addSubnet(env, txBuilder) + addSubnet(t, env, txBuilder) return env } func addSubnet( + t *testing.T, env *environment, txBuilder builder.Builder, ) { + require := require.New(t) + // Create a subnet var err error testSubnet1, err = txBuilder.NewCreateSubnetTx( @@ -191,30 +197,21 @@ func addSubnet( []*secp256k1.PrivateKey{preFundedKeys[0]}, preFundedKeys[0].PublicKey().Address(), ) - if err != nil { - panic(err) - } + require.NoError(err) // store it stateDiff, err := state.NewDiff(lastAcceptedID, env) - if err != nil { - panic(err) - } + require.NoError(err) executor := StandardTxExecutor{ Backend: &env.backend, State: stateDiff, Tx: testSubnet1, } - err = testSubnet1.Unsigned.Visit(&executor) - if err != nil { - panic(err) - } + require.NoError(testSubnet1.Unsigned.Visit(&executor)) stateDiff.AddTx(testSubnet1, status.Committed) - if err := stateDiff.Apply(env.state); err != nil { - panic(err) - } + require.NoError(stateDiff.Apply(env.state)) } func defaultState( diff --git a/vms/platformvm/txs/executor/import_test.go b/vms/platformvm/txs/executor/import_test.go index c4aac439c10..9cbe0a517ce 100644 --- a/vms/platformvm/txs/executor/import_test.go +++ b/vms/platformvm/txs/executor/import_test.go @@ -22,7 +22,7 @@ import ( ) func TestNewImportTx(t *testing.T) { - env := newEnvironment(false /*=postBanff*/, false /*=postCortina*/) + env := newEnvironment(t, false /*=postBanff*/, false /*=postCortina*/) defer func() { require.NoError(t, shutdownEnvironment(env)) }() diff --git a/vms/platformvm/txs/executor/proposal_tx_executor_test.go b/vms/platformvm/txs/executor/proposal_tx_executor_test.go index f9f1fc333fc..c74be119785 100644 --- a/vms/platformvm/txs/executor/proposal_tx_executor_test.go +++ b/vms/platformvm/txs/executor/proposal_tx_executor_test.go @@ -87,7 +87,7 @@ func TestProposalTxExecuteAddDelegator(t *testing.T) { require.NoError(t, target.state.Commit()) } - dummyH := newEnvironment(false /*=postBanff*/, false /*=postCortina*/) + dummyH := newEnvironment(t, false /*=postBanff*/, false /*=postCortina*/) currentTimestamp := dummyH.state.GetTimestamp() type test struct { @@ -241,7 +241,7 @@ func TestProposalTxExecuteAddDelegator(t *testing.T) { for _, tt := range tests { t.Run(tt.description, func(t *testing.T) { require := require.New(t) - freshTH := newEnvironment(false /*=postBanff*/, false /*=postCortina*/) + freshTH := newEnvironment(t, false /*=postBanff*/, false /*=postCortina*/) freshTH.config.ApricotPhase3Time = tt.AP3Time defer func() { 
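// Each table case below builds and tears down its own environment via
// newEnvironment(t, ...), keeping subtests independent, and the
// assertions follow the guard shape used throughout this patch: check
// the error first, return early on expected failures, and keep the
// happy-path checks unindented. A sketch with placeholder names
// (tests, parse, and tt are illustrative, not from this file):
//
//	for _, tt := range tests {
//		t.Run(tt.description, func(t *testing.T) {
//			require := require.New(t)
//
//			got, err := parse(tt.input)
//			require.ErrorIs(err, tt.expectedErr)
//			if tt.expectedErr != nil {
//				return
//			}
//			require.Equal(tt.expected, got)
//		})
//	}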
require.NoError(shutdownEnvironment(freshTH)) @@ -282,7 +282,7 @@ func TestProposalTxExecuteAddDelegator(t *testing.T) { func TestProposalTxExecuteAddSubnetValidator(t *testing.T) { require := require.New(t) - env := newEnvironment(false /*=postBanff*/, false /*=postCortina*/) + env := newEnvironment(t, false /*=postBanff*/, false /*=postCortina*/) env.ctx.Lock.Lock() defer func() { require.NoError(shutdownEnvironment(env)) @@ -715,7 +715,7 @@ func TestProposalTxExecuteAddSubnetValidator(t *testing.T) { func TestProposalTxExecuteAddValidator(t *testing.T) { require := require.New(t) - env := newEnvironment(false /*=postBanff*/, false /*=postCortina*/) + env := newEnvironment(t, false /*=postBanff*/, false /*=postCortina*/) env.ctx.Lock.Lock() defer func() { require.NoError(shutdownEnvironment(env)) diff --git a/vms/platformvm/txs/executor/reward_validator_test.go b/vms/platformvm/txs/executor/reward_validator_test.go index 3e1ce2162ff..6081ec96a0e 100644 --- a/vms/platformvm/txs/executor/reward_validator_test.go +++ b/vms/platformvm/txs/executor/reward_validator_test.go @@ -25,7 +25,7 @@ import ( func TestRewardValidatorTxExecuteOnCommit(t *testing.T) { require := require.New(t) - env := newEnvironment(false /*=postBanff*/, false /*=postCortina*/) + env := newEnvironment(t, false /*=postBanff*/, false /*=postCortina*/) defer func() { require.NoError(shutdownEnvironment(env)) }() @@ -128,7 +128,7 @@ func TestRewardValidatorTxExecuteOnCommit(t *testing.T) { func TestRewardValidatorTxExecuteOnAbort(t *testing.T) { require := require.New(t) - env := newEnvironment(false /*=postBanff*/, false /*=postCortina*/) + env := newEnvironment(t, false /*=postBanff*/, false /*=postCortina*/) defer func() { require.NoError(shutdownEnvironment(env)) }() @@ -225,7 +225,7 @@ func TestRewardValidatorTxExecuteOnAbort(t *testing.T) { func TestRewardDelegatorTxExecuteOnCommitPreDelegateeDeferral(t *testing.T) { require := require.New(t) - env := newEnvironment(false /*=postBanff*/, false /*=postCortina*/) + env := newEnvironment(t, false /*=postBanff*/, false /*=postCortina*/) defer func() { require.NoError(shutdownEnvironment(env)) }() @@ -349,7 +349,7 @@ func TestRewardDelegatorTxExecuteOnCommitPreDelegateeDeferral(t *testing.T) { func TestRewardDelegatorTxExecuteOnCommitPostDelegateeDeferral(t *testing.T) { require := require.New(t) - env := newEnvironment(true /*=postBanff*/, true /*=postCortina*/) + env := newEnvironment(t, true /*=postBanff*/, true /*=postCortina*/) defer func() { require.NoError(shutdownEnvironment(env)) }() @@ -569,7 +569,7 @@ func TestRewardDelegatorTxExecuteOnCommitPostDelegateeDeferral(t *testing.T) { func TestRewardDelegatorTxAndValidatorTxExecuteOnCommitPostDelegateeDeferral(t *testing.T) { require := require.New(t) - env := newEnvironment(true /*=postBanff*/, true /*=postCortina*/) + env := newEnvironment(t, true /*=postBanff*/, true /*=postCortina*/) defer func() { require.NoError(shutdownEnvironment(env)) }() @@ -729,7 +729,7 @@ func TestRewardDelegatorTxAndValidatorTxExecuteOnCommitPostDelegateeDeferral(t * func TestRewardDelegatorTxExecuteOnAbort(t *testing.T) { require := require.New(t) - env := newEnvironment(false /*=postBanff*/, false /*=postCortina*/) + env := newEnvironment(t, false /*=postBanff*/, false /*=postCortina*/) defer func() { require.NoError(shutdownEnvironment(env)) }() diff --git a/vms/platformvm/txs/executor/standard_tx_executor_test.go b/vms/platformvm/txs/executor/standard_tx_executor_test.go index 4cfb1dbce12..086d8d1e7ab 100644 --- 
a/vms/platformvm/txs/executor/standard_tx_executor_test.go +++ b/vms/platformvm/txs/executor/standard_tx_executor_test.go @@ -41,7 +41,7 @@ var errTest = errors.New("non-nil error") func TestStandardTxExecutorAddValidatorTxEmptyID(t *testing.T) { require := require.New(t) - env := newEnvironment(false /*=postBanff*/, false /*=postCortina*/) + env := newEnvironment(t, false /*=postBanff*/, false /*=postCortina*/) env.ctx.Lock.Lock() defer func() { require.NoError(shutdownEnvironment(env)) @@ -161,7 +161,7 @@ func TestStandardTxExecutorAddDelegator(t *testing.T) { require.NoError(t, target.state.Commit()) } - dummyH := newEnvironment(false /*=postBanff*/, false /*=postCortina*/) + dummyH := newEnvironment(t, false /*=postBanff*/, false /*=postCortina*/) currentTimestamp := dummyH.state.GetTimestamp() type test struct { @@ -326,7 +326,7 @@ func TestStandardTxExecutorAddDelegator(t *testing.T) { for _, tt := range tests { t.Run(tt.description, func(t *testing.T) { require := require.New(t) - freshTH := newEnvironment(false /*=postBanff*/, false /*=postCortina*/) + freshTH := newEnvironment(t, false /*=postBanff*/, false /*=postCortina*/) freshTH.config.ApricotPhase3Time = tt.AP3Time defer func() { require.NoError(shutdownEnvironment(freshTH)) @@ -374,7 +374,7 @@ func TestStandardTxExecutorAddDelegator(t *testing.T) { func TestStandardTxExecutorAddSubnetValidator(t *testing.T) { require := require.New(t) - env := newEnvironment(false /*=postBanff*/, false /*=postCortina*/) + env := newEnvironment(t, false /*=postBanff*/, false /*=postCortina*/) env.ctx.Lock.Lock() defer func() { require.NoError(shutdownEnvironment(env)) @@ -802,7 +802,7 @@ func TestStandardTxExecutorAddSubnetValidator(t *testing.T) { func TestStandardTxExecutorAddValidator(t *testing.T) { require := require.New(t) - env := newEnvironment(false /*=postBanff*/, false /*=postCortina*/) + env := newEnvironment(t, false /*=postBanff*/, false /*=postCortina*/) env.ctx.Lock.Lock() defer func() { require.NoError(shutdownEnvironment(env)) @@ -975,6 +975,8 @@ func TestStandardTxExecutorAddValidator(t *testing.T) { // Returns a RemoveSubnetValidatorTx that passes syntactic verification. func newRemoveSubnetValidatorTx(t *testing.T) (*txs.RemoveSubnetValidatorTx, *txs.Tx) { + require := require.New(t) + t.Helper() creds := []verify.Verifiable{ @@ -1029,7 +1031,7 @@ func newRemoveSubnetValidatorTx(t *testing.T) (*txs.RemoveSubnetValidatorTx, *tx Unsigned: unsignedTx, Creds: creds, } - require.NoError(t, tx.Initialize(txs.Codec)) + require.NoError(tx.Initialize(txs.Codec)) return unsignedTx, tx } @@ -1330,6 +1332,8 @@ func TestStandardExecutorRemoveSubnetValidatorTx(t *testing.T) { // Returns a TransformSubnetTx that passes syntactic verification. 
func newTransformSubnetTx(t *testing.T) (*txs.TransformSubnetTx, *txs.Tx) { + require := require.New(t) + t.Helper() creds := []verify.Verifiable{ @@ -1396,7 +1400,7 @@ func newTransformSubnetTx(t *testing.T) (*txs.TransformSubnetTx, *txs.Tx) { Unsigned: unsignedTx, Creds: creds, } - require.NoError(t, tx.Initialize(txs.Codec)) + require.NoError(tx.Initialize(txs.Codec)) return unsignedTx, tx } diff --git a/vms/platformvm/txs/remove_subnet_validator_tx_test.go b/vms/platformvm/txs/remove_subnet_validator_tx_test.go index 1b31e5f363e..59446f1031d 100644 --- a/vms/platformvm/txs/remove_subnet_validator_tx_test.go +++ b/vms/platformvm/txs/remove_subnet_validator_tx_test.go @@ -144,9 +144,10 @@ func TestRemoveSubnetValidatorTxSyntacticVerify(t *testing.T) { tx := tt.txFunc(ctrl) err := tx.SyntacticVerify(ctx) require.ErrorIs(err, tt.expectedErr) - if tt.expectedErr == nil { - require.True(tx.SyntacticallyVerified) + if tt.expectedErr != nil { + return } + require.True(tx.SyntacticallyVerified) }) } } diff --git a/vms/platformvm/utxo/handler_test.go b/vms/platformvm/utxo/handler_test.go index d11ec51b651..08784fe0e32 100644 --- a/vms/platformvm/utxo/handler_test.go +++ b/vms/platformvm/utxo/handler_test.go @@ -1089,7 +1089,6 @@ func TestVerifySpendUTXOs(t *testing.T) { h.clk.Set(now) t.Run(test.description, func(t *testing.T) { - require := require.New(t) err := h.VerifySpendUTXOs( &unsignedTx, test.utxos, @@ -1098,7 +1097,7 @@ func TestVerifySpendUTXOs(t *testing.T) { test.creds, test.producedAmounts, ) - require.ErrorIs(err, test.expectedErr) + require.ErrorIs(t, err, test.expectedErr) }) } } diff --git a/vms/platformvm/vm_regression_test.go b/vms/platformvm/vm_regression_test.go index 6a81de2d634..2b01bb5f69d 100644 --- a/vms/platformvm/vm_regression_test.go +++ b/vms/platformvm/vm_regression_test.go @@ -40,7 +40,7 @@ import ( func TestAddDelegatorTxOverDelegatedRegression(t *testing.T) { require := require.New(t) - vm, _, _ := defaultVM() + vm, _, _ := defaultVM(t) vm.ctx.Lock.Lock() defer func() { require.NoError(vm.Shutdown(context.Background())) @@ -200,7 +200,7 @@ func TestAddDelegatorTxHeapCorruption(t *testing.T) { t.Run(test.name, func(t *testing.T) { require := require.New(t) - vm, _, _ := defaultVM() + vm, _, _ := defaultVM(t) vm.ApricotPhase3Time = test.ap3Time vm.ctx.Lock.Lock() @@ -334,7 +334,7 @@ func TestAddDelegatorTxHeapCorruption(t *testing.T) { // panic. 
func TestUnverifiedParentPanicRegression(t *testing.T) { require := require.New(t) - _, genesisBytes := defaultGenesis() + _, genesisBytes := defaultGenesis(t) baseDBManager := manager.NewMemDB(version.Semantic1_0_0) atomicDB := prefixdb.New([]byte{1}, baseDBManager.Current().Database) @@ -352,7 +352,7 @@ func TestUnverifiedParentPanicRegression(t *testing.T) { BanffTime: banffForkTime, }} - ctx := defaultContext() + ctx := defaultContext(t) ctx.Lock.Lock() defer func() { require.NoError(vm.Shutdown(context.Background())) @@ -461,7 +461,7 @@ func TestUnverifiedParentPanicRegression(t *testing.T) { func TestRejectedStateRegressionInvalidValidatorTimestamp(t *testing.T) { require := require.New(t) - vm, baseDB, mutableSharedMemory := defaultVM() + vm, baseDB, mutableSharedMemory := defaultVM(t) vm.ctx.Lock.Lock() defer func() { require.NoError(vm.Shutdown(context.Background())) @@ -676,7 +676,7 @@ func TestRejectedStateRegressionInvalidValidatorTimestamp(t *testing.T) { func TestRejectedStateRegressionInvalidValidatorReward(t *testing.T) { require := require.New(t) - vm, baseDB, mutableSharedMemory := defaultVM() + vm, baseDB, mutableSharedMemory := defaultVM(t) vm.ctx.Lock.Lock() defer func() { require.NoError(vm.Shutdown(context.Background())) @@ -994,7 +994,7 @@ func TestRejectedStateRegressionInvalidValidatorReward(t *testing.T) { func TestValidatorSetAtCacheOverwriteRegression(t *testing.T) { require := require.New(t) - vm, _, _ := defaultVM() + vm, _, _ := defaultVM(t) vm.ctx.Lock.Lock() defer func() { require.NoError(vm.Shutdown(context.Background())) @@ -1140,7 +1140,7 @@ func TestAddDelegatorTxAddBeforeRemove(t *testing.T) { delegator2EndTime := delegator2StartTime.Add(3 * defaultMinStakingDuration) delegator2Stake := defaultMaxValidatorStake - validatorStake - vm, _, _ := defaultVM() + vm, _, _ := defaultVM(t) vm.ctx.Lock.Lock() defer func() { @@ -1224,7 +1224,7 @@ func TestRemovePermissionedValidatorDuringPendingToCurrentTransitionNotTracked(t validatorStartTime := banffForkTime.Add(executor.SyncBound).Add(1 * time.Second) validatorEndTime := validatorStartTime.Add(360 * 24 * time.Hour) - vm, _, _ := defaultVM() + vm, _, _ := defaultVM(t) vm.ctx.Lock.Lock() defer func() { @@ -1341,7 +1341,7 @@ func TestRemovePermissionedValidatorDuringPendingToCurrentTransitionTracked(t *t validatorStartTime := banffForkTime.Add(executor.SyncBound).Add(1 * time.Second) validatorEndTime := validatorStartTime.Add(360 * 24 * time.Hour) - vm, _, _ := defaultVM() + vm, _, _ := defaultVM(t) vm.ctx.Lock.Lock() defer func() { diff --git a/vms/platformvm/vm_test.go b/vms/platformvm/vm_test.go index 5e10d73857c..a820735ca01 100644 --- a/vms/platformvm/vm_test.go +++ b/vms/platformvm/vm_test.go @@ -7,7 +7,6 @@ import ( "bytes" "context" "errors" - "fmt" "testing" "time" @@ -53,7 +52,6 @@ import ( "github.com/ava-labs/avalanchego/utils/timer" "github.com/ava-labs/avalanchego/utils/timer/mockable" "github.com/ava-labs/avalanchego/utils/units" - "github.com/ava-labs/avalanchego/utils/wrappers" "github.com/ava-labs/avalanchego/version" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/platformvm/api" @@ -136,7 +134,9 @@ type mutableSharedMemory struct { atomic.SharedMemory } -func defaultContext() *snow.Context { +func defaultContext(t *testing.T) *snow.Context { + require := require.New(t) + ctx := snow.DefaultContextTest() ctx.NetworkID = testNetworkID ctx.XChainID = xChainID @@ -144,18 +144,13 @@ func defaultContext() *snow.Context { ctx.AVAXAssetID = avaxAssetID 
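// The context setup below drops the wrappers.Errs accumulator in favor
// of asserting each call on its own, which fails fast and names the
// exact call that broke. The general before/after shape (f and g stand
// in for any fallible calls):
//
//	// before: batch the errors, panic once at the end
//	errs := wrappers.Errs{}
//	errs.Add(f(), g())
//	if errs.Errored() {
//		panic(errs.Err)
//	}
//
//	// after: one assertion per call
//	require.NoError(f())
//	require.NoError(g())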
aliaser := ids.NewAliaser() - errs := wrappers.Errs{} - errs.Add( - aliaser.Alias(constants.PlatformChainID, "P"), - aliaser.Alias(constants.PlatformChainID, constants.PlatformChainID.String()), - aliaser.Alias(xChainID, "X"), - aliaser.Alias(xChainID, xChainID.String()), - aliaser.Alias(cChainID, "C"), - aliaser.Alias(cChainID, cChainID.String()), - ) - if errs.Errored() { - panic(errs.Err) - } + require.NoError(aliaser.Alias(constants.PlatformChainID, "P")) + require.NoError(aliaser.Alias(constants.PlatformChainID, constants.PlatformChainID.String())) + require.NoError(aliaser.Alias(xChainID, "X")) + require.NoError(aliaser.Alias(xChainID, xChainID.String())) + require.NoError(aliaser.Alias(cChainID, "C")) + require.NoError(aliaser.Alias(cChainID, cChainID.String())) + ctx.BCLookup = aliaser ctx.ValidatorState = &validators.TestState{ @@ -177,15 +172,15 @@ func defaultContext() *snow.Context { // Returns: // 1) The genesis state // 2) The byte representation of the default genesis for tests -func defaultGenesis() (*api.BuildGenesisArgs, []byte) { +func defaultGenesis(t *testing.T) (*api.BuildGenesisArgs, []byte) { + require := require.New(t) + genesisUTXOs := make([]api.UTXO, len(keys)) hrp := constants.NetworkIDToHRP[testNetworkID] for i, key := range keys { id := key.PublicKey().Address() addr, err := address.FormatBech32(hrp, id.Bytes()) - if err != nil { - panic(err) - } + require.NoError(err) genesisUTXOs[i] = api.UTXO{ Amount: json.Uint64(defaultBalance), Address: addr, @@ -196,9 +191,7 @@ func defaultGenesis() (*api.BuildGenesisArgs, []byte) { for i, key := range keys { nodeID := ids.NodeID(key.PublicKey().Address()) addr, err := address.FormatBech32(hrp, nodeID.Bytes()) - if err != nil { - panic(err) - } + require.NoError(err) genesisValidators[i] = api.PermissionlessValidator{ Staker: api.Staker{ StartTime: json.Uint64(defaultValidateStartTime.Unix()), @@ -230,14 +223,10 @@ func defaultGenesis() (*api.BuildGenesisArgs, []byte) { buildGenesisResponse := api.BuildGenesisReply{} platformvmSS := api.StaticService{} - if err := platformvmSS.BuildGenesis(nil, &buildGenesisArgs, &buildGenesisResponse); err != nil { - panic(fmt.Errorf("problem while building platform chain's genesis state: %w", err)) - } + require.NoError(platformvmSS.BuildGenesis(nil, &buildGenesisArgs, &buildGenesisResponse)) genesisBytes, err := formatting.Decode(buildGenesisResponse.Encoding, buildGenesisResponse.Bytes) - if err != nil { - panic(err) - } + require.NoError(err) return &buildGenesisArgs, genesisBytes } @@ -316,7 +305,9 @@ func BuildGenesisTestWithArgs(t *testing.T, args *api.BuildGenesisArgs) (*api.Bu return &buildGenesisArgs, genesisBytes } -func defaultVM() (*VM, database.Database, *mutableSharedMemory) { +func defaultVM(t *testing.T) (*VM, database.Database, *mutableSharedMemory) { + require := require.New(t) + vdrs := validators.NewManager() primaryVdrs := validators.NewSet() _ = vdrs.Add(constants.PrimaryNetworkID, primaryVdrs) @@ -346,7 +337,7 @@ func defaultVM() (*VM, database.Database, *mutableSharedMemory) { vm.clock.Set(banffForkTime.Add(time.Second)) msgChan := make(chan common.Message, 1) - ctx := defaultContext() + ctx := defaultContext(t) m := atomic.NewMemory(atomicDB) msm := &mutableSharedMemory{ @@ -356,14 +347,14 @@ func defaultVM() (*VM, database.Database, *mutableSharedMemory) { ctx.Lock.Lock() defer ctx.Lock.Unlock() - _, genesisBytes := defaultGenesis() + _, genesisBytes := defaultGenesis(t) appSender := &common.SenderTest{} appSender.CantSendAppGossip = true 
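// Two require.NoError shapes appear in the conversion below. Calls
// whose only result is an error are wrapped inline, as with
// vm.Initialize and vm.SetState. Calls that also assign to a
// package-level variable keep an explicit `var err error`, because `:=`
// would silently declare a new local and shadow the package-level
// target. A sketch of the pitfall (pkgTx and build are illustrative):
//
//	var pkgTx *txs.Tx // package level, like testSubnet1
//
//	func setup(t *testing.T) {
//		var err error        // needed: `pkgTx, err := build()` would
//		pkgTx, err = build() // declare a new local pkgTx instead
//		require.NoError(t, err)
//	}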
appSender.SendAppGossipF = func(context.Context, []byte) error { return nil } - err := vm.Initialize( + require.NoError(vm.Initialize( context.Background(), ctx, chainDBManager, @@ -373,19 +364,14 @@ func defaultVM() (*VM, database.Database, *mutableSharedMemory) { msgChan, nil, appSender, - ) - if err != nil { - panic(err) - } + )) - err = vm.SetState(context.Background(), snow.NormalOp) - if err != nil { - panic(err) - } + require.NoError(vm.SetState(context.Background(), snow.NormalOp)) // Create a subnet and store it in testSubnet1 // Note: following Banff activation, block acceptance will move // chain time ahead + var err error testSubnet1, err = vm.txBuilder.NewCreateSubnetTx( 2, // threshold; 2 sigs from keys[0], keys[1], keys[2] needed to add validator to this subnet // control keys are keys[0], keys[1], keys[2] @@ -393,19 +379,13 @@ func defaultVM() (*VM, database.Database, *mutableSharedMemory) { []*secp256k1.PrivateKey{keys[0]}, // pays tx fee keys[0].PublicKey().Address(), // change addr ) - if err != nil { - panic(err) - } else if err := vm.Builder.AddUnverifiedTx(testSubnet1); err != nil { - panic(err) - } else if blk, err := vm.Builder.BuildBlock(context.Background()); err != nil { - panic(err) - } else if err := blk.Verify(context.Background()); err != nil { - panic(err) - } else if err := blk.Accept(context.Background()); err != nil { - panic(err) - } else if err := vm.SetPreference(context.Background(), vm.manager.LastAccepted()); err != nil { - panic(err) - } + require.NoError(err) + require.NoError(vm.Builder.AddUnverifiedTx(testSubnet1)) + blk, err := vm.Builder.BuildBlock(context.Background()) + require.NoError(err) + require.NoError(blk.Verify(context.Background())) + require.NoError(blk.Accept(context.Background())) + require.NoError(vm.SetPreference(context.Background(), vm.manager.LastAccepted())) return vm, baseDBManager.Current().Database, msm } @@ -446,7 +426,7 @@ func GenesisVMWithArgs(t *testing.T, args *api.BuildGenesisArgs) ([]byte, chan c vm.clock.Set(defaultGenesisTime) msgChan := make(chan common.Message, 1) - ctx := defaultContext() + ctx := defaultContext(t) m := atomic.NewMemory(atomicDB) @@ -498,7 +478,7 @@ func GenesisVMWithArgs(t *testing.T, args *api.BuildGenesisArgs) ([]byte, chan c // Ensure genesis state is parsed from bytes and stored correctly func TestGenesis(t *testing.T) { require := require.New(t) - vm, _, _ := defaultVM() + vm, _, _ := defaultVM(t) vm.ctx.Lock.Lock() defer func() { require.NoError(vm.Shutdown(context.Background())) @@ -513,7 +493,7 @@ func TestGenesis(t *testing.T) { require.NoError(err) require.Equal(choices.Accepted, genesisBlock.Status()) - genesisState, _ := defaultGenesis() + genesisState, _ := defaultGenesis(t) // Ensure all the genesis UTXOs are there for _, utxo := range genesisState.UTXOs { _, addrBytes, err := address.ParseBech32(utxo.Address) @@ -560,7 +540,7 @@ func TestGenesis(t *testing.T) { // accept proposal to add validator to primary network func TestAddValidatorCommit(t *testing.T) { require := require.New(t) - vm, _, _ := defaultVM() + vm, _, _ := defaultVM(t) vm.ctx.Lock.Lock() defer func() { require.NoError(vm.Shutdown(context.Background())) @@ -606,7 +586,7 @@ func TestAddValidatorCommit(t *testing.T) { // verify invalid attempt to add validator to primary network func TestInvalidAddValidatorCommit(t *testing.T) { require := require.New(t) - vm, _, _ := defaultVM() + vm, _, _ := defaultVM(t) vm.ctx.Lock.Lock() defer func() { require.NoError(vm.Shutdown(context.Background())) @@ -660,7 +640,7 @@ 
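The call-site churn in the hunks that follow (defaultVM() becoming defaultVM(t), and likewise defaultGenesis and defaultContext) is the mechanical half of one refactor: fixtures take *testing.T and report their own setup failures. A minimal self-contained sketch of the conversion, with fixture and build as hypothetical stand-ins rather than names from this repo:

	package example

	import (
		"testing"

		"github.com/stretchr/testify/require"
	)

	type fixture struct{}

	func build() (*fixture, error) { return &fixture{}, nil }

	// Before:
	//
	//	func defaultFixture() *fixture {
	//		f, err := build()
	//		if err != nil {
	//			panic(err) // crashes the run with a stack trace,
	//			           // not a normal, attributed test failure
	//		}
	//		return f
	//	}

	// After: the helper takes *testing.T, so a setup failure fails
	// (and names) the calling test.
	func defaultFixture(t *testing.T) *fixture {
		require := require.New(t)

		f, err := build()
		require.NoError(err)
		return f
	}

	func TestUsesFixture(t *testing.T) {
		require.NotNil(t, defaultFixture(t))
	}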
func TestInvalidAddValidatorCommit(t *testing.T) { // Reject attempt to add validator to primary network func TestAddValidatorReject(t *testing.T) { require := require.New(t) - vm, _, _ := defaultVM() + vm, _, _ := defaultVM(t) vm.ctx.Lock.Lock() defer func() { require.NoError(vm.Shutdown(context.Background())) @@ -704,7 +684,7 @@ func TestAddValidatorReject(t *testing.T) { // Reject proposal to add validator to primary network func TestAddValidatorInvalidNotReissued(t *testing.T) { require := require.New(t) - vm, _, _ := defaultVM() + vm, _, _ := defaultVM(t) vm.ctx.Lock.Lock() defer func() { require.NoError(vm.Shutdown(context.Background())) @@ -738,7 +718,7 @@ func TestAddValidatorInvalidNotReissued(t *testing.T) { // Accept proposal to add validator to subnet func TestAddSubnetValidatorAccept(t *testing.T) { require := require.New(t) - vm, _, _ := defaultVM() + vm, _, _ := defaultVM(t) vm.ctx.Lock.Lock() defer func() { require.NoError(vm.Shutdown(context.Background())) @@ -784,7 +764,7 @@ func TestAddSubnetValidatorAccept(t *testing.T) { // Reject proposal to add validator to subnet func TestAddSubnetValidatorReject(t *testing.T) { require := require.New(t) - vm, _, _ := defaultVM() + vm, _, _ := defaultVM(t) vm.ctx.Lock.Lock() defer func() { require.NoError(vm.Shutdown(context.Background())) @@ -829,7 +809,7 @@ func TestAddSubnetValidatorReject(t *testing.T) { // Test case where primary network validator rewarded func TestRewardValidatorAccept(t *testing.T) { require := require.New(t) - vm, _, _ := defaultVM() + vm, _, _ := defaultVM(t) vm.ctx.Lock.Lock() defer func() { require.NoError(vm.Shutdown(context.Background())) @@ -924,7 +904,7 @@ func TestRewardValidatorAccept(t *testing.T) { // Test case where primary network validator not rewarded func TestRewardValidatorReject(t *testing.T) { require := require.New(t) - vm, _, _ := defaultVM() + vm, _, _ := defaultVM(t) vm.ctx.Lock.Lock() defer func() { require.NoError(vm.Shutdown(context.Background())) @@ -1015,7 +995,7 @@ func TestRewardValidatorReject(t *testing.T) { // Test case where primary network validator is preferred to be rewarded func TestRewardValidatorPreferred(t *testing.T) { require := require.New(t) - vm, _, _ := defaultVM() + vm, _, _ := defaultVM(t) vm.ctx.Lock.Lock() defer func() { require.NoError(vm.Shutdown(context.Background())) @@ -1107,7 +1087,7 @@ func TestRewardValidatorPreferred(t *testing.T) { // Ensure BuildBlock errors when there is no block to build func TestUnneededBuildBlock(t *testing.T) { require := require.New(t) - vm, _, _ := defaultVM() + vm, _, _ := defaultVM(t) vm.ctx.Lock.Lock() defer func() { require.NoError(vm.Shutdown(context.Background())) @@ -1120,7 +1100,7 @@ func TestUnneededBuildBlock(t *testing.T) { // test acceptance of proposal to create a new chain func TestCreateChain(t *testing.T) { require := require.New(t) - vm, _, _ := defaultVM() + vm, _, _ := defaultVM(t) vm.ctx.Lock.Lock() defer func() { require.NoError(vm.Shutdown(context.Background())) @@ -1171,7 +1151,7 @@ func TestCreateChain(t *testing.T) { // 4) Advance timestamp to validator's end time (removing validator from current) func TestCreateSubnet(t *testing.T) { require := require.New(t) - vm, _, _ := defaultVM() + vm, _, _ := defaultVM(t) vm.ctx.Lock.Lock() defer func() { require.NoError(vm.Shutdown(context.Background())) @@ -1282,7 +1262,7 @@ func TestCreateSubnet(t *testing.T) { // test asset import func TestAtomicImport(t *testing.T) { require := require.New(t) - vm, baseDB, mutableSharedMemory := defaultVM() + vm, baseDB, 
mutableSharedMemory := defaultVM(t) vm.ctx.Lock.Lock() defer func() { require.NoError(vm.Shutdown(context.Background())) @@ -1370,7 +1350,7 @@ func TestAtomicImport(t *testing.T) { // test optimistic asset import func TestOptimisticAtomicImport(t *testing.T) { require := require.New(t) - vm, _, _ := defaultVM() + vm, _, _ := defaultVM(t) vm.ctx.Lock.Lock() defer func() { require.NoError(vm.Shutdown(context.Background())) @@ -1431,7 +1411,7 @@ func TestOptimisticAtomicImport(t *testing.T) { // test restarting the node func TestRestartFullyAccepted(t *testing.T) { require := require.New(t) - _, genesisBytes := defaultGenesis() + _, genesisBytes := defaultGenesis(t) db := manager.NewMemDB(version.Semantic1_0_0) firstDB := db.NewPrefixDBManager([]byte{}) @@ -1448,7 +1428,7 @@ func TestRestartFullyAccepted(t *testing.T) { BanffTime: banffForkTime, }} - firstCtx := defaultContext() + firstCtx := defaultContext(t) baseDBManager := manager.NewMemDB(version.Semantic1_0_0) atomicDB := prefixdb.New([]byte{1}, baseDBManager.Current().Database) @@ -1536,7 +1516,7 @@ func TestRestartFullyAccepted(t *testing.T) { BanffTime: banffForkTime, }} - secondCtx := defaultContext() + secondCtx := defaultContext(t) secondCtx.SharedMemory = msm secondVM.clock.Set(initialClkTime) secondCtx.Lock.Lock() @@ -1568,7 +1548,7 @@ func TestRestartFullyAccepted(t *testing.T) { func TestBootstrapPartiallyAccepted(t *testing.T) { require := require.New(t) - _, genesisBytes := defaultGenesis() + _, genesisBytes := defaultGenesis(t) baseDBManager := manager.NewMemDB(version.Semantic1_0_0) vmDBManager := baseDBManager.NewPrefixDBManager([]byte("vm")) @@ -1592,7 +1572,7 @@ func TestBootstrapPartiallyAccepted(t *testing.T) { initialClkTime := banffForkTime.Add(time.Second) vm.clock.Set(initialClkTime) - ctx := defaultContext() + ctx := defaultContext(t) atomicDB := prefixdb.New([]byte{1}, baseDBManager.Current().Database) m := atomic.NewMemory(atomicDB) @@ -1893,7 +1873,7 @@ func TestBootstrapPartiallyAccepted(t *testing.T) { func TestUnverifiedParent(t *testing.T) { require := require.New(t) - _, genesisBytes := defaultGenesis() + _, genesisBytes := defaultGenesis(t) dbManager := manager.NewMemDB(version.Semantic1_0_0) vdrs := validators.NewManager() @@ -1911,7 +1891,7 @@ func TestUnverifiedParent(t *testing.T) { initialClkTime := banffForkTime.Add(time.Second) vm.clock.Set(initialClkTime) - ctx := defaultContext() + ctx := defaultContext(t) ctx.Lock.Lock() defer func() { require.NoError(vm.Shutdown(context.Background())) @@ -2002,7 +1982,7 @@ func TestUnverifiedParent(t *testing.T) { } func TestMaxStakeAmount(t *testing.T) { - vm, _, _ := defaultVM() + vm, _, _ := defaultVM(t) vm.ctx.Lock.Lock() defer func() { require.NoError(t, vm.Shutdown(context.Background())) @@ -2053,7 +2033,7 @@ func TestMaxStakeAmount(t *testing.T) { func TestUptimeDisallowedWithRestart(t *testing.T) { require := require.New(t) - _, genesisBytes := defaultGenesis() + _, genesisBytes := defaultGenesis(t) db := manager.NewMemDB(version.Semantic1_0_0) firstDB := db.NewPrefixDBManager([]byte{}) @@ -2069,7 +2049,7 @@ func TestUptimeDisallowedWithRestart(t *testing.T) { BanffTime: banffForkTime, }} - firstCtx := defaultContext() + firstCtx := defaultContext(t) firstCtx.Lock.Lock() firstMsgChan := make(chan common.Message, 1) @@ -2110,7 +2090,7 @@ func TestUptimeDisallowedWithRestart(t *testing.T) { BanffTime: banffForkTime, }} - secondCtx := defaultContext() + secondCtx := defaultContext(t) secondCtx.Lock.Lock() defer func() { 
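// The propertyfx conversions further below go one step past NoError:
// where the old tests only checked `err == nil` and called t.Fatalf,
// the new ones assert the exact sentinel with require.ErrorIs, so a
// different failure mode can no longer satisfy the test. The shape,
// with errWrongType as a stand-in sentinel:
//
//	var errWrongType = errors.New("wrong type")
//
//	err := op.Verify()
//	require.ErrorIs(t, err, errWrongType)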
require.NoError(secondVM.Shutdown(context.Background())) @@ -2227,7 +2207,7 @@ func TestUptimeDisallowedWithRestart(t *testing.T) { func TestUptimeDisallowedAfterNeverConnecting(t *testing.T) { require := require.New(t) - _, genesisBytes := defaultGenesis() + _, genesisBytes := defaultGenesis(t) db := manager.NewMemDB(version.Semantic1_0_0) vdrs := validators.NewManager() @@ -2242,7 +2222,7 @@ func TestUptimeDisallowedAfterNeverConnecting(t *testing.T) { BanffTime: banffForkTime, }} - ctx := defaultContext() + ctx := defaultContext(t) ctx.Lock.Lock() msgChan := make(chan common.Message, 1) @@ -2335,7 +2315,7 @@ func TestVM_GetValidatorSet(t *testing.T) { defer ctrl.Finish() // Setup VM - _, genesisBytes := defaultGenesis() + _, genesisBytes := defaultGenesis(t) db := manager.NewMemDB(version.Semantic1_0_0) vdrManager := validators.NewManager() @@ -2351,7 +2331,7 @@ func TestVM_GetValidatorSet(t *testing.T) { BanffTime: mockable.MaxTime, }} - ctx := defaultContext() + ctx := defaultContext(t) ctx.Lock.Lock() msgChan := make(chan common.Message, 1) @@ -2772,7 +2752,7 @@ func TestRemovePermissionedValidatorDuringAddPending(t *testing.T) { validatorStartTime := banffForkTime.Add(txexecutor.SyncBound).Add(1 * time.Second) validatorEndTime := validatorStartTime.Add(360 * 24 * time.Hour) - vm, _, _ := defaultVM() + vm, _, _ := defaultVM(t) vm.ctx.Lock.Lock() defer func() { diff --git a/vms/platformvm/warp/gwarp/signer_test.go b/vms/platformvm/warp/gwarp/signer_test.go index ec443415697..3876b40966a 100644 --- a/vms/platformvm/warp/gwarp/signer_test.go +++ b/vms/platformvm/warp/gwarp/signer_test.go @@ -39,9 +39,7 @@ func setupSigner(t testing.TB) *testSigner { } listener, err := grpcutils.NewListener() - if err != nil { - t.Fatalf("Failed to create listener: %s", err) - } + require.NoError(err) serverCloser := grpcutils.ServerCloser{} server := grpcutils.NewServer() diff --git a/vms/platformvm/warp/validator_test.go b/vms/platformvm/warp/validator_test.go index 1631f50d312..fb61129d0b5 100644 --- a/vms/platformvm/warp/validator_test.go +++ b/vms/platformvm/warp/validator_test.go @@ -245,9 +245,10 @@ func TestFilterValidators(t *testing.T) { vdrs, err := FilterValidators(tt.indices, tt.vdrs) require.ErrorIs(err, tt.expectedErr) - if err == nil { - require.Equal(tt.expectedVdrs, vdrs) + if tt.expectedErr != nil { + return } + require.Equal(tt.expectedVdrs, vdrs) }) } } @@ -299,9 +300,10 @@ func TestSumWeight(t *testing.T) { sum, err := SumWeight(tt.vdrs) require.ErrorIs(err, tt.expectedErr) - if err == nil { - require.Equal(tt.expectedSum, sum) + if tt.expectedErr != nil { + return } + require.Equal(tt.expectedSum, sum) }) } } diff --git a/vms/propertyfx/burn_operation_test.go b/vms/propertyfx/burn_operation_test.go index 0b5715ea03c..e9e9735efd3 100644 --- a/vms/propertyfx/burn_operation_test.go +++ b/vms/propertyfx/burn_operation_test.go @@ -6,6 +6,8 @@ package propertyfx import ( "testing" + "github.com/stretchr/testify/require" + "github.com/ava-labs/avalanchego/vms/components/verify" "github.com/ava-labs/avalanchego/vms/secp256k1fx" ) @@ -14,21 +16,17 @@ func TestBurnOperationInvalid(t *testing.T) { op := BurnOperation{Input: secp256k1fx.Input{ SigIndices: []uint32{1, 0}, }} - if err := op.Verify(); err == nil { - t.Fatalf("operation should have failed verification") - } + err := op.Verify() + require.ErrorIs(t, err, secp256k1fx.ErrInputIndicesNotSortedUnique) } func TestBurnOperationNumberOfOutput(t *testing.T) { op := BurnOperation{} - if outs := op.Outs(); len(outs) != 0 { - t.Fatalf("wrong 
number of outputs") - } + require.Empty(t, op.Outs()) } func TestBurnOperationState(t *testing.T) { intf := interface{}(&BurnOperation{}) - if _, ok := intf.(verify.State); ok { - t.Fatalf("shouldn't be marked as state") - } + _, ok := intf.(verify.State) + require.False(t, ok) } diff --git a/vms/propertyfx/credential_test.go b/vms/propertyfx/credential_test.go index d03d5b2b156..4be34acd324 100644 --- a/vms/propertyfx/credential_test.go +++ b/vms/propertyfx/credential_test.go @@ -6,12 +6,13 @@ package propertyfx import ( "testing" + "github.com/stretchr/testify/require" + "github.com/ava-labs/avalanchego/vms/components/verify" ) func TestCredentialState(t *testing.T) { intf := interface{}(&Credential{}) - if _, ok := intf.(verify.State); ok { - t.Fatalf("shouldn't be marked as state") - } + _, ok := intf.(verify.State) + require.False(t, ok) } diff --git a/vms/propertyfx/factory_test.go b/vms/propertyfx/factory_test.go index 25dc8935754..ec921aef3f6 100644 --- a/vms/propertyfx/factory_test.go +++ b/vms/propertyfx/factory_test.go @@ -6,14 +6,16 @@ package propertyfx import ( "testing" + "github.com/stretchr/testify/require" + "github.com/ava-labs/avalanchego/utils/logging" ) func TestFactory(t *testing.T) { + require := require.New(t) + factory := Factory{} - if fx, err := factory.New(logging.NoLog{}); err != nil { - t.Fatal(err) - } else if fx == nil { - t.Fatalf("Factory.New returned nil") - } + fx, err := factory.New(logging.NoLog{}) + require.NoError(err) + require.NotNil(fx) } diff --git a/vms/propertyfx/fx_test.go b/vms/propertyfx/fx_test.go index f46602ab468..fdab69bb5bf 100644 --- a/vms/propertyfx/fx_test.go +++ b/vms/propertyfx/fx_test.go @@ -7,6 +7,8 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "github.com/ava-labs/avalanchego/codec/linearcodec" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" @@ -41,21 +43,18 @@ func TestFxInitialize(t *testing.T) { Log: logging.NoLog{}, } fx := Fx{} - err := fx.Initialize(&vm) - if err != nil { - t.Fatal(err) - } + require.NoError(t, fx.Initialize(&vm)) } func TestFxInitializeInvalid(t *testing.T) { fx := Fx{} err := fx.Initialize(nil) - if err == nil { - t.Fatalf("Should have returned an error") - } + require.ErrorIs(t, err, secp256k1fx.ErrWrongVMType) } func TestFxVerifyMintOperation(t *testing.T) { + require := require.New(t) + vm := secp256k1fx.TestVM{ Codec: linearcodec.NewDefault(), Log: logging.NoLog{}, @@ -64,9 +63,7 @@ func TestFxVerifyMintOperation(t *testing.T) { vm.Clk.Set(date) fx := Fx{} - if err := fx.Initialize(&vm); err != nil { - t.Fatal(err) - } + require.NoError(fx.Initialize(&vm)) tx := &secp256k1fx.TestTx{ UnsignedBytes: txBytes, } @@ -94,12 +91,12 @@ func TestFxVerifyMintOperation(t *testing.T) { } utxos := []interface{}{utxo} - if err := fx.VerifyOperation(tx, op, cred, utxos); err != nil { - t.Fatal(err) - } + require.NoError(fx.VerifyOperation(tx, op, cred, utxos)) } func TestFxVerifyMintOperationWrongTx(t *testing.T) { + require := require.New(t) + vm := secp256k1fx.TestVM{ Codec: linearcodec.NewDefault(), Log: logging.NoLog{}, @@ -108,9 +105,7 @@ func TestFxVerifyMintOperationWrongTx(t *testing.T) { vm.Clk.Set(date) fx := Fx{} - if err := fx.Initialize(&vm); err != nil { - t.Fatal(err) - } + require.NoError(fx.Initialize(&vm)) cred := &Credential{Credential: secp256k1fx.Credential{ Sigs: [][secp256k1.SignatureLen]byte{ sigBytes, @@ -129,12 +124,13 @@ func TestFxVerifyMintOperationWrongTx(t *testing.T) { } utxos := []interface{}{utxo} - if err := 
fx.VerifyOperation(nil, op, cred, utxos); err == nil { - t.Fatalf("VerifyOperation should have errored due to an invalid tx") - } + err := fx.VerifyOperation(nil, op, cred, utxos) + require.ErrorIs(err, errWrongTxType) } func TestFxVerifyMintOperationWrongNumberUTXOs(t *testing.T) { + require := require.New(t) + vm := secp256k1fx.TestVM{ Codec: linearcodec.NewDefault(), Log: logging.NoLog{}, @@ -143,9 +139,7 @@ func TestFxVerifyMintOperationWrongNumberUTXOs(t *testing.T) { vm.Clk.Set(date) fx := Fx{} - if err := fx.Initialize(&vm); err != nil { - t.Fatal(err) - } + require.NoError(fx.Initialize(&vm)) tx := &secp256k1fx.TestTx{ UnsignedBytes: txBytes, } @@ -161,12 +155,13 @@ func TestFxVerifyMintOperationWrongNumberUTXOs(t *testing.T) { } utxos := []interface{}{} - if err := fx.VerifyOperation(tx, op, cred, utxos); err == nil { - t.Fatalf("VerifyOperation should have errored due to not enough utxos") - } + err := fx.VerifyOperation(tx, op, cred, utxos) + require.ErrorIs(err, errWrongNumberOfUTXOs) } func TestFxVerifyMintOperationWrongCredential(t *testing.T) { + require := require.New(t) + vm := secp256k1fx.TestVM{ Codec: linearcodec.NewDefault(), Log: logging.NoLog{}, @@ -175,9 +170,7 @@ func TestFxVerifyMintOperationWrongCredential(t *testing.T) { vm.Clk.Set(date) fx := Fx{} - if err := fx.Initialize(&vm); err != nil { - t.Fatal(err) - } + require.NoError(fx.Initialize(&vm)) tx := &secp256k1fx.TestTx{ UnsignedBytes: txBytes, } @@ -194,12 +187,13 @@ func TestFxVerifyMintOperationWrongCredential(t *testing.T) { } utxos := []interface{}{utxo} - if err := fx.VerifyOperation(tx, op, nil, utxos); err == nil { - t.Fatalf("VerifyOperation should have errored due to a bad credential") - } + err := fx.VerifyOperation(tx, op, nil, utxos) + require.ErrorIs(err, errWrongCredentialType) } func TestFxVerifyMintOperationInvalidUTXO(t *testing.T) { + require := require.New(t) + vm := secp256k1fx.TestVM{ Codec: linearcodec.NewDefault(), Log: logging.NoLog{}, @@ -208,9 +202,7 @@ func TestFxVerifyMintOperationInvalidUTXO(t *testing.T) { vm.Clk.Set(date) fx := Fx{} - if err := fx.Initialize(&vm); err != nil { - t.Fatal(err) - } + require.NoError(fx.Initialize(&vm)) tx := &secp256k1fx.TestTx{ UnsignedBytes: txBytes, } @@ -226,12 +218,13 @@ func TestFxVerifyMintOperationInvalidUTXO(t *testing.T) { } utxos := []interface{}{nil} - if err := fx.VerifyOperation(tx, op, cred, utxos); err == nil { - t.Fatalf("VerifyOperation should have errored due to an invalid utxo") - } + err := fx.VerifyOperation(tx, op, cred, utxos) + require.ErrorIs(err, errWrongUTXOType) } func TestFxVerifyMintOperationFailingVerification(t *testing.T) { + require := require.New(t) + vm := secp256k1fx.TestVM{ Codec: linearcodec.NewDefault(), Log: logging.NoLog{}, @@ -240,9 +233,7 @@ func TestFxVerifyMintOperationFailingVerification(t *testing.T) { vm.Clk.Set(date) fx := Fx{} - if err := fx.Initialize(&vm); err != nil { - t.Fatal(err) - } + require.NoError(fx.Initialize(&vm)) tx := &secp256k1fx.TestTx{ UnsignedBytes: txBytes, } @@ -265,12 +256,13 @@ func TestFxVerifyMintOperationFailingVerification(t *testing.T) { } utxos := []interface{}{utxo} - if err := fx.VerifyOperation(tx, op, cred, utxos); err == nil { - t.Fatalf("VerifyOperation should have errored due to an invalid utxo output") - } + err := fx.VerifyOperation(tx, op, cred, utxos) + require.ErrorIs(err, secp256k1fx.ErrAddrsNotSortedUnique) } func TestFxVerifyMintOperationInvalidGroupID(t *testing.T) { + require := require.New(t) + vm := secp256k1fx.TestVM{ Codec: 
linearcodec.NewDefault(), Log: logging.NoLog{}, @@ -279,9 +271,7 @@ func TestFxVerifyMintOperationInvalidGroupID(t *testing.T) { vm.Clk.Set(date) fx := Fx{} - if err := fx.Initialize(&vm); err != nil { - t.Fatal(err) - } + require.NoError(fx.Initialize(&vm)) tx := &secp256k1fx.TestTx{ UnsignedBytes: txBytes, } @@ -303,12 +293,13 @@ func TestFxVerifyMintOperationInvalidGroupID(t *testing.T) { } utxos := []interface{}{utxo} - if err := fx.VerifyOperation(tx, op, cred, utxos); err == nil { - t.Fatalf("VerifyOperation should have errored due to an invalid mint output") - } + err := fx.VerifyOperation(tx, op, cred, utxos) + require.ErrorIs(err, errWrongMintOutput) } func TestFxVerifyTransferOperation(t *testing.T) { + require := require.New(t) + vm := secp256k1fx.TestVM{ Codec: linearcodec.NewDefault(), Log: logging.NoLog{}, @@ -317,9 +308,7 @@ func TestFxVerifyTransferOperation(t *testing.T) { vm.Clk.Set(date) fx := Fx{} - if err := fx.Initialize(&vm); err != nil { - t.Fatal(err) - } + require.NoError(fx.Initialize(&vm)) tx := &secp256k1fx.TestTx{ UnsignedBytes: txBytes, } @@ -339,12 +328,12 @@ func TestFxVerifyTransferOperation(t *testing.T) { }} utxos := []interface{}{utxo} - if err := fx.VerifyOperation(tx, op, cred, utxos); err != nil { - t.Fatal(err) - } + require.NoError(fx.VerifyOperation(tx, op, cred, utxos)) } func TestFxVerifyTransferOperationWrongUTXO(t *testing.T) { + require := require.New(t) + vm := secp256k1fx.TestVM{ Codec: linearcodec.NewDefault(), Log: logging.NoLog{}, @@ -353,9 +342,7 @@ func TestFxVerifyTransferOperationWrongUTXO(t *testing.T) { vm.Clk.Set(date) fx := Fx{} - if err := fx.Initialize(&vm); err != nil { - t.Fatal(err) - } + require.NoError(fx.Initialize(&vm)) tx := &secp256k1fx.TestTx{ UnsignedBytes: txBytes, } @@ -369,12 +356,13 @@ func TestFxVerifyTransferOperationWrongUTXO(t *testing.T) { }} utxos := []interface{}{nil} - if err := fx.VerifyOperation(tx, op, cred, utxos); err == nil { - t.Fatalf("VerifyOperation should have errored due to an invalid utxo") - } + err := fx.VerifyOperation(tx, op, cred, utxos) + require.ErrorIs(err, errWrongUTXOType) } func TestFxVerifyTransferOperationFailedVerify(t *testing.T) { + require := require.New(t) + vm := secp256k1fx.TestVM{ Codec: linearcodec.NewDefault(), Log: logging.NoLog{}, @@ -383,9 +371,7 @@ func TestFxVerifyTransferOperationFailedVerify(t *testing.T) { vm.Clk.Set(date) fx := Fx{} - if err := fx.Initialize(&vm); err != nil { - t.Fatal(err) - } + require.NoError(fx.Initialize(&vm)) tx := &secp256k1fx.TestTx{ UnsignedBytes: txBytes, } @@ -405,12 +391,13 @@ func TestFxVerifyTransferOperationFailedVerify(t *testing.T) { }} utxos := []interface{}{utxo} - if err := fx.VerifyOperation(tx, op, cred, utxos); err == nil { - t.Fatalf("VerifyOperation should have errored due to an invalid utxo output") - } + err := fx.VerifyOperation(tx, op, cred, utxos) + require.ErrorIs(err, secp256k1fx.ErrInputIndicesNotSortedUnique) } func TestFxVerifyOperationUnknownOperation(t *testing.T) { + require := require.New(t) + vm := secp256k1fx.TestVM{ Codec: linearcodec.NewDefault(), Log: logging.NoLog{}, @@ -419,9 +406,7 @@ func TestFxVerifyOperationUnknownOperation(t *testing.T) { vm.Clk.Set(date) fx := Fx{} - if err := fx.Initialize(&vm); err != nil { - t.Fatal(err) - } + require.NoError(fx.Initialize(&vm)) tx := &secp256k1fx.TestTx{ UnsignedBytes: txBytes, } @@ -438,12 +423,13 @@ func TestFxVerifyOperationUnknownOperation(t *testing.T) { }} utxos := []interface{}{utxo} - if err := fx.VerifyOperation(tx, nil, cred, utxos); err == nil 
{ - t.Fatalf("VerifyOperation should have errored due to an unknown operation") - } + err := fx.VerifyOperation(tx, nil, cred, utxos) + require.ErrorIs(err, errWrongOperationType) } func TestFxVerifyTransfer(t *testing.T) { + require := require.New(t) + vm := secp256k1fx.TestVM{ Codec: linearcodec.NewDefault(), Log: logging.NoLog{}, @@ -452,10 +438,7 @@ func TestFxVerifyTransfer(t *testing.T) { vm.Clk.Set(date) fx := Fx{} - if err := fx.Initialize(&vm); err != nil { - t.Fatal(err) - } - if err := fx.VerifyTransfer(nil, nil, nil, nil); err == nil { - t.Fatalf("this Fx doesn't support transfers") - } + require.NoError(fx.Initialize(&vm)) + err := fx.VerifyTransfer(nil, nil, nil, nil) + require.ErrorIs(err, errCantTransfer) } diff --git a/vms/propertyfx/mint_operation_test.go b/vms/propertyfx/mint_operation_test.go index 80e5cc2417b..138d989d329 100644 --- a/vms/propertyfx/mint_operation_test.go +++ b/vms/propertyfx/mint_operation_test.go @@ -6,15 +6,16 @@ package propertyfx import ( "testing" + "github.com/stretchr/testify/require" + "github.com/ava-labs/avalanchego/vms/components/verify" "github.com/ava-labs/avalanchego/vms/secp256k1fx" ) func TestMintOperationVerifyNil(t *testing.T) { op := (*MintOperation)(nil) - if err := op.Verify(); err == nil { - t.Fatalf("nil operation should have failed verification") - } + err := op.Verify() + require.ErrorIs(t, err, errNilMintOperation) } func TestMintOperationVerifyInvalidOutput(t *testing.T) { @@ -25,21 +26,17 @@ func TestMintOperationVerifyInvalidOutput(t *testing.T) { }, }, } - if err := op.Verify(); err == nil { - t.Fatalf("operation should have failed verification") - } + err := op.Verify() + require.ErrorIs(t, err, secp256k1fx.ErrOutputUnspendable) } func TestMintOperationOuts(t *testing.T) { op := MintOperation{} - if outs := op.Outs(); len(outs) != 2 { - t.Fatalf("Wrong number of outputs returned") - } + require.Len(t, op.Outs(), 2) } func TestMintOperationState(t *testing.T) { intf := interface{}(&MintOperation{}) - if _, ok := intf.(verify.State); ok { - t.Fatalf("shouldn't be marked as state") - } + _, ok := intf.(verify.State) + require.False(t, ok) } diff --git a/vms/propertyfx/mint_output_test.go b/vms/propertyfx/mint_output_test.go index 9e79f6a2dae..0b4b76c55f8 100644 --- a/vms/propertyfx/mint_output_test.go +++ b/vms/propertyfx/mint_output_test.go @@ -6,12 +6,13 @@ package propertyfx import ( "testing" + "github.com/stretchr/testify/require" + "github.com/ava-labs/avalanchego/vms/components/verify" ) func TestMintOutputState(t *testing.T) { intf := interface{}(&MintOutput{}) - if _, ok := intf.(verify.State); !ok { - t.Fatalf("should be marked as state") - } + _, ok := intf.(verify.State) + require.True(t, ok) } diff --git a/vms/propertyfx/owned_output_test.go b/vms/propertyfx/owned_output_test.go index c08c382f8a3..dbc7bea6369 100644 --- a/vms/propertyfx/owned_output_test.go +++ b/vms/propertyfx/owned_output_test.go @@ -6,12 +6,13 @@ package propertyfx import ( "testing" + "github.com/stretchr/testify/require" + "github.com/ava-labs/avalanchego/vms/components/verify" ) func TestOwnedOutputState(t *testing.T) { intf := interface{}(&OwnedOutput{}) - if _, ok := intf.(verify.State); !ok { - t.Fatalf("should be marked as state") - } + _, ok := intf.(verify.State) + require.True(t, ok) } diff --git a/vms/proposervm/batched_vm_test.go b/vms/proposervm/batched_vm_test.go index 70e462cffed..64b3760d05f 100644 --- a/vms/proposervm/batched_vm_test.go +++ b/vms/proposervm/batched_vm_test.go @@ -95,7 +95,7 @@ func 
TestGetAncestorsPreForkOnly(t *testing.T) { return coreBlk2, nil } builtBlk2, err := proRemoteVM.BuildBlock(context.Background()) - require.NoError(err, "Could not build proposer block") + require.NoError(err) // prepare build of next block require.NoError(proRemoteVM.SetPreference(context.Background(), builtBlk2.ID())) @@ -122,7 +122,7 @@ func TestGetAncestorsPreForkOnly(t *testing.T) { return coreBlk3, nil } builtBlk3, err := proRemoteVM.BuildBlock(context.Background()) - require.NoError(err, "Could not build proposer block") + require.NoError(err) // ...Call GetAncestors on them ... // Note: we assumed that if blkID is not known, that's NOT an error. @@ -160,8 +160,8 @@ func TestGetAncestorsPreForkOnly(t *testing.T) { ) // ... and check returned values are as expected - require.NoError(err, "Error calling GetAncestors: %v", err) - require.Len(res, 3, "GetAncestor returned %v entries instead of %v", len(res), 3) + require.NoError(err) + require.Len(res, 3) require.Equal(res[0], builtBlk3.Bytes()) require.Equal(res[1], builtBlk2.Bytes()) require.Equal(res[2], builtBlk1.Bytes()) @@ -177,7 +177,7 @@ func TestGetAncestorsPreForkOnly(t *testing.T) { ) require.NoError(err, "Error calling GetAncestors: %v", err) require.Len(res, 1, "GetAncestor returned %v entries instead of %v", len(res), 1) - require.Equal(res[0], builtBlk1.Bytes()) + require.Equal(builtBlk1.Bytes(), res[0]) // a faulty call reqBlkID = ids.Empty @@ -232,7 +232,7 @@ func TestGetAncestorsPostForkOnly(t *testing.T) { return coreBlk2, nil } builtBlk2, err := proRemoteVM.BuildBlock(context.Background()) - require.NoError(err, "Could not build proposer block") + require.NoError(err) // prepare build of next block require.NoError(builtBlk2.Verify(context.Background())) @@ -253,7 +253,7 @@ func TestGetAncestorsPostForkOnly(t *testing.T) { return coreBlk3, nil } builtBlk3, err := proRemoteVM.BuildBlock(context.Background()) - require.NoError(err, "Could not build proposer block") + require.NoError(err) require.NoError(builtBlk3.Verify(context.Background())) require.NoError(proRemoteVM.SetPreference(context.Background(), builtBlk3.ID())) @@ -326,7 +326,7 @@ func TestGetAncestorsPostForkOnly(t *testing.T) { ) require.NoError(err, "Error calling GetAncestors: %v", err) require.Len(res, 1, "GetAncestor returned %v entries instead of %v", len(res), 1) - require.Equal(res[0], builtBlk1.Bytes()) + require.Equal(builtBlk1.Bytes(), res[0]) // a faulty call reqBlkID = ids.Empty @@ -394,7 +394,7 @@ func TestGetAncestorsAtSnomanPlusPlusFork(t *testing.T) { return coreBlk2, nil } builtBlk2, err := proRemoteVM.BuildBlock(context.Background()) - require.NoError(err, "Could not build proposer block") + require.NoError(err) require.IsType(&preForkBlock{}, builtBlk2) // prepare build of next block @@ -424,7 +424,7 @@ func TestGetAncestorsAtSnomanPlusPlusFork(t *testing.T) { return coreBlk3, nil } builtBlk3, err := proRemoteVM.BuildBlock(context.Background()) - require.NoError(err, "Could not build proposer block") + require.NoError(err) require.IsType(&postForkBlock{}, builtBlk3) // prepare build of next block @@ -446,7 +446,7 @@ func TestGetAncestorsAtSnomanPlusPlusFork(t *testing.T) { return coreBlk4, nil } builtBlk4, err := proRemoteVM.BuildBlock(context.Background()) - require.NoError(err, "Could not build proposer block") + require.NoError(err) require.IsType(&postForkBlock{}, builtBlk4) require.NoError(builtBlk4.Verify(context.Background())) @@ -492,12 +492,12 @@ func TestGetAncestorsAtSnomanPlusPlusFork(t *testing.T) { ) // ... 
and check returned values are as expected - require.NoError(err, "Error calling GetAncestors") - require.Len(res, 4, "Wrong GetAncestor response") - require.Equal(res[0], builtBlk4.Bytes()) - require.Equal(res[1], builtBlk3.Bytes()) - require.Equal(res[2], builtBlk2.Bytes()) - require.Equal(res[3], builtBlk1.Bytes()) + require.NoError(err) + require.Len(res, 4) + require.Equal(builtBlk4.Bytes(), res[0]) + require.Equal(builtBlk3.Bytes(), res[1]) + require.Equal(builtBlk2.Bytes(), res[2]) + require.Equal(builtBlk1.Bytes(), res[3]) // Regression case: load some prefork and some postfork blocks. reqBlkID = builtBlk4.ID() @@ -513,9 +513,9 @@ func TestGetAncestorsAtSnomanPlusPlusFork(t *testing.T) { // ... and check returned values are as expected require.NoError(err, "Error calling GetAncestors") require.Len(res, 3, "Wrong GetAncestor response") - require.Equal(res[0], builtBlk4.Bytes()) - require.Equal(res[1], builtBlk3.Bytes()) - require.Equal(res[2], builtBlk2.Bytes()) + require.Equal(builtBlk4.Bytes(), res[0]) + require.Equal(builtBlk3.Bytes(), res[1]) + require.Equal(builtBlk2.Bytes(), res[2]) // another good call reqBlkID = builtBlk1.ID() @@ -528,7 +528,7 @@ func TestGetAncestorsAtSnomanPlusPlusFork(t *testing.T) { ) require.NoError(err, "Error calling GetAncestors") require.Len(res, 1, "Wrong GetAncestor response") - require.Equal(res[0], builtBlk1.Bytes()) + require.Equal(builtBlk1.Bytes(), res[0]) // a faulty call reqBlkID = ids.Empty @@ -589,7 +589,7 @@ func TestBatchedParseBlockPreForkOnly(t *testing.T) { return coreBlk2, nil } builtBlk2, err := proRemoteVM.BuildBlock(context.Background()) - require.NoError(err, "Could not build proposer block") + require.NoError(err) // prepare build of next block require.NoError(proRemoteVM.SetPreference(context.Background(), builtBlk2.ID())) @@ -616,7 +616,7 @@ func TestBatchedParseBlockPreForkOnly(t *testing.T) { return coreBlk3, nil } builtBlk3, err := proRemoteVM.BuildBlock(context.Background()) - require.NoError(err, "Could not build proposer block") + require.NoError(err) coreVM.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { switch { @@ -654,8 +654,8 @@ func TestBatchedParseBlockPreForkOnly(t *testing.T) { builtBlk3.Bytes(), } res, err := proRemoteVM.BatchedParseBlock(context.Background(), bytesToParse) - require.NoError(err, "Error calling BatchedParseBlock: %v", err) - require.Len(res, 3, "BatchedParseBlock returned %v entries instead of %v", len(res), 3) + require.NoError(err) + require.Len(res, 3) require.Equal(res[0].ID(), builtBlk1.ID()) require.Equal(res[1].ID(), builtBlk2.ID()) require.Equal(res[2].ID(), builtBlk3.ID()) @@ -680,7 +680,7 @@ func TestBatchedParseBlockPostForkOnly(t *testing.T) { return coreBlk1, nil } builtBlk1, err := proRemoteVM.BuildBlock(context.Background()) - require.NoError(err, "Could not build preFork block") + require.NoError(err) // prepare build of next block require.NoError(builtBlk1.Verify(context.Background())) @@ -701,7 +701,7 @@ func TestBatchedParseBlockPostForkOnly(t *testing.T) { return coreBlk2, nil } builtBlk2, err := proRemoteVM.BuildBlock(context.Background()) - require.NoError(err, "Could not build proposer block") + require.NoError(err) // prepare build of next block require.NoError(builtBlk2.Verify(context.Background())) @@ -722,7 +722,7 @@ func TestBatchedParseBlockPostForkOnly(t *testing.T) { return coreBlk3, nil } builtBlk3, err := proRemoteVM.BuildBlock(context.Background()) - require.NoError(err, "Could not build proposer block") + require.NoError(err) 
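The argument reordering in these hunks follows testify's expected-then-actual convention for require.Equal, so a failed assertion labels its diff correctly. A minimal standalone sketch of the convention; the compute helper and its values are hypothetical illustrations, not part of this patch:

package example

import (
	"testing"

	"github.com/stretchr/testify/require"
)

// compute is a hypothetical helper standing in for whatever function a
// test exercises; it is not part of this change.
func compute() []byte {
	return []byte{1, 2, 3}
}

func TestEqualArgumentOrder(t *testing.T) {
	require := require.New(t)

	want := []byte{1, 2, 3}
	got := compute()

	// Expected value first, actual value second, matching the reordered
	// require.Equal calls in the hunks above; a reversed pair would make
	// testify label the failure output backwards.
	require.Equal(want, got)
}
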
coreVM.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { switch { @@ -760,8 +760,8 @@ func TestBatchedParseBlockPostForkOnly(t *testing.T) { builtBlk3.Bytes(), } res, err := proRemoteVM.BatchedParseBlock(context.Background(), bytesToParse) - require.NoError(err, "Error calling BatchedParseBlock: %v", err) - require.Len(res, 3, "BatchedParseBlock returned %v entries instead of %v", len(res), 3) + require.NoError(err) + require.Len(res, 3) require.Equal(res[0].ID(), builtBlk1.ID()) require.Equal(res[1].ID(), builtBlk2.ID()) require.Equal(res[2].ID(), builtBlk3.ID()) @@ -820,7 +820,7 @@ func TestBatchedParseBlockAtSnomanPlusPlusFork(t *testing.T) { return coreBlk2, nil } builtBlk2, err := proRemoteVM.BuildBlock(context.Background()) - require.NoError(err, "Could not build proposer block") + require.NoError(err) require.IsType(&preForkBlock{}, builtBlk2) // prepare build of next block @@ -850,7 +850,7 @@ func TestBatchedParseBlockAtSnomanPlusPlusFork(t *testing.T) { return coreBlk3, nil } builtBlk3, err := proRemoteVM.BuildBlock(context.Background()) - require.NoError(err, "Could not build proposer block") + require.NoError(err) require.IsType(&postForkBlock{}, builtBlk3) // prepare build of next block @@ -872,7 +872,7 @@ func TestBatchedParseBlockAtSnomanPlusPlusFork(t *testing.T) { return coreBlk4, nil } builtBlk4, err := proRemoteVM.BuildBlock(context.Background()) - require.NoError(err, "Could not build proposer block") + require.NoError(err) require.IsType(&postForkBlock{}, builtBlk4) require.NoError(builtBlk4.Verify(context.Background())) @@ -918,8 +918,8 @@ func TestBatchedParseBlockAtSnomanPlusPlusFork(t *testing.T) { } res, err := proRemoteVM.BatchedParseBlock(context.Background(), bytesToParse) - require.NoError(err, "Error calling BatchedParseBlock: %v", err) - require.Len(res, 4, "BatchedParseBlock returned %v entries instead of %v", len(res), 4) + require.NoError(err) + require.Len(res, 4) require.Equal(res[0].ID(), builtBlk4.ID()) require.Equal(res[1].ID(), builtBlk3.ID()) require.Equal(res[2].ID(), builtBlk2.ID()) @@ -939,6 +939,8 @@ func initTestRemoteProposerVM( *VM, *snowman.TestBlock, ) { + require := require.New(t) + coreGenBlk := &snowman.TestBlock{ TestDecidable: choices.TestDecidable{ IDV: ids.GenerateTestID(), @@ -1036,7 +1038,7 @@ func initTestRemoteProposerVM( dummyDBManager := manager.NewMemDB(version.Semantic1_0_0) // make sure that DBs are compressed correctly dummyDBManager = dummyDBManager.NewPrefixDBManager([]byte{}) - err := proVM.Initialize( + require.NoError(proVM.Initialize( context.Background(), ctx, dummyDBManager, @@ -1046,21 +1048,14 @@ func initTestRemoteProposerVM( nil, nil, nil, - ) - if err != nil { - t.Fatalf("failed to initialize proposerVM with %s", err) - } + )) // Initialize shouldn't be called again coreVM.InitializeF = nil - if err := proVM.SetState(context.Background(), snow.NormalOp); err != nil { - t.Fatal(err) - } + require.NoError(proVM.SetState(context.Background(), snow.NormalOp)) - if err := proVM.SetPreference(context.Background(), coreGenBlk.IDV); err != nil { - t.Fatal(err) - } + require.NoError(proVM.SetPreference(context.Background(), coreGenBlk.IDV)) return coreVM, proVM, coreGenBlk } diff --git a/vms/proposervm/indexer/block_server_test.go b/vms/proposervm/indexer/block_server_test.go index 5bf742581c5..e132926c811 100644 --- a/vms/proposervm/indexer/block_server_test.go +++ b/vms/proposervm/indexer/block_server_test.go @@ -8,6 +8,8 @@ import ( "errors" "testing" + "github.com/stretchr/testify/require" + 
"github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/consensus/snowman" ) @@ -35,7 +37,7 @@ func (tsb *TestBlockServer) GetFullPostForkBlock(ctx context.Context, blkID ids. return tsb.GetFullPostForkBlockF(ctx, blkID) } if tsb.CantGetFullPostForkBlock && tsb.T != nil { - tsb.T.Fatal(errGetWrappingBlk) + require.FailNow(tsb.T, errGetWrappingBlk.Error()) } return nil, errGetWrappingBlk } @@ -45,7 +47,7 @@ func (tsb *TestBlockServer) Commit() error { return tsb.CommitF() } if tsb.CantCommit && tsb.T != nil { - tsb.T.Fatal(errCommit) + require.FailNow(tsb.T, errCommit.Error()) } return errCommit } diff --git a/vms/proposervm/post_fork_block_test.go b/vms/proposervm/post_fork_block_test.go index 441fb79979c..911e16e32de 100644 --- a/vms/proposervm/post_fork_block_test.go +++ b/vms/proposervm/post_fork_block_test.go @@ -24,6 +24,8 @@ var errDuplicateVerify = errors.New("duplicate verify") // ProposerBlock Option interface tests section func TestOracle_PostForkBlock_ImplementsInterface(t *testing.T) { + require := require.New(t) + // setup proBlk := postForkBlock{ postForkCommonComponents: postForkCommonComponents{ @@ -33,9 +35,7 @@ func TestOracle_PostForkBlock_ImplementsInterface(t *testing.T) { // test _, err := proBlk.Options(context.Background()) - if err != snowman.ErrNotOracle { - t.Fatal("Proposer block should signal that it wraps a block not implementing Options interface with ErrNotOracleBlock error") - } + require.ErrorIs(err, snowman.ErrNotOracle) // setup _, _, proVM, _, _ := initTestProposerVM(t, time.Time{}, 0) // enable ProBlks @@ -71,9 +71,7 @@ func TestOracle_PostForkBlock_ImplementsInterface(t *testing.T) { proVM.ctx.ChainID, proVM.stakingLeafSigner, ) - if err != nil { - t.Fatal("could not build stateless block") - } + require.NoError(err) proBlk = postForkBlock{ SignedBlock: slb, postForkCommonComponents: postForkCommonComponents{ @@ -85,13 +83,13 @@ func TestOracle_PostForkBlock_ImplementsInterface(t *testing.T) { // test _, err = proBlk.Options(context.Background()) - if err != nil { - t.Fatal("Proposer block should forward wrapped block options if this implements Option interface") - } + require.NoError(err) } // ProposerBlock.Verify tests section func TestBlockVerify_PostForkBlock_ParentChecks(t *testing.T) { + require := require.New(t) + coreVM, valState, proVM, coreGenBlk, _ := initTestProposerVM(t, time.Time{}, 0) // enable ProBlks pChainHeight := uint64(100) valState.GetCurrentHeightF = func(context.Context) (uint64, error) { @@ -134,16 +132,10 @@ func TestBlockVerify_PostForkBlock_ParentChecks(t *testing.T) { proVM.Set(proVM.Time().Add(proposer.MaxDelay)) prntProBlk, err := proVM.BuildBlock(context.Background()) - if err != nil { - t.Fatalf("Could not build proposer block: %s", err) - } + require.NoError(err) - if err := prntProBlk.Verify(context.Background()); err != nil { - t.Fatal(err) - } - if err := proVM.SetPreference(context.Background(), prntProBlk.ID()); err != nil { - t.Fatal(err) - } + require.NoError(prntProBlk.Verify(context.Background())) + require.NoError(proVM.SetPreference(context.Background(), prntProBlk.ID())) // .. create child block ... 
childCoreBlk := &snowman.TestBlock{ @@ -160,9 +152,7 @@ func TestBlockVerify_PostForkBlock_ParentChecks(t *testing.T) { proVM.ctx.ChainID, proVM.stakingLeafSigner, ) - if err != nil { - t.Fatal("could not build stateless block") - } + require.NoError(err) childProBlk := postForkBlock{ SignedBlock: childSlb, postForkCommonComponents: postForkCommonComponents{ @@ -174,9 +164,7 @@ func TestBlockVerify_PostForkBlock_ParentChecks(t *testing.T) { // child block referring unknown parent does not verify err = childProBlk.Verify(context.Background()) - if err == nil { - t.Fatal("Block with unknown parent should not verify") - } + require.ErrorIs(err, database.ErrNotFound) // child block referring known parent does verify childSlb, err = block.BuildUnsigned( @@ -185,21 +173,17 @@ func TestBlockVerify_PostForkBlock_ParentChecks(t *testing.T) { pChainHeight, childCoreBlk.Bytes(), ) - if err != nil { - t.Fatal("could not build stateless block") - } + require.NoError(err) childProBlk.SignedBlock = childSlb - if err != nil { - t.Fatal("could not sign parent block") - } + require.NoError(err) proVM.Set(proVM.Time().Add(proposer.MaxDelay)) - if err := childProBlk.Verify(context.Background()); err != nil { - t.Fatalf("Block with known parent should verify: %s", err) - } + require.NoError(childProBlk.Verify(context.Background())) } func TestBlockVerify_PostForkBlock_TimestampChecks(t *testing.T) { + require := require.New(t) + coreVM, valState, proVM, coreGenBlk, _ := initTestProposerVM(t, time.Time{}, 0) // enable ProBlks pChainHeight := uint64(100) valState.GetCurrentHeightF = func(context.Context) (uint64, error) { @@ -241,16 +225,10 @@ func TestBlockVerify_PostForkBlock_TimestampChecks(t *testing.T) { } prntProBlk, err := proVM.BuildBlock(context.Background()) - if err != nil { - t.Fatal("Could not build proposer block") - } + require.NoError(err) - if err := prntProBlk.Verify(context.Background()); err != nil { - t.Fatal(err) - } - if err := proVM.SetPreference(context.Background(), prntProBlk.ID()); err != nil { - t.Fatal(err) - } + require.NoError(prntProBlk.Verify(context.Background())) + require.NoError(proVM.SetPreference(context.Background(), prntProBlk.ID())) prntTimestamp := prntProBlk.Timestamp() @@ -275,9 +253,7 @@ func TestBlockVerify_PostForkBlock_TimestampChecks(t *testing.T) { proVM.ctx.ChainID, proVM.stakingLeafSigner, ) - if err != nil { - t.Fatal("could not build stateless block") - } + require.NoError(err) childProBlk := postForkBlock{ SignedBlock: childSlb, postForkCommonComponents: postForkCommonComponents{ @@ -288,15 +264,11 @@ func TestBlockVerify_PostForkBlock_TimestampChecks(t *testing.T) { } err = childProBlk.Verify(context.Background()) - if err == nil { - t.Fatal("Proposer block timestamp too old should not verify") - } + require.ErrorIs(err, errTimeNotMonotonic) // block cannot arrive before its creator window starts blkWinDelay, err := proVM.Delay(context.Background(), childCoreBlk.Height(), pChainHeight, proVM.ctx.NodeID) - if err != nil { - t.Fatal("Could not calculate submission window") - } + require.NoError(err) beforeWinStart := prntTimestamp.Add(blkWinDelay).Add(-1 * time.Second) proVM.Clock.Set(beforeWinStart) childSlb, err = block.Build( @@ -308,14 +280,11 @@ func TestBlockVerify_PostForkBlock_TimestampChecks(t *testing.T) { proVM.ctx.ChainID, proVM.stakingLeafSigner, ) - if err != nil { - t.Fatal("could not build stateless block") - } + require.NoError(err) childProBlk.SignedBlock = childSlb - if err := childProBlk.Verify(context.Background()); err == nil { - 
t.Fatal("Proposer block timestamp before submission window should not verify") - } + err = childProBlk.Verify(context.Background()) + require.ErrorIs(err, errProposerWindowNotStarted) // block can arrive at its creator window starts atWindowStart := prntTimestamp.Add(blkWinDelay) @@ -329,14 +298,10 @@ func TestBlockVerify_PostForkBlock_TimestampChecks(t *testing.T) { proVM.ctx.ChainID, proVM.stakingLeafSigner, ) - if err != nil { - t.Fatal("could not build stateless block") - } + require.NoError(err) childProBlk.SignedBlock = childSlb - if err := childProBlk.Verify(context.Background()); err != nil { - t.Fatalf("Proposer block timestamp at submission window start should verify") - } + require.NoError(childProBlk.Verify(context.Background())) // block can arrive after its creator window starts afterWindowStart := prntTimestamp.Add(blkWinDelay).Add(5 * time.Second) @@ -350,13 +315,9 @@ func TestBlockVerify_PostForkBlock_TimestampChecks(t *testing.T) { proVM.ctx.ChainID, proVM.stakingLeafSigner, ) - if err != nil { - t.Fatal("could not build stateless block") - } + require.NoError(err) childProBlk.SignedBlock = childSlb - if err := childProBlk.Verify(context.Background()); err != nil { - t.Fatal("Proposer block timestamp after submission window start should verify") - } + require.NoError(childProBlk.Verify(context.Background())) // block can arrive within submission window atSubWindowEnd := proVM.Time().Add(proposer.MaxDelay) @@ -367,13 +328,9 @@ func TestBlockVerify_PostForkBlock_TimestampChecks(t *testing.T) { pChainHeight, childCoreBlk.Bytes(), ) - if err != nil { - t.Fatal("could not build stateless block") - } + require.NoError(err) childProBlk.SignedBlock = childSlb - if err := childProBlk.Verify(context.Background()); err != nil { - t.Fatal("Proposer block timestamp within submission window should verify") - } + require.NoError(childProBlk.Verify(context.Background())) // block timestamp cannot be too much in the future afterSubWinEnd := proVM.Time().Add(maxSkew).Add(time.Second) @@ -386,18 +343,15 @@ func TestBlockVerify_PostForkBlock_TimestampChecks(t *testing.T) { proVM.ctx.ChainID, proVM.stakingLeafSigner, ) - if err != nil { - t.Fatal("could not build stateless block") - } + require.NoError(err) childProBlk.SignedBlock = childSlb - if err := childProBlk.Verify(context.Background()); err == nil { - t.Fatal("Proposer block timestamp after submission window should not verify") - } else if err == nil { - t.Fatal("Proposer block timestamp after submission window should have different error") - } + err = childProBlk.Verify(context.Background()) + require.ErrorIs(err, errTimeTooAdvanced) } func TestBlockVerify_PostForkBlock_PChainHeightChecks(t *testing.T) { + require := require.New(t) + coreVM, valState, proVM, coreGenBlk, _ := initTestProposerVM(t, time.Time{}, 0) // enable ProBlks pChainHeight := uint64(100) valState.GetCurrentHeightF = func(context.Context) (uint64, error) { @@ -439,16 +393,10 @@ func TestBlockVerify_PostForkBlock_PChainHeightChecks(t *testing.T) { } prntProBlk, err := proVM.BuildBlock(context.Background()) - if err != nil { - t.Fatal("Could not build proposer block") - } + require.NoError(err) - if err := prntProBlk.Verify(context.Background()); err != nil { - t.Fatal(err) - } - if err := proVM.SetPreference(context.Background(), prntProBlk.ID()); err != nil { - t.Fatal(err) - } + require.NoError(prntProBlk.Verify(context.Background())) + require.NoError(proVM.SetPreference(context.Background(), prntProBlk.ID())) prntBlkPChainHeight := pChainHeight @@ -472,9 +420,7 
@@ func TestBlockVerify_PostForkBlock_PChainHeightChecks(t *testing.T) { proVM.ctx.ChainID, proVM.stakingLeafSigner, ) - if err != nil { - t.Fatal("could not build stateless block") - } + require.NoError(err) childProBlk := postForkBlock{ SignedBlock: childSlb, postForkCommonComponents: postForkCommonComponents{ @@ -484,11 +430,8 @@ func TestBlockVerify_PostForkBlock_PChainHeightChecks(t *testing.T) { }, } - if err := childProBlk.Verify(context.Background()); err == nil { - t.Fatal("ProBlock's P-Chain-Height cannot be lower than parent ProBlock's one") - } else if err == nil { - t.Fatal("Proposer block has wrong height should have different error") - } + err = childProBlk.Verify(context.Background()) + require.ErrorIs(err, errTimeTooAdvanced) // child P-Chain height can be equal to parent P-Chain height childSlb, err = block.BuildUnsigned( @@ -497,15 +440,11 @@ func TestBlockVerify_PostForkBlock_PChainHeightChecks(t *testing.T) { prntBlkPChainHeight, childCoreBlk.Bytes(), ) - if err != nil { - t.Fatal("could not build stateless block") - } + require.NoError(err) childProBlk.SignedBlock = childSlb proVM.Set(childCoreBlk.Timestamp()) - if err := childProBlk.Verify(context.Background()); err != nil { - t.Fatalf("ProBlock's P-Chain-Height can be larger or equal than parent ProBlock's one: %s", err) - } + require.NoError(childProBlk.Verify(context.Background())) // child P-Chain height may follow parent P-Chain height pChainHeight = prntBlkPChainHeight * 2 // move ahead pChainHeight @@ -515,13 +454,9 @@ func TestBlockVerify_PostForkBlock_PChainHeightChecks(t *testing.T) { prntBlkPChainHeight+1, childCoreBlk.Bytes(), ) - if err != nil { - t.Fatal("could not build stateless block") - } + require.NoError(err) childProBlk.SignedBlock = childSlb - if err := childProBlk.Verify(context.Background()); err != nil { - t.Fatal("ProBlock's P-Chain-Height can be larger or equal than parent ProBlock's one") - } + require.NoError(childProBlk.Verify(context.Background())) // block P-Chain height can be equal to current P-Chain height currPChainHeight, _ := proVM.ctx.ValidatorState.GetCurrentHeight(context.Background()) @@ -531,13 +466,9 @@ func TestBlockVerify_PostForkBlock_PChainHeightChecks(t *testing.T) { currPChainHeight, childCoreBlk.Bytes(), ) - if err != nil { - t.Fatal("could not build stateless block") - } + require.NoError(err) childProBlk.SignedBlock = childSlb - if err := childProBlk.Verify(context.Background()); err != nil { - t.Fatal("ProBlock's P-Chain-Height can be equal to current p chain height") - } + require.NoError(childProBlk.Verify(context.Background())) // block P-Chain height cannot be at higher than current P-Chain height childSlb, err = block.BuildUnsigned( @@ -546,13 +477,10 @@ func TestBlockVerify_PostForkBlock_PChainHeightChecks(t *testing.T) { currPChainHeight*2, childCoreBlk.Bytes(), ) - if err != nil { - t.Fatal("could not build stateless block") - } + require.NoError(err) childProBlk.SignedBlock = childSlb - if err := childProBlk.Verify(context.Background()); err != errPChainHeightNotReached { - t.Fatal("ProBlock's P-Chain-Height cannot be larger than current p chain height") - } + err = childProBlk.Verify(context.Background()) + require.ErrorIs(err, errPChainHeightNotReached) } func TestBlockVerify_PostForkBlockBuiltOnOption_PChainHeightChecks(t *testing.T) { @@ -631,32 +559,20 @@ func TestBlockVerify_PostForkBlockBuiltOnOption_PChainHeightChecks(t *testing.T) } oracleBlk, err := proVM.BuildBlock(context.Background()) - if err != nil { - t.Fatal("could not build post fork 
oracle block") - } + require.NoError(err) - if err := oracleBlk.Verify(context.Background()); err != nil { - t.Fatal(err) - } - if err := proVM.SetPreference(context.Background(), oracleBlk.ID()); err != nil { - t.Fatal(err) - } + require.NoError(oracleBlk.Verify(context.Background())) + require.NoError(proVM.SetPreference(context.Background(), oracleBlk.ID())) // retrieve one option and verify block built on it require.IsType(&postForkBlock{}, oracleBlk) postForkOracleBlk := oracleBlk.(*postForkBlock) opts, err := postForkOracleBlk.Options(context.Background()) - if err != nil { - t.Fatal("could not retrieve options from post fork oracle block") - } + require.NoError(err) parentBlk := opts[0] - if err := parentBlk.Verify(context.Background()); err != nil { - t.Fatal(err) - } - if err := proVM.SetPreference(context.Background(), parentBlk.ID()); err != nil { - t.Fatal(err) - } + require.NoError(parentBlk.Verify(context.Background())) + require.NoError(proVM.SetPreference(context.Background(), parentBlk.ID())) prntBlkPChainHeight := pChainHeight @@ -680,9 +596,7 @@ func TestBlockVerify_PostForkBlockBuiltOnOption_PChainHeightChecks(t *testing.T) proVM.ctx.ChainID, proVM.stakingLeafSigner, ) - if err != nil { - t.Fatal("could not build stateless block") - } + require.NoError(err) childProBlk := postForkBlock{ SignedBlock: childSlb, postForkCommonComponents: postForkCommonComponents{ @@ -692,9 +606,8 @@ func TestBlockVerify_PostForkBlockBuiltOnOption_PChainHeightChecks(t *testing.T) }, } - if err := childProBlk.Verify(context.Background()); err == nil { - t.Fatal("ProBlock's P-Chain-Height cannot be lower than parent ProBlock's one") - } + err = childProBlk.Verify(context.Background()) + require.ErrorIs(err, errTimeTooAdvanced) // child P-Chain height can be equal to parent P-Chain height childSlb, err = block.BuildUnsigned( @@ -703,15 +616,11 @@ func TestBlockVerify_PostForkBlockBuiltOnOption_PChainHeightChecks(t *testing.T) prntBlkPChainHeight, childCoreBlk.Bytes(), ) - if err != nil { - t.Fatal("could not build stateless block") - } + require.NoError(err) childProBlk.SignedBlock = childSlb proVM.Set(childCoreBlk.Timestamp()) - if err := childProBlk.Verify(context.Background()); err != nil { - t.Fatalf("ProBlock's P-Chain-Height can be larger or equal than parent ProBlock's one: %s", err) - } + require.NoError(childProBlk.Verify(context.Background())) // child P-Chain height may follow parent P-Chain height pChainHeight = prntBlkPChainHeight * 2 // move ahead pChainHeight @@ -721,13 +630,9 @@ func TestBlockVerify_PostForkBlockBuiltOnOption_PChainHeightChecks(t *testing.T) prntBlkPChainHeight+1, childCoreBlk.Bytes(), ) - if err != nil { - t.Fatal("could not build stateless block") - } + require.NoError(err) childProBlk.SignedBlock = childSlb - if err := childProBlk.Verify(context.Background()); err != nil { - t.Fatal("ProBlock's P-Chain-Height can be larger or equal than parent ProBlock's one") - } + require.NoError(childProBlk.Verify(context.Background())) // block P-Chain height can be equal to current P-Chain height currPChainHeight, _ := proVM.ctx.ValidatorState.GetCurrentHeight(context.Background()) @@ -737,13 +642,9 @@ func TestBlockVerify_PostForkBlockBuiltOnOption_PChainHeightChecks(t *testing.T) currPChainHeight, childCoreBlk.Bytes(), ) - if err != nil { - t.Fatal("could not build stateless block") - } + require.NoError(err) childProBlk.SignedBlock = childSlb - if err := childProBlk.Verify(context.Background()); err != nil { - t.Fatal("ProBlock's P-Chain-Height can be equal to 
current p chain height") - } + require.NoError(childProBlk.Verify(context.Background())) // block P-Chain height cannot be at higher than current P-Chain height childSlb, err = block.BuildUnsigned( @@ -752,16 +653,15 @@ func TestBlockVerify_PostForkBlockBuiltOnOption_PChainHeightChecks(t *testing.T) currPChainHeight*2, childCoreBlk.Bytes(), ) - if err != nil { - t.Fatal("could not build stateless block") - } + require.NoError(err) childProBlk.SignedBlock = childSlb - if err := childProBlk.Verify(context.Background()); err != errPChainHeightNotReached { - t.Fatal("ProBlock's P-Chain-Height cannot be larger than current p chain height") - } + err = childProBlk.Verify(context.Background()) + require.ErrorIs(err, errPChainHeightNotReached) } func TestBlockVerify_PostForkBlock_CoreBlockVerifyIsCalledOnce(t *testing.T) { + require := require.New(t) + // Verify a block once (in this test by building it). // Show that other verify call would not call coreBlk.Verify() coreVM, valState, proVM, coreGenBlk, _ := initTestProposerVM(t, time.Time{}, 0) // enable ProBlks @@ -804,29 +704,24 @@ func TestBlockVerify_PostForkBlock_CoreBlockVerifyIsCalledOnce(t *testing.T) { } builtBlk, err := proVM.BuildBlock(context.Background()) - if err != nil { - t.Fatal("could not build block") - } + require.NoError(err) - if err := builtBlk.Verify(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(builtBlk.Verify(context.Background())) // set error on coreBlock.Verify and recall Verify() coreBlk.VerifyV = errDuplicateVerify - if err := builtBlk.Verify(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(builtBlk.Verify(context.Background())) // rebuild a block with the same core block pChainHeight++ - if _, err := proVM.BuildBlock(context.Background()); err != nil { - t.Fatal("could not build block with same core block") - } + _, err = proVM.BuildBlock(context.Background()) + require.NoError(err) } // ProposerBlock.Accept tests section func TestBlockAccept_PostForkBlock_SetsLastAcceptedBlock(t *testing.T) { + require := require.New(t) + // setup coreVM, valState, proVM, coreGenBlk, _ := initTestProposerVM(t, time.Time{}, 0) // enable ProBlks pChainHeight := uint64(2000) @@ -868,14 +763,10 @@ func TestBlockAccept_PostForkBlock_SetsLastAcceptedBlock(t *testing.T) { } builtBlk, err := proVM.BuildBlock(context.Background()) - if err != nil { - t.Fatal("proposerVM could not build block") - } + require.NoError(err) // test - if err := builtBlk.Accept(context.Background()); err != nil { - t.Fatal("could not accept block") - } + require.NoError(builtBlk.Accept(context.Background())) coreVM.LastAcceptedF = func(context.Context) (ids.ID, error) { if coreBlk.Status() == choices.Accepted { @@ -883,14 +774,14 @@ func TestBlockAccept_PostForkBlock_SetsLastAcceptedBlock(t *testing.T) { } return coreGenBlk.ID(), nil } - if acceptedID, err := proVM.LastAccepted(context.Background()); err != nil { - t.Fatal("could not retrieve last accepted block") - } else if acceptedID != builtBlk.ID() { - t.Fatal("unexpected last accepted ID") - } + acceptedID, err := proVM.LastAccepted(context.Background()) + require.NoError(err) + require.Equal(builtBlk.ID(), acceptedID) } func TestBlockAccept_PostForkBlock_TwoProBlocksWithSameCoreBlock_OneIsAccepted(t *testing.T) { + require := require.New(t) + coreVM, valState, proVM, coreGenBlk, _ := initTestProposerVM(t, time.Time{}, 0) // enable ProBlks var minimumHeight uint64 valState.GetMinimumHeightF = func(context.Context) (uint64, error) { @@ -915,32 +806,20 @@ 
func TestBlockAccept_PostForkBlock_TwoProBlocksWithSameCoreBlock_OneIsAccepted(t minimumHeight = coreGenBlk.Height() proBlk1, err := proVM.BuildBlock(context.Background()) - if err != nil { - t.Fatal("could not build proBlk1") - } + require.NoError(err) minimumHeight++ proBlk2, err := proVM.BuildBlock(context.Background()) - if err != nil { - t.Fatal("could not build proBlk2") - } - if proBlk1.ID() == proBlk2.ID() { - t.Fatal("proBlk1 and proBlk2 should be different for this test") - } + require.NoError(err) + require.NotEqual(proBlk2.ID(), proBlk1.ID()) // set proBlk1 as preferred - if err := proBlk1.Accept(context.Background()); err != nil { - t.Fatal("could not accept proBlk1") - } - if coreBlk.Status() != choices.Accepted { - t.Fatal("coreBlk should have been accepted") - } + require.NoError(proBlk1.Accept(context.Background())) + require.Equal(choices.Accepted, coreBlk.Status()) - if acceptedID, err := proVM.LastAccepted(context.Background()); err != nil { - t.Fatal("could not retrieve last accepted block") - } else if acceptedID != proBlk1.ID() { - t.Fatal("unexpected last accepted ID") - } + acceptedID, err := proVM.LastAccepted(context.Background()) + require.NoError(err) + require.Equal(proBlk1.ID(), acceptedID) } // ProposerBlock.Reject tests section @@ -963,23 +842,14 @@ func TestBlockReject_PostForkBlock_InnerBlockIsNotRejected(t *testing.T) { } sb, err := proVM.BuildBlock(context.Background()) - if err != nil { - t.Fatal("could not build block") - } + require.NoError(err) require.IsType(&postForkBlock{}, sb) proBlk := sb.(*postForkBlock) - if err := proBlk.Reject(context.Background()); err != nil { - t.Fatal("could not reject block") - } - - if proBlk.Status() != choices.Rejected { - t.Fatal("block rejection did not set state properly") - } + require.NoError(proBlk.Reject(context.Background())) - if proBlk.innerBlk.Status() == choices.Rejected { - t.Fatal("block rejection unduly changed inner block status") - } + require.Equal(choices.Rejected, proBlk.Status()) + require.NotEqual(choices.Rejected, proBlk.innerBlk.Status()) } func TestBlockVerify_PostForkBlock_ShouldBePostForkOption(t *testing.T) { @@ -1056,33 +926,21 @@ func TestBlockVerify_PostForkBlock_ShouldBePostForkOption(t *testing.T) { } parentBlk, err := proVM.BuildBlock(context.Background()) - if err != nil { - t.Fatal("could not build post fork oracle block") - } + require.NoError(err) - if err := parentBlk.Verify(context.Background()); err != nil { - t.Fatal(err) - } - if err := proVM.SetPreference(context.Background(), parentBlk.ID()); err != nil { - t.Fatal(err) - } + require.NoError(parentBlk.Verify(context.Background())) + require.NoError(proVM.SetPreference(context.Background(), parentBlk.ID())) // retrieve options ... require.IsType(&postForkBlock{}, parentBlk) postForkOracleBlk := parentBlk.(*postForkBlock) opts, err := postForkOracleBlk.Options(context.Background()) - if err != nil { - t.Fatal("could not retrieve options from post fork oracle block") - } + require.NoError(err) require.IsType(&postForkOption{}, opts[0]) // ... 
and verify them the first time - if err := opts[0].Verify(context.Background()); err != nil { - t.Fatal("option 0 should verify") - } - if err := opts[1].Verify(context.Background()); err != nil { - t.Fatal("option 1 should verify") - } + require.NoError(opts[0].Verify(context.Background())) + require.NoError(opts[1].Verify(context.Background())) // Build the child statelessChild, err := block.Build( @@ -1094,9 +952,7 @@ func TestBlockVerify_PostForkBlock_ShouldBePostForkOption(t *testing.T) { proVM.ctx.ChainID, proVM.stakingLeafSigner, ) - if err != nil { - t.Fatal("failed to build new child block") - } + require.NoError(err) invalidChild, err := proVM.ParseBlock(context.Background(), statelessChild.Bytes()) if err != nil { @@ -1105,12 +961,12 @@ func TestBlockVerify_PostForkBlock_ShouldBePostForkOption(t *testing.T) { } err = invalidChild.Verify(context.Background()) - if err == nil { - t.Fatal("Should have failed to verify a child that was signed when it should be an oracle block") - } + require.ErrorIs(err, errUnexpectedBlockType) } func TestBlockVerify_PostForkBlock_PChainTooLow(t *testing.T) { + require := require.New(t) + coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, time.Time{}, 5) proVM.Set(coreGenBlk.Timestamp()) @@ -1151,9 +1007,7 @@ func TestBlockVerify_PostForkBlock_PChainTooLow(t *testing.T) { 4, coreBlk.Bytes(), ) - if err != nil { - t.Fatal("failed to build new child block") - } + require.NoError(err) invalidChild, err := proVM.ParseBlock(context.Background(), statelessChild.Bytes()) if err != nil { @@ -1162,7 +1016,5 @@ func TestBlockVerify_PostForkBlock_PChainTooLow(t *testing.T) { } err = invalidChild.Verify(context.Background()) - if err == nil { - t.Fatal("Should have failed to verify a child that was signed when it should be an oracle block") - } + require.ErrorIs(err, errPChainHeightTooLow) } diff --git a/vms/proposervm/post_fork_option_test.go b/vms/proposervm/post_fork_option_test.go index 690fdf75f71..06f8aa28be7 100644 --- a/vms/proposervm/post_fork_option_test.go +++ b/vms/proposervm/post_fork_option_test.go @@ -108,38 +108,24 @@ func TestBlockVerify_PostForkOption_ParentChecks(t *testing.T) { } parentBlk, err := proVM.BuildBlock(context.Background()) - if err != nil { - t.Fatal("could not build post fork oracle block") - } + require.NoError(err) - if err := parentBlk.Verify(context.Background()); err != nil { - t.Fatal(err) - } - if err := proVM.SetPreference(context.Background(), parentBlk.ID()); err != nil { - t.Fatal(err) - } + require.NoError(parentBlk.Verify(context.Background())) + require.NoError(proVM.SetPreference(context.Background(), parentBlk.ID())) // retrieve options ... require.IsType(&postForkBlock{}, parentBlk) postForkOracleBlk := parentBlk.(*postForkBlock) opts, err := postForkOracleBlk.Options(context.Background()) - if err != nil { - t.Fatal("could not retrieve options from post fork oracle block") - } + require.NoError(err) require.IsType(&postForkOption{}, opts[0]) // ... 
and verify them - if err := opts[0].Verify(context.Background()); err != nil { - t.Fatal("option 0 should verify") - } - if err := opts[1].Verify(context.Background()); err != nil { - t.Fatal("option 1 should verify") - } + require.NoError(opts[0].Verify(context.Background())) + require.NoError(opts[1].Verify(context.Background())) // show we can build on options - if err := proVM.SetPreference(context.Background(), opts[0].ID()); err != nil { - t.Fatal("could not set preference") - } + require.NoError(proVM.SetPreference(context.Background(), opts[0].ID())) childCoreBlk := &snowman.TestBlock{ TestDecidable: choices.TestDecidable{ @@ -156,13 +142,9 @@ func TestBlockVerify_PostForkOption_ParentChecks(t *testing.T) { proVM.Set(childCoreBlk.Timestamp()) proChild, err := proVM.BuildBlock(context.Background()) - if err != nil { - t.Fatal("could not build on top of option") - } + require.NoError(err) require.IsType(&postForkBlock{}, proChild) - if err := proChild.Verify(context.Background()); err != nil { - t.Fatal("block built on option does not verify") - } + require.NoError(proChild.Verify(context.Background())) } // ProposerBlock.Accept tests section @@ -241,45 +223,29 @@ func TestBlockVerify_PostForkOption_CoreBlockVerifyIsCalledOnce(t *testing.T) { } parentBlk, err := proVM.BuildBlock(context.Background()) - if err != nil { - t.Fatal("could not build post fork oracle block") - } + require.NoError(err) - if err := parentBlk.Verify(context.Background()); err != nil { - t.Fatal(err) - } - if err := proVM.SetPreference(context.Background(), parentBlk.ID()); err != nil { - t.Fatal(err) - } + require.NoError(parentBlk.Verify(context.Background())) + require.NoError(proVM.SetPreference(context.Background(), parentBlk.ID())) // retrieve options ... require.IsType(&postForkBlock{}, parentBlk) postForkOracleBlk := parentBlk.(*postForkBlock) opts, err := postForkOracleBlk.Options(context.Background()) - if err != nil { - t.Fatal("could not retrieve options from post fork oracle block") - } + require.NoError(err) require.IsType(&postForkOption{}, opts[0]) // ... and verify them the first time - if err := opts[0].Verify(context.Background()); err != nil { - t.Fatal("option 0 should verify") - } - if err := opts[1].Verify(context.Background()); err != nil { - t.Fatal("option 1 should verify") - } + require.NoError(opts[0].Verify(context.Background())) + require.NoError(opts[1].Verify(context.Background())) // set error on coreBlock.Verify and recall Verify() coreOpt0.VerifyV = errDuplicateVerify coreOpt1.VerifyV = errDuplicateVerify // ... and verify them again. 
They verify without call to innerBlk - if err := opts[0].Verify(context.Background()); err != nil { - t.Fatal("option 0 should verify") - } - if err := opts[1].Verify(context.Background()); err != nil { - t.Fatal("option 1 should verify") - } + require.NoError(opts[0].Verify(context.Background())) + require.NoError(opts[1].Verify(context.Background())) } func TestBlockAccept_PostForkOption_SetsLastAcceptedBlock(t *testing.T) { @@ -354,14 +320,10 @@ func TestBlockAccept_PostForkOption_SetsLastAcceptedBlock(t *testing.T) { } parentBlk, err := proVM.BuildBlock(context.Background()) - if err != nil { - t.Fatal("could not build post fork oracle block") - } + require.NoError(err) // accept oracle block - if err := parentBlk.Accept(context.Background()); err != nil { - t.Fatal("could not accept block") - } + require.NoError(parentBlk.Accept(context.Background())) coreVM.LastAcceptedF = func(context.Context) (ids.ID, error) { if oracleCoreBlk.Status() == choices.Accepted { @@ -369,23 +331,17 @@ func TestBlockAccept_PostForkOption_SetsLastAcceptedBlock(t *testing.T) { } return coreGenBlk.ID(), nil } - if acceptedID, err := proVM.LastAccepted(context.Background()); err != nil { - t.Fatal("could not retrieve last accepted block") - } else if acceptedID != parentBlk.ID() { - t.Fatal("unexpected last accepted ID") - } + acceptedID, err := proVM.LastAccepted(context.Background()) + require.NoError(err) + require.Equal(parentBlk.ID(), acceptedID) // accept one of the options require.IsType(&postForkBlock{}, parentBlk) postForkOracleBlk := parentBlk.(*postForkBlock) opts, err := postForkOracleBlk.Options(context.Background()) - if err != nil { - t.Fatal("could not retrieve options from post fork oracle block") - } + require.NoError(err) - if err := opts[0].Accept(context.Background()); err != nil { - t.Fatal("could not accept option") - } + require.NoError(opts[0].Accept(context.Background())) coreVM.LastAcceptedF = func(context.Context) (ids.ID, error) { if oracleCoreBlk.opts[0].Status() == choices.Accepted { @@ -393,11 +349,9 @@ func TestBlockAccept_PostForkOption_SetsLastAcceptedBlock(t *testing.T) { } return oracleCoreBlk.ID(), nil } - if acceptedID, err := proVM.LastAccepted(context.Background()); err != nil { - t.Fatal("could not retrieve last accepted block") - } else if acceptedID != opts[0].ID() { - t.Fatal("unexpected last accepted ID") - } + acceptedID, err = proVM.LastAccepted(context.Background()) + require.NoError(err) + require.Equal(opts[0].ID(), acceptedID) } // ProposerBlock.Reject tests section @@ -473,46 +427,30 @@ func TestBlockReject_InnerBlockIsNotRejected(t *testing.T) { } builtBlk, err := proVM.BuildBlock(context.Background()) - if err != nil { - t.Fatal("could not build post fork oracle block") - } + require.NoError(err) // reject oracle block - if err := builtBlk.Reject(context.Background()); err != nil { - t.Fatal("could not reject block") - } + require.NoError(builtBlk.Reject(context.Background())) require.IsType(&postForkBlock{}, builtBlk) proBlk := builtBlk.(*postForkBlock) - if proBlk.Status() != choices.Rejected { - t.Fatal("block rejection did not set state properly") - } + require.Equal(choices.Rejected, proBlk.Status()) - if proBlk.innerBlk.Status() == choices.Rejected { - t.Fatal("block rejection unduly changed inner block status") - } + require.NotEqual(choices.Rejected, proBlk.innerBlk.Status()) // reject an option require.IsType(&postForkBlock{}, builtBlk) postForkOracleBlk := builtBlk.(*postForkBlock) opts, err := postForkOracleBlk.Options(context.Background()) - 
if err != nil { - t.Fatal("could not retrieve options from post fork oracle block") - } + require.NoError(err) - if err := opts[0].Reject(context.Background()); err != nil { - t.Fatal("could not accept option") - } + require.NoError(opts[0].Reject(context.Background())) require.IsType(&postForkOption{}, opts[0]) proOpt := opts[0].(*postForkOption) - if proOpt.Status() != choices.Rejected { - t.Fatal("block rejection did not set state properly") - } + require.Equal(choices.Rejected, proOpt.Status()) - if proOpt.innerBlk.Status() == choices.Rejected { - t.Fatal("block rejection unduly changed inner block status") - } + require.NotEqual(choices.Rejected, proOpt.innerBlk.Status()) } func TestBlockVerify_PostForkOption_ParentIsNotOracleWithError(t *testing.T) { @@ -575,25 +513,19 @@ func TestBlockVerify_PostForkOption_ParentIsNotOracleWithError(t *testing.T) { } parentBlk, err := proVM.BuildBlock(context.Background()) - if err != nil { - t.Fatal("could not build post fork oracle block") - } + require.NoError(err) require.IsType(&postForkBlock{}, parentBlk) postForkBlk := parentBlk.(*postForkBlock) _, err = postForkBlk.Options(context.Background()) - if err != snowman.ErrNotOracle { - t.Fatal("should have reported that the block isn't an oracle block") - } + require.ErrorIs(err, snowman.ErrNotOracle) // Build the child statelessChild, err := block.BuildOption( postForkBlk.ID(), coreChildBlk.Bytes(), ) - if err != nil { - t.Fatal("failed to build new child block") - } + require.NoError(err) invalidChild, err := proVM.ParseBlock(context.Background(), statelessChild.Bytes()) if err != nil { @@ -602,12 +534,12 @@ func TestBlockVerify_PostForkOption_ParentIsNotOracleWithError(t *testing.T) { } err = invalidChild.Verify(context.Background()) - if err == nil { - t.Fatal("Should have failed to verify a child that should have been signed") - } + require.ErrorIs(err, database.ErrNotFound) } func TestOptionTimestampValidity(t *testing.T) { + require := require.New(t) + coreVM, _, proVM, coreGenBlk, db := initTestProposerVM(t, time.Time{}, 0) // enable ProBlks coreOracleBlkID := ids.GenerateTestID() @@ -649,9 +581,7 @@ func TestOptionTimestampValidity(t *testing.T) { 0, coreOracleBlk.Bytes(), ) - if err != nil { - t.Fatal(err) - } + require.NoError(err) coreVM.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { @@ -683,50 +613,34 @@ func TestOptionTimestampValidity(t *testing.T) { } statefulBlock, err := proVM.ParseBlock(context.Background(), statelessBlock.Bytes()) - if err != nil { - t.Fatal(err) - } + require.NoError(err) - if err := statefulBlock.Verify(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(statefulBlock.Verify(context.Background())) statefulOracleBlock, ok := statefulBlock.(snowman.OracleBlock) - if !ok { - t.Fatal("should have reported as an oracle block") - } + require.True(ok) options, err := statefulOracleBlock.Options(context.Background()) - if err != nil { - t.Fatal(err) - } + require.NoError(err) option := options[0] - if err := option.Verify(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(option.Verify(context.Background())) - if err := statefulBlock.Accept(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(statefulBlock.Accept(context.Background())) coreVM.GetBlockF = func(context.Context, ids.ID) (snowman.Block, error) { - t.Fatal("called GetBlock when unable to handle the error") + require.FailNow("called GetBlock when unable to handle the error") return nil, nil } 
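The require.FailNow rewrites in these hunks preserve t.Fatal's semantics inside VM callbacks: like t.Fatal, FailNow must run on the test goroutine, which these synchronous hooks do, and it takes the failure message directly. A short sketch of that pattern under those assumptions, with a hypothetical parseHook standing in for hooks such as coreVM.ParseBlockF:

package example

import (
	"testing"

	"github.com/stretchr/testify/require"
)

// parseHook is a hypothetical callback slot, standing in for hooks such
// as coreVM.ParseBlockF in the surrounding tests.
var parseHook func(b []byte) (interface{}, error)

func TestHookIsNeverCalled(t *testing.T) {
	require := require.New(t)

	parseHook = func([]byte) (interface{}, error) {
		// Fail the test from inside the callback instead of calling
		// t.Fatal directly; the assertion only fires if code under
		// test invokes the hook unexpectedly.
		require.FailNow("called parseHook when unable to handle the error")
		return nil, nil
	}

	// The test body never calls parseHook, mirroring the tests above
	// that install failing GetBlockF/ParseBlockF hooks after restart.
	_ = parseHook
}
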
coreVM.ParseBlockF = func(context.Context, []byte) (snowman.Block, error) { - t.Fatal("called ParseBlock when unable to handle the error") + require.FailNow("called ParseBlock when unable to handle the error") return nil, nil } expectedTime := coreGenBlk.Timestamp() - if optionTime := option.Timestamp(); !optionTime.Equal(expectedTime) { - t.Fatalf("wrong time returned expected %s got %s", expectedTime, optionTime) - } + require.Equal(expectedTime, option.Timestamp()) - if err := option.Accept(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(option.Accept(context.Background())) // Restart the node. @@ -786,7 +700,7 @@ func TestOptionTimestampValidity(t *testing.T) { } } - err = proVM.Initialize( + require.NoError(proVM.Initialize( context.Background(), ctx, db, @@ -796,30 +710,21 @@ func TestOptionTimestampValidity(t *testing.T) { nil, nil, nil, - ) - if err != nil { - t.Fatalf("failed to initialize proposerVM with %s", err) - } + )) statefulOptionBlock, err := proVM.ParseBlock(context.Background(), option.Bytes()) - if err != nil { - t.Fatal(err) - } + require.NoError(err) - if status := statefulOptionBlock.Status(); status != choices.Accepted { - t.Fatalf("wrong status returned expected %s got %s", choices.Accepted, status) - } + require.Equal(choices.Accepted, statefulOptionBlock.Status()) coreVM.GetBlockF = func(context.Context, ids.ID) (snowman.Block, error) { - t.Fatal("called GetBlock when unable to handle the error") + require.FailNow("called GetBlock when unable to handle the error") return nil, nil } coreVM.ParseBlockF = func(context.Context, []byte) (snowman.Block, error) { - t.Fatal("called ParseBlock when unable to handle the error") + require.FailNow("called ParseBlock when unable to handle the error") return nil, nil } - if optionTime := statefulOptionBlock.Timestamp(); !optionTime.Equal(expectedTime) { - t.Fatalf("wrong time returned expected %s got %s", expectedTime, optionTime) - } + require.Equal(expectedTime, statefulOptionBlock.Timestamp()) } diff --git a/vms/proposervm/pre_fork_block_test.go b/vms/proposervm/pre_fork_block_test.go index ada0048c079..00f0ef08eb2 100644 --- a/vms/proposervm/pre_fork_block_test.go +++ b/vms/proposervm/pre_fork_block_test.go @@ -27,6 +27,8 @@ import ( ) func TestOracle_PreForkBlkImplementsInterface(t *testing.T) { + require := require.New(t) + // setup proBlk := preForkBlock{ Block: &snowman.TestBlock{}, @@ -34,9 +36,7 @@ func TestOracle_PreForkBlkImplementsInterface(t *testing.T) { // test _, err := proBlk.Options(context.Background()) - if err != snowman.ErrNotOracle { - t.Fatal("Proposer block should signal that it wraps a block not implementing Options interface with ErrNotOracleBlock error") - } + require.ErrorIs(err, snowman.ErrNotOracle) // setup proBlk = preForkBlock{ @@ -45,9 +45,7 @@ func TestOracle_PreForkBlkImplementsInterface(t *testing.T) { // test _, err = proBlk.Options(context.Background()) - if err != nil { - t.Fatal("Proposer block should forward wrapped block options if this implements Option interface") - } + require.NoError(err) } func TestOracle_PreForkBlkCanBuiltOnPreForkOption(t *testing.T) { @@ -104,25 +102,17 @@ func TestOracle_PreForkBlkCanBuiltOnPreForkOption(t *testing.T) { } parentBlk, err := proVM.BuildBlock(context.Background()) - if err != nil { - t.Fatal("could not build pre fork oracle block") - } + require.NoError(err) // retrieve options ... 
require.IsType(&preForkBlock{}, parentBlk) preForkOracleBlk := parentBlk.(*preForkBlock) opts, err := preForkOracleBlk.Options(context.Background()) - if err != nil { - t.Fatal("could not retrieve options from pre fork oracle block") - } - if err := opts[0].Verify(context.Background()); err != nil { - t.Fatal("option should verify") - } + require.NoError(err) + require.NoError(opts[0].Verify(context.Background())) // ... show a block can be built on top of an option - if err := proVM.SetPreference(context.Background(), opts[0].ID()); err != nil { - t.Fatal("could not set preference") - } + require.NoError(proVM.SetPreference(context.Background(), opts[0].ID())) lastCoreBlk := &TestOptionsBlock{ TestBlock: snowman.TestBlock{ @@ -139,9 +129,7 @@ func TestOracle_PreForkBlkCanBuiltOnPreForkOption(t *testing.T) { } preForkChild, err := proVM.BuildBlock(context.Background()) - if err != nil { - t.Fatal("could not build pre fork block on pre fork option block") - } + require.NoError(err) require.IsType(&preForkBlock{}, preForkChild) } @@ -205,25 +193,17 @@ func TestOracle_PostForkBlkCanBuiltOnPreForkOption(t *testing.T) { } parentBlk, err := proVM.BuildBlock(context.Background()) - if err != nil { - t.Fatal("could not build pre fork oracle block") - } + require.NoError(err) // retrieve options ... require.IsType(&preForkBlock{}, parentBlk) preForkOracleBlk := parentBlk.(*preForkBlock) opts, err := preForkOracleBlk.Options(context.Background()) - if err != nil { - t.Fatal("could not retrieve options from pre fork oracle block") - } - if err := opts[0].Verify(context.Background()); err != nil { - t.Fatal("option should verify") - } + require.NoError(err) + require.NoError(opts[0].Verify(context.Background())) // ... show a block can be built on top of an option - if err := proVM.SetPreference(context.Background(), opts[0].ID()); err != nil { - t.Fatal("could not set preference") - } + require.NoError(proVM.SetPreference(context.Background(), opts[0].ID())) lastCoreBlk := &TestOptionsBlock{ TestBlock: snowman.TestBlock{ @@ -240,19 +220,17 @@ func TestOracle_PostForkBlkCanBuiltOnPreForkOption(t *testing.T) { } postForkChild, err := proVM.BuildBlock(context.Background()) - if err != nil { - t.Fatal("could not build pre fork block on pre fork option block") - } + require.NoError(err) require.IsType(&postForkBlock{}, postForkChild) } func TestBlockVerify_PreFork_ParentChecks(t *testing.T) { + require := require.New(t) + activationTime := genesisTimestamp.Add(10 * time.Second) coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, 0) - if !coreGenBlk.Timestamp().Before(activationTime) { - t.Fatal("This test requires parent block 's timestamp to be before fork activation time") - } + require.True(coreGenBlk.Timestamp().Before(activationTime)) // create parent block ... prntCoreBlk := &snowman.TestBlock{ @@ -290,9 +268,7 @@ func TestBlockVerify_PreFork_ParentChecks(t *testing.T) { proVM.Set(proVM.Time().Add(proposer.MaxDelay)) prntProBlk, err := proVM.BuildBlock(context.Background()) - if err != nil { - t.Fatal("Could not build proposer block") - } + require.NoError(err) // .. create child block ... 
childCoreBlk := &snowman.TestBlock{ @@ -311,15 +287,11 @@ func TestBlockVerify_PreFork_ParentChecks(t *testing.T) { // child block referring unknown parent does not verify childCoreBlk.ParentV = ids.Empty err = childProBlk.Verify(context.Background()) - if err == nil { - t.Fatal("Block with unknown parent should not verify") - } + require.ErrorIs(err, database.ErrNotFound) // child block referring known parent does verify childCoreBlk.ParentV = prntProBlk.ID() - if err := childProBlk.Verify(context.Background()); err != nil { - t.Fatal("Block with known parent should verify") - } + require.NoError(childProBlk.Verify(context.Background())) } func TestBlockVerify_BlocksBuiltOnPreForkGenesis(t *testing.T) { @@ -327,9 +299,7 @@ func TestBlockVerify_BlocksBuiltOnPreForkGenesis(t *testing.T) { activationTime := genesisTimestamp.Add(10 * time.Second) coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, 0) - if !coreGenBlk.Timestamp().Before(activationTime) { - t.Fatal("This test requires parent block 's timestamp to be before fork activation time") - } + require.True(coreGenBlk.Timestamp().Before(activationTime)) preActivationTime := activationTime.Add(-1 * time.Second) proVM.Set(preActivationTime) @@ -349,14 +319,10 @@ func TestBlockVerify_BlocksBuiltOnPreForkGenesis(t *testing.T) { // preFork block verifies if parent is before fork activation time preForkChild, err := proVM.BuildBlock(context.Background()) - if err != nil { - t.Fatalf("unexpectedly could not build block due to %s", err) - } + require.NoError(err) require.IsType(&preForkBlock{}, preForkChild) - if err := preForkChild.Verify(context.Background()); err != nil { - t.Fatal("pre Fork blocks should verify before fork") - } + require.NoError(preForkChild.Verify(context.Background())) // postFork block does NOT verify if parent is before fork activation time postForkStatelessChild, err := block.Build( @@ -368,9 +334,7 @@ func TestBlockVerify_BlocksBuiltOnPreForkGenesis(t *testing.T) { proVM.ctx.ChainID, proVM.stakingLeafSigner, ) - if err != nil { - t.Fatalf("unexpectedly could not build block due to %s", err) - } + require.NoError(err) postForkChild := &postForkBlock{ SignedBlock: postForkStatelessChild, postForkCommonComponents: postForkCommonComponents{ @@ -380,12 +344,9 @@ func TestBlockVerify_BlocksBuiltOnPreForkGenesis(t *testing.T) { }, } - if !postForkChild.Timestamp().Before(activationTime) { - t.Fatal("This test requires postForkChild to be before fork activation time") - } - if err := postForkChild.Verify(context.Background()); err == nil { - t.Fatal("post Fork blocks should NOT verify before fork") - } + require.True(postForkChild.Timestamp().Before(activationTime)) + err = postForkChild.Verify(context.Background()) + require.ErrorIs(err, errProposersNotActivated) // once activation time is crossed postForkBlock are produced postActivationTime := activationTime.Add(time.Second) @@ -394,9 +355,7 @@ func TestBlockVerify_BlocksBuiltOnPreForkGenesis(t *testing.T) { coreVM.SetPreferenceF = func(_ context.Context, id ids.ID) error { return nil } - if err := proVM.SetPreference(context.Background(), preForkChild.ID()); err != nil { - t.Fatal("could not set preference") - } + require.NoError(proVM.SetPreference(context.Background(), preForkChild.ID())) secondCoreBlk := &snowman.TestBlock{ TestDecidable: choices.TestDecidable{ @@ -417,24 +376,18 @@ func TestBlockVerify_BlocksBuiltOnPreForkGenesis(t *testing.T) { case coreBlk.ID(): return coreBlk, nil default: - t.Fatal("attempt to get unknown block") + 
require.FailNow("attempt to get unknown block") return nil, nil } } lastPreForkBlk, err := proVM.BuildBlock(context.Background()) - if err != nil { - t.Fatalf("unexpectedly could not build block due to %s", err) - } + require.NoError(err) require.IsType(&preForkBlock{}, lastPreForkBlk) - if err := lastPreForkBlk.Verify(context.Background()); err != nil { - t.Fatal("pre Fork blocks should verify before fork") - } + require.NoError(lastPreForkBlk.Verify(context.Background())) - if err := proVM.SetPreference(context.Background(), lastPreForkBlk.ID()); err != nil { - t.Fatal("could not set preference") - } + require.NoError(proVM.SetPreference(context.Background(), lastPreForkBlk.ID())) thirdCoreBlk := &snowman.TestBlock{ TestDecidable: choices.TestDecidable{ IDV: ids.Empty.Prefix(333), @@ -456,20 +409,16 @@ func TestBlockVerify_BlocksBuiltOnPreForkGenesis(t *testing.T) { case secondCoreBlk.ID(): return secondCoreBlk, nil default: - t.Fatal("attempt to get unknown block") + require.FailNow("attempt to get unknown block") return nil, nil } } firstPostForkBlk, err := proVM.BuildBlock(context.Background()) - if err != nil { - t.Fatalf("unexpectedly could not build block due to %s", err) - } + require.NoError(err) require.IsType(&postForkBlock{}, firstPostForkBlk) - if err := firstPostForkBlk.Verify(context.Background()); err != nil { - t.Fatal("pre Fork blocks should verify before fork") - } + require.NoError(firstPostForkBlk.Verify(context.Background())) } func TestBlockVerify_BlocksBuiltOnPostForkGenesis(t *testing.T) { @@ -496,26 +445,23 @@ func TestBlockVerify_BlocksBuiltOnPostForkGenesis(t *testing.T) { // postFork block verifies if parent is after fork activation time postForkChild, err := proVM.BuildBlock(context.Background()) - if err != nil { - t.Fatalf("unexpectedly could not build block due to %s", err) - } + require.NoError(err) require.IsType(&postForkBlock{}, postForkChild) - if err := postForkChild.Verify(context.Background()); err != nil { - t.Fatal("post Fork blocks should verify after fork") - } + require.NoError(postForkChild.Verify(context.Background())) // preFork block does NOT verify if parent is after fork activation time preForkChild := preForkBlock{ Block: coreBlock, vm: proVM, } - if err := preForkChild.Verify(context.Background()); err == nil { - t.Fatal("pre Fork blocks should NOT verify after fork") - } + err = preForkChild.Verify(context.Background()) + require.ErrorIs(err, errUnexpectedBlockType) } func TestBlockAccept_PreFork_SetsLastAcceptedBlock(t *testing.T) { + require := require.New(t) + // setup coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, mockable.MaxTime, 0) @@ -552,14 +498,10 @@ func TestBlockAccept_PreFork_SetsLastAcceptedBlock(t *testing.T) { } builtBlk, err := proVM.BuildBlock(context.Background()) - if err != nil { - t.Fatal("proposerVM could not build block") - } + require.NoError(err) // test - if err := builtBlk.Accept(context.Background()); err != nil { - t.Fatal("could not accept block") - } + require.NoError(builtBlk.Accept(context.Background())) coreVM.LastAcceptedF = func(context.Context) (ids.ID, error) { if coreBlk.Status() == choices.Accepted { @@ -567,11 +509,9 @@ func TestBlockAccept_PreFork_SetsLastAcceptedBlock(t *testing.T) { } return coreGenBlk.ID(), nil } - if acceptedID, err := proVM.LastAccepted(context.Background()); err != nil { - t.Fatal("could not retrieve last accepted block") - } else if acceptedID != builtBlk.ID() { - t.Fatal("unexpected last accepted ID") - } + acceptedID, err := 
proVM.LastAccepted(context.Background()) + require.NoError(err) + require.Equal(builtBlk.ID(), acceptedID) } // ProposerBlock.Reject tests section @@ -593,31 +533,21 @@ func TestBlockReject_PreForkBlock_InnerBlockIsRejected(t *testing.T) { } sb, err := proVM.BuildBlock(context.Background()) - if err != nil { - t.Fatal("could not build block") - } + require.NoError(err) require.IsType(&preForkBlock{}, sb) proBlk := sb.(*preForkBlock) - if err := proBlk.Reject(context.Background()); err != nil { - t.Fatal("could not reject block") - } - - if proBlk.Status() != choices.Rejected { - t.Fatal("block rejection did not set state properly") - } - - if proBlk.Block.Status() != choices.Rejected { - t.Fatal("block rejection did not set state properly") - } + require.NoError(proBlk.Reject(context.Background())) + require.Equal(choices.Rejected, proBlk.Status()) + require.Equal(choices.Rejected, proBlk.Block.Status()) } func TestBlockVerify_ForkBlockIsOracleBlock(t *testing.T) { + require := require.New(t) + activationTime := genesisTimestamp.Add(10 * time.Second) coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, 0) - if !coreGenBlk.Timestamp().Before(activationTime) { - t.Fatal("This test requires parent block 's timestamp to be before fork activation time") - } + require.True(coreGenBlk.Timestamp().Before(activationTime)) postActivationTime := activationTime.Add(time.Second) proVM.Set(postActivationTime) @@ -684,39 +614,27 @@ func TestBlockVerify_ForkBlockIsOracleBlock(t *testing.T) { } firstBlock, err := proVM.ParseBlock(context.Background(), coreBlk.Bytes()) - if err != nil { - t.Fatal(err) - } + require.NoError(err) - if err := firstBlock.Verify(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(firstBlock.Verify(context.Background())) oracleBlock, ok := firstBlock.(snowman.OracleBlock) - if !ok { - t.Fatal("should have returned an oracle block") - } + require.True(ok) options, err := oracleBlock.Options(context.Background()) - if err != nil { - t.Fatal(err) - } + require.NoError(err) - if err := options[0].Verify(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(options[0].Verify(context.Background())) - if err := options[1].Verify(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(options[1].Verify(context.Background())) } func TestBlockVerify_ForkBlockIsOracleBlockButChildrenAreSigned(t *testing.T) { + require := require.New(t) + activationTime := genesisTimestamp.Add(10 * time.Second) coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, activationTime, 0) - if !coreGenBlk.Timestamp().Before(activationTime) { - t.Fatal("This test requires parent block 's timestamp to be before fork activation time") - } + require.True(coreGenBlk.Timestamp().Before(activationTime)) postActivationTime := activationTime.Add(time.Second) proVM.Set(postActivationTime) @@ -783,13 +701,9 @@ func TestBlockVerify_ForkBlockIsOracleBlockButChildrenAreSigned(t *testing.T) { } firstBlock, err := proVM.ParseBlock(context.Background(), coreBlk.Bytes()) - if err != nil { - t.Fatal(err) - } + require.NoError(err) - if err := firstBlock.Verify(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(firstBlock.Verify(context.Background())) slb, err := block.Build( firstBlock.ID(), // refer unknown parent @@ -800,9 +714,7 @@ func TestBlockVerify_ForkBlockIsOracleBlockButChildrenAreSigned(t *testing.T) { proVM.ctx.ChainID, proVM.stakingLeafSigner, ) - if err != nil { - t.Fatal("could not build stateless block") - } + 
require.NoError(err) invalidChild, err := proVM.ParseBlock(context.Background(), slb.Bytes()) if err != nil { @@ -811,9 +723,7 @@ func TestBlockVerify_ForkBlockIsOracleBlockButChildrenAreSigned(t *testing.T) { } err = invalidChild.Verify(context.Background()) - if err == nil { - t.Fatal("Should have failed to verify a child that was signed when it should be a pre fork block") - } + require.ErrorIs(err, errUnexpectedBlockType) } // Assert that when the underlying VM implements ChainVMWithBuildBlockContext diff --git a/vms/proposervm/scheduler/scheduler_test.go b/vms/proposervm/scheduler/scheduler_test.go index 74693657732..238982a18ef 100644 --- a/vms/proposervm/scheduler/scheduler_test.go +++ b/vms/proposervm/scheduler/scheduler_test.go @@ -7,6 +7,8 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/utils/logging" ) @@ -22,9 +24,7 @@ func TestDelayFromNew(t *testing.T) { fromVM <- common.PendingTxs <-toEngine - if time.Until(startTime) > 0 { - t.Fatalf("passed message too soon") - } + require.Negative(t, time.Until(startTime)) } func TestDelayFromSetTime(t *testing.T) { @@ -41,9 +41,7 @@ func TestDelayFromSetTime(t *testing.T) { fromVM <- common.PendingTxs <-toEngine - if time.Until(startTime) > 0 { - t.Fatalf("passed message too soon") - } + require.Negative(t, time.Until(startTime)) } func TestReceipt(*testing.T) { diff --git a/vms/proposervm/vm_byzantine_test.go b/vms/proposervm/vm_byzantine_test.go index 4ce02a2afd6..5bdf03d198f 100644 --- a/vms/proposervm/vm_byzantine_test.go +++ b/vms/proposervm/vm_byzantine_test.go @@ -7,7 +7,6 @@ import ( "bytes" "context" "encoding/hex" - "errors" "testing" "time" @@ -32,6 +31,8 @@ import ( // | // Y func TestInvalidByzantineProposerParent(t *testing.T) { + require := require.New(t) + forkTime := time.Unix(0, 0) // enable ProBlks coreVM, _, proVM, gBlock, _ := initTestProposerVM(t, forkTime, 0) @@ -50,19 +51,12 @@ func TestInvalidByzantineProposerParent(t *testing.T) { } aBlock, err := proVM.BuildBlock(context.Background()) - if err != nil { - t.Fatalf("proposerVM could not build block due to %s", err) - } + require.NoError(err) coreVM.BuildBlockF = nil - if err := aBlock.Verify(context.Background()); err != nil { - t.Fatalf("could not verify valid block due to %s", err) - } - - if err := aBlock.Accept(context.Background()); err != nil { - t.Fatalf("could not accept valid block due to %s", err) - } + require.NoError(aBlock.Verify(context.Background())) + require.NoError(aBlock.Accept(context.Background())) yBlockBytes := []byte{2} yBlock := &snowman.TestBlock{ @@ -90,9 +84,8 @@ func TestInvalidByzantineProposerParent(t *testing.T) { } // If there wasn't an error parsing - verify must return an error - if err := parsedBlock.Verify(context.Background()); err == nil { - t.Fatal("should have marked the parsed block as invalid") - } + err = parsedBlock.Verify(context.Background()) + require.ErrorIs(err, errUnknownBlock) } // Ensure that a byzantine node issuing an invalid PreForkBlock (Y or Z) when @@ -176,43 +169,29 @@ func TestInvalidByzantineProposerOracleParent(t *testing.T) { } aBlockIntf, err := proVM.BuildBlock(context.Background()) - if err != nil { - t.Fatal("could not build post fork oracle block") - } + require.NoError(err) require.IsType(&postForkBlock{}, aBlockIntf) aBlock := aBlockIntf.(*postForkBlock) opts, err := aBlock.Options(context.Background()) - if err != nil { - t.Fatal("could not retrieve options from post fork oracle 
block") - } + require.NoError(err) - if err := aBlock.Verify(context.Background()); err != nil { - t.Fatal(err) - } - if err := opts[0].Verify(context.Background()); err != nil { - t.Fatal(err) - } - if err := opts[1].Verify(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(aBlock.Verify(context.Background())) + require.NoError(opts[0].Verify(context.Background())) + require.NoError(opts[1].Verify(context.Background())) yBlock, err := proVM.ParseBlock(context.Background(), xBlock.opts[0].Bytes()) if err != nil { // It's okay for this block not to be parsed return } - if err := yBlock.Verify(context.Background()); err == nil { - t.Fatal("unexpectedly passed block verification") - } + err = yBlock.Verify(context.Background()) + require.ErrorIs(err, errUnexpectedBlockType) - if err := aBlock.Accept(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(aBlock.Accept(context.Background())) - if err := yBlock.Verify(context.Background()); err == nil { - t.Fatal("unexpectedly passed block verification") - } + err = yBlock.Verify(context.Background()) + require.ErrorIs(err, errUnexpectedBlockType) } // Ensure that a byzantine node issuing an invalid PostForkBlock (B) when the @@ -225,6 +204,8 @@ func TestInvalidByzantineProposerOracleParent(t *testing.T) { // / | // B - Y func TestInvalidByzantineProposerPreForkParent(t *testing.T) { + require := require.New(t) + forkTime := time.Unix(0, 0) // enable ProBlks coreVM, _, proVM, gBlock, _ := initTestProposerVM(t, forkTime, 0) @@ -243,9 +224,7 @@ func TestInvalidByzantineProposerPreForkParent(t *testing.T) { } aBlock, err := proVM.BuildBlock(context.Background()) - if err != nil { - t.Fatalf("proposerVM could not build block due to %s", err) - } + require.NoError(err) coreVM.BuildBlockF = nil @@ -292,9 +271,7 @@ func TestInvalidByzantineProposerPreForkParent(t *testing.T) { 0, yBlockBytes, ) - if err != nil { - t.Fatal(err) - } + require.NoError(err) bBlock, err := proVM.ParseBlock(context.Background(), bStatelessBlock.Bytes()) if err != nil { @@ -302,23 +279,17 @@ func TestInvalidByzantineProposerPreForkParent(t *testing.T) { return } - if err := aBlock.Verify(context.Background()); err != nil { - t.Fatalf("could not verify valid block due to %s", err) - } + require.NoError(aBlock.Verify(context.Background())) // If there wasn't an error parsing - verify must return an error - if err := bBlock.Verify(context.Background()); err == nil { - t.Fatal("should have marked the parsed block as invalid") - } + err = bBlock.Verify(context.Background()) + require.ErrorIs(err, errUnexpectedBlockType) - if err := aBlock.Accept(context.Background()); err != nil { - t.Fatalf("could not accept valid block due to %s", err) - } + require.NoError(aBlock.Accept(context.Background())) // If there wasn't an error parsing - verify must return an error - if err := bBlock.Verify(context.Background()); err == nil { - t.Fatal("should have marked the parsed block as invalid") - } + err = bBlock.Verify(context.Background()) + require.ErrorIs(err, errUnexpectedBlockType) } // Ensure that a byzantine node issuing an invalid OptionBlock (B) which @@ -401,26 +372,18 @@ func TestBlockVerify_PostForkOption_FaultyParent(t *testing.T) { } aBlockIntf, err := proVM.BuildBlock(context.Background()) - if err != nil { - t.Fatal("could not build post fork oracle block") - } + require.NoError(err) require.IsType(&postForkBlock{}, aBlockIntf) aBlock := aBlockIntf.(*postForkBlock) opts, err := aBlock.Options(context.Background()) - if err != nil { - 
t.Fatal("could not retrieve options from post fork oracle block") - } + require.NoError(err) - if err := aBlock.Verify(context.Background()); err != nil { - t.Fatal(err) - } - if err := opts[0].Verify(context.Background()); err == nil { - t.Fatal("option 0 has invalid parent, should not verify") - } - if err := opts[1].Verify(context.Background()); err == nil { - t.Fatal("option 1 has invalid parent, should not verify") - } + require.NoError(aBlock.Verify(context.Background())) + err = opts[0].Verify(context.Background()) + require.ErrorIs(err, errInnerParentMismatch) + err = opts[1].Verify(context.Background()) + require.ErrorIs(err, errInnerParentMismatch) } // ,--G ----. @@ -435,6 +398,8 @@ func TestBlockVerify_PostForkOption_FaultyParent(t *testing.T) { // O2.parent = A (original), O2.inner = first option of X (valid) // O3.parent = C (Oracle), O3.inner = first option of X (invalid parent) func TestBlockVerify_InvalidPostForkOption(t *testing.T) { + require := require.New(t) + coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, time.Time{}, 0) proVM.Set(coreGenBlk.Timestamp()) @@ -473,9 +438,7 @@ func TestBlockVerify_InvalidPostForkOption(t *testing.T) { } xInnerOptions, err := xBlock.Options(context.Background()) - if err != nil { - t.Fatal(err) - } + require.NoError(err) xInnerOption := xInnerOptions[0] // create a non-Oracle pre-fork block Y @@ -496,9 +459,7 @@ func TestBlockVerify_InvalidPostForkOption(t *testing.T) { uint64(2000), yBlock.Bytes(), ) - if err != nil { - t.Fatalf("fail to manually build a block due to %s", err) - } + require.NoError(err) // create post-fork block B from Y bBlock := postForkBlock{ @@ -510,18 +471,14 @@ func TestBlockVerify_InvalidPostForkOption(t *testing.T) { }, } - if err := bBlock.Verify(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(bBlock.Verify(context.Background())) // generate O1 statelessOuterOption, err := block.BuildOption( bBlock.ID(), xInnerOption.Bytes(), ) - if err != nil { - t.Fatal(err) - } + require.NoError(err) outerOption := &postForkOption{ Block: statelessOuterOption, @@ -532,30 +489,23 @@ func TestBlockVerify_InvalidPostForkOption(t *testing.T) { }, } - if err := outerOption.Verify(context.Background()); !errors.Is(err, errUnexpectedBlockType) { - t.Fatal(err) - } + err = outerOption.Verify(context.Background()) + require.ErrorIs(err, errUnexpectedBlockType) // generate A from X and O2 coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return xBlock, nil } aBlock, err := proVM.BuildBlock(context.Background()) - if err != nil { - t.Fatal(err) - } + require.NoError(err) coreVM.BuildBlockF = nil - if err := aBlock.Verify(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(aBlock.Verify(context.Background())) statelessOuterOption, err = block.BuildOption( aBlock.ID(), xInnerOption.Bytes(), ) - if err != nil { - t.Fatal(err) - } + require.NoError(err) outerOption = &postForkOption{ Block: statelessOuterOption, @@ -566,9 +516,7 @@ func TestBlockVerify_InvalidPostForkOption(t *testing.T) { }, } - if err := outerOption.Verify(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(outerOption.Verify(context.Background())) // create an Oracle pre-fork block Z // create post-fork block B from Y @@ -609,22 +557,16 @@ func TestBlockVerify_InvalidPostForkOption(t *testing.T) { return zBlock, nil } cBlock, err := proVM.BuildBlock(context.Background()) - if err != nil { - t.Fatal(err) - } + require.NoError(err) coreVM.BuildBlockF = nil - if err := 
cBlock.Verify(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(cBlock.Verify(context.Background())) // generate O3 statelessOuterOption, err = block.BuildOption( cBlock.ID(), xInnerOption.Bytes(), ) - if err != nil { - t.Fatal(err) - } + require.NoError(err) outerOption = &postForkOption{ Block: statelessOuterOption, @@ -635,12 +577,13 @@ func TestBlockVerify_InvalidPostForkOption(t *testing.T) { }, } - if err := outerOption.Verify(context.Background()); err != errInnerParentMismatch { - t.Fatal(err) - } + err = outerOption.Verify(context.Background()) + require.ErrorIs(err, errInnerParentMismatch) } func TestGetBlock_MutatedSignature(t *testing.T) { + require := require.New(t) + coreVM, valState, proVM, coreGenBlk, _ := initTestProposerVM(t, time.Time{}, 0) // Make sure that we will be sampled to perform the proposals. @@ -709,17 +652,11 @@ func TestGetBlock_MutatedSignature(t *testing.T) { } builtBlk0, err := proVM.BuildBlock(context.Background()) - if err != nil { - t.Fatalf("could not build post fork block %s", err) - } + require.NoError(err) - if err := builtBlk0.Verify(context.Background()); err != nil { - t.Fatalf("failed to verify newly created block %s", err) - } + require.NoError(builtBlk0.Verify(context.Background())) - if err := proVM.SetPreference(context.Background(), builtBlk0.ID()); err != nil { - t.Fatal(err) - } + require.NoError(proVM.SetPreference(context.Background(), builtBlk0.ID())) // The second propsal block will need to be signed because the timestamp // hasn't moved forward @@ -730,9 +667,7 @@ func TestGetBlock_MutatedSignature(t *testing.T) { // Invalid Bytes: 000000000000fd81ce4f1ab2650176d46a3d1fbb593af5717a2ada7dabdcef19622325a8ce8400000000000003e800000000000006d0000004a13082049d30820285a003020102020100300d06092a864886f70d01010b050030003020170d3939313233313030303030305a180f32313231313132333130313030305a300030820222300d06092a864886f70d01010105000382020f003082020a0282020100b9c3615c42d501f3b9d21ed127b31855827dbe12652e6e6f278991a3ad1ca55e2241b1cac69a0aeeefdd913db8ae445ff847789fdcbc1cbe6cce0a63109d1c1fb9d441c524a6eb1412f9b8090f1507e3e50a725f9d0a9d5db424ea229a7c11d8b91c73fecbad31c7b216bb2ac5e4d5ff080a80fabc73b34beb8fa46513ab59d489ce3f273c0edab43ded4d4914e081e6e850f9e502c3c4a54afc8a3a89d889aec275b7162a7616d53a61cd3ee466394212e5bef307790100142ad9e0b6c95ad2424c6e84d06411ad066d0c37d4d14125bae22b49ad2a761a09507bbfe43d023696d278d9fbbaf06c4ff677356113d3105e248078c33caed144d85929b1dd994df33c5d3445675104659ca9642c269b5cfa39c7bad5e399e7ebce3b5e6661f989d5f388006ebd90f0e035d533f5662cb925df8744f61289e66517b51b9a2f54792dca9078d5e12bf8ad79e35a68d4d661d15f0d3029d6c5903c845323d5426e49deaa2be2bc261423a9cd77df9a2706afaca27f589cc2c8f53e2a1f90eb5a3f8bcee0769971db6bacaec265d86b39380f69e3e0e06072de986feede26fe856c55e24e88ee5ac342653ac55a04e21b8517310c717dff0e22825c0944c6ba263f8f060099ea6e44a57721c7aa54e2790a4421fb85e3347e4572cba44e62b2cad19c1623c1cab4a715078e56458554cef8442769e6d5dd7f99a6234653a46828804f0203010001a320301e300e0603551d0f0101ff0404030204b0300c0603551d130101ff04023000300d06092a864886f70d01010b050003820201004ee2229d354720a751e2d2821134994f5679997113192626cf61594225cfdf51e6479e2c17e1013ab9dceb713bc0f24649e5cab463a8cf8617816ed736ac5251a853ff35e859ac6853ebb314f967ff7867c53512d42e329659375682c854ca9150cfa4c3964680e7650beb93e8b4a0d6489a9ca0ce0104752ba4d9cf3e2dc9436b56ecd0bd2e33cbbeb5a107ec4fd6f41a943c8bee06c0b32f4291a3e3759a7984d919a97d5d6517b841053df6e795ed33b52ed5e41357c3e431beb725e4e4f2ef956c44fd1f76fa4d847602e491c3585a90cdccfff982405d388b83d6f32ea16da2f5
e4595926a7d26078e32992179032d30831b1f1b42de1781c507536a49adb4c95bad04c171911eed30d63c73712873d1e8094355efb9aeee0c16f8599575fd7f8bb027024bad63b097d2230d8f0ba12a8ed23e618adc3d7cb6a63e02b82a6d4d74b21928dbcb6d3788c6fd45022d69f3ab94d914d97cd651db662e92918a5d891ef730a813f03aade2fe385b61f44840f8925ad3345df1c82c9de882bb7184b4cd0bbd9db8322aaedb4ff86e5be9635987e6c40455ab9b063cdb423bee2edcac47cf654487e9286f33bdbad10018f4db9564cee6e048570e1517a2e396501b5978a53d10a548aed26938c2f9aada3ae62d3fdae486deb9413dffb6524666453633d665c3712d0fec9f844632b2b3eaf0267ca495eb41dba8273862609de00000001020000000101 invalidBlkBytesHex := "000000000000fd81ce4f1ab2650176d46a3d1fbb593af5717a2ada7dabdcef19622325a8ce8400000000000003e800000000000006d0000004a13082049d30820285a003020102020100300d06092a864886f70d01010b050030003020170d3939313233313030303030305a180f32313231313132333130313030305a300030820222300d06092a864886f70d01010105000382020f003082020a0282020100b9c3615c42d501f3b9d21ed127b31855827dbe12652e6e6f278991a3ad1ca55e2241b1cac69a0aeeefdd913db8ae445ff847789fdcbc1cbe6cce0a63109d1c1fb9d441c524a6eb1412f9b8090f1507e3e50a725f9d0a9d5db424ea229a7c11d8b91c73fecbad31c7b216bb2ac5e4d5ff080a80fabc73b34beb8fa46513ab59d489ce3f273c0edab43ded4d4914e081e6e850f9e502c3c4a54afc8a3a89d889aec275b7162a7616d53a61cd3ee466394212e5bef307790100142ad9e0b6c95ad2424c6e84d06411ad066d0c37d4d14125bae22b49ad2a761a09507bbfe43d023696d278d9fbbaf06c4ff677356113d3105e248078c33caed144d85929b1dd994df33c5d3445675104659ca9642c269b5cfa39c7bad5e399e7ebce3b5e6661f989d5f388006ebd90f0e035d533f5662cb925df8744f61289e66517b51b9a2f54792dca9078d5e12bf8ad79e35a68d4d661d15f0d3029d6c5903c845323d5426e49deaa2be2bc261423a9cd77df9a2706afaca27f589cc2c8f53e2a1f90eb5a3f8bcee0769971db6bacaec265d86b39380f69e3e0e06072de986feede26fe856c55e24e88ee5ac342653ac55a04e21b8517310c717dff0e22825c0944c6ba263f8f060099ea6e44a57721c7aa54e2790a4421fb85e3347e4572cba44e62b2cad19c1623c1cab4a715078e56458554cef8442769e6d5dd7f99a6234653a46828804f0203010001a320301e300e0603551d0f0101ff0404030204b0300c0603551d130101ff04023000300d06092a864886f70d01010b050003820201004ee2229d354720a751e2d2821134994f5679997113192626cf61594225cfdf51e6479e2c17e1013ab9dceb713bc0f24649e5cab463a8cf8617816ed736ac5251a853ff35e859ac6853ebb314f967ff7867c53512d42e329659375682c854ca9150cfa4c3964680e7650beb93e8b4a0d6489a9ca0ce0104752ba4d9cf3e2dc9436b56ecd0bd2e33cbbeb5a107ec4fd6f41a943c8bee06c0b32f4291a3e3759a7984d919a97d5d6517b841053df6e795ed33b52ed5e41357c3e431beb725e4e4f2ef956c44fd1f76fa4d847602e491c3585a90cdccfff982405d388b83d6f32ea16da2f5e4595926a7d26078e32992179032d30831b1f1b42de1781c507536a49adb4c95bad04c171911eed30d63c73712873d1e8094355efb9aeee0c16f8599575fd7f8bb027024bad63b097d2230d8f0ba12a8ed23e618adc3d7cb6a63e02b82a6d4d74b21928dbcb6d3788c6fd45022d69f3ab94d914d97cd651db662e92918a5d891ef730a813f03aade2fe385b61f44840f8925ad3345df1c82c9de882bb7184b4cd0bbd9db8322aaedb4ff86e5be9635987e6c40455ab9b063cdb423bee2edcac47cf654487e9286f33bdbad10018f4db9564cee6e048570e1517a2e396501b5978a53d10a548aed26938c2f9aada3ae62d3fdae486deb9413dffb6524666453633d665c3712d0fec9f844632b2b3eaf0267ca495eb41dba8273862609de00000001020000000101" invalidBlkBytes, err := hex.DecodeString(invalidBlkBytesHex) - if err != nil { - t.Fatal(err) - } + require.NoError(err) invalidBlk, err := proVM.ParseBlock(context.Background(), invalidBlkBytes) if err != nil { @@ -740,20 +675,14 @@ func TestGetBlock_MutatedSignature(t *testing.T) { t.Skip(err) } - if err := invalidBlk.Verify(context.Background()); err == nil { - t.Fatalf("verified block without valid signature") - } + err = 
invalidBlk.Verify(context.Background()) + require.ErrorIs(err, database.ErrNotFound) // Note that the invalidBlk.ID() is the same as the correct blk ID because // the signature isn't part of the blk ID. blkID, err := ids.FromString("2R3Uz98YmxHUJARWv6suApPdAbbZ7X7ipat1gZuZNNhC5wPwJW") - if err != nil { - t.Fatal(err) - } - - if blkID != invalidBlk.ID() { - t.Fatalf("unexpected block ID; expected = %s , got = %s", blkID, invalidBlk.ID()) - } + require.NoError(err) + require.Equal(blkID, invalidBlk.ID()) // GetBlock shouldn't really be able to succeed, as we don't have a valid // representation of [blkID] @@ -765,7 +694,5 @@ func TestGetBlock_MutatedSignature(t *testing.T) { // GetBlock returned, so it must have somehow gotten a valid representation // of [blkID]. - if err := fetchedBlk.Verify(context.Background()); err != nil { - t.Fatalf("GetBlock returned an invalid block when the ID represented a potentially valid block: %s", err) - } + require.NoError(fetchedBlk.Verify(context.Background())) } diff --git a/vms/proposervm/vm_test.go b/vms/proposervm/vm_test.go index 1c9f339a37f..e7192d2fa85 100644 --- a/vms/proposervm/vm_test.go +++ b/vms/proposervm/vm_test.go @@ -79,6 +79,8 @@ func initTestProposerVM( *snowman.TestBlock, manager.Manager, ) { + require := require.New(t) + coreGenBlk := &snowman.TestBlock{ TestDecidable: choices.TestDecidable{ IDV: ids.GenerateTestID(), @@ -181,7 +183,7 @@ func initTestProposerVM( return nil } - err := proVM.Initialize( + require.NoError(proVM.Initialize( context.Background(), ctx, dummyDBManager, @@ -191,21 +193,13 @@ func initTestProposerVM( nil, nil, nil, - ) - if err != nil { - t.Fatalf("failed to initialize proposerVM with %s", err) - } + )) // Initialize shouldn't be called again coreVM.InitializeF = nil - if err := proVM.SetState(context.Background(), snow.NormalOp); err != nil { - t.Fatal(err) - } - - if err := proVM.SetPreference(context.Background(), coreGenBlk.IDV); err != nil { - t.Fatal(err) - } + require.NoError(proVM.SetState(context.Background(), snow.NormalOp)) + require.NoError(proVM.SetPreference(context.Background(), coreGenBlk.IDV)) return coreVM, valState, proVM, coreGenBlk, dummyDBManager } @@ -213,6 +207,8 @@ func initTestProposerVM( // VM.BuildBlock tests section func TestBuildBlockTimestampAreRoundedToSeconds(t *testing.T) { + require := require.New(t) + // given the same core block, BuildBlock returns the same proposer block coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, time.Time{}, 0) // enable ProBlks skewedTimestamp := time.Now().Truncate(time.Second).Add(time.Millisecond) @@ -234,16 +230,14 @@ func TestBuildBlockTimestampAreRoundedToSeconds(t *testing.T) { // test builtBlk, err := proVM.BuildBlock(context.Background()) - if err != nil { - t.Fatal("proposerVM could not build block") - } + require.NoError(err) - if builtBlk.Timestamp().Truncate(time.Second) != builtBlk.Timestamp() { - t.Fatal("Timestamp should be rounded to second") - } + require.Equal(builtBlk.Timestamp().Truncate(time.Second), builtBlk.Timestamp()) } func TestBuildBlockIsIdempotent(t *testing.T) { + require := require.New(t) + // given the same core block, BuildBlock returns the same proposer block coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, time.Time{}, 0) // enable ProBlks @@ -263,18 +257,12 @@ func TestBuildBlockIsIdempotent(t *testing.T) { // test builtBlk1, err := proVM.BuildBlock(context.Background()) - if err != nil { - t.Fatal("proposerVM could not build block") - } + require.NoError(err) builtBlk2, err := 
proVM.BuildBlock(context.Background()) - if err != nil { - t.Fatal("proposerVM could not build block") - } + require.NoError(err) - if !bytes.Equal(builtBlk1.Bytes(), builtBlk2.Bytes()) { - t.Fatal("proposer blocks wrapping the same core block are different") - } + require.Equal(builtBlk1.Bytes(), builtBlk2.Bytes()) } func TestFirstProposerBlockIsBuiltOnTopOfGenesis(t *testing.T) { @@ -299,21 +287,19 @@ func TestFirstProposerBlockIsBuiltOnTopOfGenesis(t *testing.T) { // test snowBlock, err := proVM.BuildBlock(context.Background()) - if err != nil { - t.Fatal("Could not build block") - } + require.NoError(err) // checks require.IsType(&postForkBlock{}, snowBlock) proBlock := snowBlock.(*postForkBlock) - if proBlock.innerBlk != coreBlk { - t.Fatal("different block was expected to be built") - } + require.Equal(coreBlk, proBlock.innerBlk) } // both core blocks and pro blocks must be built on preferred func TestProposerBlocksAreBuiltOnPreferredProBlock(t *testing.T) { + require := require.New(t) + coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, time.Time{}, 0) // enable ProBlks // add two proBlks... @@ -331,9 +317,7 @@ func TestProposerBlocksAreBuiltOnPreferredProBlock(t *testing.T) { return coreBlk1, nil } proBlk1, err := proVM.BuildBlock(context.Background()) - if err != nil { - t.Fatalf("Could not build proBlk1 due to %s", err) - } + require.NoError(err) coreBlk2 := &snowman.TestBlock{ TestDecidable: choices.TestDecidable{ @@ -349,16 +333,9 @@ func TestProposerBlocksAreBuiltOnPreferredProBlock(t *testing.T) { return coreBlk2, nil } proBlk2, err := proVM.BuildBlock(context.Background()) - if err != nil { - t.Fatal("Could not build proBlk2") - } - if proBlk1.ID() == proBlk2.ID() { - t.Fatal("proBlk1 and proBlk2 should be different for this test") - } - - if err := proBlk2.Verify(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(err) + require.NotEqual(proBlk2.ID(), proBlk1.ID()) + require.NoError(proBlk2.Verify(context.Background())) // ...and set one as preferred var prefcoreBlk *snowman.TestBlock @@ -371,7 +348,7 @@ func TestProposerBlocksAreBuiltOnPreferredProBlock(t *testing.T) { prefcoreBlk = coreBlk2 return nil default: - t.Fatal("Unknown core Blocks set as preferred") + require.FailNow("prefID does not match coreBlk1 or coreBlk2") return nil } } @@ -382,14 +359,12 @@ func TestProposerBlocksAreBuiltOnPreferredProBlock(t *testing.T) { case bytes.Equal(b, coreBlk2.Bytes()): return coreBlk2, nil default: - t.Fatalf("Wrong bytes") + require.FailNow("bytes do not match coreBlk1 or coreBlk2") return nil, nil } } - if err := proVM.SetPreference(context.Background(), proBlk2.ID()); err != nil { - t.Fatal("Could not set preference") - } + require.NoError(proVM.SetPreference(context.Background(), proBlk2.ID())) // build block... 
coreBlk3 := &snowman.TestBlock{ @@ -408,17 +383,15 @@ func TestProposerBlocksAreBuiltOnPreferredProBlock(t *testing.T) { proVM.Set(proVM.Time().Add(proposer.MaxDelay)) builtBlk, err := proVM.BuildBlock(context.Background()) - if err != nil { - t.Fatalf("unexpectedly could not build block due to %s", err) - } + require.NoError(err) // ...show that parent is the preferred one - if builtBlk.Parent() != proBlk2.ID() { - t.Fatal("proposer block not built on preferred parent") - } + require.Equal(proBlk2.ID(), builtBlk.Parent()) } func TestCoreBlocksMustBeBuiltOnPreferredCoreBlock(t *testing.T) { + require := require.New(t) + coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, time.Time{}, 0) // enable ProBlks coreBlk1 := &snowman.TestBlock{ @@ -435,9 +408,7 @@ func TestCoreBlocksMustBeBuiltOnPreferredCoreBlock(t *testing.T) { return coreBlk1, nil } proBlk1, err := proVM.BuildBlock(context.Background()) - if err != nil { - t.Fatal("Could not build proBlk1") - } + require.NoError(err) coreBlk2 := &snowman.TestBlock{ TestDecidable: choices.TestDecidable{ @@ -453,16 +424,10 @@ func TestCoreBlocksMustBeBuiltOnPreferredCoreBlock(t *testing.T) { return coreBlk2, nil } proBlk2, err := proVM.BuildBlock(context.Background()) - if err != nil { - t.Fatal("Could not build proBlk2") - } - if proBlk1.ID() == proBlk2.ID() { - t.Fatal("proBlk1 and proBlk2 should be different for this test") - } + require.NoError(err) + require.NotEqual(proBlk1.ID(), proBlk2.ID()) - if err := proBlk2.Verify(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(proBlk2.Verify(context.Background())) // ...and set one as preferred var wronglyPreferredcoreBlk *snowman.TestBlock @@ -475,7 +440,7 @@ func TestCoreBlocksMustBeBuiltOnPreferredCoreBlock(t *testing.T) { wronglyPreferredcoreBlk = coreBlk1 return nil default: - t.Fatal("Unknown core Blocks set as preferred") + require.FailNow("Unknown core Blocks set as preferred") return nil } } @@ -486,14 +451,12 @@ func TestCoreBlocksMustBeBuiltOnPreferredCoreBlock(t *testing.T) { case bytes.Equal(b, coreBlk2.Bytes()): return coreBlk2, nil default: - t.Fatalf("Wrong bytes") + require.FailNow("Wrong bytes") return nil, nil } } - if err := proVM.SetPreference(context.Background(), proBlk2.ID()); err != nil { - t.Fatal("Could not set preference") - } + require.NoError(proVM.SetPreference(context.Background(), proBlk2.ID())) // build block... coreBlk3 := &snowman.TestBlock{ @@ -512,17 +475,16 @@ func TestCoreBlocksMustBeBuiltOnPreferredCoreBlock(t *testing.T) { proVM.Set(proVM.Time().Add(proposer.MaxDelay)) blk, err := proVM.BuildBlock(context.Background()) - if err != nil { - t.Fatal(err) - } + require.NoError(err) - if err := blk.Verify(context.Background()); err == nil { - t.Fatal("coreVM does not build on preferred coreBlock. 
It should err") - } + err = blk.Verify(context.Background()) + require.ErrorIs(err, errInnerParentMismatch) } // VM.ParseBlock tests section func TestCoreBlockFailureCauseProposerBlockParseFailure(t *testing.T) { + require := require.New(t) + coreVM, _, proVM, _, _ := initTestProposerVM(t, time.Time{}, 0) // enable ProBlks innerBlk := &snowman.TestBlock{ @@ -541,9 +503,7 @@ func TestCoreBlockFailureCauseProposerBlockParseFailure(t *testing.T) { proVM.ctx.ChainID, proVM.stakingLeafSigner, ) - if err != nil { - t.Fatal("could not build stateless block") - } + require.NoError(err) proBlk := postForkBlock{ SignedBlock: slb, postForkCommonComponents: postForkCommonComponents{ @@ -554,13 +514,13 @@ func TestCoreBlockFailureCauseProposerBlockParseFailure(t *testing.T) { } // test - - if _, err := proVM.ParseBlock(context.Background(), proBlk.Bytes()); err == nil { - t.Fatal("failed parsing proposervm.Block. Error:", err) - } + _, err = proVM.ParseBlock(context.Background(), proBlk.Bytes()) + require.ErrorIs(err, errMarshallingFailed) } func TestTwoProBlocksWrappingSameCoreBlockCanBeParsed(t *testing.T) { + require := require.New(t) + coreVM, _, proVM, gencoreBlk, _ := initTestProposerVM(t, time.Time{}, 0) // enable ProBlks // create two Proposer blocks at the same height @@ -571,9 +531,7 @@ func TestTwoProBlocksWrappingSameCoreBlockCanBeParsed(t *testing.T) { TimestampV: proVM.Time(), } coreVM.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { - if !bytes.Equal(b, innerBlk.Bytes()) { - t.Fatalf("Wrong bytes") - } + require.Equal(innerBlk.Bytes(), b) return innerBlk, nil } @@ -586,9 +544,7 @@ func TestTwoProBlocksWrappingSameCoreBlockCanBeParsed(t *testing.T) { proVM.ctx.ChainID, proVM.stakingLeafSigner, ) - if err != nil { - t.Fatal("could not build stateless block") - } + require.NoError(err) proBlk1 := postForkBlock{ SignedBlock: slb1, postForkCommonComponents: postForkCommonComponents{ @@ -607,9 +563,7 @@ func TestTwoProBlocksWrappingSameCoreBlockCanBeParsed(t *testing.T) { proVM.ctx.ChainID, proVM.stakingLeafSigner, ) - if err != nil { - t.Fatal("could not build stateless block") - } + require.NoError(err) proBlk2 := postForkBlock{ SignedBlock: slb2, postForkCommonComponents: postForkCommonComponents{ @@ -619,30 +573,22 @@ func TestTwoProBlocksWrappingSameCoreBlockCanBeParsed(t *testing.T) { }, } - if proBlk1.ID() == proBlk2.ID() { - t.Fatal("Test requires proBlk1 and proBlk2 to be different") - } + require.NotEqual(proBlk1.ID(), proBlk2.ID()) // Show that both can be parsed and retrieved parsedBlk1, err := proVM.ParseBlock(context.Background(), proBlk1.Bytes()) - if err != nil { - t.Fatal("proposerVM could not parse parsedBlk1") - } + require.NoError(err) parsedBlk2, err := proVM.ParseBlock(context.Background(), proBlk2.Bytes()) - if err != nil { - t.Fatal("proposerVM could not parse parsedBlk2") - } + require.NoError(err) - if parsedBlk1.ID() != proBlk1.ID() { - t.Fatal("error in parsing block") - } - if parsedBlk2.ID() != proBlk2.ID() { - t.Fatal("error in parsing block") - } + require.Equal(proBlk1.ID(), parsedBlk1.ID()) + require.Equal(proBlk2.ID(), parsedBlk2.ID()) } // VM.BuildBlock and VM.ParseBlock interoperability tests section func TestTwoProBlocksWithSameParentCanBothVerify(t *testing.T) { + require := require.New(t) + coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, time.Time{}, 0) // enable ProBlks // one block is built from this proVM @@ -657,12 +603,8 @@ func TestTwoProBlocksWithSameParentCanBothVerify(t *testing.T) { } builtBlk, err := 
proVM.BuildBlock(context.Background()) - if err != nil { - t.Fatal("Could not build block") - } - if err := builtBlk.Verify(context.Background()); err != nil { - t.Fatal("Built block does not verify") - } + require.NoError(err) + require.NoError(builtBlk.Verify(context.Background())) // another block with same parent comes from network and is parsed netcoreBlk := &snowman.TestBlock{ @@ -680,15 +622,13 @@ func TestTwoProBlocksWithSameParentCanBothVerify(t *testing.T) { case bytes.Equal(b, netcoreBlk.Bytes()): return netcoreBlk, nil default: - t.Fatalf("Unknown bytes") + require.FailNow("Unknown bytes") return nil, nil } } pChainHeight, err := proVM.ctx.ValidatorState.GetCurrentHeight(context.Background()) - if err != nil { - t.Fatal("could not retrieve pChain height") - } + require.NoError(err) netSlb, err := statelessblock.BuildUnsigned( proVM.preferred, @@ -696,9 +636,7 @@ func TestTwoProBlocksWithSameParentCanBothVerify(t *testing.T) { pChainHeight, netcoreBlk.Bytes(), ) - if err != nil { - t.Fatal("could not build stateless block") - } + require.NoError(err) netProBlk := postForkBlock{ SignedBlock: netSlb, postForkCommonComponents: postForkCommonComponents{ @@ -709,9 +647,7 @@ func TestTwoProBlocksWithSameParentCanBothVerify(t *testing.T) { } // prove that also block from network verifies - if err := netProBlk.Verify(context.Background()); err != nil { - t.Fatal("block from network does not verify") - } + require.NoError(netProBlk.Verify(context.Background())) } // Pre Fork tests section @@ -722,19 +658,13 @@ func TestPreFork_Initialize(t *testing.T) { // checks blkID, err := proVM.LastAccepted(context.Background()) - if err != nil { - t.Fatal("failed to retrieve last accepted block") - } + require.NoError(err) rtvdBlk, err := proVM.GetBlock(context.Background(), blkID) - if err != nil { - t.Fatal("Block should be returned without calling core vm") - } + require.NoError(err) require.IsType(&preForkBlock{}, rtvdBlk) - if !bytes.Equal(rtvdBlk.Bytes(), coreGenBlk.Bytes()) { - t.Fatal("Stored block is not genesis") - } + require.Equal(coreGenBlk.Bytes(), rtvdBlk.Bytes()) } func TestPreFork_BuildBlock(t *testing.T) { @@ -758,28 +688,18 @@ func TestPreFork_BuildBlock(t *testing.T) { // test builtBlk, err := proVM.BuildBlock(context.Background()) - if err != nil { - t.Fatal("proposerVM could not build block") - } + require.NoError(err) require.IsType(&preForkBlock{}, builtBlk) - if builtBlk.ID() != coreBlk.ID() { - t.Fatal("unexpected built block") - } - if !bytes.Equal(builtBlk.Bytes(), coreBlk.Bytes()) { - t.Fatal("unexpected built block") - } + require.Equal(coreBlk.ID(), builtBlk.ID()) + require.Equal(coreBlk.Bytes(), builtBlk.Bytes()) // test coreVM.GetBlockF = func(context.Context, ids.ID) (snowman.Block, error) { return coreBlk, nil } storedBlk, err := proVM.GetBlock(context.Background(), builtBlk.ID()) - if err != nil { - t.Fatal("proposerVM has not cached built block") - } - if storedBlk.ID() != builtBlk.ID() { - t.Fatal("proposerVM retrieved wrong block") - } + require.NoError(err) + require.Equal(builtBlk.ID(), storedBlk.ID()) } func TestPreFork_ParseBlock(t *testing.T) { @@ -796,40 +716,28 @@ func TestPreFork_ParseBlock(t *testing.T) { } coreVM.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { - if !bytes.Equal(b, coreBlk.Bytes()) { - t.Fatalf("Wrong bytes") - } + require.Equal(coreBlk.Bytes(), b) return coreBlk, nil } parsedBlk, err := proVM.ParseBlock(context.Background(), coreBlk.Bytes()) - if err != nil { - t.Fatal("Could not parse naked core block") - } 
+ require.NoError(err) require.IsType(&preForkBlock{}, parsedBlk) - if parsedBlk.ID() != coreBlk.ID() { - t.Fatal("Parsed block does not match expected block") - } - if !bytes.Equal(parsedBlk.Bytes(), coreBlk.Bytes()) { - t.Fatal("Parsed block does not match expected block") - } + require.Equal(coreBlk.ID(), parsedBlk.ID()) + require.Equal(coreBlk.Bytes(), parsedBlk.Bytes()) coreVM.GetBlockF = func(_ context.Context, id ids.ID) (snowman.Block, error) { - if id != coreBlk.ID() { - t.Fatalf("Unknown core block") - } + require.Equal(coreBlk.ID(), id) return coreBlk, nil } storedBlk, err := proVM.GetBlock(context.Background(), parsedBlk.ID()) - if err != nil { - t.Fatal("proposerVM has not cached parsed block") - } - if storedBlk.ID() != parsedBlk.ID() { - t.Fatal("proposerVM retrieved wrong block") - } + require.NoError(err) + require.Equal(parsedBlk.ID(), storedBlk.ID()) } func TestPreFork_SetPreference(t *testing.T) { + require := require.New(t) + coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, mockable.MaxTime, 0) // disable ProBlks coreBlk0 := &snowman.TestBlock{ @@ -846,9 +754,7 @@ func TestPreFork_SetPreference(t *testing.T) { return coreBlk0, nil } builtBlk, err := proVM.BuildBlock(context.Background()) - if err != nil { - t.Fatal("Could not build proposer block") - } + require.NoError(err) coreVM.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { @@ -870,9 +776,7 @@ func TestPreFork_SetPreference(t *testing.T) { return nil, errUnknownBlock } } - if err := proVM.SetPreference(context.Background(), builtBlk.ID()); err != nil { - t.Fatal("Could not set preference on proposer Block") - } + require.NoError(proVM.SetPreference(context.Background(), builtBlk.ID())) coreBlk1 := &snowman.TestBlock{ TestDecidable: choices.TestDecidable{ @@ -888,15 +792,13 @@ func TestPreFork_SetPreference(t *testing.T) { return coreBlk1, nil } nextBlk, err := proVM.BuildBlock(context.Background()) - if err != nil { - t.Fatalf("Could not build proposer block %s", err) - } - if nextBlk.Parent() != builtBlk.ID() { - t.Fatal("Preferred block should be parent of next built block") - } + require.NoError(err) + require.Equal(builtBlk.ID(), nextBlk.Parent()) } func TestExpiredBuildBlock(t *testing.T) { + require := require.New(t) + coreGenBlk := &snowman.TestBlock{ TestDecidable: choices.TestDecidable{ IDV: ids.GenerateTestID(), @@ -981,7 +883,7 @@ func TestExpiredBuildBlock(t *testing.T) { } // make sure that DBs are compressed correctly - err := proVM.Initialize( + require.NoError(proVM.Initialize( context.Background(), ctx, dbManager, @@ -991,21 +893,14 @@ func TestExpiredBuildBlock(t *testing.T) { toEngine, nil, nil, - ) - if err != nil { - t.Fatalf("failed to initialize proposerVM with %s", err) - } + )) // Initialize shouldn't be called again coreVM.InitializeF = nil - if err := proVM.SetState(context.Background(), snow.NormalOp); err != nil { - t.Fatal(err) - } + require.NoError(proVM.SetState(context.Background(), snow.NormalOp)) - if err := proVM.SetPreference(context.Background(), coreGenBlk.IDV); err != nil { - t.Fatal(err) - } + require.NoError(proVM.SetPreference(context.Background(), coreGenBlk.IDV)) // Make sure that passing a message works toScheduler <- common.PendingTxs @@ -1030,9 +925,7 @@ func TestExpiredBuildBlock(t *testing.T) { 0, coreBlk.Bytes(), ) - if err != nil { - t.Fatal(err) - } + require.NoError(err) coreVM.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { @@ -1058,29 +951,22 @@ func 
TestExpiredBuildBlock(t *testing.T) { proVM.Clock.Set(statelessBlock.Timestamp()) parsedBlock, err := proVM.ParseBlock(context.Background(), statelessBlock.Bytes()) - if err != nil { - t.Fatal(err) - } + require.NoError(err) - if err := parsedBlock.Verify(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(parsedBlock.Verify(context.Background())) - if err := proVM.SetPreference(context.Background(), parsedBlock.ID()); err != nil { - t.Fatal(err) - } + require.NoError(proVM.SetPreference(context.Background(), parsedBlock.ID())) coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { - t.Fatal("unexpectedly called build block") - panic("unexpectedly called build block") + require.FailNow("unexpectedly called build block") + return nil, nil } // The first notification will be read from the consensus engine <-toEngine - if _, err := proVM.BuildBlock(context.Background()); err == nil { - t.Fatal("build block when the proposer window hasn't started") - } + _, err = proVM.BuildBlock(context.Background()) + require.ErrorIs(err, errProposerWindowNotStarted) proVM.Set(statelessBlock.Timestamp().Add(proposer.MaxDelay)) proVM.Scheduler.SetBuildBlockTime(time.Now()) @@ -1111,6 +997,8 @@ func (b *wrappedBlock) Verify(ctx context.Context) error { } func TestInnerBlockDeduplication(t *testing.T) { + require := require.New(t) + coreVM, _, proVM, coreGenBlk, _ := initTestProposerVM(t, time.Time{}, 0) // disable ProBlks coreBlk := &snowman.TestBlock{ @@ -1135,18 +1023,14 @@ func TestInnerBlockDeduplication(t *testing.T) { 0, coreBlk.Bytes(), ) - if err != nil { - t.Fatal(err) - } + require.NoError(err) statelessBlock1, err := statelessblock.BuildUnsigned( coreGenBlk.ID(), coreBlk.Timestamp(), 1, coreBlk.Bytes(), ) - if err != nil { - t.Fatal(err) - } + require.NoError(err) coreVM.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { @@ -1170,17 +1054,11 @@ func TestInnerBlockDeduplication(t *testing.T) { } parsedBlock0, err := proVM.ParseBlock(context.Background(), statelessBlock0.Bytes()) - if err != nil { - t.Fatal(err) - } + require.NoError(err) - if err := parsedBlock0.Verify(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(parsedBlock0.Verify(context.Background())) - if err := proVM.SetPreference(context.Background(), parsedBlock0.ID()); err != nil { - t.Fatal(err) - } + require.NoError(proVM.SetPreference(context.Background(), parsedBlock0.ID())) coreVM.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { @@ -1204,24 +1082,18 @@ func TestInnerBlockDeduplication(t *testing.T) { } parsedBlock1, err := proVM.ParseBlock(context.Background(), statelessBlock1.Bytes()) - if err != nil { - t.Fatal(err) - } + require.NoError(err) - if err := parsedBlock1.Verify(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(parsedBlock1.Verify(context.Background())) - if err := proVM.SetPreference(context.Background(), parsedBlock1.ID()); err != nil { - t.Fatal(err) - } + require.NoError(proVM.SetPreference(context.Background(), parsedBlock1.ID())) - if err := parsedBlock1.Accept(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(parsedBlock1.Accept(context.Background())) } func TestInnerVMRollback(t *testing.T) { + require := require.New(t) + coreGenBlk := &snowman.TestBlock{ TestDecidable: choices.TestDecidable{ IDV: ids.GenerateTestID(), @@ -1299,7 +1171,7 @@ func TestInnerVMRollback(t *testing.T) { pTestCert.Leaf, ) - err := proVM.Initialize( + 
require.NoError(proVM.Initialize( context.Background(), ctx, dbManager, @@ -1309,18 +1181,11 @@ func TestInnerVMRollback(t *testing.T) { nil, nil, nil, - ) - if err != nil { - t.Fatalf("failed to initialize proposerVM with %s", err) - } + )) - if err := proVM.SetState(context.Background(), snow.NormalOp); err != nil { - t.Fatal(err) - } + require.NoError(proVM.SetState(context.Background(), snow.NormalOp)) - if err := proVM.SetPreference(context.Background(), coreGenBlk.IDV); err != nil { - t.Fatal(err) - } + require.NoError(proVM.SetPreference(context.Background(), coreGenBlk.IDV)) coreBlk := &snowman.TestBlock{ TestDecidable: choices.TestDecidable{ @@ -1338,9 +1203,7 @@ func TestInnerVMRollback(t *testing.T) { 0, coreBlk.Bytes(), ) - if err != nil { - t.Fatal(err) - } + require.NoError(err) coreVM.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { @@ -1366,34 +1229,20 @@ func TestInnerVMRollback(t *testing.T) { proVM.Clock.Set(statelessBlock.Timestamp()) parsedBlock, err := proVM.ParseBlock(context.Background(), statelessBlock.Bytes()) - if err != nil { - t.Fatal(err) - } + require.NoError(err) - if status := parsedBlock.Status(); status != choices.Processing { - t.Fatalf("expected status to be %s but was %s", choices.Processing, status) - } + require.Equal(choices.Processing, parsedBlock.Status()) - if err := parsedBlock.Verify(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(parsedBlock.Verify(context.Background())) - if err := proVM.SetPreference(context.Background(), parsedBlock.ID()); err != nil { - t.Fatal(err) - } + require.NoError(proVM.SetPreference(context.Background(), parsedBlock.ID())) - if err := parsedBlock.Accept(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(parsedBlock.Accept(context.Background())) fetchedBlock, err := proVM.GetBlock(context.Background(), parsedBlock.ID()) - if err != nil { - t.Fatal(err) - } + require.NoError(err) - if status := fetchedBlock.Status(); status != choices.Accepted { - t.Fatalf("unexpected status %s. Expected %s", status, choices.Accepted) - } + require.Equal(choices.Accepted, fetchedBlock.Status()) // Restart the node and have the inner VM rollback state. 
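Aside (a sketch, not taken from any one hunk): the remaining hunks all follow the same two-step shape. Success paths collapse `err := call(...); if err != nil { t.Fatal(...) }` into `require.NoError(call(...))`, even around multi-line calls such as `proVM.Initialize(...)`; expected-failure paths capture `err` and assert a sentinel with `require.ErrorIs` (which uses `errors.Is` semantics) instead of comparing with `==`. In the self-contained illustration below, `errSketch`, `doWork`, and the test name are hypothetical stand-ins:

package proposervm

import (
	"errors"
	"testing"

	"github.com/stretchr/testify/require"
)

// errSketch stands in for sentinels like errInnerParentMismatch or
// database.ErrNotFound used in the real hunks.
var errSketch = errors.New("sketch: operation failed")

func doWork(fail bool) error {
	if fail {
		return errSketch
	}
	return nil
}

func TestRequirePattern(t *testing.T) {
	require := require.New(t) // bind once; assertions below omit t

	// Success path: the err-then-t.Fatal block collapses to one line.
	require.NoError(doWork(false))

	// Failure path: compare against the sentinel with errors.Is
	// semantics rather than ==.
	err := doWork(true)
	require.ErrorIs(err, errSketch)
}

Binding `require.New(t)` once per test is what lets the converted assertions drop the explicit `t` argument throughout these files.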
@@ -1408,7 +1257,7 @@ func TestInnerVMRollback(t *testing.T) { pTestCert.Leaf, ) - err = proVM.Initialize( + require.NoError(proVM.Initialize( context.Background(), ctx, dbManager, @@ -1418,31 +1267,22 @@ func TestInnerVMRollback(t *testing.T) { nil, nil, nil, - ) - if err != nil { - t.Fatalf("failed to initialize proposerVM with %s", err) - } + )) lastAcceptedID, err := proVM.LastAccepted(context.Background()) - if err != nil { - t.Fatal(err) - } + require.NoError(err) - if lastAcceptedID != coreGenBlk.IDV { - t.Fatalf("failed to roll back the VM to the last accepted block") - } + require.Equal(coreGenBlk.IDV, lastAcceptedID) parsedBlock, err = proVM.ParseBlock(context.Background(), statelessBlock.Bytes()) - if err != nil { - t.Fatal(err) - } + require.NoError(err) - if status := parsedBlock.Status(); status != choices.Processing { - t.Fatalf("expected status to be %s but was %s", choices.Processing, status) - } + require.Equal(choices.Processing, parsedBlock.Status()) } func TestBuildBlockDuringWindow(t *testing.T) { + require := require.New(t) + coreVM, valState, proVM, coreGenBlk, _ := initTestProposerVM(t, time.Time{}, 0) // enable ProBlks valState.GetValidatorSetF = func(context.Context, uint64, ids.ID) (map[ids.NodeID]*validators.GetValidatorOutput, error) { @@ -1480,9 +1320,7 @@ func TestBuildBlockDuringWindow(t *testing.T) { 0, coreBlk0.Bytes(), ) - if err != nil { - t.Fatal(err) - } + require.NoError(err) coreVM.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { @@ -1512,42 +1350,26 @@ func TestBuildBlockDuringWindow(t *testing.T) { proVM.Clock.Set(statelessBlock0.Timestamp()) statefulBlock0, err := proVM.ParseBlock(context.Background(), statelessBlock0.Bytes()) - if err != nil { - t.Fatal(err) - } + require.NoError(err) - if err := statefulBlock0.Verify(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(statefulBlock0.Verify(context.Background())) - if err := proVM.SetPreference(context.Background(), statefulBlock0.ID()); err != nil { - t.Fatal(err) - } + require.NoError(proVM.SetPreference(context.Background(), statefulBlock0.ID())) coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return coreBlk1, nil } statefulBlock1, err := proVM.BuildBlock(context.Background()) - if err != nil { - t.Fatal(err) - } + require.NoError(err) - if err := statefulBlock1.Verify(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(statefulBlock1.Verify(context.Background())) - if err := proVM.SetPreference(context.Background(), statefulBlock1.ID()); err != nil { - t.Fatal(err) - } + require.NoError(proVM.SetPreference(context.Background(), statefulBlock1.ID())) - if err := statefulBlock0.Accept(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(statefulBlock0.Accept(context.Background())) - if err := statefulBlock1.Accept(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(statefulBlock1.Accept(context.Background())) } // Ensure that Accepting a PostForkBlock (A) containing core block (X) causes @@ -1559,6 +1381,8 @@ func TestBuildBlockDuringWindow(t *testing.T) { // | // C(Z) func TestTwoForks_OneIsAccepted(t *testing.T) { + require := require.New(t) + forkTime := time.Unix(0, 0) coreVM, _, proVM, gBlock, _ := initTestProposerVM(t, forkTime, 0) @@ -1578,13 +1402,9 @@ func TestTwoForks_OneIsAccepted(t *testing.T) { return xBlock, nil } aBlock, err := proVM.BuildBlock(context.Background()) - if err != nil { - t.Fatalf("proposerVM could not build block due to 
%s", err) - } + require.NoError(err) coreVM.BuildBlockF = nil - if err := aBlock.Verify(context.Background()); err != nil { - t.Fatalf("could not verify valid block due to %s", err) - } + require.NoError(aBlock.Verify(context.Background())) // use a different way to construct pre-fork block Y and post-fork block B yBlock := &snowman.TestBlock{ @@ -1604,9 +1424,7 @@ func TestTwoForks_OneIsAccepted(t *testing.T) { defaultPChainHeight, yBlock.Bytes(), ) - if err != nil { - t.Fatalf("fail to manually build a block due to %s", err) - } + require.NoError(err) bBlock := postForkBlock{ SignedBlock: ySlb, @@ -1617,9 +1435,7 @@ func TestTwoForks_OneIsAccepted(t *testing.T) { }, } - if err := bBlock.Verify(context.Background()); err != nil { - t.Fatalf("could not verify valid block due to %s", err) - } + require.NoError(bBlock.Verify(context.Background())) // append Z/C to Y/B zBlock := &snowman.TestBlock{ @@ -1636,47 +1452,30 @@ func TestTwoForks_OneIsAccepted(t *testing.T) { coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return zBlock, nil } - if err := proVM.SetPreference(context.Background(), bBlock.ID()); err != nil { - t.Fatal(err) - } + require.NoError(proVM.SetPreference(context.Background(), bBlock.ID())) cBlock, err := proVM.BuildBlock(context.Background()) - if err != nil { - t.Fatalf("proposerVM could not build block due to %s", err) - } + require.NoError(err) coreVM.BuildBlockF = nil - if err := cBlock.Verify(context.Background()); err != nil { - t.Fatalf("could not verify valid block due to %s", err) - } + require.NoError(cBlock.Verify(context.Background())) - if aBlock.Parent() != bBlock.Parent() || - zBlock.Parent() != yBlock.ID() || - cBlock.Parent() != bBlock.ID() { - t.Fatal("inconsistent parent") - } + require.Equal(bBlock.Parent(), aBlock.Parent()) + require.Equal(yBlock.ID(), zBlock.Parent()) + require.Equal(bBlock.ID(), cBlock.Parent()) - if yBlock.Status() == choices.Rejected { - t.Fatal("yBlock should not be rejected") - } + require.NotEqual(choices.Rejected, yBlock.Status()) // accept A - if err := aBlock.Accept(context.Background()); err != nil { - t.Fatalf("could not accept valid block due to %s", err) - } - - if xBlock.Status() != choices.Accepted { - t.Fatal("xBlock should be accepted because aBlock is accepted") - } + require.NoError(aBlock.Accept(context.Background())) - if yBlock.Status() != choices.Rejected { - t.Fatal("yBlock should be rejected") - } - if zBlock.Status() != choices.Rejected { - t.Fatal("zBlock should be rejected") - } + require.Equal(choices.Accepted, xBlock.Status()) + require.Equal(choices.Rejected, yBlock.Status()) + require.Equal(choices.Rejected, zBlock.Status()) } func TestTooFarAdvanced(t *testing.T) { + require := require.New(t) + forkTime := time.Unix(0, 0) coreVM, _, proVM, gBlock, _ := initTestProposerVM(t, forkTime, 0) @@ -1706,12 +1505,8 @@ func TestTooFarAdvanced(t *testing.T) { return xBlock, nil } aBlock, err := proVM.BuildBlock(context.Background()) - if err != nil { - t.Fatalf("proposerVM could not build block due to %s", err) - } - if err := aBlock.Verify(context.Background()); err != nil { - t.Fatalf("could not verify valid block due to %s", err) - } + require.NoError(err) + require.NoError(aBlock.Verify(context.Background())) ySlb, err := statelessblock.BuildUnsigned( aBlock.ID(), @@ -1719,9 +1514,7 @@ func TestTooFarAdvanced(t *testing.T) { defaultPChainHeight, yBlock.Bytes(), ) - if err != nil { - t.Fatalf("fail to manually build a block due to %s", err) - } + require.NoError(err) bBlock := postForkBlock{ 
SignedBlock: ySlb, @@ -1732,9 +1525,8 @@ func TestTooFarAdvanced(t *testing.T) { }, } - if err := bBlock.Verify(context.Background()); err != errProposerWindowNotStarted { - t.Fatal("should have errored errProposerWindowNotStarted") - } + err = bBlock.Verify(context.Background()) + require.ErrorIs(err, errProposerWindowNotStarted) ySlb, err = statelessblock.BuildUnsigned( aBlock.ID(), @@ -1743,9 +1535,7 @@ func TestTooFarAdvanced(t *testing.T) { yBlock.Bytes(), ) - if err != nil { - t.Fatalf("fail to manually build a block due to %s", err) - } + require.NoError(err) bBlock = postForkBlock{ SignedBlock: ySlb, @@ -1756,9 +1546,8 @@ func TestTooFarAdvanced(t *testing.T) { }, } - if err := bBlock.Verify(context.Background()); err != errTimeTooAdvanced { - t.Fatal("should have errored errTimeTooAdvanced") - } + err = bBlock.Verify(context.Background()) + require.ErrorIs(err, errTimeTooAdvanced) } // Ensure that Accepting a PostForkOption (B) causes both the other option and @@ -1815,51 +1604,31 @@ func TestTwoOptions_OneIsAccepted(t *testing.T) { return xBlock, nil } aBlockIntf, err := proVM.BuildBlock(context.Background()) - if err != nil { - t.Fatal("could not build post fork oracle block") - } + require.NoError(err) require.IsType(&postForkBlock{}, aBlockIntf) aBlock := aBlockIntf.(*postForkBlock) opts, err := aBlock.Options(context.Background()) - if err != nil { - t.Fatal("could not retrieve options from post fork oracle block") - } + require.NoError(err) - if err := aBlock.Verify(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(aBlock.Verify(context.Background())) bBlock := opts[0] - if err := bBlock.Verify(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(bBlock.Verify(context.Background())) cBlock := opts[1] - if err := cBlock.Verify(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(cBlock.Verify(context.Background())) - if err := aBlock.Accept(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(aBlock.Accept(context.Background())) - if err := bBlock.Accept(context.Background()); err != nil { - t.Fatal(err) - } + require.NoError(bBlock.Accept(context.Background())) // the other pre-fork option should be rejected - if xBlock.opts[1].Status() != choices.Rejected { - t.Fatal("the pre-fork option block should have be rejected") - } + require.Equal(choices.Rejected, xBlock.opts[1].Status()) // the other post-fork option should also be rejected - if err := cBlock.Reject(context.Background()); err != nil { - t.Fatal("the post-fork option block should have be rejected") - } + require.NoError(cBlock.Reject(context.Background())) - if cBlock.Status() != choices.Rejected { - t.Fatal("cBlock status should not be accepted") - } + require.Equal(choices.Rejected, cBlock.Status()) } // Ensure that given the chance, built blocks will reference a lagged P-chain diff --git a/vms/registry/vm_registry_test.go b/vms/registry/vm_registry_test.go index fd8f096a547..6b04329f3bc 100644 --- a/vms/registry/vm_registry_test.go +++ b/vms/registry/vm_registry_test.go @@ -24,6 +24,8 @@ var ( // Tests the happy case where Reload succeeds. 
func TestReload_Success(t *testing.T) { + require := require.New(t) + resources := initVMRegistryTest(t) defer resources.ctrl.Finish() @@ -56,26 +58,30 @@ func TestReload_Success(t *testing.T) { Return(nil) installedVMs, failedVMs, err := resources.vmRegistry.Reload(context.Background()) - require.ElementsMatch(t, []ids.ID{id3, id4}, installedVMs) - require.Empty(t, failedVMs) - require.NoError(t, err) + require.ElementsMatch([]ids.ID{id3, id4}, installedVMs) + require.Empty(failedVMs) + require.NoError(err) } // Tests that we fail if we're not able to get the vms on disk func TestReload_GetNewVMsFails(t *testing.T) { + require := require.New(t) + resources := initVMRegistryTest(t) defer resources.ctrl.Finish() resources.mockVMGetter.EXPECT().Get().Times(1).Return(nil, nil, errTest) installedVMs, failedVMs, err := resources.vmRegistry.Reload(context.Background()) - require.Empty(t, installedVMs) - require.Empty(t, failedVMs) - require.ErrorIs(t, err, errTest) + require.Empty(installedVMs) + require.Empty(failedVMs) + require.ErrorIs(err, errTest) } // Tests that if we fail to register a VM, we fail. func TestReload_PartialRegisterFailure(t *testing.T) { + require := require.New(t) + resources := initVMRegistryTest(t) defer resources.ctrl.Finish() @@ -109,15 +115,17 @@ func TestReload_PartialRegisterFailure(t *testing.T) { installedVMs, failedVMs, err := resources.vmRegistry.Reload(context.Background()) - require.Len(t, failedVMs, 1) - require.ErrorIs(t, failedVMs[id3], errTest) - require.Len(t, installedVMs, 1) - require.Equal(t, id4, installedVMs[0]) - require.NoError(t, err) + require.Len(failedVMs, 1) + require.ErrorIs(failedVMs[id3], errTest) + require.Len(installedVMs, 1) + require.Equal(id4, installedVMs[0]) + require.NoError(err) } // Tests the happy case where Reload succeeds. func TestReloadWithReadLock_Success(t *testing.T) { + require := require.New(t) + resources := initVMRegistryTest(t) defer resources.ctrl.Finish() @@ -150,26 +158,30 @@ func TestReloadWithReadLock_Success(t *testing.T) { Return(nil) installedVMs, failedVMs, err := resources.vmRegistry.ReloadWithReadLock(context.Background()) - require.ElementsMatch(t, []ids.ID{id3, id4}, installedVMs) - require.Empty(t, failedVMs) - require.NoError(t, err) + require.ElementsMatch([]ids.ID{id3, id4}, installedVMs) + require.Empty(failedVMs) + require.NoError(err) } // Tests that we fail if we're not able to get the vms on disk func TestReloadWithReadLock_GetNewVMsFails(t *testing.T) { + require := require.New(t) + resources := initVMRegistryTest(t) defer resources.ctrl.Finish() resources.mockVMGetter.EXPECT().Get().Times(1).Return(nil, nil, errTest) installedVMs, failedVMs, err := resources.vmRegistry.ReloadWithReadLock(context.Background()) - require.Empty(t, installedVMs) - require.Empty(t, failedVMs) - require.ErrorIs(t, err, errTest) + require.Empty(installedVMs) + require.Empty(failedVMs) + require.ErrorIs(err, errTest) } // Tests that if we fail to register a VM, we fail. 
func TestReloadWithReadLock_PartialRegisterFailure(t *testing.T) { + require := require.New(t) + resources := initVMRegistryTest(t) defer resources.ctrl.Finish() @@ -203,11 +215,11 @@ func TestReloadWithReadLock_PartialRegisterFailure(t *testing.T) { installedVMs, failedVMs, err := resources.vmRegistry.ReloadWithReadLock(context.Background()) - require.Len(t, failedVMs, 1) - require.ErrorIs(t, failedVMs[id3], errTest) - require.Len(t, installedVMs, 1) - require.Equal(t, id4, installedVMs[0]) - require.NoError(t, err) + require.Len(failedVMs, 1) + require.ErrorIs(failedVMs[id3], errTest) + require.Len(installedVMs, 1) + require.Equal(id4, installedVMs[0]) + require.NoError(err) } type registryTestResources struct { diff --git a/vms/rpcchainvm/vm_test.go b/vms/rpcchainvm/vm_test.go index 968a04fe0b0..e82af0f713c 100644 --- a/vms/rpcchainvm/vm_test.go +++ b/vms/rpcchainvm/vm_test.go @@ -118,9 +118,7 @@ func TestVMServerInterface(t *testing.T) { } slices.Sort(gotMethods) - if !reflect.DeepEqual(gotMethods, wantMethods) { - t.Errorf("\ngot: %q\nwant: %q", gotMethods, wantMethods) - } + require.Equal(t, wantMethods, gotMethods) } func TestRuntimeSubprocessBootstrap(t *testing.T) { diff --git a/vms/secp256k1fx/credential_test.go b/vms/secp256k1fx/credential_test.go index 382a51e9f9a..15496e1d7fa 100644 --- a/vms/secp256k1fx/credential_test.go +++ b/vms/secp256k1fx/credential_test.go @@ -15,16 +15,14 @@ import ( ) func TestCredentialVerify(t *testing.T) { - require := require.New(t) cred := Credential{} - require.NoError(cred.Verify()) + require.NoError(t, cred.Verify()) } func TestCredentialVerifyNil(t *testing.T) { - require := require.New(t) cred := (*Credential)(nil) err := cred.Verify() - require.ErrorIs(err, ErrNilCredential) + require.ErrorIs(t, err, ErrNilCredential) } func TestCredentialSerialize(t *testing.T) { @@ -91,8 +89,7 @@ func TestCredentialSerialize(t *testing.T) { } func TestCredentialNotState(t *testing.T) { - require := require.New(t) intf := interface{}(&Credential{}) _, ok := intf.(verify.State) - require.False(ok) + require.False(t, ok) } diff --git a/vms/secp256k1fx/fx_test.go b/vms/secp256k1fx/fx_test.go index fdc082c246e..c0e2663e6a2 100644 --- a/vms/secp256k1fx/fx_test.go +++ b/vms/secp256k1fx/fx_test.go @@ -52,20 +52,18 @@ func init() { } func TestFxInitialize(t *testing.T) { - require := require.New(t) vm := TestVM{ Codec: linearcodec.NewDefault(), Log: logging.NoLog{}, } fx := Fx{} - require.NoError(fx.Initialize(&vm)) + require.NoError(t, fx.Initialize(&vm)) } func TestFxInitializeInvalid(t *testing.T) { - require := require.New(t) fx := Fx{} err := fx.Initialize(nil) - require.ErrorIs(err, ErrWrongVMType) + require.ErrorIs(t, err, ErrWrongVMType) } func TestFxVerifyTransfer(t *testing.T) { From e43660513ed9e8726fe5ac3afd01f72016c78453 Mon Sep 17 00:00:00 2001 From: dhrubabasu <7675102+dhrubabasu@users.noreply.github.com> Date: Sun, 14 May 2023 13:13:54 -0400 Subject: [PATCH 45/79] nits --- database/test_database.go | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/database/test_database.go b/database/test_database.go index e3c05892749..bf974be9c7f 100644 --- a/database/test_database.go +++ b/database/test_database.go @@ -178,9 +178,9 @@ func TestSimpleKeyValueClosed(t *testing.T, db Database) { _, err = db.Get(key) require.ErrorIs(err, ErrClosed) - require.Equal(ErrClosed, db.Put(key, value)) - require.Equal(ErrClosed, db.Delete(key)) - require.Equal(ErrClosed, db.Close()) + require.ErrorIs(db.Put(key, value), ErrClosed) + 
require.ErrorIs(db.Delete(key), ErrClosed) + require.ErrorIs(db.Close(), ErrClosed) } // TestMemorySafetyDatabase ensures it is safe to modify a key after passing it @@ -232,7 +232,7 @@ func TestNewBatchClosed(t *testing.T, db Database) { require.NoError(batch.Put(key, value)) require.Positive(batch.Size()) - require.Equal(ErrClosed, batch.Write()) + require.ErrorIs(batch.Write(), ErrClosed) } // TestBatchPut tests to make sure that batched writes work as expected. @@ -264,7 +264,7 @@ func TestBatchPut(t *testing.T, db Database) { require.NoError(batch.Put(key, value)) require.NoError(db.Close()) - require.Equal(ErrClosed, batch.Write()) + require.ErrorIs(batch.Write(), ErrClosed) } // TestBatchDelete tests to make sure that batched deletes work as expected. @@ -489,7 +489,7 @@ func TestBatchReplayPropagateError(t *testing.T, db Database) { gomock.InOrder( mockBatch.EXPECT().Put(key1, value1).Return(ErrClosed).Times(1), ) - require.Equal(ErrClosed, batch.Replay(mockBatch)) + require.ErrorIs(batch.Replay(mockBatch), ErrClosed) mockBatch = NewMockBatch(ctrl) gomock.InOrder( @@ -804,7 +804,7 @@ func TestIteratorClosed(t *testing.T, db Database) { require.False(iterator.Next()) require.Nil(iterator.Key()) require.Nil(iterator.Value()) - require.Equal(ErrClosed, iterator.Error()) + require.ErrorIs(iterator.Error(), ErrClosed) } { @@ -816,7 +816,7 @@ func TestIteratorClosed(t *testing.T, db Database) { require.False(iterator.Next()) require.Nil(iterator.Key()) require.Nil(iterator.Value()) - require.Equal(ErrClosed, iterator.Error()) + require.ErrorIs(iterator.Error(), ErrClosed) } { @@ -828,7 +828,7 @@ func TestIteratorClosed(t *testing.T, db Database) { require.False(iterator.Next()) require.Nil(iterator.Key()) require.Nil(iterator.Value()) - require.Equal(ErrClosed, iterator.Error()) + require.ErrorIs(iterator.Error(), ErrClosed) } { @@ -840,7 +840,7 @@ func TestIteratorClosed(t *testing.T, db Database) { require.False(iterator.Next()) require.Nil(iterator.Key()) require.Nil(iterator.Value()) - require.Equal(ErrClosed, iterator.Error()) + require.ErrorIs(iterator.Error(), ErrClosed) } } @@ -877,7 +877,7 @@ func TestIteratorError(t *testing.T, db Database) { require.False(iterator.Next()) require.Nil(iterator.Key()) require.Nil(iterator.Value()) - require.Equal(ErrClosed, iterator.Error()) + require.ErrorIs(iterator.Error(), ErrClosed) } // TestIteratorErrorAfterRelease tests to make sure that an iterator that was @@ -899,7 +899,7 @@ func TestIteratorErrorAfterRelease(t *testing.T, db Database) { require.False(iterator.Next()) require.Nil(iterator.Key()) require.Nil(iterator.Value()) - require.Equal(ErrClosed, iterator.Error()) + require.ErrorIs(iterator.Error(), ErrClosed) } // TestCompactNoPanic tests to make sure compact never panics. 
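A note on the `require.ErrorIs` conversions in the patch above: unlike `require.Equal`, which demands the exact sentinel value, `require.ErrorIs` matches through `errors.Is`, so the assertions keep passing even if an implementation later wraps `ErrClosed` with extra context via `%w`. Below is a minimal, self-contained sketch of the difference; the `errClosed` sentinel and `put` helper are hypothetical stand-ins for illustration, not the database package's actual code.

package example_test

import (
	"errors"
	"fmt"
	"testing"

	"github.com/stretchr/testify/require"
)

// errClosed stands in for a sentinel error such as database.ErrClosed.
var errClosed = errors.New("closed")

// put simulates an operation that wraps the sentinel with extra context
// before returning it, as implementations are free to do.
func put(key []byte) error {
	return fmt.Errorf("put %x: %w", key, errClosed)
}

func TestErrorIsMatchesWrappedSentinel(t *testing.T) {
	err := put([]byte{0x01})

	// require.ErrorIs unwraps the chain via errors.Is, so the wrapped
	// sentinel still matches.
	require.ErrorIs(t, err, errClosed)

	// require.Equal(t, errClosed, err) would fail here: the returned
	// error is a distinct value that merely wraps errClosed.
	require.NotEqual(t, errClosed, err)
}

The same reasoning applies to the iterator assertions converted above: `iterator.Error()` only has to satisfy `errors.Is(err, ErrClosed)`, not be the identical error value.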
From b4aa566de40711a0f3a35e11718795fb02a6a98c Mon Sep 17 00:00:00 2001 From: dhrubabasu <7675102+dhrubabasu@users.noreply.github.com> Date: Sun, 14 May 2023 13:27:50 -0400 Subject: [PATCH 46/79] nit --- database/test_database.go | 36 ++++++++++++++++++++++++------------ 1 file changed, 24 insertions(+), 12 deletions(-) diff --git a/database/test_database.go b/database/test_database.go index bf974be9c7f..018d4b3fea7 100644 --- a/database/test_database.go +++ b/database/test_database.go @@ -178,9 +178,12 @@ func TestSimpleKeyValueClosed(t *testing.T, db Database) { _, err = db.Get(key) require.ErrorIs(err, ErrClosed) - require.ErrorIs(db.Put(key, value), ErrClosed) - require.ErrorIs(db.Delete(key), ErrClosed) - require.ErrorIs(db.Close(), ErrClosed) + err = db.Put(key, value) + require.ErrorIs(err, ErrClosed) + err = db.Delete(key) + require.ErrorIs(err, ErrClosed) + err = db.Close() + require.ErrorIs(err, ErrClosed) } // TestMemorySafetyDatabase ensures it is safe to modify a key after passing it @@ -232,7 +235,8 @@ func TestNewBatchClosed(t *testing.T, db Database) { require.NoError(batch.Put(key, value)) require.Positive(batch.Size()) - require.ErrorIs(batch.Write(), ErrClosed) + err := batch.Write() + require.ErrorIs(err, ErrClosed) } // TestBatchPut tests to make sure that batched writes work as expected. @@ -264,7 +268,8 @@ func TestBatchPut(t *testing.T, db Database) { require.NoError(batch.Put(key, value)) require.NoError(db.Close()) - require.ErrorIs(batch.Write(), ErrClosed) + err = batch.Write() + require.ErrorIs(err, ErrClosed) } // TestBatchDelete tests to make sure that batched deletes work as expected. @@ -489,7 +494,8 @@ func TestBatchReplayPropagateError(t *testing.T, db Database) { gomock.InOrder( mockBatch.EXPECT().Put(key1, value1).Return(ErrClosed).Times(1), ) - require.ErrorIs(batch.Replay(mockBatch), ErrClosed) + err := batch.Replay(mockBatch) + require.ErrorIs(err, ErrClosed) mockBatch = NewMockBatch(ctrl) gomock.InOrder( @@ -804,7 +810,8 @@ func TestIteratorClosed(t *testing.T, db Database) { require.False(iterator.Next()) require.Nil(iterator.Key()) require.Nil(iterator.Value()) - require.ErrorIs(iterator.Error(), ErrClosed) + err := iterator.Error() + require.ErrorIs(err, ErrClosed) } { @@ -816,7 +823,8 @@ func TestIteratorClosed(t *testing.T, db Database) { require.False(iterator.Next()) require.Nil(iterator.Key()) require.Nil(iterator.Value()) - require.ErrorIs(iterator.Error(), ErrClosed) + err := iterator.Error() + require.ErrorIs(err, ErrClosed) } { @@ -828,7 +836,8 @@ func TestIteratorClosed(t *testing.T, db Database) { require.False(iterator.Next()) require.Nil(iterator.Key()) require.Nil(iterator.Value()) - require.ErrorIs(iterator.Error(), ErrClosed) + err := iterator.Error() + require.ErrorIs(err, ErrClosed) } { @@ -840,7 +849,8 @@ func TestIteratorClosed(t *testing.T, db Database) { require.False(iterator.Next()) require.Nil(iterator.Key()) require.Nil(iterator.Value()) - require.ErrorIs(iterator.Error(), ErrClosed) + err := iterator.Error() + require.ErrorIs(err, ErrClosed) } } @@ -877,7 +887,8 @@ func TestIteratorError(t *testing.T, db Database) { require.False(iterator.Next()) require.Nil(iterator.Key()) require.Nil(iterator.Value()) - require.ErrorIs(iterator.Error(), ErrClosed) + err := iterator.Error() + require.ErrorIs(err, ErrClosed) } // TestIteratorErrorAfterRelease tests to make sure that an iterator that was @@ -899,7 +910,8 @@ func TestIteratorErrorAfterRelease(t *testing.T, db Database) { require.False(iterator.Next()) 
require.Nil(iterator.Key()) require.Nil(iterator.Value()) - require.ErrorIs(iterator.Error(), ErrClosed) + err := iterator.Error() + require.ErrorIs(err, ErrClosed) } // TestCompactNoPanic tests to make sure compact never panics. From 1013be576f99e156e2e8296dffe0117795d31f75 Mon Sep 17 00:00:00 2001 From: dhrubabasu <7675102+dhrubabasu@users.noreply.github.com> Date: Sun, 14 May 2023 13:36:56 -0400 Subject: [PATCH 47/79] export errors --- codec/manager.go | 6 +++--- ids/aliases.go | 4 ++-- utils/formatting/address/address.go | 4 ++-- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/codec/manager.go b/codec/manager.go index 96d763520b0..d1180e5eee8 100644 --- a/codec/manager.go +++ b/codec/manager.go @@ -23,13 +23,13 @@ const ( ) var ( - ErrUnknownVersion = errors.New("unknown codec version") + ErrUnknownVersion = errors.New("unknown codec version") + ErrCantUnpackVersion = errors.New("couldn't unpack codec version") errMarshalNil = errors.New("can't marshal nil pointer or interface") errUnmarshalNil = errors.New("can't unmarshal nil") errUnmarshalTooBig = errors.New("byte array exceeds maximum length") errCantPackVersion = errors.New("couldn't pack codec version") - errCantUnpackVersion = errors.New("couldn't unpack codec version") errDuplicatedVersion = errors.New("duplicated codec version") ) @@ -147,7 +147,7 @@ func (m *manager) Unmarshal(bytes []byte, dest interface{}) (uint16, error) { } version := p.UnpackShort() if p.Errored() { // Make sure the codec version is correct - return 0, errCantUnpackVersion + return 0, ErrCantUnpackVersion } m.lock.RLock() diff --git a/ids/aliases.go b/ids/aliases.go index f765a0c4286..c7958e1c425 100644 --- a/ids/aliases.go +++ b/ids/aliases.go @@ -10,7 +10,7 @@ import ( ) var ( - errNoIDWithAlias = errors.New("there is no ID with alias") + ErrNoIDWithAlias = errors.New("there is no ID with alias") errNoAliasForID = errors.New("there is no alias for ID") errAliasAlreadyMapped = errors.New("alias already mapped to an ID") ) @@ -68,7 +68,7 @@ func (a *aliaser) Lookup(alias string) (ID, error) { if id, ok := a.dealias[alias]; ok { return id, nil } - return ID{}, fmt.Errorf("%w: %s", errNoIDWithAlias, alias) + return ID{}, fmt.Errorf("%w: %s", ErrNoIDWithAlias, alias) } func (a *aliaser) PrimaryAlias(id ID) (string, error) { diff --git a/utils/formatting/address/address.go b/utils/formatting/address/address.go index c0c6cc24868..321fe692bf5 100644 --- a/utils/formatting/address/address.go +++ b/utils/formatting/address/address.go @@ -14,7 +14,7 @@ import ( const addressSep = "-" var ( - errNoSeparator = errors.New("no separator found in address") + ErrNoSeparator = errors.New("no separator found in address") errBits5To8 = errors.New("unable to convert address from 5-bit to 8-bit formatting") errBits8To5 = errors.New("unable to convert address from 8-bit to 5-bit formatting") ) @@ -25,7 +25,7 @@ var ( func Parse(addrStr string) (string, string, []byte, error) { addressParts := strings.SplitN(addrStr, addressSep, 2) if len(addressParts) < 2 { - return "", "", nil, errNoSeparator + return "", "", nil, ErrNoSeparator } chainID := addressParts[0] rawAddr := addressParts[1] From 689082f09b5a0bc156f473002a203ace8f3e72e5 Mon Sep 17 00:00:00 2001 From: dhrubabasu <7675102+dhrubabasu@users.noreply.github.com> Date: Sun, 14 May 2023 13:38:56 -0400 Subject: [PATCH 48/79] nit --- database/test_database.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/database/test_database.go b/database/test_database.go index 018d4b3fea7..e6ce5eb121f 100644 --- 
a/database/test_database.go +++ b/database/test_database.go @@ -180,8 +180,10 @@ func TestSimpleKeyValueClosed(t *testing.T, db Database) { err = db.Put(key, value) require.ErrorIs(err, ErrClosed) + err = db.Delete(key) require.ErrorIs(err, ErrClosed) + err = db.Close() require.ErrorIs(err, ErrClosed) } From be063bb3b5ce89fd39f09674150da6d29bc7a9fc Mon Sep 17 00:00:00 2001 From: dhrubabasu <7675102+dhrubabasu@users.noreply.github.com> Date: Mon, 15 May 2023 06:34:38 -0400 Subject: [PATCH 49/79] nit --- vms/avm/service_test.go | 34 +++++++++++++++++----------------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/vms/avm/service_test.go b/vms/avm/service_test.go index 17966c324df..312164e49b3 100644 --- a/vms/avm/service_test.go +++ b/vms/avm/service_test.go @@ -2400,7 +2400,7 @@ func TestServiceGetBlock(t *testing.T) { type test struct { name string - serviceAndExpectedBlockFunc func(ctrl *gomock.Controller) (*Service, interface{}) + serviceAndExpectedBlockFunc func(t *testing.T, ctrl *gomock.Controller) (*Service, interface{}) encoding formatting.Encoding expectedErr error } @@ -2408,7 +2408,7 @@ func TestServiceGetBlock(t *testing.T) { tests := []test{ { name: "chain not linearized", - serviceAndExpectedBlockFunc: func(ctrl *gomock.Controller) (*Service, interface{}) { + serviceAndExpectedBlockFunc: func(_ *testing.T, ctrl *gomock.Controller) (*Service, interface{}) { return &Service{ vm: &VM{ ctx: &snow.Context{ @@ -2422,7 +2422,7 @@ func TestServiceGetBlock(t *testing.T) { }, { name: "block not found", - serviceAndExpectedBlockFunc: func(ctrl *gomock.Controller) (*Service, interface{}) { + serviceAndExpectedBlockFunc: func(_ *testing.T, ctrl *gomock.Controller) (*Service, interface{}) { manager := executor.NewMockManager(ctrl) manager.EXPECT().GetStatelessBlock(blockID).Return(nil, database.ErrNotFound) return &Service{ @@ -2439,7 +2439,7 @@ func TestServiceGetBlock(t *testing.T) { }, { name: "JSON format", - serviceAndExpectedBlockFunc: func(ctrl *gomock.Controller) (*Service, interface{}) { + serviceAndExpectedBlockFunc: func(_ *testing.T, ctrl *gomock.Controller) (*Service, interface{}) { block := blocks.NewMockBlock(ctrl) block.EXPECT().InitCtx(gomock.Any()) block.EXPECT().Txs().Return(nil) @@ -2460,7 +2460,7 @@ func TestServiceGetBlock(t *testing.T) { }, { name: "hex format", - serviceAndExpectedBlockFunc: func(ctrl *gomock.Controller) (*Service, interface{}) { + serviceAndExpectedBlockFunc: func(t *testing.T, ctrl *gomock.Controller) (*Service, interface{}) { block := blocks.NewMockBlock(ctrl) blockBytes := []byte("hi mom") block.EXPECT().Bytes().Return(blockBytes) @@ -2484,7 +2484,7 @@ func TestServiceGetBlock(t *testing.T) { }, { name: "hexc format", - serviceAndExpectedBlockFunc: func(ctrl *gomock.Controller) (*Service, interface{}) { + serviceAndExpectedBlockFunc: func(t *testing.T, ctrl *gomock.Controller) (*Service, interface{}) { block := blocks.NewMockBlock(ctrl) blockBytes := []byte("hi mom") block.EXPECT().Bytes().Return(blockBytes) @@ -2508,7 +2508,7 @@ func TestServiceGetBlock(t *testing.T) { }, { name: "hexnc format", - serviceAndExpectedBlockFunc: func(ctrl *gomock.Controller) (*Service, interface{}) { + serviceAndExpectedBlockFunc: func(t *testing.T, ctrl *gomock.Controller) (*Service, interface{}) { block := blocks.NewMockBlock(ctrl) blockBytes := []byte("hi mom") block.EXPECT().Bytes().Return(blockBytes) @@ -2536,7 +2536,7 @@ func TestServiceGetBlock(t *testing.T) { t.Run(tt.name, func(t *testing.T) { require := require.New(t) - service, expected 
:= tt.serviceAndExpectedBlockFunc(ctrl) + service, expected := tt.serviceAndExpectedBlockFunc(t, ctrl) args := &api.GetBlockArgs{ BlockID: blockID, @@ -2563,7 +2563,7 @@ func TestServiceGetBlockByHeight(t *testing.T) { type test struct { name string - serviceAndExpectedBlockFunc func(ctrl *gomock.Controller) (*Service, interface{}) + serviceAndExpectedBlockFunc func(t *testing.T, ctrl *gomock.Controller) (*Service, interface{}) encoding formatting.Encoding expectedErr error } @@ -2571,7 +2571,7 @@ func TestServiceGetBlockByHeight(t *testing.T) { tests := []test{ { name: "chain not linearized", - serviceAndExpectedBlockFunc: func(ctrl *gomock.Controller) (*Service, interface{}) { + serviceAndExpectedBlockFunc: func(_ *testing.T, ctrl *gomock.Controller) (*Service, interface{}) { return &Service{ vm: &VM{ ctx: &snow.Context{ @@ -2585,7 +2585,7 @@ func TestServiceGetBlockByHeight(t *testing.T) { }, { name: "block height not found", - serviceAndExpectedBlockFunc: func(ctrl *gomock.Controller) (*Service, interface{}) { + serviceAndExpectedBlockFunc: func(_ *testing.T, ctrl *gomock.Controller) (*Service, interface{}) { state := states.NewMockState(ctrl) state.EXPECT().GetBlockID(blockHeight).Return(ids.Empty, database.ErrNotFound) @@ -2605,7 +2605,7 @@ func TestServiceGetBlockByHeight(t *testing.T) { }, { name: "block not found", - serviceAndExpectedBlockFunc: func(ctrl *gomock.Controller) (*Service, interface{}) { + serviceAndExpectedBlockFunc: func(_ *testing.T, ctrl *gomock.Controller) (*Service, interface{}) { state := states.NewMockState(ctrl) state.EXPECT().GetBlockID(blockHeight).Return(blockID, nil) @@ -2626,7 +2626,7 @@ func TestServiceGetBlockByHeight(t *testing.T) { }, { name: "JSON format", - serviceAndExpectedBlockFunc: func(ctrl *gomock.Controller) (*Service, interface{}) { + serviceAndExpectedBlockFunc: func(_ *testing.T, ctrl *gomock.Controller) (*Service, interface{}) { block := blocks.NewMockBlock(ctrl) block.EXPECT().InitCtx(gomock.Any()) block.EXPECT().Txs().Return(nil) @@ -2651,7 +2651,7 @@ func TestServiceGetBlockByHeight(t *testing.T) { }, { name: "hex format", - serviceAndExpectedBlockFunc: func(ctrl *gomock.Controller) (*Service, interface{}) { + serviceAndExpectedBlockFunc: func(t *testing.T, ctrl *gomock.Controller) (*Service, interface{}) { block := blocks.NewMockBlock(ctrl) blockBytes := []byte("hi mom") block.EXPECT().Bytes().Return(blockBytes) @@ -2679,7 +2679,7 @@ func TestServiceGetBlockByHeight(t *testing.T) { }, { name: "hexc format", - serviceAndExpectedBlockFunc: func(ctrl *gomock.Controller) (*Service, interface{}) { + serviceAndExpectedBlockFunc: func(t *testing.T, ctrl *gomock.Controller) (*Service, interface{}) { block := blocks.NewMockBlock(ctrl) blockBytes := []byte("hi mom") block.EXPECT().Bytes().Return(blockBytes) @@ -2707,7 +2707,7 @@ func TestServiceGetBlockByHeight(t *testing.T) { }, { name: "hexnc format", - serviceAndExpectedBlockFunc: func(ctrl *gomock.Controller) (*Service, interface{}) { + serviceAndExpectedBlockFunc: func(t *testing.T, ctrl *gomock.Controller) (*Service, interface{}) { block := blocks.NewMockBlock(ctrl) blockBytes := []byte("hi mom") block.EXPECT().Bytes().Return(blockBytes) @@ -2739,7 +2739,7 @@ func TestServiceGetBlockByHeight(t *testing.T) { t.Run(tt.name, func(t *testing.T) { require := require.New(t) - service, expected := tt.serviceAndExpectedBlockFunc(ctrl) + service, expected := tt.serviceAndExpectedBlockFunc(t, ctrl) args := &api.GetBlockByHeightArgs{ Height: json.Uint64(blockHeight), From 
463b2da8b11488e111c4c93f122bbd8c7c11ff77 Mon Sep 17 00:00:00 2001 From: dhrubabasu <7675102+dhrubabasu@users.noreply.github.com> Date: Mon, 15 May 2023 06:40:56 -0400 Subject: [PATCH 50/79] nit --- vms/avm/service_test.go | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/vms/avm/service_test.go b/vms/avm/service_test.go index 312164e49b3..960d2c63822 100644 --- a/vms/avm/service_test.go +++ b/vms/avm/service_test.go @@ -2748,10 +2748,11 @@ func TestServiceGetBlockByHeight(t *testing.T) { reply := &api.GetBlockResponse{} err := service.GetBlockByHeight(nil, args, reply) require.ErrorIs(err, tt.expectedErr) - if err == nil { - require.Equal(tt.encoding, reply.Encoding) - require.Equal(expected, reply.Block) + if tt.expectedErr != nil { + return } + require.Equal(tt.encoding, reply.Encoding) + require.Equal(expected, reply.Block) }) } } From 6709b1ca69b4180c119477e62baf966288ce1ad2 Mon Sep 17 00:00:00 2001 From: Dhruba Basu <7675102+dhrubabasu@users.noreply.github.com> Date: Tue, 16 May 2023 12:34:16 -0400 Subject: [PATCH 51/79] Update scripts/lint.sh Signed-off-by: Dhruba Basu <7675102+dhrubabasu@users.noreply.github.com> --- scripts/lint.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/lint.sh b/scripts/lint.sh index f294ac188a5..0a38e06538a 100755 --- a/scripts/lint.sh +++ b/scripts/lint.sh @@ -108,7 +108,7 @@ function test_require_nil { return 1 fi - if grep -R -zo -P 'require\.ErrorIs.+?nil\)\n' .; then + if grep -R -o -P 'require\.ErrorIs.+?nil\)' .; then echo "" echo "Use require.NoError instead of require.ErrorIs when testing for nil error." echo "" From 5d2f1e2d096ad484723ac8b27afec520c811a7fd Mon Sep 17 00:00:00 2001 From: Dhruba Basu <7675102+dhrubabasu@users.noreply.github.com> Date: Tue, 16 May 2023 15:19:04 -0400 Subject: [PATCH 52/79] Update scripts/lint.sh Signed-off-by: Dhruba Basu <7675102+dhrubabasu@users.noreply.github.com> --- scripts/lint.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/lint.sh b/scripts/lint.sh index ac9c1e4c365..8051ff3e57d 100755 --- a/scripts/lint.sh +++ b/scripts/lint.sh @@ -85,7 +85,7 @@ function test_require_len_zero { } function test_require_equal_len { - if grep -R -o -P 'require\.Equal\((t, )?.*, len\([^,]*$' .; then + if grep -R -o -P 'require\.Equal\(.*, len\(' .; then echo "" echo "Use require.Len instead of require.Equal when testing for length." 
echo "" From f4f0c1536305c3f6836682bea9c8116db6e7958e Mon Sep 17 00:00:00 2001 From: dhrubabasu <7675102+dhrubabasu@users.noreply.github.com> Date: Tue, 16 May 2023 15:25:45 -0400 Subject: [PATCH 53/79] nit --- utils/window/window_test.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/utils/window/window_test.go b/utils/window/window_test.go index 9e36658850b..ad35ea1eafb 100644 --- a/utils/window/window_test.go +++ b/utils/window/window_test.go @@ -55,7 +55,8 @@ func TestAdd(t *testing.T) { window.Add(test.newlyAdded) - require.Equal(t, len(test.window)+1, window.Length()) + expectedLen := len(test.window) + 1 + require.Equal(t, expectedLen, window.Length()) oldest, ok := window.Oldest() require.Equal(t, test.expectedOldest, oldest) require.True(t, ok) From 8fb3d2af2102c26988318a38bead7367e038a179 Mon Sep 17 00:00:00 2001 From: dhrubabasu <7675102+dhrubabasu@users.noreply.github.com> Date: Tue, 16 May 2023 15:39:49 -0400 Subject: [PATCH 54/79] increase negative diff woooooo --- snow/engine/avalanche/vertex/test_builder.go | 3 +- snow/engine/avalanche/vertex/test_parser.go | 3 +- snow/engine/avalanche/vertex/test_storage.go | 9 +- snow/engine/avalanche/vertex/test_vm.go | 12 +- snow/engine/common/queue/test_job.go | 15 +-- snow/engine/common/queue/test_parser.go | 3 +- snow/engine/common/test_bootstrap_tracker.go | 9 +- snow/engine/common/test_bootstrapable.go | 6 +- snow/engine/common/test_engine.go | 120 ++++++------------ snow/engine/common/test_sender.go | 69 ++++------ snow/engine/common/test_timer.go | 3 +- snow/engine/common/test_vm.go | 48 +++---- snow/engine/snowman/block/test_batched_vm.go | 6 +- .../snowman/block/test_height_indexed_vm.go | 6 +- .../snowman/block/test_state_summary.go | 3 +- .../snowman/block/test_state_syncable_vm.go | 15 +-- snow/engine/snowman/block/test_vm.go | 15 +-- snow/engine/snowman/test_engine.go | 3 +- snow/networking/benchlist/test_benchable.go | 6 +- snow/validators/test_state.go | 12 +- 20 files changed, 122 insertions(+), 244 deletions(-) diff --git a/snow/engine/avalanche/vertex/test_builder.go b/snow/engine/avalanche/vertex/test_builder.go index c8203138e48..0bd63b26bcb 100644 --- a/snow/engine/avalanche/vertex/test_builder.go +++ b/snow/engine/avalanche/vertex/test_builder.go @@ -35,8 +35,7 @@ func (b *TestBuilder) BuildStopVtx(ctx context.Context, parentIDs []ids.ID) (ava return b.BuildStopVtxF(ctx, parentIDs) } if b.CantBuildVtx && b.T != nil { - require := require.New(b.T) - require.FailNow(errBuild.Error()) + require.FailNow(b.T, errBuild.Error()) } return nil, errBuild } diff --git a/snow/engine/avalanche/vertex/test_parser.go b/snow/engine/avalanche/vertex/test_parser.go index 56a2fdae2fe..3ca17b3440f 100644 --- a/snow/engine/avalanche/vertex/test_parser.go +++ b/snow/engine/avalanche/vertex/test_parser.go @@ -34,8 +34,7 @@ func (p *TestParser) ParseVtx(ctx context.Context, b []byte) (avalanche.Vertex, return p.ParseVtxF(ctx, b) } if p.CantParseVtx && p.T != nil { - require := require.New(p.T) - require.FailNow(errParse.Error()) + require.FailNow(p.T, errParse.Error()) } return nil, errParse } diff --git a/snow/engine/avalanche/vertex/test_storage.go b/snow/engine/avalanche/vertex/test_storage.go index 9fc4b3d2274..b5250ee1fca 100644 --- a/snow/engine/avalanche/vertex/test_storage.go +++ b/snow/engine/avalanche/vertex/test_storage.go @@ -40,8 +40,7 @@ func (s *TestStorage) GetVtx(ctx context.Context, vtxID ids.ID) (avalanche.Verte return s.GetVtxF(ctx, vtxID) } if s.CantGetVtx && s.T != nil { - require := 
require.New(s.T) - require.FailNow(errGet.Error()) + require.FailNow(s.T, errGet.Error()) } return nil, errGet } @@ -51,8 +50,7 @@ func (s *TestStorage) Edge(ctx context.Context) []ids.ID { return s.EdgeF(ctx) } if s.CantEdge && s.T != nil { - require := require.New(s.T) - require.FailNow(errEdge.Error()) + require.FailNow(s.T, errEdge.Error()) } return nil } @@ -62,8 +60,7 @@ func (s *TestStorage) StopVertexAccepted(ctx context.Context) (bool, error) { return s.StopVertexAcceptedF(ctx) } if s.CantStopVertexAccepted && s.T != nil { - require := require.New(s.T) - require.FailNow(errStopVertexAccepted.Error()) + require.FailNow(s.T, errStopVertexAccepted.Error()) } return false, nil } diff --git a/snow/engine/avalanche/vertex/test_vm.go b/snow/engine/avalanche/vertex/test_vm.go index 1a3a346db86..06ba509e048 100644 --- a/snow/engine/avalanche/vertex/test_vm.go +++ b/snow/engine/avalanche/vertex/test_vm.go @@ -45,8 +45,7 @@ func (vm *TestVM) Linearize(ctx context.Context, stopVertexID ids.ID) error { return vm.LinearizeF(ctx, stopVertexID) } if vm.CantLinearize && vm.T != nil { - require := require.New(vm.T) - require.FailNow(errLinearize.Error()) + require.FailNow(vm.T, errLinearize.Error()) } return errLinearize } @@ -56,8 +55,7 @@ func (vm *TestVM) PendingTxs(ctx context.Context) []snowstorm.Tx { return vm.PendingTxsF(ctx) } if vm.CantPendingTxs && vm.T != nil { - require := require.New(vm.T) - require.FailNow(errPending.Error()) + require.FailNow(vm.T, errPending.Error()) } return nil } @@ -67,8 +65,7 @@ func (vm *TestVM) ParseTx(ctx context.Context, b []byte) (snowstorm.Tx, error) { return vm.ParseTxF(ctx, b) } if vm.CantParse && vm.T != nil { - require := require.New(vm.T) - require.FailNow(errParse.Error()) + require.FailNow(vm.T, errParse.Error()) } return nil, errParse } @@ -78,8 +75,7 @@ func (vm *TestVM) GetTx(ctx context.Context, txID ids.ID) (snowstorm.Tx, error) return vm.GetTxF(ctx, txID) } if vm.CantGet && vm.T != nil { - require := require.New(vm.T) - require.FailNow(errGet.Error()) + require.FailNow(vm.T, errGet.Error()) } return nil, errGet } diff --git a/snow/engine/common/queue/test_job.go b/snow/engine/common/queue/test_job.go index 66ff7990011..98ea33614b5 100644 --- a/snow/engine/common/queue/test_job.go +++ b/snow/engine/common/queue/test_job.go @@ -49,8 +49,7 @@ func (j *TestJob) ID() ids.ID { return j.IDF() } if j.CantID && j.T != nil { - require := require.New(j.T) - require.FailNow("Unexpectedly called ID") + require.FailNow(j.T, "Unexpectedly called ID") } return ids.ID{} } @@ -60,8 +59,7 @@ func (j *TestJob) MissingDependencies(ctx context.Context) (set.Set[ids.ID], err return j.MissingDependenciesF(ctx) } if j.CantMissingDependencies && j.T != nil { - require := require.New(j.T) - require.FailNow("Unexpectedly called MissingDependencies") + require.FailNow(j.T, "Unexpectedly called MissingDependencies") } return set.Set[ids.ID]{}, nil } @@ -71,8 +69,7 @@ func (j *TestJob) Execute(ctx context.Context) error { return j.ExecuteF(ctx) } if j.CantExecute && j.T != nil { - require := require.New(j.T) - require.FailNow(errExecute.Error()) + require.FailNow(j.T, errExecute.Error()) } return errExecute } @@ -82,8 +79,7 @@ func (j *TestJob) Bytes() []byte { return j.BytesF() } if j.CantBytes && j.T != nil { - require := require.New(j.T) - require.FailNow("Unexpectedly called Bytes") + require.FailNow(j.T, "Unexpectedly called Bytes") } return nil } @@ -93,8 +89,7 @@ func (j *TestJob) HasMissingDependencies(ctx context.Context) (bool, error) { return 
j.HasMissingDependenciesF(ctx) } if j.CantHasMissingDependencies && j.T != nil { - require := require.New(j.T) - require.FailNow(errHasMissingDependencies.Error()) + require.FailNow(j.T, errHasMissingDependencies.Error()) } return false, errHasMissingDependencies } diff --git a/snow/engine/common/queue/test_parser.go b/snow/engine/common/queue/test_parser.go index d71f9730396..85a079cc143 100644 --- a/snow/engine/common/queue/test_parser.go +++ b/snow/engine/common/queue/test_parser.go @@ -31,8 +31,7 @@ func (p *TestParser) Parse(ctx context.Context, b []byte) (Job, error) { return p.ParseF(ctx, b) } if p.CantParse && p.T != nil { - require := require.New(p.T) - require.FailNow(errParse.Error()) + require.FailNow(p.T, errParse.Error()) } return nil, errParse } diff --git a/snow/engine/common/test_bootstrap_tracker.go b/snow/engine/common/test_bootstrap_tracker.go index 3e6670cb40b..ba377b39dce 100644 --- a/snow/engine/common/test_bootstrap_tracker.go +++ b/snow/engine/common/test_bootstrap_tracker.go @@ -38,8 +38,7 @@ func (s *BootstrapTrackerTest) IsBootstrapped() bool { return s.IsBootstrappedF() } if s.CantIsBootstrapped && s.T != nil { - require := require.New(s.T) - require.FailNow("Unexpectedly called IsBootstrapped") + require.FailNow(s.T, "Unexpectedly called IsBootstrapped") } return false } @@ -51,8 +50,7 @@ func (s *BootstrapTrackerTest) Bootstrapped(chainID ids.ID) { if s.BootstrappedF != nil { s.BootstrappedF(chainID) } else if s.CantBootstrapped && s.T != nil { - require := require.New(s.T) - require.FailNow("Unexpectedly called Bootstrapped") + require.FailNow(s.T, "Unexpectedly called Bootstrapped") } } @@ -60,8 +58,7 @@ func (s *BootstrapTrackerTest) OnBootstrapCompleted() chan struct{} { if s.OnBootstrapCompletedF != nil { return s.OnBootstrapCompletedF() } else if s.CantOnBootstrapCompleted && s.T != nil { - require := require.New(s.T) - require.FailNow("Unexpectedly called OnBootstrapCompleted") + require.FailNow(s.T, "Unexpectedly called OnBootstrapCompleted") } return nil } diff --git a/snow/engine/common/test_bootstrapable.go b/snow/engine/common/test_bootstrapable.go index c9829ab793d..bf9cc5ed8e6 100644 --- a/snow/engine/common/test_bootstrapable.go +++ b/snow/engine/common/test_bootstrapable.go @@ -39,8 +39,7 @@ func (b *BootstrapableTest) Clear() error { if b.ClearF != nil { return b.ClearF() } else if b.CantClear && b.T != nil { - require := require.New(b.T) - require.FailNow(errClear.Error()) + require.FailNow(b.T, errClear.Error()) } return errClear } @@ -49,8 +48,7 @@ func (b *BootstrapableTest) ForceAccepted(ctx context.Context, containerIDs []id if b.ForceAcceptedF != nil { return b.ForceAcceptedF(ctx, containerIDs) } else if b.CantForceAccepted && b.T != nil { - require := require.New(b.T) - require.FailNow(errForceAccepted.Error()) + require.FailNow(b.T, errForceAccepted.Error()) } return errForceAccepted } diff --git a/snow/engine/common/test_engine.go b/snow/engine/common/test_engine.go index e9f51b4652b..76fefce93c1 100644 --- a/snow/engine/common/test_engine.go +++ b/snow/engine/common/test_engine.go @@ -189,8 +189,7 @@ func (e *EngineTest) Start(ctx context.Context, startReqID uint32) error { return nil } if e.T != nil { - require := require.New(e.T) - require.FailNow(errStart.Error()) + require.FailNow(e.T, errStart.Error()) } return errStart } @@ -203,8 +202,7 @@ func (e *EngineTest) Context() *snow.ConsensusContext { return nil } if e.T != nil { - require := require.New(e.T) - require.FailNow("Unexpectedly called Context") + require.FailNow(e.T, 
"Unexpectedly called Context") } return nil } @@ -217,8 +215,7 @@ func (e *EngineTest) Timeout(ctx context.Context) error { return nil } if e.T != nil { - require := require.New(e.T) - require.FailNow(errTimeout.Error()) + require.FailNow(e.T, errTimeout.Error()) } return errTimeout } @@ -231,8 +228,7 @@ func (e *EngineTest) Gossip(ctx context.Context) error { return nil } if e.T != nil { - require := require.New(e.T) - require.FailNow(errGossip.Error()) + require.FailNow(e.T, errGossip.Error()) } return errGossip } @@ -246,8 +242,7 @@ func (e *EngineTest) Halt(ctx context.Context) { return } if e.T != nil { - require := require.New(e.T) - require.FailNow("Unexpectedly called Halt") + require.FailNow(e.T, "Unexpectedly called Halt") } } @@ -259,8 +254,7 @@ func (e *EngineTest) Shutdown(ctx context.Context) error { return nil } if e.T != nil { - require := require.New(e.T) - require.FailNow(errShutdown.Error()) + require.FailNow(e.T, errShutdown.Error()) } return errShutdown } @@ -273,8 +267,7 @@ func (e *EngineTest) Notify(ctx context.Context, msg Message) error { return nil } if e.T != nil { - require := require.New(e.T) - require.FailNow(errNotify.Error()) + require.FailNow(e.T, errNotify.Error()) } return errNotify } @@ -287,8 +280,7 @@ func (e *EngineTest) GetStateSummaryFrontier(ctx context.Context, validatorID id return nil } if e.T != nil { - require := require.New(e.T) - require.FailNow(errGetStateSummaryFrontier.Error()) + require.FailNow(e.T, errGetStateSummaryFrontier.Error()) } return errGetStateSummaryFrontier } @@ -301,8 +293,7 @@ func (e *EngineTest) StateSummaryFrontier(ctx context.Context, validatorID ids.N return nil } if e.T != nil { - require := require.New(e.T) - require.FailNow(errStateSummaryFrontier.Error()) + require.FailNow(e.T, errStateSummaryFrontier.Error()) } return errStateSummaryFrontier } @@ -315,8 +306,7 @@ func (e *EngineTest) GetStateSummaryFrontierFailed(ctx context.Context, validato return nil } if e.T != nil { - require := require.New(e.T) - require.FailNow(errGetStateSummaryFrontierFailed.Error()) + require.FailNow(e.T, errGetStateSummaryFrontierFailed.Error()) } return errGetStateSummaryFrontierFailed } @@ -329,8 +319,7 @@ func (e *EngineTest) GetAcceptedStateSummary(ctx context.Context, validatorID id return nil } if e.T != nil { - require := require.New(e.T) - require.FailNow(errGetAcceptedStateSummary.Error()) + require.FailNow(e.T, errGetAcceptedStateSummary.Error()) } return errGetAcceptedStateSummary } @@ -343,8 +332,7 @@ func (e *EngineTest) AcceptedStateSummary(ctx context.Context, validatorID ids.N return nil } if e.T != nil { - require := require.New(e.T) - require.FailNow(errAcceptedStateSummary.Error()) + require.FailNow(e.T, errAcceptedStateSummary.Error()) } return errAcceptedStateSummary } @@ -357,8 +345,7 @@ func (e *EngineTest) GetAcceptedStateSummaryFailed(ctx context.Context, validato return nil } if e.T != nil { - require := require.New(e.T) - require.FailNow(errGetAcceptedStateSummaryFailed.Error()) + require.FailNow(e.T, errGetAcceptedStateSummaryFailed.Error()) } return errGetAcceptedStateSummaryFailed } @@ -371,8 +358,7 @@ func (e *EngineTest) GetAcceptedFrontier(ctx context.Context, nodeID ids.NodeID, return nil } if e.T != nil { - require := require.New(e.T) - require.FailNow(errGetAcceptedFrontier.Error()) + require.FailNow(e.T, errGetAcceptedFrontier.Error()) } return errGetAcceptedFrontier } @@ -385,8 +371,7 @@ func (e *EngineTest) GetAcceptedFrontierFailed(ctx context.Context, nodeID ids.N return nil } if e.T != nil { - 
require := require.New(e.T) - require.FailNow(errGetAcceptedFrontierFailed.Error()) + require.FailNow(e.T, errGetAcceptedFrontierFailed.Error()) } return errGetAcceptedFrontierFailed } @@ -399,8 +384,7 @@ func (e *EngineTest) AcceptedFrontier(ctx context.Context, nodeID ids.NodeID, re return nil } if e.T != nil { - require := require.New(e.T) - require.FailNow(errAcceptedFrontier.Error()) + require.FailNow(e.T, errAcceptedFrontier.Error()) } return errAcceptedFrontier } @@ -413,8 +397,7 @@ func (e *EngineTest) GetAccepted(ctx context.Context, nodeID ids.NodeID, request return nil } if e.T != nil { - require := require.New(e.T) - require.FailNow(errGetAccepted.Error()) + require.FailNow(e.T, errGetAccepted.Error()) } return errGetAccepted } @@ -427,8 +410,7 @@ func (e *EngineTest) GetAcceptedFailed(ctx context.Context, nodeID ids.NodeID, r return nil } if e.T != nil { - require := require.New(e.T) - require.FailNow(errGetAcceptedFailed.Error()) + require.FailNow(e.T, errGetAcceptedFailed.Error()) } return errGetAcceptedFailed } @@ -441,8 +423,7 @@ func (e *EngineTest) Accepted(ctx context.Context, nodeID ids.NodeID, requestID return nil } if e.T != nil { - require := require.New(e.T) - require.FailNow(errAccepted.Error()) + require.FailNow(e.T, errAccepted.Error()) } return errAccepted } @@ -455,8 +436,7 @@ func (e *EngineTest) Get(ctx context.Context, nodeID ids.NodeID, requestID uint3 return nil } if e.T != nil { - require := require.New(e.T) - require.FailNow(errGet.Error()) + require.FailNow(e.T, errGet.Error()) } return errGet } @@ -469,8 +449,7 @@ func (e *EngineTest) GetAncestors(ctx context.Context, nodeID ids.NodeID, reques return nil } if e.T != nil { - require := require.New(e.T) - require.FailNow(errGetAncestors.Error()) + require.FailNow(e.T, errGetAncestors.Error()) } return errGetAncestors } @@ -483,8 +462,7 @@ func (e *EngineTest) GetFailed(ctx context.Context, nodeID ids.NodeID, requestID return nil } if e.T != nil { - require := require.New(e.T) - require.FailNow(errGetFailed.Error()) + require.FailNow(e.T, errGetFailed.Error()) } return errGetFailed } @@ -497,8 +475,7 @@ func (e *EngineTest) GetAncestorsFailed(ctx context.Context, nodeID ids.NodeID, return nil } if e.T != nil { - require := require.New(e.T) - require.FailNow(errGetAncestorsFailed.Error()) + require.FailNow(e.T, errGetAncestorsFailed.Error()) } return errGetAncestorsFailed } @@ -511,8 +488,7 @@ func (e *EngineTest) Put(ctx context.Context, nodeID ids.NodeID, requestID uint3 return nil } if e.T != nil { - require := require.New(e.T) - require.FailNow(errPut.Error()) + require.FailNow(e.T, errPut.Error()) } return errPut } @@ -525,8 +501,7 @@ func (e *EngineTest) Ancestors(ctx context.Context, nodeID ids.NodeID, requestID return nil } if e.T != nil { - require := require.New(e.T) - require.FailNow(errAncestors.Error()) + require.FailNow(e.T, errAncestors.Error()) } return errAncestors } @@ -539,8 +514,7 @@ func (e *EngineTest) PushQuery(ctx context.Context, nodeID ids.NodeID, requestID return nil } if e.T != nil { - require := require.New(e.T) - require.FailNow(errPushQuery.Error()) + require.FailNow(e.T, errPushQuery.Error()) } return errPushQuery } @@ -553,8 +527,7 @@ func (e *EngineTest) PullQuery(ctx context.Context, nodeID ids.NodeID, requestID return nil } if e.T != nil { - require := require.New(e.T) - require.FailNow(errPullQuery.Error()) + require.FailNow(e.T, errPullQuery.Error()) } return errPullQuery } @@ -567,8 +540,7 @@ func (e *EngineTest) QueryFailed(ctx context.Context, nodeID ids.NodeID, 
request return nil } if e.T != nil { - require := require.New(e.T) - require.FailNow(errQueryFailed.Error()) + require.FailNow(e.T, errQueryFailed.Error()) } return errQueryFailed } @@ -581,8 +553,7 @@ func (e *EngineTest) CrossChainAppRequest(ctx context.Context, chainID ids.ID, r return nil } if e.T != nil { - require := require.New(e.T) - require.FailNow(errCrossChainAppRequest.Error()) + require.FailNow(e.T, errCrossChainAppRequest.Error()) } return errCrossChainAppRequest } @@ -595,8 +566,7 @@ func (e *EngineTest) CrossChainAppRequestFailed(ctx context.Context, chainID ids return nil } if e.T != nil { - require := require.New(e.T) - require.FailNow(errCrossChainAppRequestFailed.Error()) + require.FailNow(e.T, errCrossChainAppRequestFailed.Error()) } return errCrossChainAppRequestFailed } @@ -609,8 +579,7 @@ func (e *EngineTest) CrossChainAppResponse(ctx context.Context, chainID ids.ID, return nil } if e.T != nil { - require := require.New(e.T) - require.FailNow(errCrossChainAppResponse.Error()) + require.FailNow(e.T, errCrossChainAppResponse.Error()) } return errCrossChainAppResponse } @@ -623,8 +592,7 @@ func (e *EngineTest) AppRequest(ctx context.Context, nodeID ids.NodeID, requestI return nil } if e.T != nil { - require := require.New(e.T) - require.FailNow(errAppRequest.Error()) + require.FailNow(e.T, errAppRequest.Error()) } return errAppRequest } @@ -637,8 +605,7 @@ func (e *EngineTest) AppResponse(ctx context.Context, nodeID ids.NodeID, request return nil } if e.T != nil { - require := require.New(e.T) - require.FailNow(errAppResponse.Error()) + require.FailNow(e.T, errAppResponse.Error()) } return errAppResponse } @@ -651,8 +618,7 @@ func (e *EngineTest) AppRequestFailed(ctx context.Context, nodeID ids.NodeID, re return nil } if e.T != nil { - require := require.New(e.T) - require.FailNow(errAppRequestFailed.Error()) + require.FailNow(e.T, errAppRequestFailed.Error()) } return errAppRequestFailed } @@ -665,8 +631,7 @@ func (e *EngineTest) AppGossip(ctx context.Context, nodeID ids.NodeID, msg []byt return nil } if e.T != nil { - require := require.New(e.T) - require.FailNow(errAppGossip.Error()) + require.FailNow(e.T, errAppGossip.Error()) } return errAppGossip } @@ -679,8 +644,7 @@ func (e *EngineTest) Chits(ctx context.Context, nodeID ids.NodeID, requestID uin return nil } if e.T != nil { - require := require.New(e.T) - require.FailNow(errChits.Error()) + require.FailNow(e.T, errChits.Error()) } return errChits } @@ -693,8 +657,7 @@ func (e *EngineTest) Connected(ctx context.Context, nodeID ids.NodeID, nodeVersi return nil } if e.T != nil { - require := require.New(e.T) - require.FailNow(errConnected.Error()) + require.FailNow(e.T, errConnected.Error()) } return errConnected } @@ -707,8 +670,7 @@ func (e *EngineTest) Disconnected(ctx context.Context, nodeID ids.NodeID) error return nil } if e.T != nil { - require := require.New(e.T) - require.FailNow(errDisconnected.Error()) + require.FailNow(e.T, errDisconnected.Error()) } return errDisconnected } @@ -721,8 +683,7 @@ func (e *EngineTest) HealthCheck(ctx context.Context) (interface{}, error) { return nil, nil } if e.T != nil { - require := require.New(e.T) - require.FailNow(errHealthCheck.Error()) + require.FailNow(e.T, errHealthCheck.Error()) } return nil, errHealthCheck } @@ -735,8 +696,7 @@ func (e *EngineTest) GetVM() VM { return nil } if e.T != nil { - require := require.New(e.T) - require.FailNow("Unexpectedly called GetVM") + require.FailNow(e.T, "Unexpectedly called GetVM") } return nil } diff --git 
a/snow/engine/common/test_sender.go b/snow/engine/common/test_sender.go index 79fda55cc9e..c5b3ca4296e 100644 --- a/snow/engine/common/test_sender.go +++ b/snow/engine/common/test_sender.go @@ -103,8 +103,7 @@ func (s *SenderTest) Accept(ctx *snow.ConsensusContext, containerID ids.ID, cont return nil } if s.T != nil { - require := require.New(s.T) - require.FailNow(errAccept.Error()) + require.FailNow(s.T, errAccept.Error()) } return errAccept } @@ -116,8 +115,7 @@ func (s *SenderTest) SendGetStateSummaryFrontier(ctx context.Context, validatorI if s.SendGetStateSummaryFrontierF != nil { s.SendGetStateSummaryFrontierF(ctx, validatorIDs, requestID) } else if s.CantSendGetStateSummaryFrontier && s.T != nil { - require := require.New(s.T) - require.FailNow("Unexpectedly called SendGetStateSummaryFrontier") + require.FailNow(s.T, "Unexpectedly called SendGetStateSummaryFrontier") } } @@ -128,8 +126,7 @@ func (s *SenderTest) SendStateSummaryFrontier(ctx context.Context, validatorID i if s.SendStateSummaryFrontierF != nil { s.SendStateSummaryFrontierF(ctx, validatorID, requestID, summary) } else if s.CantSendStateSummaryFrontier && s.T != nil { - require := require.New(s.T) - require.FailNow("Unexpectedly called SendStateSummaryFrontier") + require.FailNow(s.T, "Unexpectedly called SendStateSummaryFrontier") } } @@ -140,8 +137,7 @@ func (s *SenderTest) SendGetAcceptedStateSummary(ctx context.Context, nodeIDs se if s.SendGetAcceptedStateSummaryF != nil { s.SendGetAcceptedStateSummaryF(ctx, nodeIDs, requestID, heights) } else if s.CantSendGetAcceptedStateSummary && s.T != nil { - require := require.New(s.T) - require.FailNow("Unexpectedly called SendGetAcceptedStateSummaryF") + require.FailNow(s.T, "Unexpectedly called SendGetAcceptedStateSummaryF") } } @@ -152,8 +148,7 @@ func (s *SenderTest) SendAcceptedStateSummary(ctx context.Context, validatorID i if s.SendAcceptedStateSummaryF != nil { s.SendAcceptedStateSummaryF(ctx, validatorID, requestID, summaryIDs) } else if s.CantSendAcceptedStateSummary && s.T != nil { - require := require.New(s.T) - require.FailNow("Unexpectedly called SendAcceptedStateSummary") + require.FailNow(s.T, "Unexpectedly called SendAcceptedStateSummary") } } @@ -164,8 +159,7 @@ func (s *SenderTest) SendGetAcceptedFrontier(ctx context.Context, validatorIDs s if s.SendGetAcceptedFrontierF != nil { s.SendGetAcceptedFrontierF(ctx, validatorIDs, requestID) } else if s.CantSendGetAcceptedFrontier && s.T != nil { - require := require.New(s.T) - require.FailNow("Unexpectedly called SendGetAcceptedFrontier") + require.FailNow(s.T, "Unexpectedly called SendGetAcceptedFrontier") } } @@ -176,8 +170,7 @@ func (s *SenderTest) SendAcceptedFrontier(ctx context.Context, validatorID ids.N if s.SendAcceptedFrontierF != nil { s.SendAcceptedFrontierF(ctx, validatorID, requestID, containerIDs) } else if s.CantSendAcceptedFrontier && s.T != nil { - require := require.New(s.T) - require.FailNow("Unexpectedly called SendAcceptedFrontier") + require.FailNow(s.T, "Unexpectedly called SendAcceptedFrontier") } } @@ -188,8 +181,7 @@ func (s *SenderTest) SendGetAccepted(ctx context.Context, nodeIDs set.Set[ids.No if s.SendGetAcceptedF != nil { s.SendGetAcceptedF(ctx, nodeIDs, requestID, containerIDs) } else if s.CantSendGetAccepted && s.T != nil { - require := require.New(s.T) - require.FailNow("Unexpectedly called SendGetAccepted") + require.FailNow(s.T, "Unexpectedly called SendGetAccepted") } } @@ -200,8 +192,7 @@ func (s *SenderTest) SendAccepted(ctx context.Context, validatorID ids.NodeID, r if 
s.SendAcceptedF != nil { s.SendAcceptedF(ctx, validatorID, requestID, containerIDs) } else if s.CantSendAccepted && s.T != nil { - require := require.New(s.T) - require.FailNow("Unexpectedly called SendAccepted") + require.FailNow(s.T, "Unexpectedly called SendAccepted") } } @@ -212,8 +203,7 @@ func (s *SenderTest) SendGet(ctx context.Context, vdr ids.NodeID, requestID uint if s.SendGetF != nil { s.SendGetF(ctx, vdr, requestID, vtxID) } else if s.CantSendGet && s.T != nil { - require := require.New(s.T) - require.FailNow("Unexpectedly called SendGet") + require.FailNow(s.T, "Unexpectedly called SendGet") } } @@ -224,8 +214,7 @@ func (s *SenderTest) SendGetAncestors(ctx context.Context, validatorID ids.NodeI if s.SendGetAncestorsF != nil { s.SendGetAncestorsF(ctx, validatorID, requestID, vtxID) } else if s.CantSendGetAncestors && s.T != nil { - require := require.New(s.T) - require.FailNow("Unexpectedly called SendCantSendGetAncestors") + require.FailNow(s.T, "Unexpectedly called SendCantSendGetAncestors") } } @@ -236,8 +225,7 @@ func (s *SenderTest) SendPut(ctx context.Context, vdr ids.NodeID, requestID uint if s.SendPutF != nil { s.SendPutF(ctx, vdr, requestID, vtx) } else if s.CantSendPut && s.T != nil { - require := require.New(s.T) - require.FailNow("Unexpectedly called SendPut") + require.FailNow(s.T, "Unexpectedly called SendPut") } } @@ -248,8 +236,7 @@ func (s *SenderTest) SendAncestors(ctx context.Context, vdr ids.NodeID, requestI if s.SendAncestorsF != nil { s.SendAncestorsF(ctx, vdr, requestID, vtxs) } else if s.CantSendAncestors && s.T != nil { - require := require.New(s.T) - require.FailNow("Unexpectedly called SendAncestors") + require.FailNow(s.T, "Unexpectedly called SendAncestors") } } @@ -260,8 +247,7 @@ func (s *SenderTest) SendPushQuery(ctx context.Context, vdrs set.Set[ids.NodeID] if s.SendPushQueryF != nil { s.SendPushQueryF(ctx, vdrs, requestID, vtx) } else if s.CantSendPushQuery && s.T != nil { - require := require.New(s.T) - require.FailNow("Unexpectedly called SendPushQuery") + require.FailNow(s.T, "Unexpectedly called SendPushQuery") } } @@ -272,8 +258,7 @@ func (s *SenderTest) SendPullQuery(ctx context.Context, vdrs set.Set[ids.NodeID] if s.SendPullQueryF != nil { s.SendPullQueryF(ctx, vdrs, requestID, vtxID) } else if s.CantSendPullQuery && s.T != nil { - require := require.New(s.T) - require.FailNow("Unexpectedly called SendPullQuery") + require.FailNow(s.T, "Unexpectedly called SendPullQuery") } } @@ -284,8 +269,7 @@ func (s *SenderTest) SendChits(ctx context.Context, vdr ids.NodeID, requestID ui if s.SendChitsF != nil { s.SendChitsF(ctx, vdr, requestID, votes, accepted) } else if s.CantSendChits && s.T != nil { - require := require.New(s.T) - require.FailNow("Unexpectedly called SendChits") + require.FailNow(s.T, "Unexpectedly called SendChits") } } @@ -296,8 +280,7 @@ func (s *SenderTest) SendGossip(ctx context.Context, container []byte) { if s.SendGossipF != nil { s.SendGossipF(ctx, container) } else if s.CantSendGossip && s.T != nil { - require := require.New(s.T) - require.FailNow("Unexpectedly called SendGossip") + require.FailNow(s.T, "Unexpectedly called SendGossip") } } @@ -305,8 +288,7 @@ func (s *SenderTest) SendCrossChainAppRequest(ctx context.Context, chainID ids.I if s.SendCrossChainAppRequestF != nil { s.SendCrossChainAppRequestF(ctx, chainID, requestID, appRequestBytes) } else if s.CantSendCrossChainAppRequest && s.T != nil { - require := require.New(s.T) - require.FailNow("Unexpectedly called SendCrossChainAppRequest") + require.FailNow(s.T, 
"Unexpectedly called SendCrossChainAppRequest") } return nil } @@ -315,8 +297,7 @@ func (s *SenderTest) SendCrossChainAppResponse(ctx context.Context, chainID ids. if s.SendCrossChainAppResponseF != nil { s.SendCrossChainAppResponseF(ctx, chainID, requestID, appResponseBytes) } else if s.CantSendCrossChainAppResponse && s.T != nil { - require := require.New(s.T) - require.FailNow("Unexpectedly called SendCrossChainAppResponse") + require.FailNow(s.T, "Unexpectedly called SendCrossChainAppResponse") } return nil } @@ -329,8 +310,7 @@ func (s *SenderTest) SendAppRequest(ctx context.Context, nodeIDs set.Set[ids.Nod case s.SendAppRequestF != nil: return s.SendAppRequestF(ctx, nodeIDs, requestID, appRequestBytes) case s.CantSendAppRequest && s.T != nil: - require := require.New(s.T) - require.FailNow(errSendAppRequest.Error()) + require.FailNow(s.T, errSendAppRequest.Error()) } return errSendAppRequest } @@ -343,8 +323,7 @@ func (s *SenderTest) SendAppResponse(ctx context.Context, nodeID ids.NodeID, req case s.SendAppResponseF != nil: return s.SendAppResponseF(ctx, nodeID, requestID, appResponseBytes) case s.CantSendAppResponse && s.T != nil: - require := require.New(s.T) - require.FailNow(errSendAppResponse.Error()) + require.FailNow(s.T, errSendAppResponse.Error()) } return errSendAppResponse } @@ -357,8 +336,7 @@ func (s *SenderTest) SendAppGossip(ctx context.Context, appGossipBytes []byte) e case s.SendAppGossipF != nil: return s.SendAppGossipF(ctx, appGossipBytes) case s.CantSendAppGossip && s.T != nil: - require := require.New(s.T) - require.FailNow(errSendAppGossip.Error()) + require.FailNow(s.T, errSendAppGossip.Error()) } return errSendAppGossip } @@ -371,8 +349,7 @@ func (s *SenderTest) SendAppGossipSpecific(ctx context.Context, nodeIDs set.Set[ case s.SendAppGossipSpecificF != nil: return s.SendAppGossipSpecificF(ctx, nodeIDs, appGossipBytes) case s.CantSendAppGossipSpecific && s.T != nil: - require := require.New(s.T) - require.FailNow(errSendAppGossipSpecific.Error()) + require.FailNow(s.T, errSendAppGossipSpecific.Error()) } return errSendAppGossipSpecific } diff --git a/snow/engine/common/test_timer.go b/snow/engine/common/test_timer.go index bca461caf02..e5e2b232d39 100644 --- a/snow/engine/common/test_timer.go +++ b/snow/engine/common/test_timer.go @@ -30,7 +30,6 @@ func (t *TimerTest) RegisterTimeout(delay time.Duration) { if t.RegisterTimeoutF != nil { t.RegisterTimeoutF(delay) } else if t.CantRegisterTimout && t.T != nil { - require := require.New(t.T) - require.FailNow("Unexpectedly called RegisterTimeout") + require.FailNow(t.T, "Unexpectedly called RegisterTimeout") } } diff --git a/snow/engine/common/test_vm.go b/snow/engine/common/test_vm.go index aeeb3c0b845..fa547b4317f 100644 --- a/snow/engine/common/test_vm.go +++ b/snow/engine/common/test_vm.go @@ -110,8 +110,7 @@ func (vm *TestVM) Initialize( ) } if vm.CantInitialize && vm.T != nil { - require := require.New(vm.T) - require.FailNow(errInitialize.Error()) + require.FailNow(vm.T, errInitialize.Error()) } return errInitialize } @@ -122,8 +121,7 @@ func (vm *TestVM) SetState(ctx context.Context, state snow.State) error { } if vm.CantSetState { if vm.T != nil { - require := require.New(vm.T) - require.FailNow(errSetState.Error()) + require.FailNow(vm.T, errSetState.Error()) } return errSetState } @@ -136,8 +134,7 @@ func (vm *TestVM) Shutdown(ctx context.Context) error { } if vm.CantShutdown { if vm.T != nil { - require := require.New(vm.T) - require.FailNow(errShutdown.Error()) + require.FailNow(vm.T, 
errShutdown.Error()) } return errShutdown } @@ -149,8 +146,7 @@ func (vm *TestVM) CreateHandlers(ctx context.Context) (map[string]*HTTPHandler, return vm.CreateHandlersF(ctx) } if vm.CantCreateHandlers && vm.T != nil { - require := require.New(vm.T) - require.FailNow(errCreateHandlers.Error()) + require.FailNow(vm.T, errCreateHandlers.Error()) } return nil, nil } @@ -160,8 +156,7 @@ func (vm *TestVM) CreateStaticHandlers(ctx context.Context) (map[string]*HTTPHan return vm.CreateStaticHandlersF(ctx) } if vm.CantCreateStaticHandlers && vm.T != nil { - require := require.New(vm.T) - require.FailNow(errCreateStaticHandlers.Error()) + require.FailNow(vm.T, errCreateStaticHandlers.Error()) } return nil, nil } @@ -171,8 +166,7 @@ func (vm *TestVM) HealthCheck(ctx context.Context) (interface{}, error) { return vm.HealthCheckF(ctx) } if vm.CantHealthCheck && vm.T != nil { - require := require.New(vm.T) - require.FailNow(errHealthCheck.Error()) + require.FailNow(vm.T, errHealthCheck.Error()) } return nil, errHealthCheck } @@ -185,8 +179,7 @@ func (vm *TestVM) AppRequest(ctx context.Context, nodeID ids.NodeID, requestID u return nil } if vm.T != nil { - require := require.New(vm.T) - require.FailNow(errAppRequest.Error()) + require.FailNow(vm.T, errAppRequest.Error()) } return errAppRequest } @@ -199,8 +192,7 @@ func (vm *TestVM) AppRequestFailed(ctx context.Context, nodeID ids.NodeID, reque return nil } if vm.T != nil { - require := require.New(vm.T) - require.FailNow(errAppRequestFailed.Error()) + require.FailNow(vm.T, errAppRequestFailed.Error()) } return errAppRequestFailed } @@ -213,8 +205,7 @@ func (vm *TestVM) AppResponse(ctx context.Context, nodeID ids.NodeID, requestID return nil } if vm.T != nil { - require := require.New(vm.T) - require.FailNow(errAppResponse.Error()) + require.FailNow(vm.T, errAppResponse.Error()) } return errAppResponse } @@ -227,8 +218,7 @@ func (vm *TestVM) AppGossip(ctx context.Context, nodeID ids.NodeID, msg []byte) return nil } if vm.T != nil { - require := require.New(vm.T) - require.FailNow(errAppGossip.Error()) + require.FailNow(vm.T, errAppGossip.Error()) } return errAppGossip } @@ -241,8 +231,7 @@ func (vm *TestVM) CrossChainAppRequest(ctx context.Context, chainID ids.ID, requ return nil } if vm.T != nil { - require := require.New(vm.T) - require.FailNow(errCrossChainAppRequest.Error()) + require.FailNow(vm.T, errCrossChainAppRequest.Error()) } return errCrossChainAppRequest } @@ -255,8 +244,7 @@ func (vm *TestVM) CrossChainAppRequestFailed(ctx context.Context, chainID ids.ID return nil } if vm.T != nil { - require := require.New(vm.T) - require.FailNow(errCrossChainAppRequestFailed.Error()) + require.FailNow(vm.T, errCrossChainAppRequestFailed.Error()) } return errCrossChainAppRequestFailed } @@ -269,8 +257,7 @@ func (vm *TestVM) CrossChainAppResponse(ctx context.Context, chainID ids.ID, req return nil } if vm.T != nil { - require := require.New(vm.T) - require.FailNow(errCrossChainAppResponse.Error()) + require.FailNow(vm.T, errCrossChainAppResponse.Error()) } return errCrossChainAppResponse } @@ -280,8 +267,7 @@ func (vm *TestVM) Connected(ctx context.Context, id ids.NodeID, nodeVersion *ver return vm.ConnectedF(ctx, id, nodeVersion) } if vm.CantConnected && vm.T != nil { - require := require.New(vm.T) - require.FailNow(errConnected.Error()) + require.FailNow(vm.T, errConnected.Error()) } return nil } @@ -291,8 +277,7 @@ func (vm *TestVM) Disconnected(ctx context.Context, id ids.NodeID) error { return vm.DisconnectedF(ctx, id) } if vm.CantDisconnected && 
vm.T != nil { - require := require.New(vm.T) - require.FailNow(errDisconnected.Error()) + require.FailNow(vm.T, errDisconnected.Error()) } return nil } @@ -302,8 +287,7 @@ func (vm *TestVM) Version(ctx context.Context) (string, error) { return vm.VersionF(ctx) } if vm.CantVersion && vm.T != nil { - require := require.New(vm.T) - require.FailNow(errVersion.Error()) + require.FailNow(vm.T, errVersion.Error()) } return "", nil } diff --git a/snow/engine/snowman/block/test_batched_vm.go b/snow/engine/snowman/block/test_batched_vm.go index 46627c68332..ef799115607 100644 --- a/snow/engine/snowman/block/test_batched_vm.go +++ b/snow/engine/snowman/block/test_batched_vm.go @@ -65,8 +65,7 @@ func (vm *TestBatchedVM) GetAncestors( ) } if vm.CantGetAncestors && vm.T != nil { - require := require.New(vm.T) - require.FailNow(errGetAncestor.Error()) + require.FailNow(vm.T, errGetAncestor.Error()) } return nil, errGetAncestor } @@ -79,8 +78,7 @@ func (vm *TestBatchedVM) BatchedParseBlock( return vm.BatchedParseBlockF(ctx, blks) } if vm.CantBatchParseBlock && vm.T != nil { - require := require.New(vm.T) - require.FailNow(errBatchedParseBlock.Error()) + require.FailNow(vm.T, errBatchedParseBlock.Error()) } return nil, errBatchedParseBlock } diff --git a/snow/engine/snowman/block/test_height_indexed_vm.go b/snow/engine/snowman/block/test_height_indexed_vm.go index 3d72cf4cc2c..ae4e437bc53 100644 --- a/snow/engine/snowman/block/test_height_indexed_vm.go +++ b/snow/engine/snowman/block/test_height_indexed_vm.go @@ -36,8 +36,7 @@ func (vm *TestHeightIndexedVM) VerifyHeightIndex(ctx context.Context) error { return vm.VerifyHeightIndexF(ctx) } if vm.CantVerifyHeightIndex && vm.T != nil { - require := require.New(vm.T) - require.FailNow(errVerifyHeightIndex.Error()) + require.FailNow(vm.T, errVerifyHeightIndex.Error()) } return errVerifyHeightIndex } @@ -47,8 +46,7 @@ func (vm *TestHeightIndexedVM) GetBlockIDAtHeight(ctx context.Context, height ui return vm.GetBlockIDAtHeightF(ctx, height) } if vm.CantGetBlockIDAtHeight && vm.T != nil { - require := require.New(vm.T) - require.FailNow(errGetAncestor.Error()) + require.FailNow(vm.T, errGetAncestor.Error()) } return ids.Empty, errGetBlockIDAtHeight } diff --git a/snow/engine/snowman/block/test_state_summary.go b/snow/engine/snowman/block/test_state_summary.go index fb37c7b94be..089e6dcfd36 100644 --- a/snow/engine/snowman/block/test_state_summary.go +++ b/snow/engine/snowman/block/test_state_summary.go @@ -46,8 +46,7 @@ func (s *TestStateSummary) Accept(ctx context.Context) (StateSyncMode, error) { return s.AcceptF(ctx) } if s.CantAccept && s.T != nil { - require := require.New(s.T) - require.FailNow(errAccept.Error()) + require.FailNow(s.T, errAccept.Error()) } return StateSyncSkipped, errAccept } diff --git a/snow/engine/snowman/block/test_state_syncable_vm.go b/snow/engine/snowman/block/test_state_syncable_vm.go index ce02dc08a59..b05dd811868 100644 --- a/snow/engine/snowman/block/test_state_syncable_vm.go +++ b/snow/engine/snowman/block/test_state_syncable_vm.go @@ -42,8 +42,7 @@ func (vm *TestStateSyncableVM) StateSyncEnabled(ctx context.Context) (bool, erro return vm.StateSyncEnabledF(ctx) } if vm.CantStateSyncEnabled && vm.T != nil { - require := require.New(vm.T) - require.FailNow(errStateSyncEnabled.Error()) + require.FailNow(vm.T, errStateSyncEnabled.Error()) } return false, errStateSyncEnabled } @@ -53,8 +52,7 @@ func (vm *TestStateSyncableVM) GetOngoingSyncStateSummary(ctx context.Context) ( return vm.GetOngoingSyncStateSummaryF(ctx) } if 
vm.CantStateSyncGetOngoingSummary && vm.T != nil { - require := require.New(vm.T) - require.FailNow(errStateSyncGetOngoingSummary.Error()) + require.FailNow(vm.T, errStateSyncGetOngoingSummary.Error()) } return nil, errStateSyncGetOngoingSummary } @@ -64,8 +62,7 @@ func (vm *TestStateSyncableVM) GetLastStateSummary(ctx context.Context) (StateSu return vm.GetLastStateSummaryF(ctx) } if vm.CantGetLastStateSummary && vm.T != nil { - require := require.New(vm.T) - require.FailNow(errGetLastStateSummary.Error()) + require.FailNow(vm.T, errGetLastStateSummary.Error()) } return nil, errGetLastStateSummary } @@ -75,8 +72,7 @@ func (vm *TestStateSyncableVM) ParseStateSummary(ctx context.Context, summaryByt return vm.ParseStateSummaryF(ctx, summaryBytes) } if vm.CantParseStateSummary && vm.T != nil { - require := require.New(vm.T) - require.FailNow(errParseStateSummary.Error()) + require.FailNow(vm.T, errParseStateSummary.Error()) } return nil, errParseStateSummary } @@ -86,8 +82,7 @@ func (vm *TestStateSyncableVM) GetStateSummary(ctx context.Context, summaryHeigh return vm.GetStateSummaryF(ctx, summaryHeight) } if vm.CantGetStateSummary && vm.T != nil { - require := require.New(vm.T) - require.FailNow(errGetStateSummary.Error()) + require.FailNow(vm.T, errGetStateSummary.Error()) } return nil, errGetStateSummary } diff --git a/snow/engine/snowman/block/test_vm.go b/snow/engine/snowman/block/test_vm.go index b6ffe4fcaac..2988828b0eb 100644 --- a/snow/engine/snowman/block/test_vm.go +++ b/snow/engine/snowman/block/test_vm.go @@ -55,8 +55,7 @@ func (vm *TestVM) BuildBlock(ctx context.Context) (snowman.Block, error) { return vm.BuildBlockF(ctx) } if vm.CantBuildBlock && vm.T != nil { - require := require.New(vm.T) - require.FailNow(errBuildBlock.Error()) + require.FailNow(vm.T, errBuildBlock.Error()) } return nil, errBuildBlock } @@ -66,8 +65,7 @@ func (vm *TestVM) ParseBlock(ctx context.Context, b []byte) (snowman.Block, erro return vm.ParseBlockF(ctx, b) } if vm.CantParseBlock && vm.T != nil { - require := require.New(vm.T) - require.FailNow(errParseBlock.Error()) + require.FailNow(vm.T, errParseBlock.Error()) } return nil, errParseBlock } @@ -77,8 +75,7 @@ func (vm *TestVM) GetBlock(ctx context.Context, id ids.ID) (snowman.Block, error return vm.GetBlockF(ctx, id) } if vm.CantGetBlock && vm.T != nil { - require := require.New(vm.T) - require.FailNow(errGetBlock.Error()) + require.FailNow(vm.T, errGetBlock.Error()) } return nil, errGetBlock } @@ -88,8 +85,7 @@ func (vm *TestVM) SetPreference(ctx context.Context, id ids.ID) error { return vm.SetPreferenceF(ctx, id) } if vm.CantSetPreference && vm.T != nil { - require := require.New(vm.T) - require.FailNow("Unexpectedly called SetPreference") + require.FailNow(vm.T, "Unexpectedly called SetPreference") } return nil } @@ -99,8 +95,7 @@ func (vm *TestVM) LastAccepted(ctx context.Context) (ids.ID, error) { return vm.LastAcceptedF(ctx) } if vm.CantLastAccepted && vm.T != nil { - require := require.New(vm.T) - require.FailNow(errLastAccepted.Error()) + require.FailNow(vm.T, errLastAccepted.Error()) } return ids.ID{}, errLastAccepted } diff --git a/snow/engine/snowman/test_engine.go b/snow/engine/snowman/test_engine.go index ca6b9da462a..ed6e1b1743c 100644 --- a/snow/engine/snowman/test_engine.go +++ b/snow/engine/snowman/test_engine.go @@ -38,8 +38,7 @@ func (e *EngineTest) GetBlock(ctx context.Context, blkID ids.ID) (snowman.Block, return e.GetBlockF(ctx, blkID) } if e.CantGetBlock && e.T != nil { - require := require.New(e.T) - 
require.FailNow(errGetBlock.Error()) + require.FailNow(e.T, errGetBlock.Error()) } return nil, errGetBlock } diff --git a/snow/networking/benchlist/test_benchable.go b/snow/networking/benchlist/test_benchable.go index 1e059fa4ebb..5e179763d2d 100644 --- a/snow/networking/benchlist/test_benchable.go +++ b/snow/networking/benchlist/test_benchable.go @@ -28,8 +28,7 @@ func (b *TestBenchable) Benched(chainID ids.ID, validatorID ids.NodeID) { if b.BenchedF != nil { b.BenchedF(chainID, validatorID) } else if b.CantBenched && b.T != nil { - require := require.New(b.T) - require.FailNow("Unexpectedly called Benched") + require.FailNow(b.T, "Unexpectedly called Benched") } } @@ -37,7 +36,6 @@ func (b *TestBenchable) Unbenched(chainID ids.ID, validatorID ids.NodeID) { if b.UnbenchedF != nil { b.UnbenchedF(chainID, validatorID) } else if b.CantUnbenched && b.T != nil { - require := require.New(b.T) - require.FailNow("Unexpectedly called Unbenched") + require.FailNow(b.T, "Unexpectedly called Unbenched") } } diff --git a/snow/validators/test_state.go b/snow/validators/test_state.go index 6a2d720363d..b27e6d97261 100644 --- a/snow/validators/test_state.go +++ b/snow/validators/test_state.go @@ -41,8 +41,7 @@ func (vm *TestState) GetMinimumHeight(ctx context.Context) (uint64, error) { return vm.GetMinimumHeightF(ctx) } if vm.CantGetMinimumHeight && vm.T != nil { - require := require.New(vm.T) - require.FailNow(errMinimumHeight.Error()) + require.FailNow(vm.T, errMinimumHeight.Error()) } return 0, errMinimumHeight } @@ -52,8 +51,7 @@ func (vm *TestState) GetCurrentHeight(ctx context.Context) (uint64, error) { return vm.GetCurrentHeightF(ctx) } if vm.CantGetCurrentHeight && vm.T != nil { - require := require.New(vm.T) - require.FailNow(errCurrentHeight.Error()) + require.FailNow(vm.T, errCurrentHeight.Error()) } return 0, errCurrentHeight } @@ -63,8 +61,7 @@ func (vm *TestState) GetSubnetID(ctx context.Context, chainID ids.ID) (ids.ID, e return vm.GetSubnetIDF(ctx, chainID) } if vm.CantGetSubnetID && vm.T != nil { - require := require.New(vm.T) - require.FailNow(errSubnetID.Error()) + require.FailNow(vm.T, errSubnetID.Error()) } return ids.Empty, errSubnetID } @@ -78,8 +75,7 @@ func (vm *TestState) GetValidatorSet( return vm.GetValidatorSetF(ctx, height, subnetID) } if vm.CantGetValidatorSet && vm.T != nil { - require := require.New(vm.T) - require.FailNow(errGetValidatorSet.Error()) + require.FailNow(vm.T, errGetValidatorSet.Error()) } return nil, errGetValidatorSet } From 62d168ea8f92c4aca37f945976df0214c2cc8cee Mon Sep 17 00:00:00 2001 From: dhrubabasu <7675102+dhrubabasu@users.noreply.github.com> Date: Tue, 16 May 2023 16:11:19 -0400 Subject: [PATCH 55/79] nit --- scripts/lint.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/lint.sh b/scripts/lint.sh index 8051ff3e57d..81bb35603cc 100755 --- a/scripts/lint.sh +++ b/scripts/lint.sh @@ -85,7 +85,7 @@ function test_require_len_zero { } function test_require_equal_len { - if grep -R -o -P 'require\.Equal\(.*, len\(' .; then + if grep -R -o -P 'require\.Equal\((len\()|(.+? len\()' .; then echo "" echo "Use require.Len instead of require.Equal when testing for length." 
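The test-double hunks above (EngineTest, SenderTest, TimerTest, TestVM, TestState, and the rest) all apply one mechanical rewrite: drop the throwaway `require := require.New(x.T)` assertion object and call testify's package-level helper with the stored `*testing.T` instead. A minimal sketch of the two equivalent styles, assuming only that the double keeps its test handle in a field named T (the `stub` type and `errUnexpected` sentinel are illustrative, not from the patch):

package common

import (
	"errors"
	"testing"

	"github.com/stretchr/testify/require"
)

var errUnexpected = errors.New("unexpectedly called")

type stub struct{ T *testing.T }

// Old style: allocate a *require.Assertions just to fail once.
func (s *stub) failOld() {
	require := require.New(s.T)
	require.FailNow(errUnexpected.Error())
}

// New style: the package-level helper takes the TestingT directly.
func (s *stub) failNew() {
	require.FailNow(s.T, errUnexpected.Error())
}

Both versions abort the calling test immediately; the new one drops an allocation and a line at every call site.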
echo "" From e7cf3711677b56ba02f16538c21a02c0664d2477 Mon Sep 17 00:00:00 2001 From: dhrubabasu <7675102+dhrubabasu@users.noreply.github.com> Date: Tue, 16 May 2023 16:24:49 -0400 Subject: [PATCH 56/79] regex is easy they said --- scripts/lint.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/lint.sh b/scripts/lint.sh index 81bb35603cc..494f3786fe4 100755 --- a/scripts/lint.sh +++ b/scripts/lint.sh @@ -85,7 +85,7 @@ function test_require_len_zero { } function test_require_equal_len { - if grep -R -o -P 'require\.Equal\((len\()|(.+? len\()' .; then + if grep -R -o -P 'require\.Equal\(((len\()|(.+? len\())' .; then echo "" echo "Use require.Len instead of require.Equal when testing for length." echo "" From b9d80b31b472a49614cec2816270aeb378ca94e5 Mon Sep 17 00:00:00 2001 From: dhrubabasu <7675102+dhrubabasu@users.noreply.github.com> Date: Tue, 16 May 2023 16:35:07 -0400 Subject: [PATCH 57/79] revert regex changes --- scripts/lint.sh | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/scripts/lint.sh b/scripts/lint.sh index 494f3786fe4..faab2ef7485 100755 --- a/scripts/lint.sh +++ b/scripts/lint.sh @@ -85,7 +85,16 @@ function test_require_len_zero { } function test_require_equal_len { - if grep -R -o -P 'require\.Equal\(((len\()|(.+? len\())' .; then + # This should only flag if len(foo) is the *actual* val, not the expected val. + # + # These should *not* match: + # - require.Equal(len(foo), 2) + # - require.Equal(t, len(foo), 2) + # + # These should match: + # - require.Equal(2, len(foo)) + # - require.Equal(t, 2, len(foo)) + if grep -R -o -P 'require\.Equal\((t, )?.*, len\([^,]*$' .; then echo "" echo "Use require.Len instead of require.Equal when testing for length." echo "" From d1d0fdce72363e84ba79b38c7bb0c253e407c768 Mon Sep 17 00:00:00 2001 From: dhrubabasu <7675102+dhrubabasu@users.noreply.github.com> Date: Tue, 16 May 2023 16:35:53 -0400 Subject: [PATCH 58/79] revert window change --- utils/window/window_test.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/utils/window/window_test.go b/utils/window/window_test.go index ad35ea1eafb..9e36658850b 100644 --- a/utils/window/window_test.go +++ b/utils/window/window_test.go @@ -55,8 +55,7 @@ func TestAdd(t *testing.T) { window.Add(test.newlyAdded) - expectedLen := len(test.window) + 1 - require.Equal(t, expectedLen, window.Length()) + require.Equal(t, len(test.window)+1, window.Length()) oldest, ok := window.Oldest() require.Equal(t, test.expectedOldest, oldest) require.True(t, ok) From 38e57422fd08c124d2227b850a54627f0407fbe3 Mon Sep 17 00:00:00 2001 From: dhrubabasu <7675102+dhrubabasu@users.noreply.github.com> Date: Tue, 16 May 2023 16:42:23 -0400 Subject: [PATCH 59/79] nit --- scripts/lint.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/lint.sh b/scripts/lint.sh index faab2ef7485..39aafb48798 100755 --- a/scripts/lint.sh +++ b/scripts/lint.sh @@ -94,7 +94,7 @@ function test_require_equal_len { # These should match: # - require.Equal(2, len(foo)) # - require.Equal(t, 2, len(foo)) - if grep -R -o -P 'require\.Equal\((t, )?.*, len\([^,]*$' .; then + if grep -R -o -P --exclude-dir='scripts' 'require\.Equal\((t, )?.*, len\([^,]*$' .; then echo "" echo "Use require.Len instead of require.Equal when testing for length." 
echo "" From b93c523177b0dc83886cd9c9a831c61f5c053558 Mon Sep 17 00:00:00 2001 From: dhrubabasu <7675102+dhrubabasu@users.noreply.github.com> Date: Tue, 16 May 2023 18:12:58 -0400 Subject: [PATCH 60/79] reduce diff --- x/sync/sync_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/x/sync/sync_test.go b/x/sync/sync_test.go index bd258fa30ce..acec30d05cb 100644 --- a/x/sync/sync_test.go +++ b/x/sync/sync_test.go @@ -979,7 +979,7 @@ func Test_Sync_Result_Correct_Root_Update_Root_During(t *testing.T) { Log: logging.NoLog{}, }) require.NoError(err) - require.NotNil(syncer) + require.NotNil(t, syncer) require.NoError(syncer.StartSyncing(context.Background())) From 1c8d49cb46268fcf219eb060522152230933add3 Mon Sep 17 00:00:00 2001 From: dhrubabasu <7675102+dhrubabasu@users.noreply.github.com> Date: Wed, 17 May 2023 08:09:39 -0400 Subject: [PATCH 61/79] cleanup --- x/sync/sync_test.go | 305 ++++++++++++++++++++++++-------------------- 1 file changed, 164 insertions(+), 141 deletions(-) diff --git a/x/sync/sync_test.go b/x/sync/sync_test.go index acec30d05cb..3adaa769033 100644 --- a/x/sync/sync_test.go +++ b/x/sync/sync_test.go @@ -58,6 +58,8 @@ func (client *mockClient) GetRangeProof(ctx context.Context, request *syncpb.Ran } func Test_Creation(t *testing.T) { + require := require.New(t) + db, err := merkledb.New( context.Background(), memdb.New(), @@ -67,7 +69,7 @@ func Test_Creation(t *testing.T) { NodeCacheSize: 1000, }, ) - require.NoError(t, err) + require.NoError(err) syncer, err := NewStateSyncManager(StateSyncConfig{ SyncDB: db, @@ -76,11 +78,13 @@ func Test_Creation(t *testing.T) { SimultaneousWorkLimit: 5, Log: logging.NoLog{}, }) - require.NotNil(t, syncer) - require.NoError(t, err) + require.NoError(err) + require.NotNil(syncer) } func Test_Completion(t *testing.T) { + require := require.New(t) + for i := 0; i < 10; i++ { ctrl := gomock.NewController(t) defer ctrl.Finish() @@ -93,9 +97,9 @@ func Test_Completion(t *testing.T) { NodeCacheSize: 1000, }, ) - require.NoError(t, err) + require.NoError(err) emptyRoot, err := emptyDB.GetMerkleRoot(context.Background()) - require.NoError(t, err) + require.NoError(err) db, err := merkledb.New( context.Background(), memdb.New(), @@ -105,7 +109,7 @@ func Test_Completion(t *testing.T) { NodeCacheSize: 1000, }, ) - require.NoError(t, err) + require.NoError(err) syncer, err := NewStateSyncManager(StateSyncConfig{ SyncDB: db, Client: &mockClient{db: emptyDB}, @@ -113,68 +117,70 @@ func Test_Completion(t *testing.T) { SimultaneousWorkLimit: 5, Log: logging.NoLog{}, }) - require.NoError(t, err) - require.NotNil(t, syncer) - require.NoError(t, syncer.StartSyncing(context.Background())) - require.NoError(t, syncer.Wait(context.Background())) + require.NoError(err) + require.NotNil(syncer) + require.NoError(syncer.StartSyncing(context.Background())) + require.NoError(syncer.Wait(context.Background())) syncer.workLock.Lock() - require.Zero(t, syncer.unprocessedWork.Len()) - require.Equal(t, 1, syncer.processedWork.Len()) + require.Zero(syncer.unprocessedWork.Len()) + require.Equal(1, syncer.processedWork.Len()) syncer.workLock.Unlock() } } func Test_Midpoint(t *testing.T) { + require := require.New(t) + mid := midPoint([]byte{1, 255}, []byte{2, 1}) - require.Equal(t, []byte{2, 0}, mid) + require.Equal([]byte{2, 0}, mid) mid = midPoint(nil, []byte{255, 255, 0}) - require.Equal(t, []byte{127, 255, 128}, mid) + require.Equal([]byte{127, 255, 128}, mid) mid = midPoint([]byte{255, 255, 255}, []byte{255, 255}) - require.Equal(t, []byte{255, 
255, 127, 128}, mid) + require.Equal([]byte{255, 255, 127, 128}, mid) mid = midPoint(nil, []byte{255}) - require.Equal(t, []byte{127, 127}, mid) + require.Equal([]byte{127, 127}, mid) mid = midPoint([]byte{1, 255}, []byte{255, 1}) - require.Equal(t, []byte{128, 128}, mid) + require.Equal([]byte{128, 128}, mid) mid = midPoint([]byte{140, 255}, []byte{141, 0}) - require.Equal(t, []byte{140, 255, 127}, mid) + require.Equal([]byte{140, 255, 127}, mid) mid = midPoint([]byte{126, 255}, []byte{127}) - require.Equal(t, []byte{126, 255, 127}, mid) + require.Equal([]byte{126, 255, 127}, mid) mid = midPoint(nil, nil) - require.Equal(t, []byte{127}, mid) + require.Equal([]byte{127}, mid) low := midPoint(nil, mid) - require.Equal(t, []byte{63, 127}, low) + require.Equal([]byte{63, 127}, low) high := midPoint(mid, nil) - require.Equal(t, []byte{191}, high) + require.Equal([]byte{191}, high) mid = midPoint([]byte{255, 255}, nil) - require.Equal(t, []byte{255, 255, 127, 127}, mid) + require.Equal([]byte{255, 255, 127, 127}, mid) mid = midPoint([]byte{255}, nil) - require.Equal(t, []byte{255, 127, 127}, mid) + require.Equal([]byte{255, 127, 127}, mid) for i := 0; i < 5000; i++ { r := rand.New(rand.NewSource(int64(i))) // #nosec G404 start := make([]byte, r.Intn(99)+1) _, err := r.Read(start) - require.NoError(t, err) + require.NoError(err) end := make([]byte, r.Intn(99)+1) _, err = r.Read(end) - require.NoError(t, err) + require.NoError(err) for bytes.Equal(start, end) { _, err = r.Read(end) - require.NoError(t, err) + require.NoError(err) } if bytes.Compare(start, end) == 1 { @@ -182,18 +188,20 @@ func Test_Midpoint(t *testing.T) { } mid = midPoint(start, end) - require.Equal(t, -1, bytes.Compare(start, mid)) - require.Equal(t, -1, bytes.Compare(mid, end)) + require.Equal(-1, bytes.Compare(start, mid)) + require.Equal(-1, bytes.Compare(mid, end)) } } func Test_Sync_FindNextKey_InSync(t *testing.T) { + require := require.New(t) + for i := 0; i < 3; i++ { r := rand.New(rand.NewSource(int64(i))) // #nosec G404 dbToSync, err := generateTrie(t, r, 1000) - require.NoError(t, err) + require.NoError(err) syncRoot, err := dbToSync.GetMerkleRoot(context.Background()) - require.NoError(t, err) + require.NoError(err) db, err := merkledb.New( context.Background(), @@ -204,7 +212,7 @@ func Test_Sync_FindNextKey_InSync(t *testing.T) { NodeCacheSize: 1000, }, ) - require.NoError(t, err) + require.NoError(err) syncer, err := NewStateSyncManager(StateSyncConfig{ SyncDB: db, Client: &mockClient{db: dbToSync}, @@ -212,24 +220,24 @@ func Test_Sync_FindNextKey_InSync(t *testing.T) { SimultaneousWorkLimit: 5, Log: logging.NoLog{}, }) - require.NoError(t, err) - require.NotNil(t, syncer) + require.NoError(err) + require.NotNil(syncer) - require.NoError(t, syncer.StartSyncing(context.Background())) - require.NoError(t, syncer.Wait(context.Background())) + require.NoError(syncer.StartSyncing(context.Background())) + require.NoError(syncer.Wait(context.Background())) proof, err := dbToSync.GetRangeProof(context.Background(), nil, nil, 500) - require.NoError(t, err) + require.NoError(err) // the two dbs should be in sync, so next key should be nil lastKey := proof.KeyValues[len(proof.KeyValues)-1].Key nextKey, err := syncer.findNextKey(context.Background(), lastKey, nil, proof.EndProof) - require.NoError(t, err) - require.Nil(t, nextKey) + require.NoError(err) + require.Nil(nextKey) // add an extra value to sync db past the last key returned newKey := midPoint(lastKey, nil) - require.NoError(t, db.Put(newKey, []byte{1})) + 
require.NoError(db.Put(newKey, []byte{1})) // create a range endpoint that is before the newly added key, but after the last key endPointBeforeNewKey := make([]byte, 0, 2) @@ -256,14 +264,16 @@ func Test_Sync_FindNextKey_InSync(t *testing.T) { } nextKey, err = syncer.findNextKey(context.Background(), lastKey, endPointBeforeNewKey, proof.EndProof) - require.NoError(t, err) + require.NoError(err) // next key would be after the end of the range, so it returns nil instead - require.Nil(t, nextKey) + require.Nil(nextKey) } } func Test_Sync_FindNextKey_Deleted(t *testing.T) { + require := require.New(t) + db, err := merkledb.New( context.Background(), memdb.New(), @@ -273,12 +283,12 @@ func Test_Sync_FindNextKey_Deleted(t *testing.T) { NodeCacheSize: 1000, }, ) - require.NoError(t, err) - require.NoError(t, db.Put([]byte{0x10}, []byte{1})) - require.NoError(t, db.Put([]byte{0x11, 0x11}, []byte{2})) + require.NoError(err) + require.NoError(db.Put([]byte{0x10}, []byte{1})) + require.NoError(db.Put([]byte{0x11, 0x11}, []byte{2})) syncRoot, err := db.GetMerkleRoot(context.Background()) - require.NoError(t, err) + require.NoError(err) syncer, err := NewStateSyncManager(StateSyncConfig{ SyncDB: db, @@ -287,29 +297,31 @@ func Test_Sync_FindNextKey_Deleted(t *testing.T) { SimultaneousWorkLimit: 5, Log: logging.NoLog{}, }) - require.NoError(t, err) + require.NoError(err) // 0x12 was "deleted" and there should be no extra node in the proof since there was nothing with a common prefix noExtraNodeProof, err := db.GetProof(context.Background(), []byte{0x12}) - require.NoError(t, err) + require.NoError(err) // 0x11 was "deleted" and 0x11.0x11 should be in the exclusion proof extraNodeProof, err := db.GetProof(context.Background(), []byte{0x11}) - require.NoError(t, err) + require.NoError(err) // there is now another value in the range that needs to be sync'ed - require.NoError(t, db.Put([]byte{0x13}, []byte{3})) + require.NoError(db.Put([]byte{0x13}, []byte{3})) nextKey, err := syncer.findNextKey(context.Background(), []byte{0x12}, []byte{0x20}, noExtraNodeProof.Path) - require.NoError(t, err) - require.Equal(t, []byte{0x13}, nextKey) + require.NoError(err) + require.Equal([]byte{0x13}, nextKey) nextKey, err = syncer.findNextKey(context.Background(), []byte{0x11}, []byte{0x20}, extraNodeProof.Path) - require.NoError(t, err) - require.Equal(t, []byte{0x13}, nextKey) + require.NoError(err) + require.Equal([]byte{0x13}, nextKey) } func Test_Sync_FindNextKey_BranchInLocal(t *testing.T) { + require := require.New(t) + db, err := merkledb.New( context.Background(), memdb.New(), @@ -319,14 +331,14 @@ func Test_Sync_FindNextKey_BranchInLocal(t *testing.T) { NodeCacheSize: 1000, }, ) - require.NoError(t, err) - require.NoError(t, db.Put([]byte{0x11}, []byte{1})) - require.NoError(t, db.Put([]byte{0x11, 0x11}, []byte{2})) + require.NoError(err) + require.NoError(db.Put([]byte{0x11}, []byte{1})) + require.NoError(db.Put([]byte{0x11, 0x11}, []byte{2})) syncRoot, err := db.GetMerkleRoot(context.Background()) - require.NoError(t, err) + require.NoError(err) proof, err := db.GetProof(context.Background(), []byte{0x11, 0x11}) - require.NoError(t, err) + require.NoError(err) syncer, err := NewStateSyncManager(StateSyncConfig{ SyncDB: db, @@ -335,15 +347,17 @@ func Test_Sync_FindNextKey_BranchInLocal(t *testing.T) { SimultaneousWorkLimit: 5, Log: logging.NoLog{}, }) - require.NoError(t, err) - require.NoError(t, db.Put([]byte{0x12}, []byte{4})) + require.NoError(err) + require.NoError(db.Put([]byte{0x12}, []byte{4})) nextKey, 
err := syncer.findNextKey(context.Background(), []byte{0x11, 0x11}, []byte{0x20}, proof.Path) - require.NoError(t, err) - require.Equal(t, []byte{0x12}, nextKey) + require.NoError(err) + require.Equal([]byte{0x12}, nextKey) } func Test_Sync_FindNextKey_BranchInReceived(t *testing.T) { + require := require.New(t) + db, err := merkledb.New( context.Background(), memdb.New(), @@ -353,15 +367,15 @@ func Test_Sync_FindNextKey_BranchInReceived(t *testing.T) { NodeCacheSize: 1000, }, ) - require.NoError(t, err) - require.NoError(t, db.Put([]byte{0x11}, []byte{1})) - require.NoError(t, db.Put([]byte{0x12}, []byte{2})) - require.NoError(t, db.Put([]byte{0x11, 0x11}, []byte{3})) + require.NoError(err) + require.NoError(db.Put([]byte{0x11}, []byte{1})) + require.NoError(db.Put([]byte{0x12}, []byte{2})) + require.NoError(db.Put([]byte{0x11, 0x11}, []byte{3})) syncRoot, err := db.GetMerkleRoot(context.Background()) - require.NoError(t, err) + require.NoError(err) proof, err := db.GetProof(context.Background(), []byte{0x11, 0x11}) - require.NoError(t, err) + require.NoError(err) syncer, err := NewStateSyncManager(StateSyncConfig{ SyncDB: db, @@ -370,21 +384,23 @@ func Test_Sync_FindNextKey_BranchInReceived(t *testing.T) { SimultaneousWorkLimit: 5, Log: logging.NoLog{}, }) - require.NoError(t, err) - require.NoError(t, db.Delete([]byte{0x12})) + require.NoError(err) + require.NoError(db.Delete([]byte{0x12})) nextKey, err := syncer.findNextKey(context.Background(), []byte{0x11, 0x11}, []byte{0x20}, proof.Path) - require.NoError(t, err) - require.Equal(t, []byte{0x12}, nextKey) + require.NoError(err) + require.Equal([]byte{0x12}, nextKey) } func Test_Sync_FindNextKey_ExtraValues(t *testing.T) { + require := require.New(t) + for i := 0; i < 10; i++ { r := rand.New(rand.NewSource(int64(i))) // #nosec G404 dbToSync, err := generateTrie(t, r, 1000) - require.NoError(t, err) + require.NoError(err) syncRoot, err := dbToSync.GetMerkleRoot(context.Background()) - require.NoError(t, err) + require.NoError(err) db, err := merkledb.New( context.Background(), @@ -395,7 +411,7 @@ func Test_Sync_FindNextKey_ExtraValues(t *testing.T) { NodeCacheSize: 1000, }, ) - require.NoError(t, err) + require.NoError(err) syncer, err := NewStateSyncManager(StateSyncConfig{ SyncDB: db, Client: &mockClient{db: dbToSync}, @@ -403,42 +419,42 @@ func Test_Sync_FindNextKey_ExtraValues(t *testing.T) { SimultaneousWorkLimit: 5, Log: logging.NoLog{}, }) - require.NoError(t, err) - require.NotNil(t, syncer) + require.NoError(err) + require.NotNil(syncer) - require.NoError(t, syncer.StartSyncing(context.Background())) - require.NoError(t, syncer.Wait(context.Background())) + require.NoError(syncer.StartSyncing(context.Background())) + require.NoError(syncer.Wait(context.Background())) proof, err := dbToSync.GetRangeProof(context.Background(), nil, nil, 500) - require.NoError(t, err) + require.NoError(err) // add an extra value to local db lastKey := proof.KeyValues[len(proof.KeyValues)-1].Key midpoint := midPoint(lastKey, nil) - require.NoError(t, db.Put(midpoint, []byte{1})) + require.NoError(db.Put(midpoint, []byte{1})) // next key at prefix of newly added point nextKey, err := syncer.findNextKey(context.Background(), lastKey, nil, proof.EndProof) - require.NoError(t, err) - require.NotNil(t, nextKey) + require.NoError(err) + require.NotNil(nextKey) - require.True(t, isPrefix(midpoint, nextKey)) + require.True(isPrefix(midpoint, nextKey)) - require.NoError(t, db.Delete(midpoint)) + require.NoError(db.Delete(midpoint)) - require.NoError(t, 
dbToSync.Put(midpoint, []byte{1})) + require.NoError(dbToSync.Put(midpoint, []byte{1})) proof, err = dbToSync.GetRangeProof(context.Background(), nil, lastKey, 500) - require.NoError(t, err) + require.NoError(err) // next key at prefix of newly added point nextKey, err = syncer.findNextKey(context.Background(), lastKey, nil, proof.EndProof) - require.NoError(t, err) - require.NotNil(t, nextKey) + require.NoError(err) + require.NotNil(nextKey) // deal with odd length key - require.True(t, isPrefix(midpoint, nextKey)) + require.True(isPrefix(midpoint, nextKey)) } } @@ -456,12 +472,14 @@ func isPrefix(data []byte, prefix []byte) bool { } func Test_Sync_FindNextKey_DifferentChild(t *testing.T) { + require := require.New(t) + for i := 0; i < 10; i++ { r := rand.New(rand.NewSource(int64(i))) // #nosec G404 dbToSync, err := generateTrie(t, r, 500) - require.NoError(t, err) + require.NoError(err) syncRoot, err := dbToSync.GetMerkleRoot(context.Background()) - require.NoError(t, err) + require.NoError(err) db, err := merkledb.New( context.Background(), @@ -472,7 +490,7 @@ func Test_Sync_FindNextKey_DifferentChild(t *testing.T) { NodeCacheSize: 1000, }, ) - require.NoError(t, err) + require.NoError(err) syncer, err := NewStateSyncManager(StateSyncConfig{ SyncDB: db, Client: &mockClient{db: dbToSync}, @@ -480,35 +498,35 @@ func Test_Sync_FindNextKey_DifferentChild(t *testing.T) { SimultaneousWorkLimit: 5, Log: logging.NoLog{}, }) - require.NoError(t, err) - require.NotNil(t, syncer) - require.NoError(t, syncer.StartSyncing(context.Background())) - require.NoError(t, syncer.Wait(context.Background())) + require.NoError(err) + require.NotNil(syncer) + require.NoError(syncer.StartSyncing(context.Background())) + require.NoError(syncer.Wait(context.Background())) proof, err := dbToSync.GetRangeProof(context.Background(), nil, nil, 100) - require.NoError(t, err) + require.NoError(err) lastKey := proof.KeyValues[len(proof.KeyValues)-1].Key // local db has a different child than remote db lastKey = append(lastKey, 16) - require.NoError(t, db.Put(lastKey, []byte{1})) + require.NoError(db.Put(lastKey, []byte{1})) - require.NoError(t, dbToSync.Put(lastKey, []byte{2})) + require.NoError(dbToSync.Put(lastKey, []byte{2})) proof, err = dbToSync.GetRangeProof(context.Background(), nil, proof.KeyValues[len(proof.KeyValues)-1].Key, 100) - require.NoError(t, err) + require.NoError(err) nextKey, err := syncer.findNextKey(context.Background(), proof.KeyValues[len(proof.KeyValues)-1].Key, nil, proof.EndProof) - require.NoError(t, err) - require.Equal(t, nextKey, lastKey) + require.NoError(err) + require.Equal(nextKey, lastKey) } } // Test findNextKey by computing the expected result in a naive, inefficient // way and comparing it to the actual result func TestFindNextKeyRandom(t *testing.T) { - rand := rand.New(rand.NewSource(1337)) //nolint:gosec require := require.New(t) + rand := rand.New(rand.NewSource(1337)) //nolint:gosec // Create a "remote" database and "local" database remoteDB, err := merkledb.New( @@ -719,12 +737,14 @@ func TestFindNextKeyRandom(t *testing.T) { } func Test_Sync_Result_Correct_Root(t *testing.T) { + require := require.New(t) + for i := 0; i < 3; i++ { r := rand.New(rand.NewSource(int64(i))) // #nosec G404 dbToSync, err := generateTrie(t, r, 1000) - require.NoError(t, err) + require.NoError(err) syncRoot, err := dbToSync.GetMerkleRoot(context.Background()) - require.NoError(t, err) + require.NoError(err) db, err := merkledb.New( context.Background(), @@ -735,7 +755,7 @@ func 
Test_Sync_Result_Correct_Root(t *testing.T) { NodeCacheSize: 1000, }, ) - require.NoError(t, err) + require.NoError(err) syncer, err := NewStateSyncManager(StateSyncConfig{ SyncDB: db, Client: &mockClient{db: dbToSync}, @@ -743,46 +763,48 @@ func Test_Sync_Result_Correct_Root(t *testing.T) { SimultaneousWorkLimit: 5, Log: logging.NoLog{}, }) - require.NoError(t, err) - require.NotNil(t, syncer) - require.NoError(t, syncer.StartSyncing(context.Background())) + require.NoError(err) + require.NotNil(syncer) + require.NoError(syncer.StartSyncing(context.Background())) - require.NoError(t, syncer.Wait(context.Background())) - require.NoError(t, syncer.Error()) + require.NoError(syncer.Wait(context.Background())) + require.NoError(syncer.Error()) // new db has fully sync'ed and should be at the same root as the original db newRoot, err := db.GetMerkleRoot(context.Background()) - require.NoError(t, err) - require.Equal(t, syncRoot, newRoot) + require.NoError(err) + require.Equal(syncRoot, newRoot) // make sure they stay in sync addkey := make([]byte, r.Intn(50)) _, err = r.Read(addkey) - require.NoError(t, err) + require.NoError(err) val := make([]byte, r.Intn(50)) _, err = r.Read(val) - require.NoError(t, err) + require.NoError(err) - require.NoError(t, db.Put(addkey, val)) + require.NoError(db.Put(addkey, val)) - require.NoError(t, dbToSync.Put(addkey, val)) + require.NoError(dbToSync.Put(addkey, val)) syncRoot, err = dbToSync.GetMerkleRoot(context.Background()) - require.NoError(t, err) + require.NoError(err) newRoot, err = db.GetMerkleRoot(context.Background()) - require.NoError(t, err) - require.Equal(t, syncRoot, newRoot) + require.NoError(err) + require.Equal(syncRoot, newRoot) } } func Test_Sync_Result_Correct_Root_With_Sync_Restart(t *testing.T) { + require := require.New(t) + for i := 0; i < 3; i++ { r := rand.New(rand.NewSource(int64(i))) // #nosec G404 dbToSync, err := generateTrie(t, r, 3*maxKeyValuesLimit) - require.NoError(t, err) + require.NoError(err) syncRoot, err := dbToSync.GetMerkleRoot(context.Background()) - require.NoError(t, err) + require.NoError(err) db, err := merkledb.New( context.Background(), @@ -793,7 +815,7 @@ func Test_Sync_Result_Correct_Root_With_Sync_Restart(t *testing.T) { NodeCacheSize: 1000, }, ) - require.NoError(t, err) + require.NoError(err) syncer, err := NewStateSyncManager(StateSyncConfig{ SyncDB: db, @@ -802,14 +824,13 @@ func Test_Sync_Result_Correct_Root_With_Sync_Restart(t *testing.T) { SimultaneousWorkLimit: 5, Log: logging.NoLog{}, }) - require.NoError(t, err) - require.NotNil(t, syncer) - require.NoError(t, syncer.StartSyncing(context.Background())) + require.NoError(err) + require.NotNil(syncer) + require.NoError(syncer.StartSyncing(context.Background())) // Wait until we've processed some work // before updating the sync target. 
require.Eventually( - t, func() bool { syncer.workLock.Lock() defer syncer.workLock.Unlock() @@ -828,16 +849,16 @@ func Test_Sync_Result_Correct_Root_With_Sync_Restart(t *testing.T) { SimultaneousWorkLimit: 5, Log: logging.NoLog{}, }) - require.NoError(t, err) - require.NotNil(t, newSyncer) + require.NoError(err) + require.NotNil(newSyncer) - require.NoError(t, newSyncer.StartSyncing(context.Background())) - require.NoError(t, newSyncer.Error()) - require.NoError(t, newSyncer.Wait(context.Background())) + require.NoError(newSyncer.StartSyncing(context.Background())) + require.NoError(newSyncer.Error()) + require.NoError(newSyncer.Wait(context.Background())) newRoot, err := db.GetMerkleRoot(context.Background()) - require.NoError(t, err) - require.Equal(t, syncRoot, newRoot) + require.NoError(err) + require.Equal(syncRoot, newRoot) } } @@ -888,7 +909,7 @@ func Test_Sync_Error_During_Sync(t *testing.T) { Log: logging.NoLog{}, }) require.NoError(err) - require.NotNil(t, syncer) + require.NotNil(syncer) require.NoError(syncer.StartSyncing(context.Background())) @@ -979,7 +1000,7 @@ func Test_Sync_Result_Correct_Root_Update_Root_During(t *testing.T) { Log: logging.NoLog{}, }) require.NoError(err) - require.NotNil(t, syncer) + require.NotNil(syncer) require.NoError(syncer.StartSyncing(context.Background())) @@ -1059,6 +1080,8 @@ func generateTrie(t *testing.T, r *rand.Rand, count int) (*merkledb.Database, er } func generateTrieWithMinKeyLen(t *testing.T, r *rand.Rand, count int, minKeyLen int) (*merkledb.Database, [][]byte, error) { + require := require.New(t) + db, err := merkledb.New( context.Background(), memdb.New(), @@ -1083,14 +1106,14 @@ func generateTrieWithMinKeyLen(t *testing.T, r *rand.Rand, count int, minKeyLen key := make([]byte, r.Intn(50)+len(prefix)) copy(key, prefix) _, err := r.Read(key[len(prefix):]) - require.NoError(t, err) + require.NoError(err) return key } // new key key := make([]byte, r.Intn(50)+minKeyLen) _, err = r.Read(key) - require.NoError(t, err) + require.NoError(err) return key } @@ -1100,7 +1123,7 @@ func generateTrieWithMinKeyLen(t *testing.T, r *rand.Rand, count int, minKeyLen value = nil } else { _, err = r.Read(value) - require.NoError(t, err) + require.NoError(err) } key := genKey() if _, seen := seenKeys[string(key)]; seen { From 5a8b27117a154ee8e01b99e5e1bb908b7ad1b435 Mon Sep 17 00:00:00 2001 From: dhrubabasu <7675102+dhrubabasu@users.noreply.github.com> Date: Wed, 17 May 2023 08:21:22 -0400 Subject: [PATCH 62/79] nits --- api/health/health_test.go | 3 +-- genesis/config_test.go | 3 +-- utils/sampler/weighted_heap_test.go | 3 +-- vms/components/avax/utxo_id_test.go | 3 +-- vms/secp256k1fx/keychain_test.go | 3 +-- 5 files changed, 5 insertions(+), 10 deletions(-) diff --git a/api/health/health_test.go b/api/health/health_test.go index 923f1f8558f..ca3b7e29fa4 100644 --- a/api/health/health_test.go +++ b/api/health/health_test.go @@ -42,8 +42,7 @@ func awaitHealthy(t *testing.T, r Reporter, healthy bool) { } func awaitLiveness(t *testing.T, r Reporter, liveness bool) { - require := require.New(t) - require.Eventually(func() bool { + require.Eventually(t, func() bool { _, ok := r.Liveness() return ok == liveness }, awaitTimeout, awaitFreq) diff --git a/genesis/config_test.go b/genesis/config_test.go index c7fea58cd4c..455045dad70 100644 --- a/genesis/config_test.go +++ b/genesis/config_test.go @@ -60,8 +60,7 @@ func TestAllocationLess(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - require := require.New(t) - 
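PATCH 61 and PATCH 62 pull in opposite directions on the same testify API: sync_test.go tests that assert dozens of times hoist a single `require := require.New(t)`, while one-assertion helpers and subtests (as in the surrounding health_test.go, config_test.go, and weighted_heap_test.go hunks) go back to the package-level call. A sketch of the apparent rule of thumb, using illustrative test names that are not from the patch:

package example

import (
	"testing"

	"github.com/stretchr/testify/require"
)

// Many assertions: hoist one *require.Assertions and drop the repeated t.
func TestManyAsserts(t *testing.T) {
	require := require.New(t)
	s := []byte{1, 2, 3}
	require.NotNil(s)
	require.Len(s, 3)
	require.Equal(byte(1), s[0])
}

// One assertion: the package-level call is shorter.
func TestOneAssert(t *testing.T) {
	require.Len(t, []byte{1, 2, 3}, 3)
}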
require.Equal(tt.expected, tt.alloc1.Less(tt.alloc2)) + require.Equal(t, tt.expected, tt.alloc1.Less(tt.alloc2)) }) } } diff --git a/utils/sampler/weighted_heap_test.go b/utils/sampler/weighted_heap_test.go index 4d409198ce5..3187c14fa10 100644 --- a/utils/sampler/weighted_heap_test.go +++ b/utils/sampler/weighted_heap_test.go @@ -73,8 +73,7 @@ func TestWeightedHeapElementLess(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - require := require.New(t) - require.Equal(tt.expected, tt.elt1.Less(tt.elt2)) + require.Equal(t, tt.expected, tt.elt1.Less(tt.elt2)) }) } } diff --git a/vms/components/avax/utxo_id_test.go b/vms/components/avax/utxo_id_test.go index a35ac023693..f2818c62d1a 100644 --- a/vms/components/avax/utxo_id_test.go +++ b/vms/components/avax/utxo_id_test.go @@ -111,8 +111,7 @@ func TestUTXOIDLess(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - require := require.New(t) - require.Equal(tt.expected, tt.id1.Less(&tt.id2)) + require.Equal(t, tt.expected, tt.id1.Less(&tt.id2)) }) } } diff --git a/vms/secp256k1fx/keychain_test.go b/vms/secp256k1fx/keychain_test.go index 44118b9ff0a..88878137846 100644 --- a/vms/secp256k1fx/keychain_test.go +++ b/vms/secp256k1fx/keychain_test.go @@ -27,8 +27,7 @@ var ( ) func TestNewKeychain(t *testing.T) { - require := require.New(t) - require.NotNil(NewKeychain()) + require.NotNil(t, NewKeychain()) } func TestKeychainGetUnknownAddr(t *testing.T) { From 41e92b710a5bc6cdf88f36669ab4987836e85228 Mon Sep 17 00:00:00 2001 From: dhrubabasu <7675102+dhrubabasu@users.noreply.github.com> Date: Mon, 12 Jun 2023 15:53:59 -0700 Subject: [PATCH 63/79] revert non-snow changes --- database/linkeddb/linkeddb_test.go | 8 +-- database/test_database.go | 56 +++++++------------ vms/components/avax/utxo_state_test.go | 6 +- .../blocks/executor/backend_test.go | 2 +- .../stakeable/stakeable_lock_test.go | 6 +- vms/platformvm/state/diff_test.go | 8 +-- vms/rpcchainvm/state_syncable_vm_test.go | 8 +-- 7 files changed, 39 insertions(+), 55 deletions(-) diff --git a/database/linkeddb/linkeddb_test.go b/database/linkeddb/linkeddb_test.go index 0c3d1861a26..c7b24693eaa 100644 --- a/database/linkeddb/linkeddb_test.go +++ b/database/linkeddb/linkeddb_test.go @@ -26,7 +26,7 @@ func TestLinkedDB(t *testing.T) { require.False(has, "db unexpectedly had key %s", key) _, err = ldb.Get(key) - require.ErrorIs(err, database.ErrNotFound, "Expected db.Get to return a Not Found error.") + require.Equal(database.ErrNotFound, err, "Expected db.Get to return a Not Found error.") require.NoError(ldb.Delete(key)) @@ -47,7 +47,7 @@ func TestLinkedDB(t *testing.T) { require.False(has, "db unexpectedly had key %s", key) _, err = ldb.Get(key) - require.ErrorIs(err, database.ErrNotFound, "Expected db.Get to return a Not Found error.") + require.Equal(database.ErrNotFound, err, "Expected db.Get to return a Not Found error.") iterator := db.NewIterator() next := iterator.Next() @@ -401,7 +401,7 @@ func TestLinkedDBHeadKey(t *testing.T) { ldb := NewDefault(db) _, err := ldb.HeadKey() - require.ErrorIs(err, database.ErrNotFound) + require.Equal(database.ErrNotFound, err) key0 := []byte("hello0") value0 := []byte("world0") @@ -434,7 +434,7 @@ func TestLinkedDBHead(t *testing.T) { ldb := NewDefault(db) _, _, err := ldb.Head() - require.ErrorIs(err, database.ErrNotFound) + require.Equal(database.ErrNotFound, err) key0 := []byte("hello0") value0 := []byte("world0") diff --git a/database/test_database.go b/database/test_database.go index 
e6ce5eb121f..1fcaeb49da6 100644 --- a/database/test_database.go +++ b/database/test_database.go @@ -75,7 +75,7 @@ func TestSimpleKeyValue(t *testing.T, db Database) { require.False(has) _, err = db.Get(key) - require.ErrorIs(err, ErrNotFound) + require.Equal(ErrNotFound, err) require.NoError(db.Delete(key)) require.NoError(db.Put(key, value)) @@ -95,7 +95,7 @@ func TestSimpleKeyValue(t *testing.T, db Database) { require.False(has) _, err = db.Get(key) - require.ErrorIs(err, ErrNotFound) + require.Equal(ErrNotFound, err) require.NoError(db.Delete(key)) } @@ -107,7 +107,7 @@ func TestKeyEmptyValue(t *testing.T, db Database) { val := []byte(nil) _, err := db.Get(key) - require.ErrorIs(err, ErrNotFound) + require.Equal(ErrNotFound, err) require.NoError(db.Put(key, val)) @@ -128,7 +128,7 @@ func TestEmptyKey(t *testing.T, db Database) { // Test that nil key can be retrieved by empty key _, err := db.Get(nilKey) - require.ErrorIs(err, ErrNotFound) + require.Equal(ErrNotFound, err) require.NoError(db.Put(nilKey, val1)) @@ -157,7 +157,7 @@ func TestSimpleKeyValueClosed(t *testing.T, db Database) { require.False(has) _, err = db.Get(key) - require.ErrorIs(err, ErrNotFound) + require.Equal(ErrNotFound, err) require.NoError(db.Delete(key)) require.NoError(db.Put(key, value)) @@ -173,19 +173,14 @@ func TestSimpleKeyValueClosed(t *testing.T, db Database) { require.NoError(db.Close()) _, err = db.Has(key) - require.ErrorIs(err, ErrClosed) + require.Equal(ErrClosed, err) _, err = db.Get(key) - require.ErrorIs(err, ErrClosed) - - err = db.Put(key, value) - require.ErrorIs(err, ErrClosed) - - err = db.Delete(key) - require.ErrorIs(err, ErrClosed) + require.Equal(ErrClosed, err) - err = db.Close() - require.ErrorIs(err, ErrClosed) + require.Equal(ErrClosed, db.Put(key, value)) + require.Equal(ErrClosed, db.Delete(key)) + require.Equal(ErrClosed, db.Close()) } // TestMemorySafetyDatabase ensures it is safe to modify a key after passing it @@ -237,8 +232,7 @@ func TestNewBatchClosed(t *testing.T, db Database) { require.NoError(batch.Put(key, value)) require.Positive(batch.Size()) - err := batch.Write() - require.ErrorIs(err, ErrClosed) + require.Equal(ErrClosed, batch.Write()) } // TestBatchPut tests to make sure that batched writes work as expected. @@ -270,8 +264,7 @@ func TestBatchPut(t *testing.T, db Database) { require.NoError(batch.Put(key, value)) require.NoError(db.Close()) - err = batch.Write() - require.ErrorIs(err, ErrClosed) + require.Equal(ErrClosed, batch.Write()) } // TestBatchDelete tests to make sure that batched deletes work as expected. 
@@ -294,7 +287,7 @@ func TestBatchDelete(t *testing.T, db Database) { require.False(has) _, err = db.Get(key) - require.ErrorIs(err, ErrNotFound) + require.Equal(ErrNotFound, err) require.NoError(db.Delete(key)) } @@ -496,8 +489,7 @@ func TestBatchReplayPropagateError(t *testing.T, db Database) { gomock.InOrder( mockBatch.EXPECT().Put(key1, value1).Return(ErrClosed).Times(1), ) - err := batch.Replay(mockBatch) - require.ErrorIs(err, ErrClosed) + require.Equal(ErrClosed, batch.Replay(mockBatch)) mockBatch = NewMockBatch(ctrl) gomock.InOrder( @@ -812,8 +804,7 @@ func TestIteratorClosed(t *testing.T, db Database) { require.False(iterator.Next()) require.Nil(iterator.Key()) require.Nil(iterator.Value()) - err := iterator.Error() - require.ErrorIs(err, ErrClosed) + require.Equal(ErrClosed, iterator.Error()) } { @@ -825,8 +816,7 @@ func TestIteratorClosed(t *testing.T, db Database) { require.False(iterator.Next()) require.Nil(iterator.Key()) require.Nil(iterator.Value()) - err := iterator.Error() - require.ErrorIs(err, ErrClosed) + require.Equal(ErrClosed, iterator.Error()) } { @@ -838,8 +828,7 @@ func TestIteratorClosed(t *testing.T, db Database) { require.False(iterator.Next()) require.Nil(iterator.Key()) require.Nil(iterator.Value()) - err := iterator.Error() - require.ErrorIs(err, ErrClosed) + require.Equal(ErrClosed, iterator.Error()) } { @@ -851,8 +840,7 @@ func TestIteratorClosed(t *testing.T, db Database) { require.False(iterator.Next()) require.Nil(iterator.Key()) require.Nil(iterator.Value()) - err := iterator.Error() - require.ErrorIs(err, ErrClosed) + require.Equal(ErrClosed, iterator.Error()) } } @@ -889,8 +877,7 @@ func TestIteratorError(t *testing.T, db Database) { require.False(iterator.Next()) require.Nil(iterator.Key()) require.Nil(iterator.Value()) - err := iterator.Error() - require.ErrorIs(err, ErrClosed) + require.Equal(ErrClosed, iterator.Error()) } // TestIteratorErrorAfterRelease tests to make sure that an iterator that was @@ -912,8 +899,7 @@ func TestIteratorErrorAfterRelease(t *testing.T, db Database) { require.False(iterator.Next()) require.Nil(iterator.Key()) require.Nil(iterator.Value()) - err := iterator.Error() - require.ErrorIs(err, ErrClosed) + require.Equal(ErrClosed, iterator.Error()) } // TestCompactNoPanic tests to make sure compact never panics. 
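The repeated iterator hunks above all exercise the same closed-iterator contract: once the backing store is closed, Next reports false, Key and Value are nil, and Error surfaces ErrClosed. A condensed sketch of that contract against a stub (the stubIterator type and errClosed sentinel are illustrative):

    package example

    import (
        "errors"
        "testing"

        "github.com/stretchr/testify/require"
    )

    var errClosed = errors.New("closed")

    // stubIterator models an iterator whose backing store was closed.
    type stubIterator struct{}

    func (stubIterator) Next() bool    { return false }
    func (stubIterator) Key() []byte   { return nil }
    func (stubIterator) Value() []byte { return nil }
    func (stubIterator) Error() error  { return errClosed }

    func TestClosedIteratorContract(t *testing.T) {
        it := stubIterator{}
        require.False(t, it.Next())
        require.Nil(t, it.Key())
        require.Nil(t, it.Value())
        // Inlining the call drops the err temporary the old form needed.
        require.Equal(t, errClosed, it.Error())
    }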
@@ -1174,6 +1160,6 @@ func FuzzKeyValue(f *testing.F, db Database) { require.False(exists) _, err = db.Get(key) - require.ErrorIs(err, ErrNotFound) + require.Equal(ErrNotFound, err) }) } diff --git a/vms/components/avax/utxo_state_test.go b/vms/components/avax/utxo_state_test.go index 0444285607f..864d31f3565 100644 --- a/vms/components/avax/utxo_state_test.go +++ b/vms/components/avax/utxo_state_test.go @@ -58,10 +58,10 @@ func TestUTXOState(t *testing.T) { s := NewUTXOState(db, manager) _, err := s.GetUTXO(utxoID) - require.ErrorIs(err, database.ErrNotFound) + require.Equal(database.ErrNotFound, err) _, err = s.GetUTXO(utxoID) - require.ErrorIs(err, database.ErrNotFound) + require.Equal(database.ErrNotFound, err) require.NoError(s.DeleteUTXO(utxoID)) @@ -78,7 +78,7 @@ func TestUTXOState(t *testing.T) { require.NoError(s.DeleteUTXO(utxoID)) _, err = s.GetUTXO(utxoID) - require.ErrorIs(err, database.ErrNotFound) + require.Equal(database.ErrNotFound, err) require.NoError(s.PutUTXO(utxo)) diff --git a/vms/platformvm/blocks/executor/backend_test.go b/vms/platformvm/blocks/executor/backend_test.go index c48e28774f2..63d30873e85 100644 --- a/vms/platformvm/blocks/executor/backend_test.go +++ b/vms/platformvm/blocks/executor/backend_test.go @@ -100,7 +100,7 @@ func TestBackendGetBlock(t *testing.T) { blkID := ids.GenerateTestID() state.EXPECT().GetStatelessBlock(blkID).Return(nil, choices.Unknown, database.ErrNotFound) _, err := b.GetBlock(blkID) - require.ErrorIs(err, database.ErrNotFound) + require.Equal(database.ErrNotFound, err) } { diff --git a/vms/platformvm/stakeable/stakeable_lock_test.go b/vms/platformvm/stakeable/stakeable_lock_test.go index cb71b4e9ebd..5a6cfce5d8a 100644 --- a/vms/platformvm/stakeable/stakeable_lock_test.go +++ b/vms/platformvm/stakeable/stakeable_lock_test.go @@ -70,8 +70,7 @@ func TestLockOutVerify(t *testing.T) { Locktime: tt.locktime, TransferableOut: tt.transferableOutF(ctrl), } - err := lockOut.Verify() - require.ErrorIs(t, err, tt.expectedErr) + require.Equal(t, tt.expectedErr, lockOut.Verify()) }) } } @@ -130,8 +129,7 @@ func TestLockInVerify(t *testing.T) { Locktime: tt.locktime, TransferableIn: tt.transferableInF(ctrl), } - err := lockOut.Verify() - require.ErrorIs(t, err, tt.expectedErr) + require.Equal(t, tt.expectedErr, lockOut.Verify()) }) } } diff --git a/vms/platformvm/state/diff_test.go b/vms/platformvm/state/diff_test.go index c6668ffa92c..a91ee816d0a 100644 --- a/vms/platformvm/state/diff_test.go +++ b/vms/platformvm/state/diff_test.go @@ -476,14 +476,14 @@ func assertChainsEqual(t *testing.T, expected, actual Chain) { expectedCurrentStakerIterator, expectedErr := expected.GetCurrentStakerIterator() actualCurrentStakerIterator, actualErr := actual.GetCurrentStakerIterator() - require.ErrorIs(t, actualErr, expectedErr) + require.Equal(t, expectedErr, actualErr) if expectedErr == nil { assertIteratorsEqual(t, expectedCurrentStakerIterator, actualCurrentStakerIterator) } expectedPendingStakerIterator, expectedErr := expected.GetPendingStakerIterator() actualPendingStakerIterator, actualErr := actual.GetPendingStakerIterator() - require.ErrorIs(t, actualErr, expectedErr) + require.Equal(t, expectedErr, actualErr) if expectedErr == nil { assertIteratorsEqual(t, expectedPendingStakerIterator, actualPendingStakerIterator) } @@ -500,7 +500,7 @@ func assertChainsEqual(t *testing.T, expected, actual Chain) { expectedSubnets, expectedErr := expected.GetSubnets() actualSubnets, actualErr := actual.GetSubnets() - require.ErrorIs(t, actualErr, expectedErr) + 
require.Equal(t, expectedErr, actualErr) if expectedErr == nil { require.Equal(t, expectedSubnets, actualSubnets) @@ -509,7 +509,7 @@ func assertChainsEqual(t *testing.T, expected, actual Chain) { expectedChains, expectedErr := expected.GetChains(subnetID) actualChains, actualErr := actual.GetChains(subnetID) - require.ErrorIs(t, actualErr, expectedErr) + require.Equal(t, expectedErr, actualErr) if expectedErr == nil { require.Equal(t, expectedChains, actualChains) } diff --git a/vms/rpcchainvm/state_syncable_vm_test.go b/vms/rpcchainvm/state_syncable_vm_test.go index 4af9ed7727c..4be3941815e 100644 --- a/vms/rpcchainvm/state_syncable_vm_test.go +++ b/vms/rpcchainvm/state_syncable_vm_test.go @@ -339,7 +339,7 @@ func TestGetOngoingSyncStateSummary(t *testing.T) { // test unimplemented case; this is just a guard _, err := vm.GetOngoingSyncStateSummary(context.Background()) - require.ErrorIs(err, block.ErrStateSyncableVMNotImplemented) + require.Equal(block.ErrStateSyncableVMNotImplemented, err) // test successful retrieval summary, err := vm.GetOngoingSyncStateSummary(context.Background()) @@ -364,7 +364,7 @@ func TestGetLastStateSummary(t *testing.T) { // test unimplemented case; this is just a guard _, err := vm.GetLastStateSummary(context.Background()) - require.ErrorIs(err, block.ErrStateSyncableVMNotImplemented) + require.Equal(block.ErrStateSyncableVMNotImplemented, err) // test successful retrieval summary, err := vm.GetLastStateSummary(context.Background()) @@ -389,7 +389,7 @@ func TestParseStateSummary(t *testing.T) { // test unimplemented case; this is just a guard _, err := vm.ParseStateSummary(context.Background(), mockedSummary.Bytes()) - require.ErrorIs(err, block.ErrStateSyncableVMNotImplemented) + require.Equal(block.ErrStateSyncableVMNotImplemented, err) // test successful parsing summary, err := vm.ParseStateSummary(context.Background(), mockedSummary.Bytes()) @@ -418,7 +418,7 @@ func TestGetStateSummary(t *testing.T) { // test unimplemented case; this is just a guard _, err := vm.GetStateSummary(context.Background(), mockedSummary.Height()) - require.ErrorIs(err, block.ErrStateSyncableVMNotImplemented) + require.Equal(block.ErrStateSyncableVMNotImplemented, err) // test successful retrieval summary, err := vm.GetStateSummary(context.Background(), mockedSummary.Height()) From 6c8381e15c56bc9093902df405c1221d27a941e7 Mon Sep 17 00:00:00 2001 From: dhrubabasu <7675102+dhrubabasu@users.noreply.github.com> Date: Mon, 12 Jun 2023 16:02:35 -0700 Subject: [PATCH 64/79] nit --- snow/networking/tracker/targeter_test.go | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/snow/networking/tracker/targeter_test.go b/snow/networking/tracker/targeter_test.go index 1a6a0354e15..1909d02b42c 100644 --- a/snow/networking/tracker/targeter_test.go +++ b/snow/networking/tracker/targeter_test.go @@ -119,11 +119,9 @@ func TestTarget(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - require := require.New(t) - tt.setup() target := targeter.TargetUsage(tt.nodeID) - require.Equal(tt.expectedTarget, target) + require.Equal(t, tt.expectedTarget, target) }) } } From 5b8f55d0e894620ed22193fbf9488e23c7f6aa3d Mon Sep 17 00:00:00 2001 From: dhrubabasu <7675102+dhrubabasu@users.noreply.github.com> Date: Mon, 12 Jun 2023 16:04:14 -0700 Subject: [PATCH 65/79] nit --- snow/networking/timeout/manager_test.go | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/snow/networking/timeout/manager_test.go b/snow/networking/timeout/manager_test.go index 
324a96ef338..d84afbec95a 100644 --- a/snow/networking/timeout/manager_test.go +++ b/snow/networking/timeout/manager_test.go @@ -18,8 +18,6 @@ import ( ) func TestManagerFire(t *testing.T) { - require := require.New(t) - benchlist := benchlist.NewNoBenchlist() manager, err := NewManager( &timer.AdaptiveTimeoutConfig{ @@ -33,7 +31,7 @@ func TestManagerFire(t *testing.T) { "", prometheus.NewRegistry(), ) - require.NoError(err) + require.NoError(t, err) go manager.Dispatch() wg := sync.WaitGroup{} From 399dcdf1bf6c9874cc62240927549ae011e9f4a1 Mon Sep 17 00:00:00 2001 From: dhrubabasu <7675102+dhrubabasu@users.noreply.github.com> Date: Mon, 12 Jun 2023 16:09:24 -0700 Subject: [PATCH 66/79] nit --- snow/engine/snowman/syncer/utils_test.go | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/snow/engine/snowman/syncer/utils_test.go b/snow/engine/snowman/syncer/utils_test.go index 08f62cb8877..a39a81a2b8c 100644 --- a/snow/engine/snowman/syncer/utils_test.go +++ b/snow/engine/snowman/syncer/utils_test.go @@ -55,14 +55,12 @@ type fullVM struct { } func buildTestPeers(t *testing.T) validators.Set { - require := require.New(t) - // we consider more than common.MaxOutstandingBroadcastRequests peers // so to test the effect of cap on number of requests sent out vdrs := validators.NewSet() for idx := 0; idx < 2*common.MaxOutstandingBroadcastRequests; idx++ { beaconID := ids.GenerateTestNodeID() - require.NoError(vdrs.Add(beaconID, nil, ids.Empty, 1)) + require.NoError(t, vdrs.Add(beaconID, nil, ids.Empty, 1)) } return vdrs } From a825a8b41855f0e41a8b852e3078f8c9ea78eb21 Mon Sep 17 00:00:00 2001 From: dhrubabasu <7675102+dhrubabasu@users.noreply.github.com> Date: Mon, 12 Jun 2023 16:14:22 -0700 Subject: [PATCH 67/79] nit --- snow/engine/avalanche/vertex/parser_test.go | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/snow/engine/avalanche/vertex/parser_test.go b/snow/engine/avalanche/vertex/parser_test.go index 16c2c9f425f..5d765d8384d 100644 --- a/snow/engine/avalanche/vertex/parser_test.go +++ b/snow/engine/avalanche/vertex/parser_test.go @@ -13,11 +13,9 @@ import ( ) func TestParseInvalid(t *testing.T) { - require := require.New(t) - vtxBytes := []byte{1, 2, 3, 4, 5} _, err := Parse(vtxBytes) - require.ErrorIs(err, codec.ErrUnknownVersion) + require.ErrorIs(t, err, codec.ErrUnknownVersion) } func TestParseValid(t *testing.T) { From 11497adacb84cdd211591010ac058f5828a3a0a6 Mon Sep 17 00:00:00 2001 From: dhrubabasu <7675102+dhrubabasu@users.noreply.github.com> Date: Mon, 12 Jun 2023 16:28:16 -0700 Subject: [PATCH 68/79] nits --- snow/consensus/snowball/nnary_snowflake_test.go | 1 - snow/consensus/snowball/parameters_test.go | 12 +++--------- .../snowman/poll/early_term_no_traversal_test.go | 8 +++----- snow/consensus/snowman/poll/no_early_term_test.go | 5 ++--- snow/consensus/snowman/poll/set_test.go | 5 ++--- snow/engine/avalanche/getter/getter_test.go | 4 +--- snow/engine/snowman/getter/getter_test.go | 4 +--- snow/networking/tracker/resource_tracker_test.go | 8 ++++---- snow/validators/manager_test.go | 3 --- 9 files changed, 16 insertions(+), 34 deletions(-) diff --git a/snow/consensus/snowball/nnary_snowflake_test.go b/snow/consensus/snowball/nnary_snowflake_test.go index 40c4f5e248e..385a991c9a7 100644 --- a/snow/consensus/snowball/nnary_snowflake_test.go +++ b/snow/consensus/snowball/nnary_snowflake_test.go @@ -48,7 +48,6 @@ func TestVirtuousNnarySnowflake(t *testing.T) { sb := nnarySnowflake{} sb.Initialize(betaVirtuous, betaRogue, Red) - require.Equal(Red, 
sb.Preference()) require.False(sb.Finalized()) diff --git a/snow/consensus/snowball/parameters_test.go b/snow/consensus/snowball/parameters_test.go index 627060a6db1..f6172f435d7 100644 --- a/snow/consensus/snowball/parameters_test.go +++ b/snow/consensus/snowball/parameters_test.go @@ -11,8 +11,6 @@ import ( ) func TestParametersVerify(t *testing.T) { - require := require.New(t) - p := Parameters{ K: 1, Alpha: 1, @@ -24,12 +22,10 @@ func TestParametersVerify(t *testing.T) { MaxItemProcessingTime: 1, } - require.NoError(p.Verify()) + require.NoError(t, p.Verify()) } func TestParametersAnotherVerify(t *testing.T) { - require := require.New(t) - p := Parameters{ K: 1, Alpha: 1, @@ -41,12 +37,10 @@ func TestParametersAnotherVerify(t *testing.T) { MaxItemProcessingTime: 1, } - require.NoError(p.Verify()) + require.NoError(t, p.Verify()) } func TestParametersYetAnotherVerify(t *testing.T) { - require := require.New(t) - p := Parameters{ K: 1, Alpha: 1, @@ -58,7 +52,7 @@ func TestParametersYetAnotherVerify(t *testing.T) { MaxItemProcessingTime: 1, } - require.NoError(p.Verify()) + require.NoError(t, p.Verify()) } func TestParametersInvalidK(t *testing.T) { diff --git a/snow/consensus/snowman/poll/early_term_no_traversal_test.go b/snow/consensus/snowman/poll/early_term_no_traversal_test.go index b8a706237bd..627c97b3398 100644 --- a/snow/consensus/snowman/poll/early_term_no_traversal_test.go +++ b/snow/consensus/snowman/poll/early_term_no_traversal_test.go @@ -38,8 +38,6 @@ func TestEarlyTermNoTraversalResults(t *testing.T) { } func TestEarlyTermNoTraversalString(t *testing.T) { - require := require.New(t) - alpha := 2 vtxID := ids.ID{1} @@ -62,11 +60,12 @@ func TestEarlyTermNoTraversalString(t *testing.T) { NodeID-BaMPFdqMUQ46BV8iRcwbVfsam55kMqcp: 1 received Bag[ids.ID]: (Size = 1) SYXsAycDPUu4z2ZksJD5fh5nTDcH3vCFHnpcVye5XuJ2jArg: 1` - require.Equal(expected, poll.String()) + require.Equal(t, expected, poll.String()) } func TestEarlyTermNoTraversalDropsDuplicatedVotes(t *testing.T) { require := require.New(t) + alpha := 2 vtxID := ids.ID{1} @@ -229,7 +228,6 @@ func TestEarlyTermNoTraversalWithWeightedResponses(t *testing.T) { } func TestEarlyTermNoTraversalDropWithWeightedResponses(t *testing.T) { - require := require.New(t) alpha := 2 vdr1 := ids.NodeID{1} @@ -246,5 +244,5 @@ func TestEarlyTermNoTraversalDropWithWeightedResponses(t *testing.T) { poll := factory.New(vdrs) poll.Drop(vdr2) - require.True(poll.Finished()) + require.True(t, poll.Finished()) } diff --git a/snow/consensus/snowman/poll/no_early_term_test.go b/snow/consensus/snowman/poll/no_early_term_test.go index d5da4f7722f..415c46b2e25 100644 --- a/snow/consensus/snowman/poll/no_early_term_test.go +++ b/snow/consensus/snowman/poll/no_early_term_test.go @@ -36,8 +36,6 @@ func TestNoEarlyTermResults(t *testing.T) { } func TestNoEarlyTermString(t *testing.T) { - require := require.New(t) - vtxID := ids.ID{1} vdr1 := ids.NodeID{1} @@ -58,11 +56,12 @@ func TestNoEarlyTermString(t *testing.T) { NodeID-BaMPFdqMUQ46BV8iRcwbVfsam55kMqcp: 1 received Bag[ids.ID]: (Size = 1) SYXsAycDPUu4z2ZksJD5fh5nTDcH3vCFHnpcVye5XuJ2jArg: 1` - require.Equal(expected, poll.String()) + require.Equal(t, expected, poll.String()) } func TestNoEarlyTermDropsDuplicatedVotes(t *testing.T) { require := require.New(t) + vtxID := ids.ID{1} vdr1 := ids.NodeID{1} diff --git a/snow/consensus/snowman/poll/set_test.go b/snow/consensus/snowman/poll/set_test.go index 90a13a59c26..5b6bb151a9e 100644 --- a/snow/consensus/snowman/poll/set_test.go +++ 
b/snow/consensus/snowman/poll/set_test.go @@ -30,8 +30,7 @@ func TestNewSetErrorOnMetrics(t *testing.T) { Name: "poll_duration", }))) - s := NewSet(factory, log, namespace, registerer) - require.NotNil(s) + require.NotNil(NewSet(factory, log, namespace, registerer)) } func TestCreateAndFinishPollOutOfOrder_NewerFinishesFirst(t *testing.T) { @@ -284,7 +283,7 @@ func TestSetString(t *testing.T) { RequestID 0: waiting on Bag[ids.NodeID]: (Size = 1) NodeID-6HgC8KRBEhXYbF4riJyJFLSHt37UNuRt: 1 - received Bag[ids.ID]: (Size = 0)` + received Bag[ids.ID]: (Size = 0)` require.True(s.Add(0, vdrs)) require.Equal(expected, s.String()) } diff --git a/snow/engine/avalanche/getter/getter_test.go b/snow/engine/avalanche/getter/getter_test.go index 7964c405855..d9d56d10880 100644 --- a/snow/engine/avalanche/getter/getter_test.go +++ b/snow/engine/avalanche/getter/getter_test.go @@ -23,11 +23,9 @@ import ( var errUnknownVertex = errors.New("unknown vertex") func testSetup(t *testing.T) (*vertex.TestManager, *common.SenderTest, common.Config) { - require := require.New(t) - peers := validators.NewSet() peer := ids.GenerateTestNodeID() - require.NoError(peers.Add(peer, nil, ids.Empty, 1)) + require.NoError(t, peers.Add(peer, nil, ids.Empty, 1)) sender := &common.SenderTest{T: t} sender.Default(true) diff --git a/snow/engine/snowman/getter/getter_test.go b/snow/engine/snowman/getter/getter_test.go index eee9a3d481a..02056d0c788 100644 --- a/snow/engine/snowman/getter/getter_test.go +++ b/snow/engine/snowman/getter/getter_test.go @@ -34,8 +34,6 @@ func testSetup( t *testing.T, ctrl *gomock.Controller, ) (StateSyncEnabledMock, *common.SenderTest, common.Config) { - require := require.New(t) - ctx := snow.DefaultConsensusContextTest() peers := validators.NewSet() @@ -63,7 +61,7 @@ func testSetup( sender.CantSendGetAcceptedFrontier = false peer := ids.GenerateTestNodeID() - require.NoError(peers.Add(peer, nil, ids.Empty, 1)) + require.NoError(t, peers.Add(peer, nil, ids.Empty, 1)) commonConfig := common.Config{ Ctx: ctx, diff --git a/snow/networking/tracker/resource_tracker_test.go b/snow/networking/tracker/resource_tracker_test.go index b9ecdbcdf3f..12256f9f784 100644 --- a/snow/networking/tracker/resource_tracker_test.go +++ b/snow/networking/tracker/resource_tracker_test.go @@ -68,7 +68,7 @@ func TestCPUTracker(t *testing.T) { node1Utilization := cpuTracker.Usage(node1, endTime2) node2Utilization := cpuTracker.Usage(node2, endTime2) - require.Less(node1Utilization, node2Utilization) + require.Greater(node2Utilization, node1Utilization) cumulative := cpuTracker.TotalUsage() sum := node1Utilization + node2Utilization @@ -79,9 +79,9 @@ func TestCPUTracker(t *testing.T) { startTime3 := endTime2 endTime3 := startTime3.Add(halflife) newNode1Utilization := cpuTracker.Usage(node1, endTime3) - require.Less(newNode1Utilization, node1Utilization) + require.Greater(node1Utilization, newNode1Utilization) newCumulative := cpuTracker.TotalUsage() - require.Less(newCumulative, cumulative) + require.Greater(cumulative, newCumulative) startTime4 := endTime3 endTime4 := startTime4.Add(halflife) @@ -91,7 +91,7 @@ func TestCPUTracker(t *testing.T) { cumulative = cpuTracker.TotalUsage() sum = node1Utilization + node2Utilization - require.Less(cumulative, sum) + require.Greater(sum, cumulative) } func TestCPUTrackerTimeUntilCPUUtilization(t *testing.T) { diff --git a/snow/validators/manager_test.go b/snow/validators/manager_test.go index bc9bb5f1d7c..d81a49735de 100644 --- a/snow/validators/manager_test.go +++ 
b/snow/validators/manager_test.go @@ -96,9 +96,6 @@ func TestContains(t *testing.T) { require.NoError(Add(m, subnetID, nodeID, nil, ids.Empty, 1)) require.True(Contains(m, subnetID, nodeID)) - require.NoError(Add(m, subnetID, nodeID, nil, ids.Empty, 1)) - require.True(Contains(m, subnetID, nodeID)) - require.NoError(RemoveWeight(m, subnetID, nodeID, 1)) require.False(Contains(m, subnetID, nodeID)) } From e120b398011feec2ff8854756c37b68e070d3245 Mon Sep 17 00:00:00 2001 From: dhrubabasu <7675102+dhrubabasu@users.noreply.github.com> Date: Tue, 13 Jun 2023 11:33:42 -0700 Subject: [PATCH 69/79] moar Empty --- snow/networking/benchlist/benchlist_test.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/snow/networking/benchlist/benchlist_test.go b/snow/networking/benchlist/benchlist_test.go index 15742449ea9..a33abd943c5 100644 --- a/snow/networking/benchlist/benchlist_test.go +++ b/snow/networking/benchlist/benchlist_test.go @@ -77,8 +77,8 @@ func TestBenchlistAdd(t *testing.T) { // Still shouldn't be benched due to not enough consecutive failure require.False(b.isBenched(vdrID0)) - require.Zero(b.benchedQueue.Len()) - require.Zero(b.benchlistSet.Len()) + require.Empty(b.benchedQueue) + require.Empty(b.benchlistSet) require.Len(b.failureStreaks, 1) fs := b.failureStreaks[vdrID0] require.Equal(threshold-1, fs.consecutive) @@ -91,8 +91,8 @@ func TestBenchlistAdd(t *testing.T) { // has passed since the first failure b.lock.Lock() require.False(b.isBenched(vdrID0)) - require.Zero(b.benchedQueue.Len()) - require.Zero(b.benchlistSet.Len()) + require.Empty(b.benchedQueue) + require.Empty(b.benchlistSet) b.lock.Unlock() // Move the time up From 909d2a4dd3a4ddddb41fdb85c678e7704176b120 Mon Sep 17 00:00:00 2001 From: dhrubabasu <7675102+dhrubabasu@users.noreply.github.com> Date: Wed, 14 Jun 2023 00:07:00 -0700 Subject: [PATCH 70/79] nits --- x/merkledb/trie_test.go | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/x/merkledb/trie_test.go b/x/merkledb/trie_test.go index abb2ffe5848..1a284634b42 100644 --- a/x/merkledb/trie_test.go +++ b/x/merkledb/trie_test.go @@ -5,7 +5,6 @@ package merkledb import ( "context" - "errors" "math/rand" "strconv" "sync" @@ -861,7 +860,7 @@ func Test_Trie_MultipleStates(t *testing.T) { if pastRoot == ids.Empty { pastRoot = mroot } else { - require.Equal(pastRoot, mroot, "root mismatch") + require.Equal(pastRoot, mroot) } } }) @@ -1270,7 +1269,7 @@ func Test_Trie_ConcurrentReadWrite(t *testing.T) { func() bool { value, err := newTrie.GetValue(context.Background(), []byte("key")) - if errors.Is(err, database.ErrNotFound) { + if err == database.ErrNotFound { return false } From b406c844eea33e27100eb6189fda6d987ae6196e Mon Sep 17 00:00:00 2001 From: dhrubabasu <7675102+dhrubabasu@users.noreply.github.com> Date: Wed, 14 Jun 2023 00:08:47 -0700 Subject: [PATCH 71/79] fix merge errors --- x/sync/sync_test.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/x/sync/sync_test.go b/x/sync/sync_test.go index 8b9d8eb6e67..a03f18d84e5 100644 --- a/x/sync/sync_test.go +++ b/x/sync/sync_test.go @@ -499,7 +499,6 @@ func TestFindNextKeyRandom(t *testing.T) { t.Logf("seed: %d", now) rand := rand.New(rand.NewSource(now)) // #nosec G404 require := require.New(t) - rand := rand.New(rand.NewSource(1337)) //nolint:gosec // Create a "remote" database and "local" database remoteDB, err := merkledb.New( @@ -1037,6 +1036,8 @@ func generateTrie(t *testing.T, r *rand.Rand, count int) (merkledb.MerkleDB, err } func generateTrieWithMinKeyLen(t 
*testing.T, r *rand.Rand, count int, minKeyLen int) (merkledb.MerkleDB, [][]byte, error) { + require := require.New(t) + db, err := merkledb.New( context.Background(), memdb.New(), From 51a6001f54471e8780e43f81d5a7e12b6a5e7914 Mon Sep 17 00:00:00 2001 From: dhrubabasu <7675102+dhrubabasu@users.noreply.github.com> Date: Wed, 14 Jun 2023 00:15:02 -0700 Subject: [PATCH 72/79] nit --- vms/avm/state_test.go | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/vms/avm/state_test.go b/vms/avm/state_test.go index c75e6450bf2..e5f4207396a 100644 --- a/vms/avm/state_test.go +++ b/vms/avm/state_test.go @@ -95,8 +95,6 @@ func TestSetsAndGets(t *testing.T) { } func TestFundingNoAddresses(t *testing.T) { - require := require.New(t) - _, _, vm, _ := GenesisVMWithArgs( t, []*common.Fx{{ @@ -112,7 +110,7 @@ func TestFundingNoAddresses(t *testing.T) { ) ctx := vm.ctx defer func() { - require.NoError(vm.Shutdown(context.Background())) + require.NoError(t, vm.Shutdown(context.Background())) ctx.Lock.Unlock() }() From 13d31cf56350f46c7ec66f5d4a32f528acd3bc3c Mon Sep 17 00:00:00 2001 From: dhrubabasu <7675102+dhrubabasu@users.noreply.github.com> Date: Wed, 14 Jun 2023 00:22:14 -0700 Subject: [PATCH 73/79] nit --- vms/platformvm/reward/calculator_test.go | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) diff --git a/vms/platformvm/reward/calculator_test.go b/vms/platformvm/reward/calculator_test.go index 47e1d934927..cb16435af9c 100644 --- a/vms/platformvm/reward/calculator_test.go +++ b/vms/platformvm/reward/calculator_test.go @@ -29,8 +29,6 @@ var defaultConfig = Config{ } func TestLongerDurationBonus(t *testing.T) { - require := require.New(t) - c := NewCalculator(defaultConfig) shortDuration := 24 * time.Hour totalDuration := 365 * 24 * time.Hour @@ -44,7 +42,7 @@ func TestLongerDurationBonus(t *testing.T) { longBalance := units.KiloAvax longBalance += c.Calculate(totalDuration, longBalance, 359*units.MegaAvax+longBalance) - require.Less(shortBalance, longBalance, "should promote stakers to stake longer") + require.Less(t, shortBalance, longBalance, "should promote stakers to stake longer") } func TestRewards(t *testing.T) { @@ -136,8 +134,6 @@ func TestRewards(t *testing.T) { } func TestRewardsOverflow(t *testing.T) { - require := require.New(t) - var ( maxSupply uint64 = math.MaxUint64 initialSupply uint64 = 1 @@ -153,12 +149,10 @@ func TestRewardsOverflow(t *testing.T) { maxSupply, // The staked amount is larger than the current supply initialSupply, ) - require.Equal(maxSupply-initialSupply, reward) + require.Equal(t, maxSupply-initialSupply, reward) } func TestRewardsMint(t *testing.T) { - require := require.New(t) - var ( maxSupply uint64 = 1000 initialSupply uint64 = 1 @@ -174,5 +168,5 @@ func TestRewardsMint(t *testing.T) { maxSupply, // The staked amount is larger than the current supply initialSupply, ) - require.Equal(maxSupply-initialSupply, rewards) + require.Equal(t, maxSupply-initialSupply, rewards) } From 0470bce25e8f4584bdaad80f9c01481f99ec5625 Mon Sep 17 00:00:00 2001 From: dhrubabasu <7675102+dhrubabasu@users.noreply.github.com> Date: Wed, 14 Jun 2023 00:26:05 -0700 Subject: [PATCH 74/79] nit --- vms/platformvm/txs/executor/standard_tx_executor_test.go | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/vms/platformvm/txs/executor/standard_tx_executor_test.go b/vms/platformvm/txs/executor/standard_tx_executor_test.go index 086d8d1e7ab..51453c7b89c 100644 --- a/vms/platformvm/txs/executor/standard_tx_executor_test.go +++ 
b/vms/platformvm/txs/executor/standard_tx_executor_test.go @@ -975,8 +975,6 @@ func TestStandardTxExecutorAddValidator(t *testing.T) { // Returns a RemoveSubnetValidatorTx that passes syntactic verification. func newRemoveSubnetValidatorTx(t *testing.T) (*txs.RemoveSubnetValidatorTx, *txs.Tx) { - require := require.New(t) - t.Helper() creds := []verify.Verifiable{ @@ -1031,7 +1029,7 @@ func newRemoveSubnetValidatorTx(t *testing.T) (*txs.RemoveSubnetValidatorTx, *tx Unsigned: unsignedTx, Creds: creds, } - require.NoError(tx.Initialize(txs.Codec)) + require.NoError(t, tx.Initialize(txs.Codec)) return unsignedTx, tx } @@ -1332,8 +1330,6 @@ func TestStandardExecutorRemoveSubnetValidatorTx(t *testing.T) { // Returns a TransformSubnetTx that passes syntactic verification. func newTransformSubnetTx(t *testing.T) (*txs.TransformSubnetTx, *txs.Tx) { - require := require.New(t) - t.Helper() creds := []verify.Verifiable{ @@ -1400,7 +1396,7 @@ func newTransformSubnetTx(t *testing.T) (*txs.TransformSubnetTx, *txs.Tx) { Unsigned: unsignedTx, Creds: creds, } - require.NoError(tx.Initialize(txs.Codec)) + require.NoError(t, tx.Initialize(txs.Codec)) return unsignedTx, tx } From b951e0c403c030ab4bfc9c2cf558f141db10d231 Mon Sep 17 00:00:00 2001 From: dhrubabasu <7675102+dhrubabasu@users.noreply.github.com> Date: Wed, 14 Jun 2023 00:37:33 -0700 Subject: [PATCH 75/79] nit --- vms/platformvm/validator_set_property_test.go | 6 +++--- vms/platformvm/vm_regression_test.go | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/vms/platformvm/validator_set_property_test.go b/vms/platformvm/validator_set_property_test.go index ef1297cd9a9..047a8273b87 100644 --- a/vms/platformvm/validator_set_property_test.go +++ b/vms/platformvm/validator_set_property_test.go @@ -71,7 +71,7 @@ func TestGetValidatorsSetProperty(t *testing.T) { properties.Property("check GetValidatorSet", prop.ForAll( func(events []uint8) string { - vm, subnetID, err := buildVM() + vm, subnetID, err := buildVM(t) if err != nil { return fmt.Sprintf("failed building vm: %s", err.Error()) } @@ -706,7 +706,7 @@ func TestTimestampListGenerator(t *testing.T) { // add a single validator at the end of times, // to make sure it won't pollute our tests -func buildVM() (*VM, ids.ID, error) { +func buildVM(t *testing.T) (*VM, ids.ID, error) { vdrs := validators.NewManager() primaryVdrs := validators.NewSet() _ = vdrs.Add(constants.PrimaryNetworkID, primaryVdrs) @@ -739,7 +739,7 @@ func buildVM() (*VM, ids.ID, error) { atomicDB := prefixdb.New([]byte{1}, baseDBManager.Current().Database) msgChan := make(chan common.Message, 1) - ctx := defaultContext() + ctx := defaultContext(t) m := atomic.NewMemory(atomicDB) ctx.SharedMemory = m.NewSharedMemory(ctx.ChainID) diff --git a/vms/platformvm/vm_regression_test.go b/vms/platformvm/vm_regression_test.go index 043cc8d96f8..fe61cd8b50e 100644 --- a/vms/platformvm/vm_regression_test.go +++ b/vms/platformvm/vm_regression_test.go @@ -1454,7 +1454,7 @@ func TestRemovePermissionedValidatorDuringPendingToCurrentTransitionTracked(t *t func TestSubnetValidatorBLSKeyDiffAfterExpiry(t *testing.T) { // setup require := require.New(t) - vm, _, _ := defaultVM() + vm, _, _ := defaultVM(t) vm.ctx.Lock.Lock() defer func() { require.NoError(vm.Shutdown(context.Background())) @@ -1736,7 +1736,7 @@ func TestPrimaryNetworkValidatorPopulatedToEmptyBLSKeyDiff(t *testing.T) { // setup require := require.New(t) - vm, _, _ := defaultVM() + vm, _, _ := defaultVM(t) vm.ctx.Lock.Lock() defer func() { 
require.NoError(vm.Shutdown(context.Background())) @@ -1896,7 +1896,7 @@ func TestSubnetValidatorPopulatedToEmptyBLSKeyDiff(t *testing.T) { // setup require := require.New(t) - vm, _, _ := defaultVM() + vm, _, _ := defaultVM(t) vm.ctx.Lock.Lock() defer func() { require.NoError(vm.Shutdown(context.Background())) From 68b6c41a4a9c1ed4cfbd88108526c4c00462b878 Mon Sep 17 00:00:00 2001 From: dhrubabasu <7675102+dhrubabasu@users.noreply.github.com> Date: Wed, 14 Jun 2023 19:10:59 -0700 Subject: [PATCH 76/79] nits --- vms/avm/blocks/builder/builder_test.go | 4 +--- vms/platformvm/vm_regression_test.go | 3 +-- vms/platformvm/warp/test_signer.go | 3 +-- 3 files changed, 3 insertions(+), 7 deletions(-) diff --git a/vms/avm/blocks/builder/builder_test.go b/vms/avm/blocks/builder/builder_test.go index 8321f51e1bd..74deb5e9dd6 100644 --- a/vms/avm/blocks/builder/builder_test.go +++ b/vms/avm/blocks/builder/builder_test.go @@ -510,9 +510,7 @@ func TestBlockBuilderAddLocalTx(t *testing.T) { tx := transactions[0] txID := tx.ID() require.NoError(mempool.Add(tx)) - - has := mempool.Has(txID) - require.True(has) + require.True(mempool.Has(txID)) ctrl := gomock.NewController(t) defer ctrl.Finish() diff --git a/vms/platformvm/vm_regression_test.go b/vms/platformvm/vm_regression_test.go index fe61cd8b50e..5a8b4c43d8c 100644 --- a/vms/platformvm/vm_regression_test.go +++ b/vms/platformvm/vm_regression_test.go @@ -1404,8 +1404,7 @@ func TestRemovePermissionedValidatorDuringPendingToCurrentTransitionTracked(t *t subnetValidators := validators.NewSet() require.NoError(vm.state.ValidatorSet(createSubnetTx.ID(), subnetValidators)) - added := vm.Validators.Add(createSubnetTx.ID(), subnetValidators) - require.True(added) + require.True(vm.Validators.Add(createSubnetTx.ID(), subnetValidators)) addSubnetValidatorTx, err := vm.txBuilder.NewAddSubnetValidatorTx( defaultMaxValidatorStake, diff --git a/vms/platformvm/warp/test_signer.go b/vms/platformvm/warp/test_signer.go index fb9331f41ce..aee6616ddac 100644 --- a/vms/platformvm/warp/test_signer.go +++ b/vms/platformvm/warp/test_signer.go @@ -53,6 +53,5 @@ func TestSignerVerifies(t *testing.T, s Signer, sk *bls.SecretKey, chainID ids.I pk := bls.PublicFromSecretKey(sk) msgBytes := msg.Bytes() - valid := bls.Verify(pk, sig, msgBytes) - require.True(valid) + require.True(bls.Verify(pk, sig, msgBytes)) } From e91659a6875c511246071d2fdcc69a06a5dee45a Mon Sep 17 00:00:00 2001 From: dhrubabasu <7675102+dhrubabasu@users.noreply.github.com> Date: Thu, 15 Jun 2023 11:32:16 -0700 Subject: [PATCH 77/79] pr review --- vms/avm/vm_test.go | 2 -- vms/platformvm/state/diff_test.go | 8 ++++---- vms/proposervm/post_fork_block_test.go | 2 +- vms/proposervm/post_fork_option_test.go | 2 +- vms/proposervm/pre_fork_block_test.go | 2 +- vms/proposervm/scheduler/scheduler_test.go | 4 ++-- vms/proposervm/vm_test.go | 6 ++++-- vms/registry/vm_registry_test.go | 14 ++++++-------- 8 files changed, 19 insertions(+), 21 deletions(-) diff --git a/vms/avm/vm_test.go b/vms/avm/vm_test.go index b431c82dcd0..59f4b228c36 100644 --- a/vms/avm/vm_test.go +++ b/vms/avm/vm_test.go @@ -1051,8 +1051,6 @@ func TestTxNotCached(t *testing.T) { require.NoError(err) registerer := prometheus.NewRegistry() - require.NoError(err) - vm.metrics, err = metrics.New("", registerer) require.NoError(err) diff --git a/vms/platformvm/state/diff_test.go b/vms/platformvm/state/diff_test.go index 0c6509c2fe5..6bf3db525c1 100644 --- a/vms/platformvm/state/diff_test.go +++ b/vms/platformvm/state/diff_test.go @@ -478,14 +478,14 @@ func 
assertChainsEqual(t *testing.T, expected, actual Chain) { expectedCurrentStakerIterator, expectedErr := expected.GetCurrentStakerIterator() actualCurrentStakerIterator, actualErr := actual.GetCurrentStakerIterator() - require.ErrorIs(actualErr, expectedErr) + require.Equal(actualErr, expectedErr) if expectedErr == nil { assertIteratorsEqual(t, expectedCurrentStakerIterator, actualCurrentStakerIterator) } expectedPendingStakerIterator, expectedErr := expected.GetPendingStakerIterator() actualPendingStakerIterator, actualErr := actual.GetPendingStakerIterator() - require.ErrorIs(actualErr, expectedErr) + require.Equal(actualErr, expectedErr) if expectedErr == nil { assertIteratorsEqual(t, expectedPendingStakerIterator, actualPendingStakerIterator) } @@ -502,7 +502,7 @@ func assertChainsEqual(t *testing.T, expected, actual Chain) { expectedSubnets, expectedErr := expected.GetSubnets() actualSubnets, actualErr := actual.GetSubnets() - require.ErrorIs(actualErr, expectedErr) + require.Equal(actualErr, expectedErr) if expectedErr == nil { require.Equal(expectedSubnets, actualSubnets) @@ -511,7 +511,7 @@ func assertChainsEqual(t *testing.T, expected, actual Chain) { expectedChains, expectedErr := expected.GetChains(subnetID) actualChains, actualErr := actual.GetChains(subnetID) - require.ErrorIs(actualErr, expectedErr) + require.Equal(actualErr, expectedErr) if expectedErr != nil { continue } diff --git a/vms/proposervm/post_fork_block_test.go b/vms/proposervm/post_fork_block_test.go index 911e16e32de..e7c7cc83390 100644 --- a/vms/proposervm/post_fork_block_test.go +++ b/vms/proposervm/post_fork_block_test.go @@ -35,7 +35,7 @@ func TestOracle_PostForkBlock_ImplementsInterface(t *testing.T) { // test _, err := proBlk.Options(context.Background()) - require.ErrorIs(err, snowman.ErrNotOracle) + require.Equal(err, snowman.ErrNotOracle) // setup _, _, proVM, _, _ := initTestProposerVM(t, time.Time{}, 0) // enable ProBlks diff --git a/vms/proposervm/post_fork_option_test.go b/vms/proposervm/post_fork_option_test.go index 06f8aa28be7..644cdd74af1 100644 --- a/vms/proposervm/post_fork_option_test.go +++ b/vms/proposervm/post_fork_option_test.go @@ -518,7 +518,7 @@ func TestBlockVerify_PostForkOption_ParentIsNotOracleWithError(t *testing.T) { require.IsType(&postForkBlock{}, parentBlk) postForkBlk := parentBlk.(*postForkBlock) _, err = postForkBlk.Options(context.Background()) - require.ErrorIs(err, snowman.ErrNotOracle) + require.Equal(err, snowman.ErrNotOracle) // Build the child statelessChild, err := block.BuildOption( diff --git a/vms/proposervm/pre_fork_block_test.go b/vms/proposervm/pre_fork_block_test.go index 00f0ef08eb2..85232b2d208 100644 --- a/vms/proposervm/pre_fork_block_test.go +++ b/vms/proposervm/pre_fork_block_test.go @@ -36,7 +36,7 @@ func TestOracle_PreForkBlkImplementsInterface(t *testing.T) { // test _, err := proBlk.Options(context.Background()) - require.ErrorIs(err, snowman.ErrNotOracle) + require.Equal(err, snowman.ErrNotOracle) // setup proBlk = preForkBlock{ diff --git a/vms/proposervm/scheduler/scheduler_test.go b/vms/proposervm/scheduler/scheduler_test.go index 238982a18ef..4752c1c12f5 100644 --- a/vms/proposervm/scheduler/scheduler_test.go +++ b/vms/proposervm/scheduler/scheduler_test.go @@ -24,7 +24,7 @@ func TestDelayFromNew(t *testing.T) { fromVM <- common.PendingTxs <-toEngine - require.Negative(t, time.Until(startTime)) + require.LessOrEqual(t, time.Until(startTime), 0) } func TestDelayFromSetTime(t *testing.T) { @@ -41,7 +41,7 @@ func TestDelayFromSetTime(t *testing.T) 
{ fromVM <- common.PendingTxs <-toEngine - require.Negative(t, time.Until(startTime)) + require.LessOrEqual(t, time.Until(startTime), 0) } func TestReceipt(*testing.T) { diff --git a/vms/proposervm/vm_test.go b/vms/proposervm/vm_test.go index e7192d2fa85..0ea3e3289c9 100644 --- a/vms/proposervm/vm_test.go +++ b/vms/proposervm/vm_test.go @@ -9,6 +9,7 @@ import ( "crypto" "crypto/tls" "errors" + "fmt" "testing" "time" @@ -58,6 +59,7 @@ var ( errUnverifiedBlock = errors.New("unverified block") errMarshallingFailed = errors.New("marshalling failed") errTooHigh = errors.New("too high") + errUnexpectedCall = errors.New("unexpected call") ) func init() { @@ -958,8 +960,8 @@ func TestExpiredBuildBlock(t *testing.T) { require.NoError(proVM.SetPreference(context.Background(), parsedBlock.ID())) coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { - require.FailNow("unexpectedly called build block") - return nil, nil + require.FailNow(fmt.Errorf("%w: BuildBlock", errUnexpectedCall).Error()) + return nil, errUnexpectedCall } // The first notification will be read from the consensus engine diff --git a/vms/registry/vm_registry_test.go b/vms/registry/vm_registry_test.go index 6b04329f3bc..565f3127b5a 100644 --- a/vms/registry/vm_registry_test.go +++ b/vms/registry/vm_registry_test.go @@ -58,9 +58,9 @@ func TestReload_Success(t *testing.T) { Return(nil) installedVMs, failedVMs, err := resources.vmRegistry.Reload(context.Background()) + require.NoError(err) require.ElementsMatch([]ids.ID{id3, id4}, installedVMs) require.Empty(failedVMs) - require.NoError(err) } // Tests that we fail if we're not able to get the vms on disk @@ -73,9 +73,9 @@ func TestReload_GetNewVMsFails(t *testing.T) { resources.mockVMGetter.EXPECT().Get().Times(1).Return(nil, nil, errTest) installedVMs, failedVMs, err := resources.vmRegistry.Reload(context.Background()) + require.ErrorIs(err, errTest) require.Empty(installedVMs) require.Empty(failedVMs) - require.ErrorIs(err, errTest) } // Tests that if we fail to register a VM, we fail. @@ -114,12 +114,11 @@ func TestReload_PartialRegisterFailure(t *testing.T) { Return(nil) installedVMs, failedVMs, err := resources.vmRegistry.Reload(context.Background()) - + require.NoError(err) require.Len(failedVMs, 1) require.ErrorIs(failedVMs[id3], errTest) require.Len(installedVMs, 1) require.Equal(id4, installedVMs[0]) - require.NoError(err) } // Tests the happy case where Reload succeeds. @@ -158,9 +157,9 @@ func TestReloadWithReadLock_Success(t *testing.T) { Return(nil) installedVMs, failedVMs, err := resources.vmRegistry.ReloadWithReadLock(context.Background()) + require.NoError(err) require.ElementsMatch([]ids.ID{id3, id4}, installedVMs) require.Empty(failedVMs) - require.NoError(err) } // Tests that we fail if we're not able to get the vms on disk @@ -173,9 +172,9 @@ func TestReloadWithReadLock_GetNewVMsFails(t *testing.T) { resources.mockVMGetter.EXPECT().Get().Times(1).Return(nil, nil, errTest) installedVMs, failedVMs, err := resources.vmRegistry.ReloadWithReadLock(context.Background()) + require.ErrorIs(err, errTest) require.Empty(installedVMs) require.Empty(failedVMs) - require.ErrorIs(err, errTest) } // Tests that if we fail to register a VM, we fail. 
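One pattern in the registry hunks is worth spelling out: require.NoError moves ahead of the slice assertions. When the call fails, the returned slices are typically zero values, so leading with the error check makes the first failure message point at the actual cause rather than at a confusing empty-versus-expected diff. A sketch under that reasoning (reload and its return values are stand-ins for vmRegistry.Reload):

    package example

    import (
        "testing"

        "github.com/stretchr/testify/require"
    )

    // reload stands in for a call returning values plus a trailing error.
    func reload() (installed, failed []string, err error) {
        return []string{"vm1", "vm2"}, nil, nil
    }

    func TestReloadOrdering(t *testing.T) {
        installed, failed, err := reload()

        // Check the error first; the assertions below only make sense
        // once the call is known to have succeeded.
        require.NoError(t, err)
        require.ElementsMatch(t, []string{"vm1", "vm2"}, installed)
        require.Empty(t, failed)
    }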
@@ -214,12 +213,11 @@ func TestReloadWithReadLock_PartialRegisterFailure(t *testing.T) { Return(nil) installedVMs, failedVMs, err := resources.vmRegistry.ReloadWithReadLock(context.Background()) - + require.NoError(err) require.Len(failedVMs, 1) require.ErrorIs(failedVMs[id3], errTest) require.Len(installedVMs, 1) require.Equal(id4, installedVMs[0]) - require.NoError(err) } type registryTestResources struct { From 7319b4a19093c48b78f627ab2a03eb2904271996 Mon Sep 17 00:00:00 2001 From: dhrubabasu <7675102+dhrubabasu@users.noreply.github.com> Date: Thu, 15 Jun 2023 11:33:49 -0700 Subject: [PATCH 78/79] fix merge error --- snow/consensus/snowman/consensus_test.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/snow/consensus/snowman/consensus_test.go b/snow/consensus/snowman/consensus_test.go index 7f30785f859..3a3815a54f4 100644 --- a/snow/consensus/snowman/consensus_test.go +++ b/snow/consensus/snowman/consensus_test.go @@ -1558,8 +1558,6 @@ func ErrorOnAddDuplicateBlockID(t *testing.T, factory Factory) { } func gatherCounterGauge(t *testing.T, reg *prometheus.Registry) map[string]float64 { - require := require.New(t) - ms, err := reg.Gather() require.NoError(t, err) mss := make(map[string]float64) From 471c3662dfc0c2de16e0166b8226aee9315a4226 Mon Sep 17 00:00:00 2001 From: dhrubabasu <7675102+dhrubabasu@users.noreply.github.com> Date: Thu, 15 Jun 2023 11:37:50 -0700 Subject: [PATCH 79/79] =?UTF-8?q?=F0=9F=8E=80?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- vms/platformvm/state/diff_test.go | 8 ++++---- vms/proposervm/post_fork_block_test.go | 2 +- vms/proposervm/post_fork_option_test.go | 2 +- vms/proposervm/pre_fork_block_test.go | 2 +- vms/proposervm/scheduler/scheduler_test.go | 4 ++-- 5 files changed, 9 insertions(+), 9 deletions(-) diff --git a/vms/platformvm/state/diff_test.go b/vms/platformvm/state/diff_test.go index 6bf3db525c1..bdde20ce055 100644 --- a/vms/platformvm/state/diff_test.go +++ b/vms/platformvm/state/diff_test.go @@ -478,14 +478,14 @@ func assertChainsEqual(t *testing.T, expected, actual Chain) { expectedCurrentStakerIterator, expectedErr := expected.GetCurrentStakerIterator() actualCurrentStakerIterator, actualErr := actual.GetCurrentStakerIterator() - require.Equal(actualErr, expectedErr) + require.Equal(expectedErr, actualErr) if expectedErr == nil { assertIteratorsEqual(t, expectedCurrentStakerIterator, actualCurrentStakerIterator) } expectedPendingStakerIterator, expectedErr := expected.GetPendingStakerIterator() actualPendingStakerIterator, actualErr := actual.GetPendingStakerIterator() - require.Equal(actualErr, expectedErr) + require.Equal(expectedErr, actualErr) if expectedErr == nil { assertIteratorsEqual(t, expectedPendingStakerIterator, actualPendingStakerIterator) } @@ -502,7 +502,7 @@ func assertChainsEqual(t *testing.T, expected, actual Chain) { expectedSubnets, expectedErr := expected.GetSubnets() actualSubnets, actualErr := actual.GetSubnets() - require.Equal(actualErr, expectedErr) + require.Equal(expectedErr, actualErr) if expectedErr == nil { require.Equal(expectedSubnets, actualSubnets) @@ -511,7 +511,7 @@ func assertChainsEqual(t *testing.T, expected, actual Chain) { expectedChains, expectedErr := expected.GetChains(subnetID) actualChains, actualErr := actual.GetChains(subnetID) - require.Equal(actualErr, expectedErr) + require.Equal(expectedErr, actualErr) if expectedErr != nil { continue } diff --git a/vms/proposervm/post_fork_block_test.go b/vms/proposervm/post_fork_block_test.go index 
e7c7cc83390..0c23ad024fe 100644 --- a/vms/proposervm/post_fork_block_test.go +++ b/vms/proposervm/post_fork_block_test.go @@ -35,7 +35,7 @@ func TestOracle_PostForkBlock_ImplementsInterface(t *testing.T) { // test _, err := proBlk.Options(context.Background()) - require.Equal(err, snowman.ErrNotOracle) + require.Equal(snowman.ErrNotOracle, err) // setup _, _, proVM, _, _ := initTestProposerVM(t, time.Time{}, 0) // enable ProBlks diff --git a/vms/proposervm/post_fork_option_test.go b/vms/proposervm/post_fork_option_test.go index 644cdd74af1..92851a9078f 100644 --- a/vms/proposervm/post_fork_option_test.go +++ b/vms/proposervm/post_fork_option_test.go @@ -518,7 +518,7 @@ func TestBlockVerify_PostForkOption_ParentIsNotOracleWithError(t *testing.T) { require.IsType(&postForkBlock{}, parentBlk) postForkBlk := parentBlk.(*postForkBlock) _, err = postForkBlk.Options(context.Background()) - require.Equal(err, snowman.ErrNotOracle) + require.Equal(snowman.ErrNotOracle, err) // Build the child statelessChild, err := block.BuildOption( diff --git a/vms/proposervm/pre_fork_block_test.go b/vms/proposervm/pre_fork_block_test.go index 85232b2d208..0ba73342b75 100644 --- a/vms/proposervm/pre_fork_block_test.go +++ b/vms/proposervm/pre_fork_block_test.go @@ -36,7 +36,7 @@ func TestOracle_PreForkBlkImplementsInterface(t *testing.T) { // test _, err := proBlk.Options(context.Background()) - require.Equal(err, snowman.ErrNotOracle) + require.Equal(snowman.ErrNotOracle, err) // setup proBlk = preForkBlock{ diff --git a/vms/proposervm/scheduler/scheduler_test.go b/vms/proposervm/scheduler/scheduler_test.go index 4752c1c12f5..821a36883e9 100644 --- a/vms/proposervm/scheduler/scheduler_test.go +++ b/vms/proposervm/scheduler/scheduler_test.go @@ -24,7 +24,7 @@ func TestDelayFromNew(t *testing.T) { fromVM <- common.PendingTxs <-toEngine - require.LessOrEqual(t, time.Until(startTime), 0) + require.LessOrEqual(t, time.Until(startTime), time.Duration(0)) } func TestDelayFromSetTime(t *testing.T) { @@ -41,7 +41,7 @@ func TestDelayFromSetTime(t *testing.T) { fromVM <- common.PendingTxs <-toEngine - require.LessOrEqual(t, time.Until(startTime), 0) + require.LessOrEqual(t, time.Until(startTime), time.Duration(0)) } func TestReceipt(*testing.T) {
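The final fix in this series is subtle enough to call out: time.Until returns a time.Duration, and testify's ordering helpers compare operands by reflected kind at runtime, so the untyped constant 0 arrives as an int and cannot be compared against a Duration. Supplying a typed zero keeps both operands the same kind. A minimal sketch of the distinction:

    package example

    import (
        "testing"
        "time"

        "github.com/stretchr/testify/require"
    )

    func TestTypedZeroForDurations(t *testing.T) {
        startTime := time.Now().Add(-time.Millisecond)
        elapsed := time.Until(startTime) // negative: startTime has passed

        // require.LessOrEqual(t, elapsed, 0) would fail at runtime with a
        // type mismatch (int versus time.Duration); a typed zero works.
        require.LessOrEqual(t, elapsed, time.Duration(0))

        // require.Negative needs no literal at all: it derives a zero of
        // the operand's own type, which is why the pre-patch code worked.
        require.Negative(t, elapsed)
    }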