From 8866972128a5a89d9d477ce70d27afb4f96568ae Mon Sep 17 00:00:00 2001 From: Joshua Kim <20001595+joshua-kim@users.noreply.github.com> Date: Wed, 6 Dec 2023 15:00:22 -0500 Subject: [PATCH] Squashed commit of the following: commit b36416da40fd03f4d75c023a203079e814c5aa8d Author: Alberto Benegiamo Date: Wed Dec 6 12:12:11 2023 -0700 Drop Pending Stakers 1 - introduced ScheduledStaker txs (#2323) Co-authored-by: Stephen Buttolph commit 7df1f3a1c26eada44c44efacc8ee7414963ad4f6 Author: Stephen Buttolph Date: Wed Dec 6 13:46:56 2023 -0500 Restrict GOPROXY (#2434) commit 21b7ab880c197f567068dc5870bf38ad632670fc Author: Stephen Buttolph Date: Tue Dec 5 19:43:00 2023 -0500 Fix platformvm.SetPreference (#2429) commit ada692a8ee3e88c3d780a75cf13fd4ad4517aa49 Author: Stephen Buttolph Date: Tue Dec 5 17:40:03 2023 -0500 Update minimum golang version to v1.20.12 (#2427) commit 004a23e0275ab2af5c63232015495b3e0f236139 Author: Dhruba Basu <7675102+dhrubabasu@users.noreply.github.com> Date: Tue Dec 5 12:56:25 2023 -0500 `vms/platformvm`: Add `decisionTxs` parameter to `NewBanffProposalBlock` (#2411) Co-authored-by: Dan Laine commit 439dc1e55216d612a11b463949bf7be7cf3138ec Author: Alberto Benegiamo Date: Tue Dec 5 10:46:13 2023 -0700 ProposerVM Extend windows 0 - Cleanup (#2404) Co-authored-by: Stephen Buttolph commit 477157d27d607da50d51464dd2a4a6743ab0a891 Author: Dhruba Basu <7675102+dhrubabasu@users.noreply.github.com> Date: Tue Dec 5 11:10:56 2023 -0500 `vms/platformvm`: Cleanup some block tests (#2422) Co-authored-by: Dan Laine commit b6700c956159cba6bdec346dc91343ec51492fe8 Author: Stephen Buttolph Date: Mon Dec 4 22:24:34 2023 -0500 Update slices dependency to use Compare (#2424) Co-authored-by: James Walker commit 5d9e482d220657e71312994bb4ce682391dd2e1e Author: Dan Laine Date: Mon Dec 4 19:27:29 2023 -0500 allow user of `build_fuzz.sh` to specify a directory to fuzz in (#2414) commit 2e32281b623ed5cc0c1b7101deae21667f5cef49 Author: Stephen Buttolph Date: Mon Dec 4 18:30:41 
2023 -0500 Fix duplicated bootstrapper engine termination (#2334) Signed-off-by: Joshua Kim <20001595+joshua-kim@users.noreply.github.com> Co-authored-by: Joshua Kim <20001595+joshua-kim@users.noreply.github.com> commit b741c198ae8e931003d94b104c70fc8200721ad5 Author: Dhruba Basu <7675102+dhrubabasu@users.noreply.github.com> Date: Mon Dec 4 17:49:15 2023 -0500 `vms/platformvm`: Move `VerifyUniqueInputs` from `verifier` to `backend` (#2410) commit 6aa20fc839351d9b8d75cb9a1aef7c9bd07abfef Author: Dhruba Basu <7675102+dhrubabasu@users.noreply.github.com> Date: Mon Dec 4 17:32:07 2023 -0500 `vms/platformvm`: Initialize txs in `Transactions` field for `BanffProposalBlock` (#2419) commit c11accdc01890ed8cb5948012daa47686f72e3fd Author: Alberto Benegiamo Date: Mon Dec 4 09:16:01 2023 -0700 Drop Pending Stakers 0 - De-duplicate staking tx verification (#2335) commit 05ce36676753359fd1fdc802fb7dcf1c00ecc7a6 Author: marun Date: Sat Dec 2 13:33:18 2023 -0800 testing: Update to latest version of ginkgo (#2390) commit 04af33e14494f8cdfec6480fdccfe8879a713f70 Author: Dhruba Basu <7675102+dhrubabasu@users.noreply.github.com> Date: Sat Dec 2 16:33:07 2023 -0500 `vms/platformvm`: Cleanup block builder tests (#2406) Co-authored-by: Stephen Buttolph commit 7623ffd4be915a5185c9ed5e11fa9be15a6e1f00 Author: Stephen Buttolph Date: Fri Dec 1 19:39:15 2023 -0500 Update versions for v1.10.17 (#2394) commit be1a2ad249910c0e4f7606afd8b19d2f48c728fd Author: aaronbuchwald Date: Fri Dec 1 13:31:49 2023 -0500 Add more descriptive formatted error (#2403) Co-authored-by: Stephen Buttolph commit 9b851419158596a5c200e47d83108a71bea8bddb Author: Dhruba Basu <7675102+dhrubabasu@users.noreply.github.com> Date: Thu Nov 30 12:42:36 2023 -0800 `vms/platformvm`: Move `GetRewardUTXOs`, `GetSubnets`, and `GetChains` to `State` interface (#2402) commit de3b16cac8a7f61c92b7c695599071de482d709f Author: Joshua Kim <20001595+joshua-kim@users.noreply.github.com> Date: Thu Nov 30 12:49:53 2023 -0500 Add 
`p2p.Network` component (#2283) Signed-off-by: Joshua Kim <20001595+joshua-kim@users.noreply.github.com> Co-authored-by: Stephen Buttolph commit 0ab2046ef3987bb513ea8c33d11b817169a84e98 Author: marun Date: Thu Nov 30 09:18:59 2023 -0800 Rename `testnet` fixture to `tmpnet` (#2307) commit 907b34c5aa0cf0037c765183101bfcf0f2850f5b Author: Stephen Buttolph Date: Wed Nov 29 21:32:43 2023 -0500 Update bootstrap IPs (#2396) commit 96d451d2e2986fedc45d3c60834d2aae9c404643 Author: Stephen Buttolph Date: Wed Nov 29 18:15:47 2023 -0500 Periodically PullGossip only from connected validators (#2399) commit 0da5bccfb04f98e1e1ac5079acea84056457fcbc Author: Dan Laine Date: Wed Nov 29 16:32:16 2023 -0500 Remove method `CappedList` from `set.Set` (#2395) Signed-off-by: Joshua Kim <20001595+joshua-kim@users.noreply.github.com> --- .github/workflows/auto-generated-checker.yml | 6 +- .github/workflows/build-linux-binaries.yml | 4 +- .github/workflows/build-macos-release.yml | 2 +- .github/workflows/build-public-ami.yml | 2 +- .../workflows/build-ubuntu-amd64-release.yml | 4 +- .../workflows/build-ubuntu-arm64-release.yml | 4 +- .github/workflows/build-win-release.yml | 2 +- .github/workflows/fuzz.yml | 2 +- .github/workflows/static-analysis.yml | 2 +- ...e.persistent.yml => test.e2e.existing.yml} | 16 +- .github/workflows/test.e2e.yml | 8 +- .github/workflows/test.unit.yml | 2 +- .github/workflows/test.upgrade.yml | 10 +- CONTRIBUTING.md | 2 +- Dockerfile | 2 +- README.md | 2 +- RELEASES.md | 72 +++ api/metrics/multi_gatherer.go | 6 +- chains/atomic/state.go | 15 +- chains/manager.go | 60 +- codec/reflectcodec/type_codec.go | 4 +- genesis/bootstrappers.json | 48 +- genesis/config.go | 8 +- genesis/config_test.go | 35 +- go.mod | 8 +- go.sum | 17 +- ids/id.go | 4 +- ids/id_test.go | 51 +- ids/node_id.go | 4 +- ids/node_id_test.go | 51 +- ids/short.go | 4 +- network/p2p/client.go | 7 +- network/p2p/gossip/gossip_test.go | 42 +- network/p2p/network.go | 188 ++++++ 
network/p2p/network_test.go | 596 ++++++++++++++++++ network/p2p/peers.go | 50 -- network/p2p/peers_test.go | 150 ----- network/p2p/router.go | 63 +- network/p2p/router_test.go | 366 ----------- network/p2p/validators.go | 29 +- network/p2p/validators_test.go | 13 +- node/overridden_manager.go | 4 - proto/Dockerfile.buf | 2 +- scripts/build_avalanche.sh | 2 +- scripts/build_fuzz.sh | 10 +- ...build_testnetctl.sh => build_tmpnetctl.sh} | 6 +- scripts/constants.sh | 3 + scripts/tests.e2e.existing.sh | 63 ++ scripts/tests.e2e.persistent.sh | 60 -- scripts/tests.e2e.sh | 14 +- scripts/tests.upgrade.sh | 2 +- snow/consensus/snowman/test_block.go | 4 +- .../avalanche/bootstrap/bootstrapper.go | 3 +- snow/engine/common/tracker/peers.go | 14 + snow/engine/snowman/bootstrap/bootstrapper.go | 1 + .../snowman/bootstrap/bootstrapper_test.go | 121 ++++ snow/engine/snowman/config.go | 16 +- snow/engine/snowman/config_test.go | 10 +- snow/engine/snowman/transitive.go | 17 +- snow/engine/snowman/transitive_test.go | 4 + snow/validators/manager.go | 19 - snow/validators/set.go | 23 - tests/e2e/README.md | 42 +- tests/e2e/c/dynamic_fees.go | 4 +- tests/e2e/faultinjection/duplicate_node_id.go | 10 +- tests/e2e/p/interchain_workflow.go | 6 +- tests/e2e/p/staking_rewards.go | 26 +- tests/e2e/p/workflow.go | 2 +- tests/fixture/e2e/env.go | 21 +- tests/fixture/e2e/flags.go | 35 +- tests/fixture/e2e/helpers.go | 22 +- tests/fixture/testnet/README.md | 8 - tests/fixture/tmpnet/README.md | 20 + tests/fixture/{testnet => tmpnet}/cmd/main.go | 16 +- tests/fixture/{testnet => tmpnet}/common.go | 2 +- tests/fixture/{testnet => tmpnet}/config.go | 2 +- .../fixture/{testnet => tmpnet}/interfaces.go | 2 +- .../{testnet => tmpnet}/local/README.md | 44 +- .../{testnet => tmpnet}/local/config.go | 16 +- .../{testnet => tmpnet}/local/network.go | 36 +- .../{testnet => tmpnet}/local/network_test.go | 0 .../fixture/{testnet => tmpnet}/local/node.go | 22 +- .../{testnet => tmpnet}/local/node_test.go | 0 
utils/sampler/weighted_array.go | 4 +- utils/sampler/weighted_array_test.go | 41 +- utils/sampler/weighted_heap.go | 13 +- utils/sampler/weighted_heap_test.go | 35 +- utils/sampler/weighted_linear.go | 4 +- utils/sampler/weighted_linear_test.go | 41 +- utils/set/sampleable_set.go | 2 +- utils/set/set.go | 26 +- utils/set/set_test.go | 29 - utils/sorting.go | 39 +- utils/sorting_test.go | 30 +- version/compatibility.json | 3 +- version/constants.go | 2 +- vms/avm/block/executor/manager.go | 4 +- vms/avm/genesis.go | 4 +- vms/avm/genesis_test.go | 41 +- vms/avm/txs/initial_state.go | 4 +- vms/avm/txs/initial_state_test.go | 36 +- vms/avm/txs/operation.go | 8 +- vms/components/avax/transferables.go | 4 +- vms/components/avax/utxo_id.go | 13 +- vms/components/avax/utxo_id_test.go | 35 +- vms/platformvm/api/static_service.go | 21 +- vms/platformvm/api/static_service_test.go | 49 +- vms/platformvm/block/builder/builder.go | 20 +- vms/platformvm/block/builder/builder_test.go | 164 ++--- vms/platformvm/block/builder/helpers_test.go | 14 - vms/platformvm/block/executor/backend.go | 29 + vms/platformvm/block/executor/manager.go | 6 +- vms/platformvm/block/executor/manager_test.go | 15 + .../block/executor/proposal_block_test.go | 14 + .../block/executor/rejector_test.go | 1 + vms/platformvm/block/executor/verifier.go | 31 +- vms/platformvm/block/parse_test.go | 61 +- vms/platformvm/block/proposal_block.go | 28 +- vms/platformvm/block/proposal_block_test.go | 124 ++-- vms/platformvm/block/standard_block.go | 2 +- vms/platformvm/state/diff.go | 97 +-- vms/platformvm/state/diff_test.go | 164 ++--- vms/platformvm/state/mock_state.go | 90 --- vms/platformvm/state/staker.go | 4 +- vms/platformvm/state/staker_test.go | 6 +- vms/platformvm/state/state.go | 46 +- vms/platformvm/state/state_test.go | 2 +- vms/platformvm/txs/add_delegator_tx.go | 3 +- .../txs/add_permissionless_delegator_tx.go | 5 +- .../txs/add_permissionless_validator_tx.go | 3 +- 
vms/platformvm/txs/add_subnet_validator_tx.go | 3 +- vms/platformvm/txs/add_validator_tx.go | 3 +- .../txs/executor/staker_tx_verification.go | 170 +++-- .../txs/executor/standard_tx_executor.go | 56 +- .../txs/executor/tx_mempool_verifier.go | 27 +- vms/platformvm/txs/mempool/mempool.go | 2 +- vms/platformvm/txs/mock_scheduled_staker.go | 151 +++++ vms/platformvm/txs/mock_staker.go | 28 - vms/platformvm/txs/staker_tx.go | 8 +- vms/platformvm/validator_set_property_test.go | 2 +- vms/platformvm/warp/signature_test.go | 4 +- vms/platformvm/warp/validator.go | 4 +- vms/proposervm/batched_vm_test.go | 14 +- vms/proposervm/block.go | 133 ++-- vms/proposervm/block_test.go | 8 +- vms/proposervm/config.go | 32 + vms/proposervm/height_indexed_vm.go | 10 +- vms/proposervm/post_fork_block_test.go | 40 +- vms/proposervm/post_fork_option_test.go | 14 +- vms/proposervm/pre_fork_block.go | 10 +- vms/proposervm/pre_fork_block_test.go | 10 +- vms/proposervm/proposer/validators.go | 4 +- vms/proposervm/proposer/validators_test.go | 36 +- vms/proposervm/state_syncable_vm_test.go | 38 +- vms/proposervm/vm.go | 33 +- vms/proposervm/vm_regression_test.go | 15 +- vms/proposervm/vm_test.go | 170 ++--- x/archivedb/key_test.go | 8 +- x/merkledb/key.go | 13 +- x/merkledb/view_iterator.go | 4 +- x/sync/sync_test.go | 14 +- x/sync/workheap_test.go | 4 +- 162 files changed, 2865 insertions(+), 2315 deletions(-) rename .github/workflows/{test.e2e.persistent.yml => test.e2e.existing.yml} (63%) create mode 100644 network/p2p/network.go create mode 100644 network/p2p/network_test.go delete mode 100644 network/p2p/peers.go delete mode 100644 network/p2p/peers_test.go rename scripts/{build_testnetctl.sh => build_tmpnetctl.sh} (71%) create mode 100755 scripts/tests.e2e.existing.sh delete mode 100755 scripts/tests.e2e.persistent.sh delete mode 100644 tests/fixture/testnet/README.md create mode 100644 tests/fixture/tmpnet/README.md rename tests/fixture/{testnet => tmpnet}/cmd/main.go (86%) rename 
tests/fixture/{testnet => tmpnet}/common.go (98%) rename tests/fixture/{testnet => tmpnet}/config.go (99%) rename tests/fixture/{testnet => tmpnet}/interfaces.go (97%) rename tests/fixture/{testnet => tmpnet}/local/README.md (86%) rename tests/fixture/{testnet => tmpnet}/local/config.go (79%) rename tests/fixture/{testnet => tmpnet}/local/network.go (95%) rename tests/fixture/{testnet => tmpnet}/local/network_test.go (100%) rename tests/fixture/{testnet => tmpnet}/local/node.go (95%) rename tests/fixture/{testnet => tmpnet}/local/node_test.go (100%) create mode 100644 vms/platformvm/txs/mock_scheduled_staker.go create mode 100644 vms/proposervm/config.go diff --git a/.github/workflows/auto-generated-checker.yml b/.github/workflows/auto-generated-checker.yml index eb53ff4c5b83..cca52dfa382a 100644 --- a/.github/workflows/auto-generated-checker.yml +++ b/.github/workflows/auto-generated-checker.yml @@ -18,7 +18,7 @@ jobs: - uses: actions/checkout@v3 - uses: actions/setup-go@v3 with: - go-version: '~1.20.10' + go-version: '~1.20.12' check-latest: true - uses: bufbuild/buf-setup-action@v1.26.1 - shell: bash @@ -32,7 +32,7 @@ jobs: - uses: actions/checkout@v3 - uses: actions/setup-go@v3 with: - go-version: '~1.20.10' + go-version: '~1.20.12' check-latest: true - shell: bash run: scripts/mock.gen.sh @@ -45,7 +45,7 @@ jobs: - uses: actions/checkout@v3 - uses: actions/setup-go@v3 with: - go-version: '~1.20.10' + go-version: '~1.20.12' check-latest: true - shell: bash run: go mod tidy diff --git a/.github/workflows/build-linux-binaries.yml b/.github/workflows/build-linux-binaries.yml index dc1a358da550..7053379ef508 100644 --- a/.github/workflows/build-linux-binaries.yml +++ b/.github/workflows/build-linux-binaries.yml @@ -19,7 +19,7 @@ jobs: - uses: actions/setup-go@v3 with: - go-version: '~1.20.10' + go-version: '~1.20.12' check-latest: true - run: go version @@ -81,7 +81,7 @@ jobs: - uses: actions/setup-go@v3 with: - go-version: '~1.20.10' + go-version: '~1.20.12' 
check-latest: true - run: go version diff --git a/.github/workflows/build-macos-release.yml b/.github/workflows/build-macos-release.yml index 3699fc0b5e37..c1a884aebf4f 100644 --- a/.github/workflows/build-macos-release.yml +++ b/.github/workflows/build-macos-release.yml @@ -26,7 +26,7 @@ jobs: - uses: actions/checkout@v3 - uses: actions/setup-go@v3 with: - go-version: '~1.20.10' + go-version: '~1.20.12' check-latest: true - run: go version diff --git a/.github/workflows/build-public-ami.yml b/.github/workflows/build-public-ami.yml index 1e483ccd7f6a..32c816efdd3d 100644 --- a/.github/workflows/build-public-ami.yml +++ b/.github/workflows/build-public-ami.yml @@ -19,7 +19,7 @@ jobs: - uses: actions/checkout@v3 - uses: actions/setup-go@v3 with: - go-version: '~1.20.10' + go-version: '~1.20.12' check-latest: true - run: go version diff --git a/.github/workflows/build-ubuntu-amd64-release.yml b/.github/workflows/build-ubuntu-amd64-release.yml index bbb0cbd3f4a2..640cff56a53f 100644 --- a/.github/workflows/build-ubuntu-amd64-release.yml +++ b/.github/workflows/build-ubuntu-amd64-release.yml @@ -18,7 +18,7 @@ jobs: - uses: actions/checkout@v3 - uses: actions/setup-go@v3 with: - go-version: '~1.20.10' + go-version: '~1.20.12' check-latest: true - run: go version @@ -78,7 +78,7 @@ jobs: - uses: actions/checkout@v3 - uses: actions/setup-go@v3 with: - go-version: '~1.20.10' + go-version: '~1.20.12' check-latest: true - run: go version diff --git a/.github/workflows/build-ubuntu-arm64-release.yml b/.github/workflows/build-ubuntu-arm64-release.yml index 8d1ba4a2ea2e..ac660b8cd678 100644 --- a/.github/workflows/build-ubuntu-arm64-release.yml +++ b/.github/workflows/build-ubuntu-arm64-release.yml @@ -18,7 +18,7 @@ jobs: - uses: actions/checkout@v3 - uses: actions/setup-go@v3 with: - go-version: '~1.20.10' + go-version: '~1.20.12' check-latest: true - run: go version @@ -78,7 +78,7 @@ jobs: - uses: actions/checkout@v3 - uses: actions/setup-go@v3 with: - go-version: '~1.20.10' + 
go-version: '~1.20.12' check-latest: true - run: go version diff --git a/.github/workflows/build-win-release.yml b/.github/workflows/build-win-release.yml index c84767d4c87d..d71725d124c2 100644 --- a/.github/workflows/build-win-release.yml +++ b/.github/workflows/build-win-release.yml @@ -26,7 +26,7 @@ jobs: - uses: actions/setup-go@v3 with: - go-version: '~1.20.10' + go-version: '~1.20.12' check-latest: true - run: go version diff --git a/.github/workflows/fuzz.yml b/.github/workflows/fuzz.yml index 7fa95a88e7f3..b0ede2b349b7 100644 --- a/.github/workflows/fuzz.yml +++ b/.github/workflows/fuzz.yml @@ -18,7 +18,7 @@ jobs: - name: Set up Go uses: actions/setup-go@v3 with: - go-version: '~1.20.10' + go-version: '~1.20.12' check-latest: true - name: Run fuzz tests shell: bash diff --git a/.github/workflows/static-analysis.yml b/.github/workflows/static-analysis.yml index 8510a693632c..81f488a55ef3 100644 --- a/.github/workflows/static-analysis.yml +++ b/.github/workflows/static-analysis.yml @@ -20,7 +20,7 @@ jobs: - name: Set up Go uses: actions/setup-go@v3 with: - go-version: '~1.20.10' + go-version: '~1.20.12' check-latest: true - name: Run static analysis tests shell: bash diff --git a/.github/workflows/test.e2e.persistent.yml b/.github/workflows/test.e2e.existing.yml similarity index 63% rename from .github/workflows/test.e2e.persistent.yml rename to .github/workflows/test.e2e.existing.yml index d3448904d4c5..c00a328610d1 100644 --- a/.github/workflows/test.e2e.persistent.yml +++ b/.github/workflows/test.e2e.existing.yml @@ -1,4 +1,4 @@ -name: Test e2e with persistent network +name: Test e2e with existing network on: push: @@ -15,7 +15,7 @@ permissions: contents: read jobs: - test_e2e_persistent: + test_e2e_existing: runs-on: ubuntu-latest steps: - name: Git checkout @@ -23,17 +23,17 @@ jobs: - name: Set up Go uses: actions/setup-go@v3 with: - go-version: '~1.20.10' + go-version: '~1.20.12' check-latest: true - name: Build the avalanchego binary shell: bash run: 
./scripts/build.sh -r - - name: Run e2e tests with persistent network + - name: Run e2e tests with existing network shell: bash - run: E2E_SERIAL=1 ./scripts/tests.e2e.persistent.sh - - name: Upload testnet network dir + run: E2E_SERIAL=1 ./scripts/tests.e2e.existing.sh + - name: Upload tmpnet network dir uses: actions/upload-artifact@v3 if: always() with: - name: testnet-data - path: ~/.testnetctl/networks/1000 + name: tmpnet-data + path: ~/.tmpnet/networks/1000 diff --git a/.github/workflows/test.e2e.yml b/.github/workflows/test.e2e.yml index 4e7c70ad8a11..2f8e30156f72 100644 --- a/.github/workflows/test.e2e.yml +++ b/.github/workflows/test.e2e.yml @@ -23,7 +23,7 @@ jobs: - name: Set up Go uses: actions/setup-go@v3 with: - go-version: '~1.20.10' + go-version: '~1.20.12' check-latest: true - name: Build the avalanchego binary shell: bash @@ -31,9 +31,9 @@ jobs: - name: Run e2e tests shell: bash run: E2E_SERIAL=1 ./scripts/tests.e2e.sh - - name: Upload testnet network dir + - name: Upload tmpnet network dir uses: actions/upload-artifact@v3 if: always() with: - name: testnet-data - path: ~/.testnetctl/networks/1000 + name: tmpnet-data + path: ~/.tmpnet/networks/1000 diff --git a/.github/workflows/test.unit.yml b/.github/workflows/test.unit.yml index ae4cb85983c3..dc766f07484f 100644 --- a/.github/workflows/test.unit.yml +++ b/.github/workflows/test.unit.yml @@ -27,7 +27,7 @@ jobs: - uses: actions/checkout@v3 - uses: actions/setup-go@v3 with: - go-version: '~1.20.10' + go-version: '~1.20.12' check-latest: true - name: Set timeout on Windows # Windows UT run slower and need a longer timeout shell: bash diff --git a/.github/workflows/test.upgrade.yml b/.github/workflows/test.upgrade.yml index e564a51f5630..8f8ec5fc35ed 100644 --- a/.github/workflows/test.upgrade.yml +++ b/.github/workflows/test.upgrade.yml @@ -23,19 +23,19 @@ jobs: - name: Set up Go uses: actions/setup-go@v3 with: - go-version: '~1.20.10' + go-version: '~1.20.12' check-latest: true - name: Build the 
avalanchego binary shell: bash run: ./scripts/build.sh - name: Run upgrade tests shell: bash - # 1.10.7 is the first version compatible with the testnet fixture by + # 1.10.7 is the first version compatible with the ephnet fixture by # virtue of writing a process context file on node start. run: ./scripts/tests.upgrade.sh 1.10.7 - - name: Upload testnet network dir + - name: Upload ephnet network dir uses: actions/upload-artifact@v3 if: always() with: - name: testnet-data - path: ~/.testnetctl/networks/1000 + name: ephnet-data + path: ~/.ephnet/networks/1000 diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 968f72ef24fe..4abb6a4d82ff 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -4,7 +4,7 @@ To start developing on AvalancheGo, you'll need a few things installed. -- Golang version >= 1.20.8 +- Golang version >= 1.20.12 - gcc - g++ diff --git a/Dockerfile b/Dockerfile index f4e6c21441cf..21ab344ef4b0 100644 --- a/Dockerfile +++ b/Dockerfile @@ -4,7 +4,7 @@ # README.md # go.mod # ============= Compilation Stage ================ -FROM golang:1.20.10-bullseye AS builder +FROM golang:1.20.12-bullseye AS builder WORKDIR /build # Copy and download avalanche dependencies using go mod diff --git a/README.md b/README.md index 7842615f35be..ac3cd62841a9 100644 --- a/README.md +++ b/README.md @@ -23,7 +23,7 @@ The minimum recommended hardware specification for nodes connected to Mainnet is If you plan to build AvalancheGo from source, you will also need the following software: -- [Go](https://golang.org/doc/install) version >= 1.20.10 +- [Go](https://golang.org/doc/install) version >= 1.20.12 - [gcc](https://gcc.gnu.org/) - g++ diff --git a/RELEASES.md b/RELEASES.md index 073214ac4424..a7c232d7674b 100644 --- a/RELEASES.md +++ b/RELEASES.md @@ -1,5 +1,77 @@ # Release Notes +## [v1.10.17](https://github.com/ava-labs/avalanchego/releases/tag/v1.10.17) + +This version is backwards compatible to [v1.10.0](https://github.com/ava-labs/avalanchego/releases/tag/v1.10.0). 
It is optional, but encouraged. + +The plugin version is unchanged at `30` and is compatible with versions `v1.10.15-v1.10.16`. + +### APIs + +- Added `avalanche_{chainID}_blks_build_accept_latency` metric +- Added `avalanche_{chainID}_blks_issued{source}` metric with sources: + - `pull_gossip` + - `push_gossip` + - `put_gossip` which is deprecated + - `built` + - `unknown` +- Added `avalanche_{chainID}_issuer_stake_sum` metric +- Added `avalanche_{chainID}_issuer_stake_count` metric + +### Configs + +- Added: + - `--consensus-frontier-poll-frequency` +- Removed: + - `--consensus-accepted-frontier-gossip-frequency` +- Deprecated: + - `--consensus-accepted-frontier-gossip-validator-size` + - `--consensus-accepted-frontier-gossip-non-validator-size` + - `--consensus-accepted-frontier-gossip-peer-size` + - Updated the default value to 1 to align with the change in default gossip frequency + - `--consensus-on-accept-gossip-validator-size` + - `--consensus-on-accept-gossip-non-validator-size` + - `--consensus-on-accept-gossip-peer-size` + +### Fixes + +- Fixed `duplicated operation on provided value` error when executing atomic operations after state syncing the C-chain +- Removed useage of atomic trie after commitment +- Fixed atomic trie root overwrite during state sync +- Prevented closure of `stdout` and `stderr` when shutting down the logger + +### What's Changed + +- Remove Banff check from mempool verifier by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2360 +- Document storage growth in readme by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2364 +- Add metric for duration between block timestamp and acceptance time by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2366 +- `vms/platformvm`: Remove unused `withMetrics` txheap by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2373 +- Move peerTracker from x/sync to network/p2p by @joshua-kim in https://github.com/ava-labs/avalanchego/pull/2356 +- 
Logging avoid closing standard outputs by @felipemadero in https://github.com/ava-labs/avalanchego/pull/2372 +- `vms/platformvm`: Adjust `Diff.Apply` signature by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2368 +- Add bls validator info to genesis by @felipemadero in https://github.com/ava-labs/avalanchego/pull/2371 +- Remove `engine.GetVM` by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2374 +- `vms/platformvm`: Consolidate `state` pkg mocks by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2370 +- Remove common bootstrapper by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2297 +- `vms/platformvm`: Move `toEngine` channel to mempool by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2333 +- `vms/avm`: Rename `states` pkg to `state` by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2381 +- Implement generic bimap by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2383 +- Unexport RequestID from snowman engine by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2384 +- Add metric to track the stake weight of block providers by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2376 +- Add block source metrics to monitor gossip by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2386 +- Rename `D` to `Durango` by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2389 +- Replace periodic push accepted gossip with pull preference gossip for block discovery by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2367 +- MerkleDB Remove ID from Node to reduce size and removal channel creation. 
by @dboehm-avalabs in https://github.com/ava-labs/avalanchego/pull/2324 +- Remove method `CappedList` from `set.Set` by @danlaine in https://github.com/ava-labs/avalanchego/pull/2395 +- Periodically PullGossip only from connected validators by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2399 +- Update bootstrap IPs by @StephenButtolph in https://github.com/ava-labs/avalanchego/pull/2396 +- Rename `testnet` fixture to `tmpnet` by @marun in https://github.com/ava-labs/avalanchego/pull/2307 +- Add `p2p.Network` component by @joshua-kim in https://github.com/ava-labs/avalanchego/pull/2283 +- `vms/platformvm`: Move `GetRewardUTXOs`, `GetSubnets`, and `GetChains` to `State` interface by @dhrubabasu in https://github.com/ava-labs/avalanchego/pull/2402 +- Add more descriptive formatted error by @aaronbuchwald in https://github.com/ava-labs/avalanchego/pull/2403 + +**Full Changelog**: https://github.com/ava-labs/avalanchego/compare/v1.10.16...v1.10.17 + ## [v1.10.16](https://github.com/ava-labs/avalanchego/releases/tag/v1.10.16) This version is backwards compatible to [v1.10.0](https://github.com/ava-labs/avalanchego/releases/tag/v1.10.0). It is optional, but encouraged. 
diff --git a/api/metrics/multi_gatherer.go b/api/metrics/multi_gatherer.go index ce9af54936be..e3c88778ad4e 100644 --- a/api/metrics/multi_gatherer.go +++ b/api/metrics/multi_gatherer.go @@ -13,6 +13,8 @@ import ( dto "github.com/prometheus/client_model/go" "golang.org/x/exp/slices" + + "github.com/ava-labs/avalanchego/utils" ) var ( @@ -91,7 +93,7 @@ func (g *multiGatherer) Register(namespace string, gatherer prometheus.Gatherer) } func sortMetrics(m []*dto.MetricFamily) { - slices.SortFunc(m, func(i, j *dto.MetricFamily) bool { - return *i.Name < *j.Name + slices.SortFunc(m, func(i, j *dto.MetricFamily) int { + return utils.Compare(*i.Name, *j.Name) }) } diff --git a/chains/atomic/state.go b/chains/atomic/state.go index cd7a3f2a0faa..402477299c21 100644 --- a/chains/atomic/state.go +++ b/chains/atomic/state.go @@ -6,17 +6,22 @@ package atomic import ( "bytes" "errors" + "fmt" + + "golang.org/x/exp/slices" "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/database/linkeddb" "github.com/ava-labs/avalanchego/database/prefixdb" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/hashing" "github.com/ava-labs/avalanchego/utils/set" ) -var errDuplicatedOperation = errors.New("duplicated operation on provided value") +var ( + errDuplicatePut = errors.New("duplicate put") + errDuplicateRemove = errors.New("duplicate remove") +) type dbElement struct { // Present indicates the value was removed before existing. @@ -86,7 +91,7 @@ func (s *state) SetValue(e *Element) error { } // This key was written twice, which is invalid - return errDuplicatedOperation + return fmt.Errorf("%w: Key=0x%x Value=0x%x", errDuplicatePut, e.Key, e.Value) } if err != database.ErrNotFound { // An unexpected error occurred, so we should propagate that error @@ -160,7 +165,7 @@ func (s *state) RemoveValue(key []byte) error { // Don't allow the removal of something that was already removed. 
if !value.Present { - return errDuplicatedOperation + return fmt.Errorf("%w: Key=0x%x", errDuplicateRemove, key) } // Remove [key] from the indexDB for each trait that has indexed this key. @@ -203,7 +208,7 @@ func (s *state) getKeys(traits [][]byte, startTrait, startKey []byte, limit int) lastKey := startKey // Iterate over the traits in order appending all of the keys that possess // the given [traits]. - utils.SortBytes(traits) + slices.SortFunc(traits, bytes.Compare) for _, trait := range traits { switch bytes.Compare(trait, startTrait) { case -1: diff --git a/chains/manager.go b/chains/manager.go index a1158e67716c..86da6811c41a 100644 --- a/chains/manager.go +++ b/chains/manager.go @@ -769,12 +769,14 @@ func (m *manager) createAvalancheChain( // using. var vmWrappingProposerVM block.ChainVM = proposervm.New( vmWrappedInsideProposerVM, - m.ApricotPhase4Time, - m.ApricotPhase4MinPChainHeight, - minBlockDelay, - numHistoricalBlocks, - m.stakingSigner, - m.stakingCert, + proposervm.Config{ + ActivationTime: m.ApricotPhase4Time, + MinimumPChainHeight: m.ApricotPhase4MinPChainHeight, + MinBlkDelay: minBlockDelay, + NumHistoricalBlocks: numHistoricalBlocks, + StakingLeafSigner: m.stakingSigner, + StakingCertLeaf: m.stakingCert, + }, ) if m.MeterVMEnabled { @@ -859,13 +861,14 @@ func (m *manager) createAvalancheChain( // Create engine, bootstrapper and state-syncer in this order, // to make sure start callbacks are duly initialized snowmanEngineConfig := smeng.Config{ - Ctx: ctx, - AllGetsServer: snowGetHandler, - VM: vmWrappingProposerVM, - Sender: snowmanMessageSender, - Validators: vdrs, - Params: consensusParams, - Consensus: snowmanConsensus, + Ctx: ctx, + AllGetsServer: snowGetHandler, + VM: vmWrappingProposerVM, + Sender: snowmanMessageSender, + Validators: vdrs, + ConnectedValidators: connectedValidators, + Params: consensusParams, + Consensus: snowmanConsensus, } snowmanEngine, err := smeng.New(snowmanEngineConfig) if err != nil { @@ -1111,12 +1114,14 @@ 
func (m *manager) createSnowmanChain( vm = proposervm.New( vm, - m.ApricotPhase4Time, - m.ApricotPhase4MinPChainHeight, - minBlockDelay, - numHistoricalBlocks, - m.stakingSigner, - m.stakingCert, + proposervm.Config{ + ActivationTime: m.ApricotPhase4Time, + MinimumPChainHeight: m.ApricotPhase4MinPChainHeight, + MinBlkDelay: minBlockDelay, + NumHistoricalBlocks: numHistoricalBlocks, + StakingLeafSigner: m.stakingSigner, + StakingCertLeaf: m.stakingCert, + }, ) if m.MeterVMEnabled { @@ -1201,14 +1206,15 @@ func (m *manager) createSnowmanChain( // Create engine, bootstrapper and state-syncer in this order, // to make sure start callbacks are duly initialized engineConfig := smeng.Config{ - Ctx: ctx, - AllGetsServer: snowGetHandler, - VM: vm, - Sender: messageSender, - Validators: vdrs, - Params: consensusParams, - Consensus: consensus, - PartialSync: m.PartialSyncPrimaryNetwork && ctx.ChainID == constants.PlatformChainID, + Ctx: ctx, + AllGetsServer: snowGetHandler, + VM: vm, + Sender: messageSender, + Validators: vdrs, + ConnectedValidators: connectedValidators, + Params: consensusParams, + Consensus: consensus, + PartialSync: m.PartialSyncPrimaryNetwork && ctx.ChainID == constants.PlatformChainID, } engine, err := smeng.New(engineConfig) if err != nil { diff --git a/codec/reflectcodec/type_codec.go b/codec/reflectcodec/type_codec.go index 9f9037f43d4e..6f18f8500272 100644 --- a/codec/reflectcodec/type_codec.go +++ b/codec/reflectcodec/type_codec.go @@ -490,10 +490,10 @@ func (c *genericCodec) marshal( endOffset = p.Offset } - slices.SortFunc(sortedKeys, func(a, b keyTuple) bool { + slices.SortFunc(sortedKeys, func(a, b keyTuple) int { aBytes := p.Bytes[a.startIndex:a.endIndex] bBytes := p.Bytes[b.startIndex:b.endIndex] - return bytes.Compare(aBytes, bBytes) < 0 + return bytes.Compare(aBytes, bBytes) }) allKeyBytes := slices.Clone(p.Bytes[startOffset:p.Offset]) diff --git a/genesis/bootstrappers.json b/genesis/bootstrappers.json index 62784fc7b4da..341828bc7c23 
100644 --- a/genesis/bootstrappers.json +++ b/genesis/bootstrappers.json @@ -2,99 +2,99 @@ "mainnet": [ { "id": "NodeID-A6onFGyJjA37EZ7kYHANMR1PFRT8NmXrF", - "ip": "54.94.43.49:9651" + "ip": "54.232.137.108:9651" }, { "id": "NodeID-6SwnPJLH8cWfrJ162JjZekbmzaFpjPcf", - "ip": "52.79.47.77:9651" + "ip": "13.124.187.98:9651" }, { "id": "NodeID-GSgaA47umS1px2ohVjodW9621Ks63xDxD", - "ip": "18.229.206.191:9651" + "ip": "54.232.142.167:9651" }, { "id": "NodeID-BQEo5Fy1FRKLbX51ejqDd14cuSXJKArH2", - "ip": "3.34.221.73:9651" + "ip": "3.39.67.183:9651" }, { "id": "NodeID-Drv1Qh7iJvW3zGBBeRnYfCzk56VCRM2GQ", - "ip": "13.244.155.170:9651" + "ip": "13.245.185.253:9651" }, { "id": "NodeID-DAtCoXfLT6Y83dgJ7FmQg8eR53hz37J79", - "ip": "13.244.47.224:9651" + "ip": "13.246.169.11:9651" }, { "id": "NodeID-FGRoKnyYKFWYFMb6Xbocf4hKuyCBENgWM", - "ip": "122.248.200.212:9651" + "ip": "13.251.82.39:9651" }, { "id": "NodeID-Dw7tuwxpAmcpvVGp9JzaHAR3REPoJ8f2R", - "ip": "52.30.9.211:9651" + "ip": "34.250.50.224:9651" }, { "id": "NodeID-4kCLS16Wy73nt1Zm54jFZsL7Msrv3UCeJ", - "ip": "122.248.199.127:9651" + "ip": "18.142.247.237:9651" }, { "id": "NodeID-9T7NXBFpp8LWCyc58YdKNoowDipdVKAWz", - "ip": "18.202.190.40:9651" + "ip": "34.252.106.116:9651" }, { "id": "NodeID-6ghBh6yof5ouMCya2n9fHzhpWouiZFVVj", - "ip": "15.206.182.45:9651" + "ip": "43.205.156.229:9651" }, { "id": "NodeID-HiFv1DpKXkAAfJ1NHWVqQoojjznibZXHP", - "ip": "15.207.11.193:9651" + "ip": "13.233.176.118:9651" }, { "id": "NodeID-Fv3t2shrpkmvLnvNzcv1rqRKbDAYFnUor", - "ip": "44.226.118.72:9651" + "ip": "35.164.160.193:9651" }, { "id": "NodeID-AaxT2P4uuPAHb7vAD8mNvjQ3jgyaV7tu9", - "ip": "54.185.87.50:9651" + "ip": "54.185.77.104:9651" }, { "id": "NodeID-kZNuQMHhydefgnwjYX1fhHMpRNAs9my1", - "ip": "18.158.15.12:9651" + "ip": "3.74.3.14:9651" }, { "id": "NodeID-A7GwTSd47AcDVqpTVj7YtxtjHREM33EJw", - "ip": "3.21.38.33:9651" + "ip": "3.135.107.20:9651" }, { "id": "NodeID-Hr78Fy8uDYiRYocRYHXp4eLCYeb8x5UuM", - "ip": "54.93.182.129:9651" + "ip": 
"3.77.28.168:9651" }, { "id": "NodeID-9CkG9MBNavnw7EVSRsuFr7ws9gascDQy3", - "ip": "3.128.138.36:9651" + "ip": "18.216.88.69:9651" }, { "id": "NodeID-A8jypu63CWp76STwKdqP6e9hjL675kdiG", - "ip": "3.104.107.241:9651" + "ip": "3.24.26.175:9651" }, { "id": "NodeID-HsBEx3L71EHWSXaE6gvk2VsNntFEZsxqc", - "ip": "3.106.25.139:9651" + "ip": "52.64.55.185:9651" }, { "id": "NodeID-Nr584bLpGgbCUbZFSBaBz3Xum5wpca9Ym", - "ip": "18.162.129.129:9651" + "ip": "16.162.27.145:9651" }, { "id": "NodeID-QKGoUvqcgormCoMj6yPw9isY7DX9H4mdd", - "ip": "18.162.161.230:9651" + "ip": "18.163.169.191:9651" }, { "id": "NodeID-HCw7S2TVbFPDWNBo1GnFWqJ47f9rDJtt1", - "ip": "52.47.181.114:9651" + "ip": "13.39.184.151:9651" }, { "id": "NodeID-FYv1Lb29SqMpywYXH7yNkcFAzRF2jvm3K", - "ip": "15.188.9.42:9651" + "ip": "13.36.28.133:9651" } ], "fuji": [ diff --git a/genesis/config.go b/genesis/config.go index a951e9e078fc..cedaf90127a9 100644 --- a/genesis/config.go +++ b/genesis/config.go @@ -53,9 +53,11 @@ func (a Allocation) Unparse(networkID uint32) (UnparsedAllocation, error) { return ua, err } -func (a Allocation) Less(other Allocation) bool { - return a.InitialAmount < other.InitialAmount || - (a.InitialAmount == other.InitialAmount && a.AVAXAddr.Less(other.AVAXAddr)) +func (a Allocation) Compare(other Allocation) int { + if amountCmp := utils.Compare(a.InitialAmount, other.InitialAmount); amountCmp != 0 { + return amountCmp + } + return a.AVAXAddr.Compare(other.AVAXAddr) } type Staker struct { diff --git a/genesis/config_test.go b/genesis/config_test.go index 455045dad70b..d83815cebfba 100644 --- a/genesis/config_test.go +++ b/genesis/config_test.go @@ -11,56 +11,43 @@ import ( "github.com/ava-labs/avalanchego/ids" ) -func TestAllocationLess(t *testing.T) { +func TestAllocationCompare(t *testing.T) { type test struct { name string alloc1 Allocation alloc2 Allocation - expected bool + expected int } tests := []test{ { name: "equal", alloc1: Allocation{}, alloc2: Allocation{}, - expected: false, + 
expected: 0, }, { - name: "first initial amount smaller", + name: "initial amount smaller", alloc1: Allocation{}, alloc2: Allocation{ InitialAmount: 1, }, - expected: true, + expected: -1, }, { - name: "first initial amount larger", - alloc1: Allocation{ - InitialAmount: 1, - }, - alloc2: Allocation{}, - expected: false, - }, - { - name: "first bytes smaller", + name: "bytes smaller", alloc1: Allocation{}, alloc2: Allocation{ AVAXAddr: ids.ShortID{1}, }, - expected: true, - }, - { - name: "first bytes larger", - alloc1: Allocation{ - AVAXAddr: ids.ShortID{1}, - }, - alloc2: Allocation{}, - expected: false, + expected: -1, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - require.Equal(t, tt.expected, tt.alloc1.Less(tt.alloc2)) + require := require.New(t) + + require.Equal(tt.expected, tt.alloc1.Compare(tt.alloc2)) + require.Equal(-tt.expected, tt.alloc2.Compare(tt.alloc1)) }) } } diff --git a/go.mod b/go.mod index a37213c95efc..af74594875e6 100644 --- a/go.mod +++ b/go.mod @@ -32,8 +32,8 @@ require ( github.com/mitchellh/mapstructure v1.5.0 github.com/mr-tron/base58 v1.2.0 github.com/nbutton23/zxcvbn-go v0.0.0-20180912185939-ae427f1e4c1d - github.com/onsi/ginkgo/v2 v2.4.0 - github.com/onsi/gomega v1.24.0 + github.com/onsi/ginkgo/v2 v2.13.1 + github.com/onsi/gomega v1.29.0 github.com/pires/go-proxyproto v0.6.2 github.com/prometheus/client_golang v1.14.0 github.com/prometheus/client_model v0.3.0 @@ -98,10 +98,11 @@ require ( github.com/go-ole/go-ole v1.2.6 // indirect github.com/go-sourcemap/sourcemap v2.1.3+incompatible // indirect github.com/go-stack/stack v1.8.1 // indirect + github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/protobuf v1.5.3 // indirect github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect - github.com/google/go-cmp v0.5.9 // indirect + github.com/google/go-cmp v0.6.0 // indirect github.com/google/pprof 
v0.0.0-20230207041349-798e818bf904 // indirect github.com/google/uuid v1.3.0 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.12.0 // indirect @@ -147,6 +148,7 @@ require ( go.uber.org/multierr v1.10.0 // indirect golang.org/x/sys v0.15.0 // indirect golang.org/x/text v0.14.0 // indirect + golang.org/x/tools v0.16.0 // indirect google.golang.org/genproto v0.0.0-20230711160842-782d3b101e98 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20230711160842-782d3b101e98 // indirect gopkg.in/ini.v1 v1.67.0 // indirect diff --git a/go.sum b/go.sum index 65a5481d671e..2ee782aa3334 100644 --- a/go.sum +++ b/go.sum @@ -233,6 +233,8 @@ github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/me github.com/go-stack/stack v1.8.1 h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw= github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee/go.mod h1:L0fX3K22YWvt/FAX9NnzrNzcI4wNYi9Yku4O0LKYflo= github.com/gobwas/pool v0.2.0/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw= github.com/gobwas/ws v1.0.2/go.mod h1:szmBTxLgaFppYjEmNtny/v3w89xOydFnnZMcgRRu/EM= @@ -298,8 +300,9 @@ github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= 
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= @@ -480,16 +483,16 @@ github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vv github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= -github.com/onsi/ginkgo/v2 v2.4.0 h1:+Ig9nvqgS5OBSACXNk15PLdp0U9XPYROt9CFzVdFGIs= -github.com/onsi/ginkgo/v2 v2.4.0/go.mod h1:iHkDK1fKGcBoEHT5W7YBq4RFWaQulw+caOMkAt4OrFo= +github.com/onsi/ginkgo/v2 v2.13.1 h1:LNGfMbR2OVGBfXjvRZIZ2YCTQdGKtPLvuI1rMCCj3OU= +github.com/onsi/ginkgo/v2 v2.13.1/go.mod h1:XStQ8QcGwLyF4HdfcZB8SFOS/MWCgDuXMSBe6zrvLgM= github.com/onsi/gomega v1.4.1/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro= -github.com/onsi/gomega v1.24.0 h1:+0glovB9Jd6z3VR+ScSwQqXVTIfJcGA9UBM8yzQxhqg= -github.com/onsi/gomega v1.24.0/go.mod h1:Z/NWtiqwBrwUt4/2loMmHL63EDLnYHmVbuBpDr2vQAg= +github.com/onsi/gomega v1.29.0 h1:KIA/t2t5UBzoirT4H9tsML45GEbo3ouUnBHsCfD2tVg= 
+github.com/onsi/gomega v1.29.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= @@ -586,6 +589,7 @@ github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXf github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= @@ -739,6 +743,7 @@ golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0= golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -970,6 +975,8 @@ golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.1.0/go.mod 
h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.16.0 h1:GO788SKMRunPIBCXiQyo2AaexLstOrVhuAL5YwsckQM= +golang.org/x/tools v0.16.0/go.mod h1:kYVVN6I1mBNoB1OX+noeBjbRk4IUEPa7JJ+TJMEooJ0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/ids/id.go b/ids/id.go index 68148018b078..6e5aed7876fd 100644 --- a/ids/id.go +++ b/ids/id.go @@ -145,6 +145,6 @@ func (id ID) MarshalText() ([]byte, error) { return []byte(id.String()), nil } -func (id ID) Less(other ID) bool { - return bytes.Compare(id[:], other[:]) < 0 +func (id ID) Compare(other ID) int { + return bytes.Compare(id[:], other[:]) } diff --git a/ids/id_test.go b/ids/id_test.go index 3424b17633e6..3197e7843697 100644 --- a/ids/id_test.go +++ b/ids/id_test.go @@ -5,6 +5,7 @@ package ids import ( "encoding/json" + "fmt" "testing" "github.com/stretchr/testify/require" @@ -200,26 +201,34 @@ func TestIDMapMarshalling(t *testing.T) { require.Equal(originalMap, unmarshalledMap) } -func TestIDLess(t *testing.T) { - require := require.New(t) +func TestIDCompare(t *testing.T) { + tests := []struct { + a ID + b ID + expected int + }{ + { + a: ID{1}, + b: ID{0}, + expected: 1, + }, + { + a: ID{1}, + b: ID{1}, + expected: 0, + }, + { + a: ID{1, 0}, + b: ID{1, 2}, + expected: -1, + }, + } + for _, test := range tests { + t.Run(fmt.Sprintf("%s_%s_%d", test.a, test.b, test.expected), func(t *testing.T) { + require := require.New(t) - id1 := ID{} - id2 := ID{} - require.False(id1.Less(id2)) - require.False(id2.Less(id1)) - - id1 = ID{1} - id2 = ID{0} - 
require.False(id1.Less(id2)) - require.True(id2.Less(id1)) - - id1 = ID{1} - id2 = ID{1} - require.False(id1.Less(id2)) - require.False(id2.Less(id1)) - - id1 = ID{1, 0} - id2 = ID{1, 2} - require.True(id1.Less(id2)) - require.False(id2.Less(id1)) + require.Equal(test.expected, test.a.Compare(test.b)) + require.Equal(-test.expected, test.b.Compare(test.a)) + }) + } } diff --git a/ids/node_id.go b/ids/node_id.go index 57d0c0b5bc69..a20b00d24462 100644 --- a/ids/node_id.go +++ b/ids/node_id.go @@ -66,8 +66,8 @@ func (id *NodeID) UnmarshalText(text []byte) error { return id.UnmarshalJSON(text) } -func (id NodeID) Less(other NodeID) bool { - return bytes.Compare(id[:], other[:]) == -1 +func (id NodeID) Compare(other NodeID) int { + return bytes.Compare(id[:], other[:]) } // ToNodeID attempt to convert a byte slice into a node id diff --git a/ids/node_id_test.go b/ids/node_id_test.go index b92fb6e19053..f3c11a452d24 100644 --- a/ids/node_id_test.go +++ b/ids/node_id_test.go @@ -5,6 +5,7 @@ package ids import ( "encoding/json" + "fmt" "testing" "github.com/stretchr/testify/require" @@ -174,26 +175,34 @@ func TestNodeIDMapMarshalling(t *testing.T) { require.Equal(originalMap, unmarshalledMap) } -func TestNodeIDLess(t *testing.T) { - require := require.New(t) +func TestNodeIDCompare(t *testing.T) { + tests := []struct { + a NodeID + b NodeID + expected int + }{ + { + a: NodeID{1}, + b: NodeID{0}, + expected: 1, + }, + { + a: NodeID{1}, + b: NodeID{1}, + expected: 0, + }, + { + a: NodeID{1, 0}, + b: NodeID{1, 2}, + expected: -1, + }, + } + for _, test := range tests { + t.Run(fmt.Sprintf("%s_%s_%d", test.a, test.b, test.expected), func(t *testing.T) { + require := require.New(t) - id1 := NodeID{} - id2 := NodeID{} - require.False(id1.Less(id2)) - require.False(id2.Less(id1)) - - id1 = NodeID{1} - id2 = NodeID{} - require.False(id1.Less(id2)) - require.True(id2.Less(id1)) - - id1 = NodeID{1} - id2 = NodeID{1} - require.False(id1.Less(id2)) - require.False(id2.Less(id1)) - - 
id1 = NodeID{1} - id2 = NodeID{1, 2} - require.True(id1.Less(id2)) - require.False(id2.Less(id1)) + require.Equal(test.expected, test.a.Compare(test.b)) + require.Equal(-test.expected, test.b.Compare(test.a)) + }) + } } diff --git a/ids/short.go b/ids/short.go index 25b96f1755b0..b19e0420690d 100644 --- a/ids/short.go +++ b/ids/short.go @@ -110,8 +110,8 @@ func (id ShortID) MarshalText() ([]byte, error) { return []byte(id.String()), nil } -func (id ShortID) Less(other ShortID) bool { - return bytes.Compare(id[:], other[:]) == -1 +func (id ShortID) Compare(other ShortID) int { + return bytes.Compare(id[:], other[:]) } // ShortIDsToStrings converts an array of shortIDs to an array of their string diff --git a/network/p2p/client.go b/network/p2p/client.go index 97de07a447ba..6f2e35c26896 100644 --- a/network/p2p/client.go +++ b/network/p2p/client.go @@ -41,10 +41,9 @@ type CrossChainAppResponseCallback func( type Client struct { handlerID uint64 handlerPrefix []byte - router *Router + router *router sender common.AppSender - // nodeSampler is used to select nodes to route AppRequestAny to - nodeSampler NodeSampler + options *clientOptions } // AppRequestAny issues an AppRequest to an arbitrary node decided by Client. 
@@ -55,7 +54,7 @@ func (c *Client) AppRequestAny( appRequestBytes []byte, onResponse AppResponseCallback, ) error { - sampled := c.nodeSampler.Sample(ctx, 1) + sampled := c.options.nodeSampler.Sample(ctx, 1) if len(sampled) != 1 { return ErrNoPeers } diff --git a/network/p2p/gossip/gossip_test.go b/network/p2p/gossip/gossip_test.go index eb4b23ecd9c8..d30fac0008e7 100644 --- a/network/p2p/gossip/gossip_test.go +++ b/network/p2p/gossip/gossip_test.go @@ -13,8 +13,6 @@ import ( "github.com/stretchr/testify/require" - "go.uber.org/mock/gomock" - "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/network/p2p" "github.com/ava-labs/avalanchego/snow/engine/common" @@ -117,10 +115,9 @@ func TestGossiperGossip(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { require := require.New(t) - ctrl := gomock.NewController(t) - responseSender := common.NewMockSender(ctrl) - responseRouter := p2p.NewRouter(logging.NoLog{}, responseSender, prometheus.NewRegistry(), "") + responseSender := &common.SenderTest{} + responseNetwork := p2p.NewNetwork(logging.NoLog{}, responseSender, prometheus.NewRegistry(), "") responseBloom, err := NewBloomFilter(1000, 0.01) require.NoError(err) responseSet := testSet{ @@ -130,31 +127,30 @@ func TestGossiperGossip(t *testing.T) { for _, item := range tt.responder { require.NoError(responseSet.Add(item)) } - peers := &p2p.Peers{} - require.NoError(peers.Connected(context.Background(), ids.EmptyNodeID, nil)) handler, err := NewHandler[*testTx](responseSet, tt.config, prometheus.NewRegistry()) require.NoError(err) - _, err = responseRouter.RegisterAppProtocol(0x0, handler, peers) + _, err = responseNetwork.NewAppProtocol(0x0, handler) require.NoError(err) - requestSender := common.NewMockSender(ctrl) - requestRouter := p2p.NewRouter(logging.NoLog{}, requestSender, prometheus.NewRegistry(), "") - - gossiped := make(chan struct{}) - requestSender.EXPECT().SendAppRequest(gomock.Any(), gomock.Any(), gomock.Any(), 
gomock.Any()). - Do(func(ctx context.Context, nodeIDs set.Set[ids.NodeID], requestID uint32, request []byte) { + requestSender := &common.SenderTest{ + SendAppRequestF: func(ctx context.Context, nodeIDs set.Set[ids.NodeID], requestID uint32, request []byte) error { go func() { - require.NoError(responseRouter.AppRequest(ctx, ids.EmptyNodeID, requestID, time.Time{}, request)) + require.NoError(responseNetwork.AppRequest(ctx, ids.EmptyNodeID, requestID, time.Time{}, request)) }() - }).AnyTimes() + return nil + }, + } - responseSender.EXPECT(). - SendAppResponse(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()). - Do(func(ctx context.Context, nodeID ids.NodeID, requestID uint32, appResponseBytes []byte) { - require.NoError(requestRouter.AppResponse(ctx, nodeID, requestID, appResponseBytes)) - close(gossiped) - }).AnyTimes() + requestNetwork := p2p.NewNetwork(logging.NoLog{}, requestSender, prometheus.NewRegistry(), "") + require.NoError(requestNetwork.Connected(context.Background(), ids.EmptyNodeID, nil)) + + gossiped := make(chan struct{}) + responseSender.SendAppResponseF = func(ctx context.Context, nodeID ids.NodeID, requestID uint32, appResponseBytes []byte) error { + require.NoError(requestNetwork.AppResponse(ctx, nodeID, requestID, appResponseBytes)) + close(gossiped) + return nil + } bloom, err := NewBloomFilter(1000, 0.01) require.NoError(err) @@ -166,7 +162,7 @@ func TestGossiperGossip(t *testing.T) { require.NoError(requestSet.Add(item)) } - requestClient, err := requestRouter.RegisterAppProtocol(0x0, nil, peers) + requestClient, err := requestNetwork.NewAppProtocol(0x0, nil) require.NoError(err) config := Config{ diff --git a/network/p2p/network.go b/network/p2p/network.go new file mode 100644 index 000000000000..444c2e4b9408 --- /dev/null +++ b/network/p2p/network.go @@ -0,0 +1,188 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package p2p + +import ( + "context" + "encoding/binary" + "sync" + "time" + + "github.com/prometheus/client_golang/prometheus" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow/engine/common" + "github.com/ava-labs/avalanchego/snow/validators" + "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/utils/set" + "github.com/ava-labs/avalanchego/version" +) + +var ( + _ validators.Connector = (*Network)(nil) + _ common.AppHandler = (*Network)(nil) + _ NodeSampler = (*peerSampler)(nil) +) + +// ClientOption configures Client +type ClientOption interface { + apply(options *clientOptions) +} + +type clientOptionFunc func(options *clientOptions) + +func (o clientOptionFunc) apply(options *clientOptions) { + o(options) +} + +// WithValidatorSampling configures Client.AppRequestAny to sample validators +func WithValidatorSampling(validators *Validators) ClientOption { + return clientOptionFunc(func(options *clientOptions) { + options.nodeSampler = validators + }) +} + +// clientOptions holds client-configurable values +type clientOptions struct { + // nodeSampler is used to select nodes to route Client.AppRequestAny to + nodeSampler NodeSampler +} + +// NewNetwork returns an instance of Network +func NewNetwork( + log logging.Logger, + sender common.AppSender, + metrics prometheus.Registerer, + namespace string, +) *Network { + return &Network{ + Peers: &Peers{}, + log: log, + sender: sender, + metrics: metrics, + namespace: namespace, + router: newRouter(log, sender, metrics, namespace), + } +} + +// Network exposes networking state and supports building p2p application +// protocols +type Network struct { + Peers *Peers + + log logging.Logger + sender common.AppSender + metrics prometheus.Registerer + namespace string + + router *router +} + +func (n *Network) AppRequest(ctx context.Context, nodeID ids.NodeID, requestID uint32, deadline time.Time, request []byte) error { + return n.router.AppRequest(ctx, 
nodeID, requestID, deadline, request) +} + +func (n *Network) AppResponse(ctx context.Context, nodeID ids.NodeID, requestID uint32, response []byte) error { + return n.router.AppResponse(ctx, nodeID, requestID, response) +} + +func (n *Network) AppRequestFailed(ctx context.Context, nodeID ids.NodeID, requestID uint32) error { + return n.router.AppRequestFailed(ctx, nodeID, requestID) +} + +func (n *Network) AppGossip(ctx context.Context, nodeID ids.NodeID, msg []byte) error { + return n.router.AppGossip(ctx, nodeID, msg) +} + +func (n *Network) CrossChainAppRequest(ctx context.Context, chainID ids.ID, requestID uint32, deadline time.Time, request []byte) error { + return n.router.CrossChainAppRequest(ctx, chainID, requestID, deadline, request) +} + +func (n *Network) CrossChainAppResponse(ctx context.Context, chainID ids.ID, requestID uint32, response []byte) error { + return n.router.CrossChainAppResponse(ctx, chainID, requestID, response) +} + +func (n *Network) CrossChainAppRequestFailed(ctx context.Context, chainID ids.ID, requestID uint32) error { + return n.router.CrossChainAppRequestFailed(ctx, chainID, requestID) +} + +func (n *Network) Connected(_ context.Context, nodeID ids.NodeID, _ *version.Application) error { + n.Peers.add(nodeID) + return nil +} + +func (n *Network) Disconnected(_ context.Context, nodeID ids.NodeID) error { + n.Peers.remove(nodeID) + return nil +} + +// NewAppProtocol reserves an identifier for an application protocol handler and +// returns a Client that can be used to send messages for the corresponding +// protocol. 
+func (n *Network) NewAppProtocol(handlerID uint64, handler Handler, options ...ClientOption) (*Client, error) { + if err := n.router.addHandler(handlerID, handler); err != nil { + return nil, err + } + + client := &Client{ + handlerID: handlerID, + handlerPrefix: binary.AppendUvarint(nil, handlerID), + sender: n.sender, + router: n.router, + options: &clientOptions{ + nodeSampler: &peerSampler{ + peers: n.Peers, + }, + }, + } + + for _, option := range options { + option.apply(client.options) + } + + return client, nil +} + +// Peers contains metadata about the current set of connected peers +type Peers struct { + lock sync.RWMutex + set set.SampleableSet[ids.NodeID] +} + +func (p *Peers) add(nodeID ids.NodeID) { + p.lock.Lock() + defer p.lock.Unlock() + + p.set.Add(nodeID) +} + +func (p *Peers) remove(nodeID ids.NodeID) { + p.lock.Lock() + defer p.lock.Unlock() + + p.set.Remove(nodeID) +} + +func (p *Peers) has(nodeID ids.NodeID) bool { + p.lock.RLock() + defer p.lock.RUnlock() + + return p.set.Contains(nodeID) +} + +// Sample returns a pseudo-random sample of up to limit Peers +func (p *Peers) Sample(limit int) []ids.NodeID { + p.lock.RLock() + defer p.lock.RUnlock() + + return p.set.Sample(limit) +} + +type peerSampler struct { + peers *Peers +} + +func (p peerSampler) Sample(_ context.Context, limit int) []ids.NodeID { + return p.peers.Sample(limit) +} diff --git a/network/p2p/network_test.go b/network/p2p/network_test.go new file mode 100644 index 000000000000..590858a0c467 --- /dev/null +++ b/network/p2p/network_test.go @@ -0,0 +1,596 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package p2p + +import ( + "context" + "sync" + "testing" + "time" + + "github.com/prometheus/client_golang/prometheus" + + "github.com/stretchr/testify/require" + + "go.uber.org/mock/gomock" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/network/p2p/mocks" + "github.com/ava-labs/avalanchego/snow/engine/common" + "github.com/ava-labs/avalanchego/snow/validators" + "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/utils/math" + "github.com/ava-labs/avalanchego/utils/set" + "github.com/ava-labs/avalanchego/version" +) + +func TestAppRequestResponse(t *testing.T) { + handlerID := uint64(0x0) + request := []byte("request") + response := []byte("response") + nodeID := ids.GenerateTestNodeID() + chainID := ids.GenerateTestID() + + ctxKey := new(string) + ctxVal := new(string) + *ctxKey = "foo" + *ctxVal = "bar" + + tests := []struct { + name string + requestFunc func(t *testing.T, network *Network, client *Client, sender *common.SenderTest, handler *mocks.MockHandler, wg *sync.WaitGroup) + }{ + { + name: "app request", + requestFunc: func(t *testing.T, network *Network, client *Client, sender *common.SenderTest, handler *mocks.MockHandler, wg *sync.WaitGroup) { + sender.SendAppRequestF = func(ctx context.Context, nodeIDs set.Set[ids.NodeID], requestID uint32, request []byte) error { + for range nodeIDs { + go func() { + require.NoError(t, network.AppRequest(ctx, nodeID, requestID, time.Time{}, request)) + }() + } + + return nil + } + sender.SendAppResponseF = func(ctx context.Context, _ ids.NodeID, requestID uint32, response []byte) error { + go func() { + ctx = context.WithValue(ctx, ctxKey, ctxVal) + require.NoError(t, network.AppResponse(ctx, nodeID, requestID, response)) + }() + + return nil + } + handler.EXPECT(). + AppRequest(context.Background(), nodeID, gomock.Any(), request). 
+ DoAndReturn(func(context.Context, ids.NodeID, time.Time, []byte) ([]byte, error) { + return response, nil + }) + + callback := func(ctx context.Context, actualNodeID ids.NodeID, actualResponse []byte, err error) { + defer wg.Done() + + require.NoError(t, err) + require.Equal(t, ctxVal, ctx.Value(ctxKey)) + require.Equal(t, nodeID, actualNodeID) + require.Equal(t, response, actualResponse) + } + + require.NoError(t, client.AppRequestAny(context.Background(), request, callback)) + }, + }, + { + name: "app request failed", + requestFunc: func(t *testing.T, network *Network, client *Client, sender *common.SenderTest, handler *mocks.MockHandler, wg *sync.WaitGroup) { + sender.SendAppRequestF = func(ctx context.Context, nodeIDs set.Set[ids.NodeID], requestID uint32, request []byte) error { + for range nodeIDs { + go func() { + require.NoError(t, network.AppRequestFailed(ctx, nodeID, requestID)) + }() + } + + return nil + } + + callback := func(_ context.Context, actualNodeID ids.NodeID, actualResponse []byte, err error) { + defer wg.Done() + + require.ErrorIs(t, err, ErrAppRequestFailed) + require.Equal(t, nodeID, actualNodeID) + require.Nil(t, actualResponse) + } + + require.NoError(t, client.AppRequest(context.Background(), set.Of(nodeID), request, callback)) + }, + }, + { + name: "cross-chain app request", + requestFunc: func(t *testing.T, network *Network, client *Client, sender *common.SenderTest, handler *mocks.MockHandler, wg *sync.WaitGroup) { + chainID := ids.GenerateTestID() + sender.SendCrossChainAppRequestF = func(ctx context.Context, chainID ids.ID, requestID uint32, request []byte) { + go func() { + require.NoError(t, network.CrossChainAppRequest(ctx, chainID, requestID, time.Time{}, request)) + }() + } + sender.SendCrossChainAppResponseF = func(ctx context.Context, chainID ids.ID, requestID uint32, response []byte) { + go func() { + ctx = context.WithValue(ctx, ctxKey, ctxVal) + require.NoError(t, network.CrossChainAppResponse(ctx, chainID, requestID, 
response)) + }() + } + handler.EXPECT(). + CrossChainAppRequest(context.Background(), chainID, gomock.Any(), request). + DoAndReturn(func(context.Context, ids.ID, time.Time, []byte) ([]byte, error) { + return response, nil + }) + + callback := func(ctx context.Context, actualChainID ids.ID, actualResponse []byte, err error) { + defer wg.Done() + require.NoError(t, err) + require.Equal(t, ctxVal, ctx.Value(ctxKey)) + require.Equal(t, chainID, actualChainID) + require.Equal(t, response, actualResponse) + } + + require.NoError(t, client.CrossChainAppRequest(context.Background(), chainID, request, callback)) + }, + }, + { + name: "cross-chain app request failed", + requestFunc: func(t *testing.T, network *Network, client *Client, sender *common.SenderTest, handler *mocks.MockHandler, wg *sync.WaitGroup) { + sender.SendCrossChainAppRequestF = func(ctx context.Context, chainID ids.ID, requestID uint32, request []byte) { + go func() { + require.NoError(t, network.CrossChainAppRequestFailed(ctx, chainID, requestID)) + }() + } + + callback := func(_ context.Context, actualChainID ids.ID, actualResponse []byte, err error) { + defer wg.Done() + + require.ErrorIs(t, err, ErrAppRequestFailed) + require.Equal(t, chainID, actualChainID) + require.Nil(t, actualResponse) + } + + require.NoError(t, client.CrossChainAppRequest(context.Background(), chainID, request, callback)) + }, + }, + { + name: "app gossip", + requestFunc: func(t *testing.T, network *Network, client *Client, sender *common.SenderTest, handler *mocks.MockHandler, wg *sync.WaitGroup) { + sender.SendAppGossipF = func(ctx context.Context, gossip []byte) error { + go func() { + require.NoError(t, network.AppGossip(ctx, nodeID, gossip)) + }() + + return nil + } + handler.EXPECT(). + AppGossip(context.Background(), nodeID, request). 
+ DoAndReturn(func(context.Context, ids.NodeID, []byte) error { + defer wg.Done() + return nil + }) + + require.NoError(t, client.AppGossip(context.Background(), request)) + }, + }, + { + name: "app gossip specific", + requestFunc: func(t *testing.T, network *Network, client *Client, sender *common.SenderTest, handler *mocks.MockHandler, wg *sync.WaitGroup) { + sender.SendAppGossipSpecificF = func(ctx context.Context, nodeIDs set.Set[ids.NodeID], bytes []byte) error { + for n := range nodeIDs { + nodeID := n + go func() { + require.NoError(t, network.AppGossip(ctx, nodeID, bytes)) + }() + } + + return nil + } + handler.EXPECT(). + AppGossip(context.Background(), nodeID, request). + DoAndReturn(func(context.Context, ids.NodeID, []byte) error { + defer wg.Done() + return nil + }) + + require.NoError(t, client.AppGossipSpecific(context.Background(), set.Of(nodeID), request)) + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + require := require.New(t) + ctrl := gomock.NewController(t) + + sender := &common.SenderTest{} + handler := mocks.NewMockHandler(ctrl) + n := NewNetwork(logging.NoLog{}, sender, prometheus.NewRegistry(), "") + require.NoError(n.Connected(context.Background(), nodeID, nil)) + client, err := n.NewAppProtocol(handlerID, handler) + require.NoError(err) + + wg := &sync.WaitGroup{} + wg.Add(1) + tt.requestFunc(t, n, client, sender, handler, wg) + wg.Wait() + }) + } +} + +func TestNetworkDropMessage(t *testing.T) { + unregistered := byte(0x0) + + tests := []struct { + name string + requestFunc func(network *Network) error + err error + }{ + { + name: "drop unregistered app request message", + requestFunc: func(network *Network) error { + return network.AppRequest(context.Background(), ids.GenerateTestNodeID(), 0, time.Time{}, []byte{unregistered}) + }, + err: nil, + }, + { + name: "drop empty app request message", + requestFunc: func(network *Network) error { + return network.AppRequest(context.Background(), 
ids.GenerateTestNodeID(), 0, time.Time{}, []byte{}) + }, + err: nil, + }, + { + name: "drop unregistered cross-chain app request message", + requestFunc: func(network *Network) error { + return network.CrossChainAppRequest(context.Background(), ids.GenerateTestID(), 0, time.Time{}, []byte{unregistered}) + }, + err: nil, + }, + { + name: "drop empty cross-chain app request message", + requestFunc: func(network *Network) error { + return network.CrossChainAppRequest(context.Background(), ids.GenerateTestID(), 0, time.Time{}, []byte{}) + }, + err: nil, + }, + { + name: "drop unregistered gossip message", + requestFunc: func(network *Network) error { + return network.AppGossip(context.Background(), ids.GenerateTestNodeID(), []byte{unregistered}) + }, + err: nil, + }, + { + name: "drop empty gossip message", + requestFunc: func(network *Network) error { + return network.AppGossip(context.Background(), ids.GenerateTestNodeID(), []byte{}) + }, + err: nil, + }, + { + name: "drop unrequested app request failed", + requestFunc: func(network *Network) error { + return network.AppRequestFailed(context.Background(), ids.GenerateTestNodeID(), 0) + }, + err: ErrUnrequestedResponse, + }, + { + name: "drop unrequested app response", + requestFunc: func(network *Network) error { + return network.AppResponse(context.Background(), ids.GenerateTestNodeID(), 0, nil) + }, + err: ErrUnrequestedResponse, + }, + { + name: "drop unrequested cross-chain request failed", + requestFunc: func(network *Network) error { + return network.CrossChainAppRequestFailed(context.Background(), ids.GenerateTestID(), 0) + }, + err: ErrUnrequestedResponse, + }, + { + name: "drop unrequested cross-chain response", + requestFunc: func(network *Network) error { + return network.CrossChainAppResponse(context.Background(), ids.GenerateTestID(), 0, nil) + }, + err: ErrUnrequestedResponse, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + require := require.New(t) + + network := 
NewNetwork(logging.NoLog{}, &common.SenderTest{}, prometheus.NewRegistry(), "") + + err := tt.requestFunc(network) + require.ErrorIs(err, tt.err) + }) + } +} + +// It's possible for the request id to overflow and wrap around. +// If there are still pending requests with the same request id, we should +// not attempt to issue another request until the previous one has cleared. +func TestAppRequestDuplicateRequestIDs(t *testing.T) { + require := require.New(t) + ctrl := gomock.NewController(t) + + handler := mocks.NewMockHandler(ctrl) + sender := &common.SenderTest{ + SendAppResponseF: func(context.Context, ids.NodeID, uint32, []byte) error { + return nil + }, + } + network := NewNetwork(logging.NoLog{}, sender, prometheus.NewRegistry(), "") + nodeID := ids.GenerateTestNodeID() + + requestSent := &sync.WaitGroup{} + sender.SendAppRequestF = func(ctx context.Context, nodeIDs set.Set[ids.NodeID], requestID uint32, request []byte) error { + for range nodeIDs { + requestSent.Add(1) + go func() { + require.NoError(network.AppRequest(ctx, nodeID, requestID, time.Time{}, request)) + requestSent.Done() + }() + } + + return nil + } + + timeout := &sync.WaitGroup{} + response := []byte("response") + handler.EXPECT().AppRequest(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()). 
+ DoAndReturn(func(ctx context.Context, nodeID ids.NodeID, deadline time.Time, request []byte) ([]byte, error) { + timeout.Wait() + return response, nil + }).AnyTimes() + + require.NoError(network.Connected(context.Background(), nodeID, nil)) + client, err := network.NewAppProtocol(0x1, handler) + require.NoError(err) + + onResponse := func(ctx context.Context, nodeID ids.NodeID, got []byte, err error) { + require.NoError(err) + require.Equal(response, got) + } + + require.NoError(client.AppRequest(context.Background(), set.Of(nodeID), []byte{}, onResponse)) + requestSent.Wait() + + // force the network to use the same requestID + network.router.requestID = 1 + timeout.Add(1) + err = client.AppRequest(context.Background(), set.Of(nodeID), []byte{}, nil) + requestSent.Wait() + require.ErrorIs(err, ErrRequestPending) + + timeout.Done() +} + +// Sample should always return up to [limit] peers, and less if fewer than +// [limit] peers are available. +func TestPeersSample(t *testing.T) { + nodeID1 := ids.GenerateTestNodeID() + nodeID2 := ids.GenerateTestNodeID() + nodeID3 := ids.GenerateTestNodeID() + + tests := []struct { + name string + connected set.Set[ids.NodeID] + disconnected set.Set[ids.NodeID] + limit int + }{ + { + name: "no peers", + limit: 1, + }, + { + name: "one peer connected", + connected: set.Of(nodeID1), + limit: 1, + }, + { + name: "multiple peers connected", + connected: set.Of(nodeID1, nodeID2, nodeID3), + limit: 1, + }, + { + name: "peer connects and disconnects - 1", + connected: set.Of(nodeID1), + disconnected: set.Of(nodeID1), + limit: 1, + }, + { + name: "peer connects and disconnects - 2", + connected: set.Of(nodeID1, nodeID2), + disconnected: set.Of(nodeID2), + limit: 1, + }, + { + name: "peer connects and disconnects - 2", + connected: set.Of(nodeID1, nodeID2, nodeID3), + disconnected: set.Of(nodeID1, nodeID2), + limit: 1, + }, + { + name: "less than limit peers", + connected: set.Of(nodeID1, nodeID2, nodeID3), + limit: 4, + }, + { + name: 
"limit peers", + connected: set.Of(nodeID1, nodeID2, nodeID3), + limit: 3, + }, + { + name: "more than limit peers", + connected: set.Of(nodeID1, nodeID2, nodeID3), + limit: 2, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + require := require.New(t) + + network := NewNetwork(logging.NoLog{}, &common.SenderTest{}, prometheus.NewRegistry(), "") + + for connected := range tt.connected { + require.NoError(network.Connected(context.Background(), connected, nil)) + } + + for disconnected := range tt.disconnected { + require.NoError(network.Disconnected(context.Background(), disconnected)) + } + + sampleable := set.Set[ids.NodeID]{} + sampleable.Union(tt.connected) + sampleable.Difference(tt.disconnected) + + sampled := network.Peers.Sample(tt.limit) + require.Len(sampled, math.Min(tt.limit, len(sampleable))) + require.Subset(sampleable, sampled) + }) + } +} + +func TestAppRequestAnyNodeSelection(t *testing.T) { + tests := []struct { + name string + peers []ids.NodeID + expected error + }{ + { + name: "no peers", + expected: ErrNoPeers, + }, + { + name: "has peers", + peers: []ids.NodeID{ids.GenerateTestNodeID()}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + require := require.New(t) + + sent := set.Set[ids.NodeID]{} + sender := &common.SenderTest{ + SendAppRequestF: func(_ context.Context, nodeIDs set.Set[ids.NodeID], _ uint32, _ []byte) error { + for nodeID := range nodeIDs { + sent.Add(nodeID) + } + return nil + }, + } + + n := NewNetwork(logging.NoLog{}, sender, prometheus.NewRegistry(), "") + for _, peer := range tt.peers { + require.NoError(n.Connected(context.Background(), peer, &version.Application{})) + } + + client, err := n.NewAppProtocol(1, nil) + require.NoError(err) + + err = client.AppRequestAny(context.Background(), []byte("foobar"), nil) + require.ErrorIs(err, tt.expected) + }) + } +} + +func TestNodeSamplerClientOption(t *testing.T) { + nodeID0 := ids.GenerateTestNodeID() + nodeID1 := 
ids.GenerateTestNodeID() + nodeID2 := ids.GenerateTestNodeID() + + tests := []struct { + name string + peers []ids.NodeID + option func(t *testing.T, n *Network) ClientOption + expected []ids.NodeID + expectedErr error + }{ + { + name: "default", + peers: []ids.NodeID{nodeID0, nodeID1, nodeID2}, + option: func(_ *testing.T, n *Network) ClientOption { + return clientOptionFunc(func(*clientOptions) {}) + }, + expected: []ids.NodeID{nodeID0, nodeID1, nodeID2}, + }, + { + name: "validator connected", + peers: []ids.NodeID{nodeID0, nodeID1}, + option: func(t *testing.T, n *Network) ClientOption { + state := &validators.TestState{ + GetCurrentHeightF: func(context.Context) (uint64, error) { + return 0, nil + }, + GetValidatorSetF: func(context.Context, uint64, ids.ID) (map[ids.NodeID]*validators.GetValidatorOutput, error) { + return map[ids.NodeID]*validators.GetValidatorOutput{ + nodeID1: nil, + }, nil + }, + } + + validators := NewValidators(n.Peers, n.log, ids.Empty, state, 0) + return WithValidatorSampling(validators) + }, + expected: []ids.NodeID{nodeID1}, + }, + { + name: "validator disconnected", + peers: []ids.NodeID{nodeID0}, + option: func(t *testing.T, n *Network) ClientOption { + state := &validators.TestState{ + GetCurrentHeightF: func(context.Context) (uint64, error) { + return 0, nil + }, + GetValidatorSetF: func(context.Context, uint64, ids.ID) (map[ids.NodeID]*validators.GetValidatorOutput, error) { + return map[ids.NodeID]*validators.GetValidatorOutput{ + nodeID1: nil, + }, nil + }, + } + + validators := NewValidators(n.Peers, n.log, ids.Empty, state, 0) + return WithValidatorSampling(validators) + }, + expectedErr: ErrNoPeers, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + require := require.New(t) + + done := make(chan struct{}) + sender := &common.SenderTest{ + SendAppRequestF: func(_ context.Context, nodeIDs set.Set[ids.NodeID], _ uint32, _ []byte) error { + require.Subset(tt.expected, nodeIDs.List()) + close(done) + 
return nil + }, + } + network := NewNetwork(logging.NoLog{}, sender, prometheus.NewRegistry(), "") + ctx := context.Background() + for _, peer := range tt.peers { + require.NoError(network.Connected(ctx, peer, nil)) + } + + client, err := network.NewAppProtocol(0x0, nil, tt.option(t, network)) + require.NoError(err) + + if err = client.AppRequestAny(ctx, []byte("request"), nil); err != nil { + close(done) + } + + require.ErrorIs(tt.expectedErr, err) + <-done + }) + } +} diff --git a/network/p2p/peers.go b/network/p2p/peers.go deleted file mode 100644 index 47982aeb2dc4..000000000000 --- a/network/p2p/peers.go +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package p2p - -import ( - "context" - "sync" - - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow/validators" - "github.com/ava-labs/avalanchego/utils/set" - "github.com/ava-labs/avalanchego/version" -) - -var ( - _ validators.Connector = (*Peers)(nil) - _ NodeSampler = (*Peers)(nil) -) - -// Peers contains a set of nodes that we are connected to. -type Peers struct { - lock sync.RWMutex - peers set.SampleableSet[ids.NodeID] -} - -func (p *Peers) Connected(_ context.Context, nodeID ids.NodeID, _ *version.Application) error { - p.lock.Lock() - defer p.lock.Unlock() - - p.peers.Add(nodeID) - - return nil -} - -func (p *Peers) Disconnected(_ context.Context, nodeID ids.NodeID) error { - p.lock.Lock() - defer p.lock.Unlock() - - p.peers.Remove(nodeID) - - return nil -} - -func (p *Peers) Sample(_ context.Context, limit int) []ids.NodeID { - p.lock.RLock() - defer p.lock.RUnlock() - - return p.peers.Sample(limit) -} diff --git a/network/p2p/peers_test.go b/network/p2p/peers_test.go deleted file mode 100644 index 9835cf065b0b..000000000000 --- a/network/p2p/peers_test.go +++ /dev/null @@ -1,150 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. 
-// See the file LICENSE for licensing terms. - -package p2p - -import ( - "context" - "testing" - - "github.com/prometheus/client_golang/prometheus" - - "github.com/stretchr/testify/require" - - "go.uber.org/mock/gomock" - - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/snow/engine/common" - "github.com/ava-labs/avalanchego/utils/logging" - "github.com/ava-labs/avalanchego/utils/math" - "github.com/ava-labs/avalanchego/utils/set" -) - -// Sample should always return up to [limit] peers, and less if fewer than -// [limit] peers are available. -func TestPeersSample(t *testing.T) { - nodeID1 := ids.GenerateTestNodeID() - nodeID2 := ids.GenerateTestNodeID() - nodeID3 := ids.GenerateTestNodeID() - - tests := []struct { - name string - connected set.Set[ids.NodeID] - disconnected set.Set[ids.NodeID] - limit int - }{ - { - name: "no peers", - limit: 1, - }, - { - name: "one peer connected", - connected: set.Of(nodeID1), - limit: 1, - }, - { - name: "multiple peers connected", - connected: set.Of(nodeID1, nodeID2, nodeID3), - limit: 1, - }, - { - name: "peer connects and disconnects - 1", - connected: set.Of(nodeID1), - disconnected: set.Of(nodeID1), - limit: 1, - }, - { - name: "peer connects and disconnects - 2", - connected: set.Of(nodeID1, nodeID2), - disconnected: set.Of(nodeID2), - limit: 1, - }, - { - name: "peer connects and disconnects - 2", - connected: set.Of(nodeID1, nodeID2, nodeID3), - disconnected: set.Of(nodeID1, nodeID2), - limit: 1, - }, - { - name: "less than limit peers", - connected: set.Of(nodeID1, nodeID2, nodeID3), - limit: 4, - }, - { - name: "limit peers", - connected: set.Of(nodeID1, nodeID2, nodeID3), - limit: 3, - }, - { - name: "more than limit peers", - connected: set.Of(nodeID1, nodeID2, nodeID3), - limit: 2, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - require := require.New(t) - peers := &Peers{} - - for connected := range tt.connected { - 
require.NoError(peers.Connected(context.Background(), connected, nil)) - } - - for disconnected := range tt.disconnected { - require.NoError(peers.Disconnected(context.Background(), disconnected)) - } - - sampleable := set.Set[ids.NodeID]{} - sampleable.Union(tt.connected) - sampleable.Difference(tt.disconnected) - - sampled := peers.Sample(context.Background(), tt.limit) - require.Len(sampled, math.Min(tt.limit, len(sampleable))) - require.Subset(sampleable, sampled) - }) - } -} - -func TestAppRequestAnyNodeSelection(t *testing.T) { - tests := []struct { - name string - peers []ids.NodeID - expected error - }{ - { - name: "no peers", - expected: ErrNoPeers, - }, - { - name: "has peers", - peers: []ids.NodeID{ids.GenerateTestNodeID()}, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - require := require.New(t) - ctrl := gomock.NewController(t) - mockAppSender := common.NewMockSender(ctrl) - - expectedCalls := 0 - if tt.expected == nil { - expectedCalls = 1 - } - mockAppSender.EXPECT().SendAppRequest(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Times(expectedCalls) - - r := NewRouter(logging.NoLog{}, mockAppSender, prometheus.NewRegistry(), "") - peers := &Peers{} - for _, peer := range tt.peers { - require.NoError(peers.Connected(context.Background(), peer, nil)) - } - - client, err := r.RegisterAppProtocol(1, nil, peers) - require.NoError(err) - - err = client.AppRequestAny(context.Background(), []byte("foobar"), nil) - require.ErrorIs(err, tt.expected) - }) - } -} diff --git a/network/p2p/router.go b/network/p2p/router.go index 69428f0c47ac..0b4eff4eb576 100644 --- a/network/p2p/router.go +++ b/network/p2p/router.go @@ -26,7 +26,7 @@ var ( ErrExistingAppProtocol = errors.New("existing app protocol") ErrUnrequestedResponse = errors.New("unrequested response") - _ common.AppHandler = (*Router)(nil) + _ common.AppHandler = (*router)(nil) ) type metrics struct { @@ -55,10 +55,10 @@ type meteredHandler struct { *metrics } -// 
Router routes incoming application messages to the corresponding registered +// router routes incoming application messages to the corresponding registered // app handler. App messages must be made using the registered handler's // corresponding Client. -type Router struct { +type router struct { log logging.Logger sender common.AppSender metrics prometheus.Registerer @@ -71,14 +71,14 @@ type Router struct { requestID uint32 } -// NewRouter returns a new instance of Router -func NewRouter( +// newRouter returns a new instance of Router +func newRouter( log logging.Logger, sender common.AppSender, metrics prometheus.Registerer, namespace string, -) *Router { - return &Router{ +) *router { + return &router{ log: log, sender: sender, metrics: metrics, @@ -91,15 +91,12 @@ func NewRouter( } } -// RegisterAppProtocol reserves an identifier for an application protocol and -// returns a Client that can be used to send messages for the corresponding -// protocol. -func (r *Router) RegisterAppProtocol(handlerID uint64, handler Handler, nodeSampler NodeSampler) (*Client, error) { +func (r *router) addHandler(handlerID uint64, handler Handler) error { r.lock.Lock() defer r.lock.Unlock() if _, ok := r.handlers[handlerID]; ok { - return nil, fmt.Errorf("failed to register handler id %d: %w", handlerID, ErrExistingAppProtocol) + return fmt.Errorf("failed to register handler id %d: %w", handlerID, ErrExistingAppProtocol) } appRequestTime, err := metric.NewAverager( @@ -109,7 +106,7 @@ func (r *Router) RegisterAppProtocol(handlerID uint64, handler Handler, nodeSamp r.metrics, ) if err != nil { - return nil, fmt.Errorf("failed to register app request metric for handler_%d: %w", handlerID, err) + return fmt.Errorf("failed to register app request metric for handler_%d: %w", handlerID, err) } appRequestFailedTime, err := metric.NewAverager( @@ -119,7 +116,7 @@ func (r *Router) RegisterAppProtocol(handlerID uint64, handler Handler, nodeSamp r.metrics, ) if err != nil { - return nil, 
fmt.Errorf("failed to register app request failed metric for handler_%d: %w", handlerID, err) + return fmt.Errorf("failed to register app request failed metric for handler_%d: %w", handlerID, err) } appResponseTime, err := metric.NewAverager( @@ -129,7 +126,7 @@ func (r *Router) RegisterAppProtocol(handlerID uint64, handler Handler, nodeSamp r.metrics, ) if err != nil { - return nil, fmt.Errorf("failed to register app response metric for handler_%d: %w", handlerID, err) + return fmt.Errorf("failed to register app response metric for handler_%d: %w", handlerID, err) } appGossipTime, err := metric.NewAverager( @@ -139,7 +136,7 @@ func (r *Router) RegisterAppProtocol(handlerID uint64, handler Handler, nodeSamp r.metrics, ) if err != nil { - return nil, fmt.Errorf("failed to register app gossip metric for handler_%d: %w", handlerID, err) + return fmt.Errorf("failed to register app gossip metric for handler_%d: %w", handlerID, err) } crossChainAppRequestTime, err := metric.NewAverager( @@ -149,7 +146,7 @@ func (r *Router) RegisterAppProtocol(handlerID uint64, handler Handler, nodeSamp r.metrics, ) if err != nil { - return nil, fmt.Errorf("failed to register cross-chain app request metric for handler_%d: %w", handlerID, err) + return fmt.Errorf("failed to register cross-chain app request metric for handler_%d: %w", handlerID, err) } crossChainAppRequestFailedTime, err := metric.NewAverager( @@ -159,7 +156,7 @@ func (r *Router) RegisterAppProtocol(handlerID uint64, handler Handler, nodeSamp r.metrics, ) if err != nil { - return nil, fmt.Errorf("failed to register cross-chain app request failed metric for handler_%d: %w", handlerID, err) + return fmt.Errorf("failed to register cross-chain app request failed metric for handler_%d: %w", handlerID, err) } crossChainAppResponseTime, err := metric.NewAverager( @@ -169,7 +166,7 @@ func (r *Router) RegisterAppProtocol(handlerID uint64, handler Handler, nodeSamp r.metrics, ) if err != nil { - return nil, fmt.Errorf("failed to 
register cross-chain app response metric for handler_%d: %w", handlerID, err) + return fmt.Errorf("failed to register cross-chain app response metric for handler_%d: %w", handlerID, err) } r.handlers[handlerID] = &meteredHandler{ @@ -190,13 +187,7 @@ func (r *Router) RegisterAppProtocol(handlerID uint64, handler Handler, nodeSamp }, } - return &Client{ - handlerID: handlerID, - handlerPrefix: binary.AppendUvarint(nil, handlerID), - sender: r.sender, - router: r, - nodeSampler: nodeSampler, - }, nil + return nil } // AppRequest routes an AppRequest to a Handler based on the handler prefix. The @@ -204,7 +195,7 @@ func (r *Router) RegisterAppProtocol(handlerID uint64, handler Handler, nodeSamp // // Any error condition propagated outside Handler application logic is // considered fatal -func (r *Router) AppRequest(ctx context.Context, nodeID ids.NodeID, requestID uint32, deadline time.Time, request []byte) error { +func (r *router) AppRequest(ctx context.Context, nodeID ids.NodeID, requestID uint32, deadline time.Time, request []byte) error { start := time.Now() parsedMsg, handler, ok := r.parse(request) if !ok { @@ -232,7 +223,7 @@ func (r *Router) AppRequest(ctx context.Context, nodeID ids.NodeID, requestID ui // // Any error condition propagated outside Handler application logic is // considered fatal -func (r *Router) AppRequestFailed(ctx context.Context, nodeID ids.NodeID, requestID uint32, appErr *common.AppError) error { +func (r *router) AppRequestFailed(ctx context.Context, nodeID ids.NodeID, requestID uint32, appErr *common.AppError) error { start := time.Now() pending, ok := r.clearAppRequest(requestID) if !ok { @@ -250,7 +241,7 @@ func (r *Router) AppRequestFailed(ctx context.Context, nodeID ids.NodeID, reques // // Any error condition propagated outside Handler application logic is // considered fatal -func (r *Router) AppResponse(ctx context.Context, nodeID ids.NodeID, requestID uint32, response []byte) error { +func (r *router) AppResponse(ctx 
context.Context, nodeID ids.NodeID, requestID uint32, response []byte) error { start := time.Now() pending, ok := r.clearAppRequest(requestID) if !ok { @@ -268,7 +259,7 @@ func (r *Router) AppResponse(ctx context.Context, nodeID ids.NodeID, requestID u // // Any error condition propagated outside Handler application logic is // considered fatal -func (r *Router) AppGossip(ctx context.Context, nodeID ids.NodeID, gossip []byte) error { +func (r *router) AppGossip(ctx context.Context, nodeID ids.NodeID, gossip []byte) error { start := time.Now() parsedMsg, handler, ok := r.parse(gossip) if !ok { @@ -292,7 +283,7 @@ func (r *Router) AppGossip(ctx context.Context, nodeID ids.NodeID, gossip []byte // // Any error condition propagated outside Handler application logic is // considered fatal -func (r *Router) CrossChainAppRequest( +func (r *router) CrossChainAppRequest( ctx context.Context, chainID ids.ID, requestID uint32, @@ -325,7 +316,7 @@ func (r *Router) CrossChainAppRequest( // // Any error condition propagated outside Handler application logic is // considered fatal -func (r *Router) CrossChainAppRequestFailed(ctx context.Context, chainID ids.ID, requestID uint32, appErr *common.AppError) error { +func (r *router) CrossChainAppRequestFailed(ctx context.Context, chainID ids.ID, requestID uint32, appErr *common.AppError) error { start := time.Now() pending, ok := r.clearCrossChainAppRequest(requestID) if !ok { @@ -343,7 +334,7 @@ func (r *Router) CrossChainAppRequestFailed(ctx context.Context, chainID ids.ID, // // Any error condition propagated outside Handler application logic is // considered fatal -func (r *Router) CrossChainAppResponse(ctx context.Context, chainID ids.ID, requestID uint32, response []byte) error { +func (r *router) CrossChainAppResponse(ctx context.Context, chainID ids.ID, requestID uint32, response []byte) error { start := time.Now() pending, ok := r.clearCrossChainAppRequest(requestID) if !ok { @@ -365,7 +356,7 @@ func (r *Router) 
CrossChainAppResponse(ctx context.Context, chainID ids.ID, requ // - A boolean indicating that parsing succeeded. // // Invariant: Assumes [r.lock] isn't held. -func (r *Router) parse(msg []byte) ([]byte, *meteredHandler, bool) { +func (r *router) parse(msg []byte) ([]byte, *meteredHandler, bool) { handlerID, bytesRead := binary.Uvarint(msg) if bytesRead <= 0 { return nil, nil, false @@ -379,7 +370,7 @@ func (r *Router) parse(msg []byte) ([]byte, *meteredHandler, bool) { } // Invariant: Assumes [r.lock] isn't held. -func (r *Router) clearAppRequest(requestID uint32) (pendingAppRequest, bool) { +func (r *router) clearAppRequest(requestID uint32) (pendingAppRequest, bool) { r.lock.Lock() defer r.lock.Unlock() @@ -389,7 +380,7 @@ func (r *Router) clearAppRequest(requestID uint32) (pendingAppRequest, bool) { } // Invariant: Assumes [r.lock] isn't held. -func (r *Router) clearCrossChainAppRequest(requestID uint32) (pendingCrossChainAppRequest, bool) { +func (r *router) clearCrossChainAppRequest(requestID uint32) (pendingCrossChainAppRequest, bool) { r.lock.Lock() defer r.lock.Unlock() diff --git a/network/p2p/router_test.go b/network/p2p/router_test.go index 6ab1151f8288..e69de29bb2d1 100644 --- a/network/p2p/router_test.go +++ b/network/p2p/router_test.go @@ -1,366 +0,0 @@ -// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package p2p - -import ( - "context" - "sync" - "testing" - "time" - - "github.com/prometheus/client_golang/prometheus" - - "github.com/stretchr/testify/require" - - "go.uber.org/mock/gomock" - - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/network/p2p/mocks" - "github.com/ava-labs/avalanchego/snow/engine/common" - "github.com/ava-labs/avalanchego/utils/logging" - "github.com/ava-labs/avalanchego/utils/set" -) - -var errFoo = &common.AppError{ - Code: 123, - Message: "foobar", -} - -func TestAppRequestResponse(t *testing.T) { - handlerID := uint64(0x0) - request := []byte("request") - response := []byte("response") - nodeID := ids.GenerateTestNodeID() - chainID := ids.GenerateTestID() - - ctxKey := new(string) - ctxVal := new(string) - *ctxKey = "foo" - *ctxVal = "bar" - - tests := []struct { - name string - requestFunc func(t *testing.T, router *Router, client *Client, sender *common.MockSender, handler *mocks.MockHandler, wg *sync.WaitGroup) - }{ - { - name: "app request", - requestFunc: func(t *testing.T, router *Router, client *Client, sender *common.MockSender, handler *mocks.MockHandler, wg *sync.WaitGroup) { - sender.EXPECT().SendAppRequest(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()). - Do(func(ctx context.Context, nodeIDs set.Set[ids.NodeID], requestID uint32, request []byte) { - for range nodeIDs { - go func() { - require.NoError(t, router.AppRequest(ctx, nodeID, requestID, time.Time{}, request)) - }() - } - }).AnyTimes() - sender.EXPECT().SendAppResponse(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()). - Do(func(ctx context.Context, _ ids.NodeID, requestID uint32, response []byte) { - go func() { - ctx = context.WithValue(ctx, ctxKey, ctxVal) - require.NoError(t, router.AppResponse(ctx, nodeID, requestID, response)) - }() - }).AnyTimes() - handler.EXPECT(). - AppRequest(context.Background(), nodeID, gomock.Any(), request). 
- DoAndReturn(func(context.Context, ids.NodeID, time.Time, []byte) ([]byte, error) { - return response, nil - }) - - callback := func(ctx context.Context, actualNodeID ids.NodeID, actualResponse []byte, err error) { - defer wg.Done() - - require.NoError(t, err) - require.Equal(t, ctxVal, ctx.Value(ctxKey)) - require.Equal(t, nodeID, actualNodeID) - require.Equal(t, response, actualResponse) - } - - require.NoError(t, client.AppRequestAny(context.Background(), request, callback)) - }, - }, - { - name: "app request failed", - requestFunc: func(t *testing.T, router *Router, client *Client, sender *common.MockSender, handler *mocks.MockHandler, wg *sync.WaitGroup) { - sender.EXPECT().SendAppRequest(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()). - Do(func(ctx context.Context, nodeIDs set.Set[ids.NodeID], requestID uint32, request []byte) { - for range nodeIDs { - go func() { - require.NoError(t, router.AppRequestFailed(ctx, nodeID, requestID, errFoo)) - }() - } - }) - - callback := func(_ context.Context, actualNodeID ids.NodeID, actualResponse []byte, err error) { - defer wg.Done() - - require.ErrorIs(t, err, errFoo) - require.Equal(t, nodeID, actualNodeID) - require.Nil(t, actualResponse) - } - - require.NoError(t, client.AppRequest(context.Background(), set.Of(nodeID), request, callback)) - }, - }, - { - name: "cross-chain app request", - requestFunc: func(t *testing.T, router *Router, client *Client, sender *common.MockSender, handler *mocks.MockHandler, wg *sync.WaitGroup) { - chainID := ids.GenerateTestID() - sender.EXPECT().SendCrossChainAppRequest(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()). - Do(func(ctx context.Context, chainID ids.ID, requestID uint32, request []byte) { - go func() { - require.NoError(t, router.CrossChainAppRequest(ctx, chainID, requestID, time.Time{}, request)) - }() - }).AnyTimes() - sender.EXPECT().SendCrossChainAppResponse(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()). 
- Do(func(ctx context.Context, chainID ids.ID, requestID uint32, response []byte) { - go func() { - ctx = context.WithValue(ctx, ctxKey, ctxVal) - require.NoError(t, router.CrossChainAppResponse(ctx, chainID, requestID, response)) - }() - }).AnyTimes() - handler.EXPECT(). - CrossChainAppRequest(context.Background(), chainID, gomock.Any(), request). - DoAndReturn(func(context.Context, ids.ID, time.Time, []byte) ([]byte, error) { - return response, nil - }) - - callback := func(ctx context.Context, actualChainID ids.ID, actualResponse []byte, err error) { - defer wg.Done() - require.NoError(t, err) - require.Equal(t, ctxVal, ctx.Value(ctxKey)) - require.Equal(t, chainID, actualChainID) - require.Equal(t, response, actualResponse) - } - - require.NoError(t, client.CrossChainAppRequest(context.Background(), chainID, request, callback)) - }, - }, - { - name: "cross-chain app request failed", - requestFunc: func(t *testing.T, router *Router, client *Client, sender *common.MockSender, handler *mocks.MockHandler, wg *sync.WaitGroup) { - sender.EXPECT().SendCrossChainAppRequest(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()). - Do(func(ctx context.Context, chainID ids.ID, requestID uint32, request []byte) { - go func() { - require.NoError(t, - router.CrossChainAppRequestFailed(ctx, chainID, requestID, errFoo)) - }() - }) - - callback := func(_ context.Context, actualChainID ids.ID, actualResponse []byte, err error) { - defer wg.Done() - - require.ErrorIs(t, err, errFoo) - require.Equal(t, chainID, actualChainID) - require.Nil(t, actualResponse) - } - - require.NoError(t, client.CrossChainAppRequest(context.Background(), chainID, request, callback)) - }, - }, - { - name: "app gossip", - requestFunc: func(t *testing.T, router *Router, client *Client, sender *common.MockSender, handler *mocks.MockHandler, wg *sync.WaitGroup) { - sender.EXPECT().SendAppGossip(gomock.Any(), gomock.Any()). 
- Do(func(ctx context.Context, gossip []byte) { - go func() { - require.NoError(t, router.AppGossip(ctx, nodeID, gossip)) - }() - }).AnyTimes() - handler.EXPECT(). - AppGossip(context.Background(), nodeID, request). - DoAndReturn(func(context.Context, ids.NodeID, []byte) error { - defer wg.Done() - return nil - }) - - require.NoError(t, client.AppGossip(context.Background(), request)) - }, - }, - { - name: "app gossip specific", - requestFunc: func(t *testing.T, router *Router, client *Client, sender *common.MockSender, handler *mocks.MockHandler, wg *sync.WaitGroup) { - sender.EXPECT().SendAppGossipSpecific(gomock.Any(), gomock.Any(), gomock.Any()). - Do(func(ctx context.Context, nodeIDs set.Set[ids.NodeID], gossip []byte) { - for n := range nodeIDs { - nodeID := n - go func() { - require.NoError(t, router.AppGossip(ctx, nodeID, gossip)) - }() - } - }).AnyTimes() - handler.EXPECT(). - AppGossip(context.Background(), nodeID, request). - DoAndReturn(func(context.Context, ids.NodeID, []byte) error { - defer wg.Done() - return nil - }) - - require.NoError(t, client.AppGossipSpecific(context.Background(), set.Of(nodeID), request)) - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - require := require.New(t) - ctrl := gomock.NewController(t) - - sender := common.NewMockSender(ctrl) - handler := mocks.NewMockHandler(ctrl) - router := NewRouter(logging.NoLog{}, sender, prometheus.NewRegistry(), "") - peers := &Peers{} - require.NoError(peers.Connected(context.Background(), nodeID, nil)) - client, err := router.RegisterAppProtocol(handlerID, handler, peers) - require.NoError(err) - - wg := &sync.WaitGroup{} - wg.Add(1) - tt.requestFunc(t, router, client, sender, handler, wg) - wg.Wait() - }) - } -} - -func TestRouterDropMessage(t *testing.T) { - unregistered := byte(0x0) - - tests := []struct { - name string - requestFunc func(router *Router) error - err error - }{ - { - name: "drop unregistered app request message", - requestFunc: 
func(router *Router) error { - return router.AppRequest(context.Background(), ids.GenerateTestNodeID(), 0, time.Time{}, []byte{unregistered}) - }, - err: nil, - }, - { - name: "drop empty app request message", - requestFunc: func(router *Router) error { - return router.AppRequest(context.Background(), ids.GenerateTestNodeID(), 0, time.Time{}, []byte{}) - }, - err: nil, - }, - { - name: "drop unregistered cross-chain app request message", - requestFunc: func(router *Router) error { - return router.CrossChainAppRequest(context.Background(), ids.GenerateTestID(), 0, time.Time{}, []byte{unregistered}) - }, - err: nil, - }, - { - name: "drop empty cross-chain app request message", - requestFunc: func(router *Router) error { - return router.CrossChainAppRequest(context.Background(), ids.GenerateTestID(), 0, time.Time{}, []byte{}) - }, - err: nil, - }, - { - name: "drop unregistered gossip message", - requestFunc: func(router *Router) error { - return router.AppGossip(context.Background(), ids.GenerateTestNodeID(), []byte{unregistered}) - }, - err: nil, - }, - { - name: "drop empty gossip message", - requestFunc: func(router *Router) error { - return router.AppGossip(context.Background(), ids.GenerateTestNodeID(), []byte{}) - }, - err: nil, - }, - { - name: "drop unrequested app request failed", - requestFunc: func(router *Router) error { - return router.AppRequestFailed(context.Background(), ids.GenerateTestNodeID(), 0, errFoo) - }, - err: ErrUnrequestedResponse, - }, - { - name: "drop unrequested app response", - requestFunc: func(router *Router) error { - return router.AppResponse(context.Background(), ids.GenerateTestNodeID(), 0, nil) - }, - err: ErrUnrequestedResponse, - }, - { - name: "drop unrequested cross-chain request failed", - requestFunc: func(router *Router) error { - return router.CrossChainAppRequestFailed(context.Background(), ids.GenerateTestID(), 0, errFoo) - }, - err: ErrUnrequestedResponse, - }, - { - name: "drop unrequested cross-chain response", - 
requestFunc: func(router *Router) error { - return router.CrossChainAppResponse(context.Background(), ids.GenerateTestID(), 0, nil) - }, - err: ErrUnrequestedResponse, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - require := require.New(t) - - router := NewRouter(logging.NoLog{}, nil, prometheus.NewRegistry(), "") - - err := tt.requestFunc(router) - require.ErrorIs(err, tt.err) - }) - } -} - -// It's possible for the request id to overflow and wrap around. -// If there are still pending requests with the same request id, we should -// not attempt to issue another request until the previous one has cleared. -func TestAppRequestDuplicateRequestIDs(t *testing.T) { - require := require.New(t) - ctrl := gomock.NewController(t) - - handler := mocks.NewMockHandler(ctrl) - sender := common.NewMockSender(ctrl) - router := NewRouter(logging.NoLog{}, sender, prometheus.NewRegistry(), "") - nodeID := ids.GenerateTestNodeID() - - requestSent := &sync.WaitGroup{} - sender.EXPECT().SendAppRequest(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()). - Do(func(ctx context.Context, nodeIDs set.Set[ids.NodeID], requestID uint32, request []byte) { - for range nodeIDs { - requestSent.Add(1) - go func() { - require.NoError(router.AppRequest(ctx, nodeID, requestID, time.Time{}, request)) - requestSent.Done() - }() - } - }).AnyTimes() - - timeout := &sync.WaitGroup{} - response := []byte("response") - handler.EXPECT().AppRequest(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()). 
- DoAndReturn(func(ctx context.Context, nodeID ids.NodeID, deadline time.Time, request []byte) ([]byte, error) { - timeout.Wait() - return response, nil - }).AnyTimes() - sender.EXPECT().SendAppResponse(gomock.Any(), gomock.Any(), gomock.Any(), response) - - peers := &Peers{} - require.NoError(peers.Connected(context.Background(), nodeID, nil)) - client, err := router.RegisterAppProtocol(0x1, handler, peers) - require.NoError(err) - - require.NoError(client.AppRequest(context.Background(), set.Of(nodeID), []byte{}, nil)) - requestSent.Wait() - - // force the router to use the same requestID - router.requestID = 1 - timeout.Add(1) - err = client.AppRequest(context.Background(), set.Of(nodeID), []byte{}, nil) - requestSent.Wait() - require.ErrorIs(err, ErrRequestPending) - - timeout.Done() -} diff --git a/network/p2p/validators.go b/network/p2p/validators.go index edad9b890430..a780c87f0d8c 100644 --- a/network/p2p/validators.go +++ b/network/p2p/validators.go @@ -22,11 +22,18 @@ var ( ) type ValidatorSet interface { - Has(ctx context.Context, nodeID ids.NodeID) bool + Has(ctx context.Context, nodeID ids.NodeID) bool // TODO return error } -func NewValidators(log logging.Logger, subnetID ids.ID, validators validators.State, maxValidatorSetStaleness time.Duration) *Validators { +func NewValidators( + peers *Peers, + log logging.Logger, + subnetID ids.ID, + validators validators.State, + maxValidatorSetStaleness time.Duration, +) *Validators { return &Validators{ + peers: peers, log: log, subnetID: subnetID, validators: validators, @@ -36,6 +43,7 @@ func NewValidators(log logging.Logger, subnetID ids.ID, validators validators.St // Validators contains a set of nodes that are staking. 
type Validators struct { + peers *Peers log logging.Logger subnetID ids.ID validators validators.State @@ -71,20 +79,33 @@ func (v *Validators) refresh(ctx context.Context) { v.lastUpdated = time.Now() } +// Sample returns a random sample of connected validators func (v *Validators) Sample(ctx context.Context, limit int) []ids.NodeID { v.lock.Lock() defer v.lock.Unlock() v.refresh(ctx) - return v.validatorIDs.Sample(limit) + validatorIDs := v.validatorIDs.Sample(limit) + sampled := validatorIDs[:0] + + for _, validatorID := range validatorIDs { + if !v.peers.has(validatorID) { + continue + } + + sampled = append(sampled, validatorID) + } + + return sampled } +// Has returns if nodeID is a connected validator func (v *Validators) Has(ctx context.Context, nodeID ids.NodeID) bool { v.lock.Lock() defer v.lock.Unlock() v.refresh(ctx) - return v.validatorIDs.Contains(nodeID) + return v.peers.has(nodeID) && v.validatorIDs.Contains(nodeID) } diff --git a/network/p2p/validators_test.go b/network/p2p/validators_test.go index 5db06f7a2efa..e721b4a978af 100644 --- a/network/p2p/validators_test.go +++ b/network/p2p/validators_test.go @@ -9,11 +9,14 @@ import ( "testing" "time" + "github.com/prometheus/client_golang/prometheus" + "github.com/stretchr/testify/require" "go.uber.org/mock/gomock" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/utils/logging" ) @@ -151,9 +154,8 @@ func TestValidatorsSample(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { require := require.New(t) - ctrl := gomock.NewController(t) - subnetID := ids.GenerateTestID() + ctrl := gomock.NewController(t) mockValidators := validators.NewMockState(ctrl) calls := make([]*gomock.Call, 0) @@ -177,7 +179,12 @@ func TestValidatorsSample(t *testing.T) { } gomock.InOrder(calls...) 
- v := NewValidators(logging.NoLog{}, subnetID, mockValidators, tt.maxStaleness) + network := NewNetwork(logging.NoLog{}, &common.SenderTest{}, prometheus.NewRegistry(), "") + ctx := context.Background() + require.NoError(network.Connected(ctx, nodeID1, nil)) + require.NoError(network.Connected(ctx, nodeID2, nil)) + + v := NewValidators(network.Peers, network.log, subnetID, mockValidators, tt.maxStaleness) for _, call := range tt.calls { v.lastUpdated = call.time sampled := v.Sample(context.Background(), call.limit) diff --git a/node/overridden_manager.go b/node/overridden_manager.go index 80295f8636ea..91d8c198a4c3 100644 --- a/node/overridden_manager.go +++ b/node/overridden_manager.go @@ -68,10 +68,6 @@ func (o *overriddenManager) Sample(_ ids.ID, size int) ([]ids.NodeID, error) { return o.manager.Sample(o.subnetID, size) } -func (o *overriddenManager) UniformSample(_ ids.ID, size int) ([]ids.NodeID, error) { - return o.manager.UniformSample(o.subnetID, size) -} - func (o *overriddenManager) GetMap(ids.ID) map[ids.NodeID]*validators.GetValidatorOutput { return o.manager.GetMap(o.subnetID) } diff --git a/proto/Dockerfile.buf b/proto/Dockerfile.buf index 40d0a5420b4a..3c8864e636b7 100644 --- a/proto/Dockerfile.buf +++ b/proto/Dockerfile.buf @@ -6,7 +6,7 @@ RUN apt-get update && apt -y install bash curl unzip git WORKDIR /opt RUN \ - curl -L https://golang.org/dl/go1.20.8.linux-amd64.tar.gz > golang.tar.gz && \ + curl -L https://golang.org/dl/go1.20.12.linux-amd64.tar.gz > golang.tar.gz && \ mkdir golang && \ tar -zxvf golang.tar.gz -C golang/ diff --git a/scripts/build_avalanche.sh b/scripts/build_avalanche.sh index 1db63eb946f4..dcfaed4c420d 100755 --- a/scripts/build_avalanche.sh +++ b/scripts/build_avalanche.sh @@ -27,7 +27,7 @@ done # Dockerfile # README.md # go.mod -go_version_minimum="1.20.10" +go_version_minimum="1.20.12" go_version() { go version | sed -nE -e 's/[^0-9.]+([0-9.]+).+/\1/p' diff --git a/scripts/build_fuzz.sh b/scripts/build_fuzz.sh index 
0f7ad7de8ede..54ed02c27e21 100755 --- a/scripts/build_fuzz.sh +++ b/scripts/build_fuzz.sh @@ -1,5 +1,11 @@ #!/usr/bin/env bash +# First argument is the time, in seconds, to run each fuzz test for. +# If not provided, defaults to 1 second. +# +# Second argument is the directory to run fuzz tests in. +# If not provided, defaults to the current directory. + set -euo pipefail # Mostly taken from https://github.com/golang/go/issues/46312#issuecomment-1153345129 @@ -10,7 +16,9 @@ AVALANCHE_PATH=$( cd "$( dirname "${BASH_SOURCE[0]}" )"; cd .. && pwd ) source "$AVALANCHE_PATH"/scripts/constants.sh fuzzTime=${1:-1} -files=$(grep -r --include='**_test.go' --files-with-matches 'func Fuzz' .) +fuzzDir=${2:-.} + +files=$(grep -r --include='**_test.go' --files-with-matches 'func Fuzz' $fuzzDir) failed=false for file in ${files} do diff --git a/scripts/build_testnetctl.sh b/scripts/build_tmpnetctl.sh similarity index 71% rename from scripts/build_testnetctl.sh rename to scripts/build_tmpnetctl.sh index b84c9e40577d..132cf09ee38d 100755 --- a/scripts/build_testnetctl.sh +++ b/scripts/build_tmpnetctl.sh @@ -7,8 +7,8 @@ AVALANCHE_PATH=$( cd "$( dirname "${BASH_SOURCE[0]}" )"; cd .. && pwd ) # Load the constants source "$AVALANCHE_PATH"/scripts/constants.sh -echo "Building testnetctl..." +echo "Building tmpnetctl..." go build -ldflags\ "-X github.com/ava-labs/avalanchego/version.GitCommit=$git_commit $static_ld_flags"\ - -o "$AVALANCHE_PATH/build/testnetctl"\ - "$AVALANCHE_PATH/tests/fixture/testnet/cmd/"*.go + -o "$AVALANCHE_PATH/build/tmpnetctl"\ + "$AVALANCHE_PATH/tests/fixture/tmpnet/cmd/"*.go diff --git a/scripts/constants.sh b/scripts/constants.sh index 433e424f7acc..c175bffd57cf 100755 --- a/scripts/constants.sh +++ b/scripts/constants.sh @@ -36,3 +36,6 @@ export CGO_CFLAGS="-O2 -D__BLST_PORTABLE__" # While CGO_ENABLED doesn't need to be explicitly set, it produces a much more # clear error due to the default value change in go1.20. 
export CGO_ENABLED=1 + +# Disable version control fallbacks +export GOPROXY="https://proxy.golang.org" diff --git a/scripts/tests.e2e.existing.sh b/scripts/tests.e2e.existing.sh new file mode 100755 index 000000000000..bc1f8104977b --- /dev/null +++ b/scripts/tests.e2e.existing.sh @@ -0,0 +1,63 @@ +#!/usr/bin/env bash + +set -euo pipefail + +################################################################ +# This script deploys a temporary network and configures +# tests.e2e.sh to execute the e2e suite against it. This +# validates that tmpnetctl is capable of starting a network and +# that the e2e suite is capable of executing against a network +# that it did not create. +################################################################ + +# e.g., +# ./scripts/build.sh +# ./scripts/tests.e2e.existing.sh --ginkgo.label-filter=x # All arguments are supplied to ginkgo +# E2E_SERIAL=1 ./scripts/tests.e2e.sh # Run tests serially +# AVALANCHEGO_PATH=./build/avalanchego ./scripts/tests.e2e.existing.sh # Customization of avalanchego path +if ! [[ "$0" =~ scripts/tests.e2e.existing.sh ]]; then + echo "must be run from repository root" + exit 255 +fi + +# Ensure an absolute path to avoid dependency on the working directory +# of script execution. 
+export AVALANCHEGO_PATH="$(realpath ${AVALANCHEGO_PATH:-./build/avalanchego})" + +# Provide visual separation between testing and setup/teardown +function print_separator { + printf '%*s\n' "${COLUMNS:-80}" '' | tr ' ' ─ +} + +# Ensure network cleanup on teardown +function cleanup { + print_separator + echo "cleaning up temporary network" + if [[ -n "${TMPNET_NETWORK_DIR:-}" ]]; then + ./build/tmpnetctl stop-network + fi +} +trap cleanup EXIT + +# Start a temporary network +./scripts/build_tmpnetctl.sh +print_separator +./build/tmpnetctl start-network + +# Determine the network configuration path from the latest symlink +LATEST_SYMLINK_PATH="${HOME}/.tmpnet/networks/latest" +if [[ -h "${LATEST_SYMLINK_PATH}" ]]; then + export TMPNET_NETWORK_DIR="$(realpath ${LATEST_SYMLINK_PATH})" +else + echo "failed to find configuration path: ${LATEST_SYMLINK_PATH} symlink not found" + exit 255 +fi + +print_separator +# - Setting E2E_USE_EXISTING_NETWORK configures tests.e2e.sh to use +# the temporary network identified by TMPNET_NETWORK_DIR. +# - Only a single test (selected with --ginkgo.focus-file) is required +# to validate that an existing network can be used by an e2e test +# suite run. Executing more tests would be duplicative of the testing +# performed against a network created by the test suite. +E2E_USE_EXISTING_NETWORK=1 ./scripts/tests.e2e.sh --ginkgo.focus-file=permissionless_subnets.go diff --git a/scripts/tests.e2e.persistent.sh b/scripts/tests.e2e.persistent.sh deleted file mode 100755 index fec3b3db0f88..000000000000 --- a/scripts/tests.e2e.persistent.sh +++ /dev/null @@ -1,60 +0,0 @@ -#!/usr/bin/env bash - -set -euo pipefail - -################################################################ -# This script deploys a persistent local network and configures -# tests.e2e.sh to execute the e2e suite against it. 
-################################################################ - -# e.g., -# ./scripts/build.sh -# ./scripts/tests.e2e.persistent.sh --ginkgo.label-filter=x # All arguments are supplied to ginkgo -# E2E_SERIAL=1 ./scripts/tests.e2e.sh # Run tests serially -# AVALANCHEGO_PATH=./build/avalanchego ./scripts/tests.e2e.persistent.sh # Customization of avalanchego path -if ! [[ "$0" =~ scripts/tests.e2e.persistent.sh ]]; then - echo "must be run from repository root" - exit 255 -fi - -# Ensure an absolute path to avoid dependency on the working directory -# of script execution. -export AVALANCHEGO_PATH="$(realpath ${AVALANCHEGO_PATH:-./build/avalanchego})" - -# Provide visual separation between testing and setup/teardown -function print_separator { - printf '%*s\n' "${COLUMNS:-80}" '' | tr ' ' ─ -} - -# Ensure network cleanup on teardown -function cleanup { - print_separator - echo "cleaning up persistent network" - if [[ -n "${TESTNETCTL_NETWORK_DIR:-}" ]]; then - ./build/testnetctl stop-network - fi -} -trap cleanup EXIT - -# Start a persistent network -./scripts/build_testnetctl.sh -print_separator -./build/testnetctl start-network - -# Determine the network configuration path from the latest symlink -LATEST_SYMLINK_PATH="${HOME}/.testnetctl/networks/latest" -if [[ -h "${LATEST_SYMLINK_PATH}" ]]; then - export TESTNETCTL_NETWORK_DIR="$(realpath ${LATEST_SYMLINK_PATH})" -else - echo "failed to find configuration path: ${LATEST_SYMLINK_PATH} symlink not found" - exit 255 -fi - -print_separator -# - Setting E2E_USE_PERSISTENT_NETWORK configures tests.e2e.sh to use -# the persistent network identified by TESTNETCTL_NETWORK_DIR. -# - Only a single test (selected with --ginkgo.focus-file) is required -# to validate that a persistent network can be used by an e2e test -# suite run. Executing more tests would be duplicative of the testing -# performed against an ephemeral test network. 
-E2E_USE_PERSISTENT_NETWORK=1 ./scripts/tests.e2e.sh --ginkgo.focus-file=permissionless_subnets.go diff --git a/scripts/tests.e2e.sh b/scripts/tests.e2e.sh index c9acd924ae20..63718428b8fd 100755 --- a/scripts/tests.e2e.sh +++ b/scripts/tests.e2e.sh @@ -7,7 +7,7 @@ set -euo pipefail # ./scripts/tests.e2e.sh --ginkgo.label-filter=x # All arguments are supplied to ginkgo # E2E_SERIAL=1 ./scripts/tests.e2e.sh # Run tests serially # AVALANCHEGO_PATH=./build/avalanchego ./scripts/tests.e2e.sh # Customization of avalanchego path -# E2E_USE_PERSISTENT_NETWORK=1 TESTNETCTL_NETWORK_DIR=/path/to ./scripts/tests.e2e.sh # Execute against a persistent network +# E2E_USE_EXISTING_NETWORK=1 TMPNET_NETWORK_DIR=/path/to ./scripts/tests.e2e.sh # Execute against an existing network if ! [[ "$0" =~ scripts/tests.e2e.sh ]]; then echo "must be run from repository root" exit 255 @@ -23,16 +23,16 @@ source ./scripts/constants.sh ################################# echo "building e2e.test" # to install the ginkgo binary (required for test build and run) -go install -v github.com/onsi/ginkgo/v2/ginkgo@v2.1.4 +go install -v github.com/onsi/ginkgo/v2/ginkgo@v2.13.1 ACK_GINKGO_RC=true ginkgo build ./tests/e2e ./tests/e2e/e2e.test --help ################################# -# Since TESTNETCTL_NETWORK_DIR may be persistently set in the environment (e.g. to configure -# ginkgo or testnetctl), configuring the use of a persistent network with this script -# requires the extra step of setting E2E_USE_PERSISTENT_NETWORK=1. -if [[ -n "${E2E_USE_PERSISTENT_NETWORK:-}" && -n "${TESTNETCTL_NETWORK_DIR:-}" ]]; then - E2E_ARGS="--use-persistent-network" +# Since TMPNET_NETWORK_DIR may be set in the environment (e.g. to configure ginkgo +# or tmpnetctl), configuring the use of an existing network with this script +# requires the extra step of setting E2E_USE_EXISTING_NETWORK=1. 
+if [[ -n "${E2E_USE_EXISTING_NETWORK:-}" && -n "${TMPNET_NETWORK_DIR:-}" ]]; then + E2E_ARGS="--use-existing-network" else AVALANCHEGO_PATH="$(realpath ${AVALANCHEGO_PATH:-./build/avalanchego})" E2E_ARGS="--avalanchego-path=${AVALANCHEGO_PATH}" diff --git a/scripts/tests.upgrade.sh b/scripts/tests.upgrade.sh index 8da20b2d65c0..34d5617bd128 100755 --- a/scripts/tests.upgrade.sh +++ b/scripts/tests.upgrade.sh @@ -56,7 +56,7 @@ source ./scripts/constants.sh ################################# echo "building upgrade.test" # to install the ginkgo binary (required for test build and run) -go install -v github.com/onsi/ginkgo/v2/ginkgo@v2.1.4 +go install -v github.com/onsi/ginkgo/v2/ginkgo@v2.13.1 ACK_GINKGO_RC=true ginkgo build ./tests/upgrade ./tests/upgrade/upgrade.test --help diff --git a/snow/consensus/snowman/test_block.go b/snow/consensus/snowman/test_block.go index a02bf31787c1..f340a0291b00 100644 --- a/snow/consensus/snowman/test_block.go +++ b/snow/consensus/snowman/test_block.go @@ -48,6 +48,6 @@ func (b *TestBlock) Bytes() []byte { return b.BytesV } -func (b *TestBlock) Less(other *TestBlock) bool { - return b.HeightV < other.HeightV +func (b *TestBlock) Compare(other *TestBlock) int { + return utils.Compare(b.HeightV, other.HeightV) } diff --git a/snow/engine/avalanche/bootstrap/bootstrapper.go b/snow/engine/avalanche/bootstrap/bootstrapper.go index 967d65711abc..162937dc7860 100644 --- a/snow/engine/avalanche/bootstrap/bootstrapper.go +++ b/snow/engine/avalanche/bootstrap/bootstrapper.go @@ -401,8 +401,7 @@ func (b *bootstrapper) HealthCheck(ctx context.Context) (interface{}, error) { func (b *bootstrapper) fetch(ctx context.Context, vtxIDs ...ids.ID) error { b.needToFetch.Add(vtxIDs...) 
for b.needToFetch.Len() > 0 && b.outstandingRequests.Len() < maxOutstandingGetAncestorsRequests { - vtxID := b.needToFetch.CappedList(1)[0] - b.needToFetch.Remove(vtxID) + vtxID, _ := b.needToFetch.Pop() // Length checked in predicate above // Make sure we haven't already requested this vertex if b.outstandingRequests.HasValue(vtxID) { diff --git a/snow/engine/common/tracker/peers.go b/snow/engine/common/tracker/peers.go index ad9592209a5a..94d653a53b1f 100644 --- a/snow/engine/common/tracker/peers.go +++ b/snow/engine/common/tracker/peers.go @@ -33,6 +33,9 @@ type Peers interface { ConnectedPercent() float64 // TotalWeight returns the total validator weight TotalWeight() uint64 + // SampleValidator returns a randomly selected connected validator. If there + // are no currently connected validators then it will return false. + SampleValidator() (ids.NodeID, bool) // PreferredPeers returns the currently connected validators. If there are // no currently connected validators then it will return the currently // connected peers. 
@@ -108,6 +111,13 @@ func (p *lockedPeers) TotalWeight() uint64 { return p.peers.TotalWeight() } +func (p *lockedPeers) SampleValidator() (ids.NodeID, bool) { + p.lock.RLock() + defer p.lock.RUnlock() + + return p.peers.SampleValidator() +} + func (p *lockedPeers) PreferredPeers() set.Set[ids.NodeID] { p.lock.RLock() defer p.lock.RUnlock() @@ -263,6 +273,10 @@ func (p *peerData) TotalWeight() uint64 { return p.totalWeight } +func (p *peerData) SampleValidator() (ids.NodeID, bool) { + return p.connectedValidators.Peek() +} + func (p *peerData) PreferredPeers() set.Set[ids.NodeID] { if p.connectedValidators.Len() == 0 { connectedPeers := set.NewSet[ids.NodeID](p.connectedPeers.Len()) diff --git a/snow/engine/snowman/bootstrap/bootstrapper.go b/snow/engine/snowman/bootstrap/bootstrapper.go index 0e2c7e0dab16..b819c9573b48 100644 --- a/snow/engine/snowman/bootstrap/bootstrapper.go +++ b/snow/engine/snowman/bootstrap/bootstrapper.go @@ -781,6 +781,7 @@ func (b *Bootstrapper) Timeout(ctx context.Context) error { func (b *Bootstrapper) restartBootstrapping(ctx context.Context) error { b.Ctx.Log.Debug("Checking for new frontiers") b.restarted = true + b.outstandingRequests = bimap.New[common.Request, ids.ID]() return b.startBootstrapping(ctx) } diff --git a/snow/engine/snowman/bootstrap/bootstrapper_test.go b/snow/engine/snowman/bootstrap/bootstrapper_test.go index 83cbca730ba5..4f7e54882920 100644 --- a/snow/engine/snowman/bootstrap/bootstrapper_test.go +++ b/snow/engine/snowman/bootstrap/bootstrapper_test.go @@ -1423,3 +1423,124 @@ func TestBootstrapNoParseOnNew(t *testing.T) { ) require.NoError(err) } + +func TestBootstrapperReceiveStaleAncestorsMessage(t *testing.T) { + require := require.New(t) + + config, peerID, sender, vm := newConfig(t) + + var ( + blkID0 = ids.GenerateTestID() + blkBytes0 = utils.RandomBytes(1024) + blk0 = &snowman.TestBlock{ + TestDecidable: choices.TestDecidable{ + IDV: blkID0, + StatusV: choices.Accepted, + }, + HeightV: 0, + BytesV: 
blkBytes0, + } + + blkID1 = ids.GenerateTestID() + blkBytes1 = utils.RandomBytes(1024) + blk1 = &snowman.TestBlock{ + TestDecidable: choices.TestDecidable{ + IDV: blkID1, + StatusV: choices.Processing, + }, + ParentV: blk0.IDV, + HeightV: blk0.HeightV + 1, + BytesV: blkBytes1, + } + + blkID2 = ids.GenerateTestID() + blkBytes2 = utils.RandomBytes(1024) + blk2 = &snowman.TestBlock{ + TestDecidable: choices.TestDecidable{ + IDV: blkID2, + StatusV: choices.Processing, + }, + ParentV: blk1.IDV, + HeightV: blk1.HeightV + 1, + BytesV: blkBytes2, + } + ) + + vm.LastAcceptedF = func(context.Context) (ids.ID, error) { + return blk0.ID(), nil + } + vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { + require.Equal(blkID0, blkID) + return blk0, nil + } + bs, err := New( + config, + func(context.Context, uint32) error { + config.Ctx.State.Set(snow.EngineState{ + Type: p2p.EngineType_ENGINE_TYPE_SNOWMAN, + State: snow.NormalOp, + }) + return nil + }, + ) + require.NoError(err) + + vm.CantSetState = false + require.NoError(bs.Start(context.Background(), 0)) + + vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { + switch blkID { + case blkID0: + return blk0, nil + case blkID1: + if blk1.StatusV == choices.Accepted { + return blk1, nil + } + return nil, database.ErrNotFound + case blkID2: + if blk2.StatusV == choices.Accepted { + return blk2, nil + } + return nil, database.ErrNotFound + default: + require.FailNow(database.ErrNotFound.Error()) + return nil, database.ErrNotFound + } + } + vm.ParseBlockF = func(_ context.Context, blkBytes []byte) (snowman.Block, error) { + switch { + case bytes.Equal(blkBytes, blkBytes0): + return blk0, nil + case bytes.Equal(blkBytes, blkBytes1): + return blk1, nil + case bytes.Equal(blkBytes, blkBytes2): + return blk2, nil + default: + require.FailNow(errUnknownBlock.Error()) + return nil, errUnknownBlock + } + } + + requestIDs := map[ids.ID]uint32{} + sender.SendGetAncestorsF = func(_ 
context.Context, vdr ids.NodeID, reqID uint32, blkID ids.ID) { + require.Equal(peerID, vdr) + requestIDs[blkID] = reqID + } + + require.NoError(bs.startSyncing(context.Background(), []ids.ID{blkID1, blkID2})) // should request blk2 and blk1 + + reqIDBlk1, ok := requestIDs[blkID1] + require.True(ok) + reqIDBlk2, ok := requestIDs[blkID2] + require.True(ok) + + require.NoError(bs.Ancestors(context.Background(), peerID, reqIDBlk2, [][]byte{blkBytes2, blkBytes1})) + + require.Equal(snow.Bootstrapping, config.Ctx.State.Get().State) + require.Equal(choices.Accepted, blk0.Status()) + require.Equal(choices.Accepted, blk1.Status()) + require.Equal(choices.Accepted, blk2.Status()) + + require.NoError(bs.Ancestors(context.Background(), peerID, reqIDBlk1, [][]byte{blkBytes1})) + require.Equal(snow.Bootstrapping, config.Ctx.State.Get().State) +} diff --git a/snow/engine/snowman/config.go b/snow/engine/snowman/config.go index ed63af2f4936..65a24a2ea816 100644 --- a/snow/engine/snowman/config.go +++ b/snow/engine/snowman/config.go @@ -8,6 +8,7 @@ import ( "github.com/ava-labs/avalanchego/snow/consensus/snowball" "github.com/ava-labs/avalanchego/snow/consensus/snowman" "github.com/ava-labs/avalanchego/snow/engine/common" + "github.com/ava-labs/avalanchego/snow/engine/common/tracker" "github.com/ava-labs/avalanchego/snow/engine/snowman/block" "github.com/ava-labs/avalanchego/snow/validators" ) @@ -16,11 +17,12 @@ import ( type Config struct { common.AllGetsServer - Ctx *snow.ConsensusContext - VM block.ChainVM - Sender common.Sender - Validators validators.Manager - Params snowball.Parameters - Consensus snowman.Consensus - PartialSync bool + Ctx *snow.ConsensusContext + VM block.ChainVM + Sender common.Sender + Validators validators.Manager + ConnectedValidators tracker.Peers + Params snowball.Parameters + Consensus snowman.Consensus + PartialSync bool } diff --git a/snow/engine/snowman/config_test.go b/snow/engine/snowman/config_test.go index 23fc0fc39fd4..9611990d9d95 100644 --- 
a/snow/engine/snowman/config_test.go +++ b/snow/engine/snowman/config_test.go @@ -8,16 +8,18 @@ import ( "github.com/ava-labs/avalanchego/snow/consensus/snowball" "github.com/ava-labs/avalanchego/snow/consensus/snowman" "github.com/ava-labs/avalanchego/snow/engine/common" + "github.com/ava-labs/avalanchego/snow/engine/common/tracker" "github.com/ava-labs/avalanchego/snow/engine/snowman/block" "github.com/ava-labs/avalanchego/snow/validators" ) func DefaultConfig() Config { return Config{ - Ctx: snow.DefaultConsensusContextTest(), - VM: &block.TestVM{}, - Sender: &common.SenderTest{}, - Validators: validators.NewManager(), + Ctx: snow.DefaultConsensusContextTest(), + VM: &block.TestVM{}, + Sender: &common.SenderTest{}, + Validators: validators.NewManager(), + ConnectedValidators: tracker.NewPeers(), Params: snowball.Parameters{ K: 1, AlphaPreference: 1, diff --git a/snow/engine/snowman/transitive.go b/snow/engine/snowman/transitive.go index 7f06698cbab0..4b43dcda0acb 100644 --- a/snow/engine/snowman/transitive.go +++ b/snow/engine/snowman/transitive.go @@ -169,11 +169,10 @@ func (t *Transitive) Gossip(ctx context.Context) error { // Uniform sampling is used here to reduce bandwidth requirements of // nodes with a large amount of stake weight. - vdrIDs, err := t.Validators.UniformSample(t.Ctx.SubnetID, 1) - if err != nil { + vdrID, ok := t.ConnectedValidators.SampleValidator() + if !ok { t.Ctx.Log.Error("skipping block gossip", - zap.String("reason", "no validators"), - zap.Error(err), + zap.String("reason", "no connected validators"), ) return nil } @@ -190,9 +189,13 @@ func (t *Transitive) Gossip(ctx context.Context) error { } t.requestID++ - vdrSet := set.Of(vdrIDs...) 
- preferredID := t.Consensus.Preference() - t.Sender.SendPullQuery(ctx, vdrSet, t.requestID, preferredID, nextHeightToAccept) + t.Sender.SendPullQuery( + ctx, + set.Of(vdrID), + t.requestID, + t.Consensus.Preference(), + nextHeightToAccept, + ) } else { t.Ctx.Log.Debug("skipping block gossip", zap.String("reason", "blocks currently processing"), diff --git a/snow/engine/snowman/transitive_test.go b/snow/engine/snowman/transitive_test.go index 26f6c1127bbf..738f20440c58 100644 --- a/snow/engine/snowman/transitive_test.go +++ b/snow/engine/snowman/transitive_test.go @@ -22,6 +22,7 @@ import ( "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/set" + "github.com/ava-labs/avalanchego/version" ) var ( @@ -41,6 +42,9 @@ func setup(t *testing.T, engCfg Config) (ids.NodeID, validators.Manager, *common vdr := ids.GenerateTestNodeID() require.NoError(vals.AddStaker(engCfg.Ctx.SubnetID, vdr, nil, ids.Empty, 1)) + require.NoError(engCfg.ConnectedValidators.Connected(context.Background(), vdr, version.CurrentApp)) + + vals.RegisterCallbackListener(engCfg.Ctx.SubnetID, engCfg.ConnectedValidators) sender := &common.SenderTest{T: t} engCfg.Sender = sender diff --git a/snow/validators/manager.go b/snow/validators/manager.go index 8cf634f29bd7..c42ea779d96b 100644 --- a/snow/validators/manager.go +++ b/snow/validators/manager.go @@ -85,10 +85,6 @@ type Manager interface { // If sampling the requested size isn't possible, an error will be returned. Sample(subnetID ids.ID, size int) ([]ids.NodeID, error) - // UniformSample returns a collection of validatorIDs in the subnet. - // If sampling the requested size isn't possible, an error will be returned. 
- UniformSample(subnetID ids.ID, size int) ([]ids.NodeID, error) - // Map of the validators in this subnet GetMap(subnetID ids.ID) map[ids.NodeID]*GetValidatorOutput @@ -257,21 +253,6 @@ func (m *manager) Sample(subnetID ids.ID, size int) ([]ids.NodeID, error) { return set.Sample(size) } -func (m *manager) UniformSample(subnetID ids.ID, size int) ([]ids.NodeID, error) { - if size == 0 { - return nil, nil - } - - m.lock.RLock() - set, exists := m.subnetToVdrs[subnetID] - m.lock.RUnlock() - if !exists { - return nil, ErrMissingValidators - } - - return set.UniformSample(size) -} - func (m *manager) GetMap(subnetID ids.ID) map[ids.NodeID]*GetValidatorOutput { m.lock.RLock() set, exists := m.subnetToVdrs[subnetID] diff --git a/snow/validators/set.go b/snow/validators/set.go index 564cd107153a..dfa294a70bbe 100644 --- a/snow/validators/set.go +++ b/snow/validators/set.go @@ -243,13 +243,6 @@ func (s *vdrSet) Sample(size int) ([]ids.NodeID, error) { return s.sample(size) } -func (s *vdrSet) UniformSample(size int) ([]ids.NodeID, error) { - s.lock.RLock() - defer s.lock.RUnlock() - - return s.uniformSample(size) -} - func (s *vdrSet) sample(size int) ([]ids.NodeID, error) { if !s.samplerInitialized { if err := s.sampler.Initialize(s.weights); err != nil { @@ -270,22 +263,6 @@ func (s *vdrSet) sample(size int) ([]ids.NodeID, error) { return list, nil } -func (s *vdrSet) uniformSample(size int) ([]ids.NodeID, error) { - uniform := sampler.NewUniform() - uniform.Initialize(uint64(len(s.vdrSlice))) - - indices, err := uniform.Sample(size) - if err != nil { - return nil, err - } - - list := make([]ids.NodeID, size) - for i, index := range indices { - list[i] = s.vdrSlice[index].NodeID - } - return list, nil -} - func (s *vdrSet) TotalWeight() (uint64, error) { s.lock.RLock() defer s.lock.RUnlock() diff --git a/tests/e2e/README.md b/tests/e2e/README.md index da7ca7a82612..032795b6436e 100644 --- a/tests/e2e/README.md +++ b/tests/e2e/README.md @@ -1,6 +1,6 @@ # Avalanche e2e 
test suites -- Works with fixture-managed networks. +- Works with fixture-managed temporary networks. - Compiles to a single binary with customizable configurations. ## Running tests @@ -57,44 +57,46 @@ packages. `x/transfer/virtuous.go` defines X-Chain transfer tests, labeled with `x`, which can be selected by `./tests/e2e/e2e.test --ginkgo.label-filter "x"`. -## Testing against a persistent network +## Testing against an existing network -By default, a new ephemeral test network will be started before each -test run. When developing e2e tests, it may be helpful to create a -persistent test network to test against. This can increase the speed -of iteration by removing the requirement to start a new network for -every invocation of the test under development. +By default, a new temporary test network will be started before each +test run and stopped at the end of the run. When developing e2e tests, +it may be helpful to create a temporary network that can be used +across multiple test runs. This can increase the speed of iteration by +removing the requirement to start a new network for every invocation +of the test under development. -To use a persistent network: +To create a temporary network for use across test runs: ```bash # From the root of the avalanchego repo -# Build the testnetctl binary -$ ./scripts/build_testnetctl.sh +# Build the tmpnetctl binary +$ ./scripts/build_tmpnetctl.sh # Start a new network -$ ./build/testnetctl start-network --avalanchego-path=/path/to/avalanchego +$ ./build/tmpnetctl start-network --avalanchego-path=/path/to/avalanchego ... 
-Started network 1000 @ /home/me/.testnetctl/networks/1000 +Started network 1000 @ /home/me/.tmpnet/networks/1000 -Configure testnetctl to target this network by default with one of the following statements: - - source /home/me/.testnetctl/networks/1000/network.env - - export TESTNETCTL_NETWORK_DIR=/home/me/.testnetctl/networks/1000 - - export TESTNETCTL_NETWORK_DIR=/home/me/.testnetctl/networks/latest +Configure tmpnetctl and the test suite to target this network by default +with one of the following statements: + - source /home/me/.tmpnet/networks/1000/network.env + - export TMPNET_NETWORK_DIR=/home/me/.tmpnet/networks/1000 + - export TMPNET_NETWORK_DIR=/home/me/.tmpnet/networks/latest -# Start a new test run using the persistent network +# Start a new test run using the existing network ginkgo -v ./tests/e2e -- \ --avalanchego-path=/path/to/avalanchego \ --ginkgo.focus-file=[name of file containing test] \ - --use-persistent-network \ + --use-existing-network \ --network-dir=/path/to/network # It is also possible to set the AVALANCHEGO_PATH env var instead of supplying --avalanchego-path -# and to set TESTNETCTL_NETWORK_DIR instead of supplying --network-dir. +# and to set TMPNET_NETWORK_DIR instead of supplying --network-dir. ``` -See the testnet fixture [README](../fixture/testnet/README.md) for more details. +See the tmpnet fixture [README](../fixture/tmpnet/README.md) for more details. 
## Skipping bootstrap checks diff --git a/tests/e2e/c/dynamic_fees.go b/tests/e2e/c/dynamic_fees.go index 8f15b6d43caf..38bbd668079b 100644 --- a/tests/e2e/c/dynamic_fees.go +++ b/tests/e2e/c/dynamic_fees.go @@ -20,7 +20,7 @@ import ( "github.com/ava-labs/avalanchego/tests" "github.com/ava-labs/avalanchego/tests/fixture/e2e" - "github.com/ava-labs/avalanchego/tests/fixture/testnet" + "github.com/ava-labs/avalanchego/tests/fixture/tmpnet" "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" ) @@ -47,7 +47,7 @@ var _ = e2e.DescribeCChain("[Dynamic Fees]", func() { ginkgo.By("initializing a coreth client") node := privateNetwork.GetNodes()[0] - nodeURI := testnet.NodeURI{ + nodeURI := tmpnet.NodeURI{ NodeID: node.GetID(), URI: node.GetProcessContext().URI, } diff --git a/tests/e2e/faultinjection/duplicate_node_id.go b/tests/e2e/faultinjection/duplicate_node_id.go index 1d865d840f51..9278c1bd5b8d 100644 --- a/tests/e2e/faultinjection/duplicate_node_id.go +++ b/tests/e2e/faultinjection/duplicate_node_id.go @@ -15,7 +15,7 @@ import ( "github.com/ava-labs/avalanchego/config" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/tests/fixture/e2e" - "github.com/ava-labs/avalanchego/tests/fixture/testnet" + "github.com/ava-labs/avalanchego/tests/fixture/tmpnet" "github.com/ava-labs/avalanchego/utils/set" ) @@ -27,7 +27,7 @@ var _ = ginkgo.Describe("Duplicate node handling", func() { nodes := network.GetNodes() ginkgo.By("creating new node") - node1 := e2e.AddEphemeralNode(network, testnet.FlagsMap{}) + node1 := e2e.AddEphemeralNode(network, tmpnet.FlagsMap{}) e2e.WaitForHealthy(node1) ginkgo.By("checking that the new node is connected to its peers") @@ -35,7 +35,7 @@ var _ = ginkgo.Describe("Duplicate node handling", func() { ginkgo.By("creating a second new node with the same staking keypair as the first new node") node1Flags := node1.GetConfig().Flags - node2Flags := testnet.FlagsMap{ + node2Flags := tmpnet.FlagsMap{ config.StakingTLSKeyContentKey: 
node1Flags[config.StakingTLSKeyContentKey], config.StakingCertContentKey: node1Flags[config.StakingCertContentKey], // Construct a unique data dir to ensure the two nodes' data will be stored @@ -46,7 +46,7 @@ var _ = ginkgo.Describe("Duplicate node handling", func() { node2 := e2e.AddEphemeralNode(network, node2Flags) ginkgo.By("checking that the second new node fails to become healthy before timeout") - err := testnet.WaitForHealthy(e2e.DefaultContext(), node2) + err := tmpnet.WaitForHealthy(e2e.DefaultContext(), node2) require.ErrorIs(err, context.DeadlineExceeded) ginkgo.By("stopping the first new node") @@ -63,7 +63,7 @@ var _ = ginkgo.Describe("Duplicate node handling", func() { }) // Check that a new node is connected to existing nodes and vice versa -func checkConnectedPeers(existingNodes []testnet.Node, newNode testnet.Node) { +func checkConnectedPeers(existingNodes []tmpnet.Node, newNode tmpnet.Node) { require := require.New(ginkgo.GinkgoT()) // Collect the node ids of the new node's peers diff --git a/tests/e2e/p/interchain_workflow.go b/tests/e2e/p/interchain_workflow.go index 44a6912715ef..678f9b5cc204 100644 --- a/tests/e2e/p/interchain_workflow.go +++ b/tests/e2e/p/interchain_workflow.go @@ -19,7 +19,7 @@ import ( "github.com/ava-labs/avalanchego/config" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/tests/fixture/e2e" - "github.com/ava-labs/avalanchego/tests/fixture/testnet" + "github.com/ava-labs/avalanchego/tests/fixture/tmpnet" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/set" @@ -44,7 +44,7 @@ var _ = e2e.DescribePChain("[Interchain Workflow]", ginkgo.Label(e2e.UsesCChainL ginkgo.By("checking that the network has a compatible minimum stake duration", func() { minStakeDuration := cast.ToDuration(network.GetConfig().DefaultFlags[config.MinStakeDurationKey]) - require.Equal(testnet.DefaultMinStakeDuration, minStakeDuration) + 
require.Equal(tmpnet.DefaultMinStakeDuration, minStakeDuration) }) ginkgo.By("creating wallet with a funded key to send from and recipient key to deliver to") @@ -87,7 +87,7 @@ var _ = e2e.DescribePChain("[Interchain Workflow]", ginkgo.Label(e2e.UsesCChainL } ginkgo.By("adding new node and waiting for it to report healthy") - node := e2e.AddEphemeralNode(network, testnet.FlagsMap{}) + node := e2e.AddEphemeralNode(network, tmpnet.FlagsMap{}) e2e.WaitForHealthy(node) ginkgo.By("retrieving new node's id and pop") diff --git a/tests/e2e/p/staking_rewards.go b/tests/e2e/p/staking_rewards.go index 41a77985729b..475c3de261dd 100644 --- a/tests/e2e/p/staking_rewards.go +++ b/tests/e2e/p/staking_rewards.go @@ -20,7 +20,7 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/tests" "github.com/ava-labs/avalanchego/tests/fixture/e2e" - "github.com/ava-labs/avalanchego/tests/fixture/testnet" + "github.com/ava-labs/avalanchego/tests/fixture/tmpnet" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/units" @@ -43,13 +43,13 @@ var _ = ginkgo.Describe("[Staking Rewards]", func() { ginkgo.By("checking that the network has a compatible minimum stake duration", func() { minStakeDuration := cast.ToDuration(network.GetConfig().DefaultFlags[config.MinStakeDurationKey]) - require.Equal(testnet.DefaultMinStakeDuration, minStakeDuration) + require.Equal(tmpnet.DefaultMinStakeDuration, minStakeDuration) }) ginkgo.By("adding alpha node, whose uptime should result in a staking reward") - alphaNode := e2e.AddEphemeralNode(network, testnet.FlagsMap{}) + alphaNode := e2e.AddEphemeralNode(network, tmpnet.FlagsMap{}) ginkgo.By("adding beta node, whose uptime should not result in a staking reward") - betaNode := e2e.AddEphemeralNode(network, testnet.FlagsMap{}) + betaNode := e2e.AddEphemeralNode(network, tmpnet.FlagsMap{}) // Wait to check health until both nodes have started 
to minimize the duration // required for both nodes to report healthy. @@ -103,12 +103,18 @@ var _ = ginkgo.Describe("[Staking Rewards]", func() { betaNodeID, betaPOP, err := betaInfoClient.GetNodeID(e2e.DefaultContext()) require.NoError(err) + pvmClient := platformvm.NewClient(alphaNode.GetProcessContext().URI) + const ( delegationPercent = 0.10 // 10% delegationShare = reward.PercentDenominator * delegationPercent weight = 2_000 * units.Avax ) + ginkgo.By("retrieving supply before inserting validators") + supplyAtValidatorsStart, _, err := pvmClient.GetCurrentSupply(e2e.DefaultContext(), constants.PrimaryNetworkID) + require.NoError(err) + alphaValidatorStartTime := time.Now().Add(e2e.DefaultValidatorStartTimeDiff) alphaValidatorEndTime := alphaValidatorStartTime.Add(validationPeriod) tests.Outf("alpha node validation period starting at: %v\n", alphaValidatorStartTime) @@ -171,6 +177,10 @@ var _ = ginkgo.Describe("[Staking Rewards]", func() { require.NoError(err) }) + ginkgo.By("retrieving supply before inserting delegators") + supplyAtDelegatorsStart, _, err := pvmClient.GetCurrentSupply(e2e.DefaultContext(), constants.PrimaryNetworkID) + require.NoError(err) + gammaDelegatorStartTime := time.Now().Add(e2e.DefaultValidatorStartTimeDiff) tests.Outf("gamma delegation period starting at: %v\n", gammaDelegatorStartTime) @@ -227,8 +237,6 @@ var _ = ginkgo.Describe("[Staking Rewards]", func() { // delegation periods are shorter than the validation periods. 
time.Sleep(time.Until(betaValidatorEndTime)) - pvmClient := platformvm.NewClient(alphaNode.GetProcessContext().URI) - ginkgo.By("waiting until the alpha and beta nodes are no longer validators") e2e.Eventually(func() bool { validators, err := pvmClient.GetCurrentValidators(e2e.DefaultContext(), constants.PrimaryNetworkID, nil) @@ -270,11 +278,9 @@ var _ = ginkgo.Describe("[Staking Rewards]", func() { require.Len(rewardBalances, len(rewardKeys)) ginkgo.By("determining expected validation and delegation rewards") - currentSupply, _, err := pvmClient.GetCurrentSupply(e2e.DefaultContext(), constants.PrimaryNetworkID) - require.NoError(err) calculator := reward.NewCalculator(rewardConfig) - expectedValidationReward := calculator.Calculate(validationPeriod, weight, currentSupply) - potentialDelegationReward := calculator.Calculate(delegationPeriod, weight, currentSupply) + expectedValidationReward := calculator.Calculate(validationPeriod, weight, supplyAtValidatorsStart) + potentialDelegationReward := calculator.Calculate(delegationPeriod, weight, supplyAtDelegatorsStart) expectedDelegationFee, expectedDelegatorReward := reward.Split(potentialDelegationReward, delegationShare) ginkgo.By("checking expected rewards against actual rewards") diff --git a/tests/e2e/p/workflow.go b/tests/e2e/p/workflow.go index 3f0440ac49b6..8cc7c109d3a4 100644 --- a/tests/e2e/p/workflow.go +++ b/tests/e2e/p/workflow.go @@ -73,7 +73,7 @@ var _ = e2e.DescribePChain("[Workflow]", func() { vdrStartTime := time.Now().Add(validatorStartTimeDiff) // Use a random node ID to ensure that repeated test runs - // will succeed against a persistent network. + // will succeed against a network that persists across runs. 
validatorID, err := ids.ToNodeID(utils.RandomBytes(ids.NodeIDLen)) require.NoError(err) diff --git a/tests/fixture/e2e/env.go b/tests/fixture/e2e/env.go index 54a4676482e1..07c24866a9f0 100644 --- a/tests/fixture/e2e/env.go +++ b/tests/fixture/e2e/env.go @@ -16,8 +16,8 @@ import ( "github.com/ava-labs/avalanchego/tests" "github.com/ava-labs/avalanchego/tests/fixture" - "github.com/ava-labs/avalanchego/tests/fixture/testnet" - "github.com/ava-labs/avalanchego/tests/fixture/testnet/local" + "github.com/ava-labs/avalanchego/tests/fixture/tmpnet" + "github.com/ava-labs/avalanchego/tests/fixture/tmpnet/local" "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/perms" "github.com/ava-labs/avalanchego/vms/secp256k1fx" @@ -40,7 +40,7 @@ type TestEnvironment struct { // The directory where the test network configuration is stored NetworkDir string // URIs used to access the API endpoints of nodes of the network - URIs []testnet.NodeURI + URIs []tmpnet.NodeURI // The URI used to access the http server that allocates test data TestDataServerURI string @@ -57,16 +57,15 @@ func (te *TestEnvironment) Marshal() []byte { func NewTestEnvironment(flagVars *FlagVars) *TestEnvironment { require := require.New(ginkgo.GinkgoT()) - persistentNetworkDir := flagVars.PersistentNetworkDir() + networkDir := flagVars.NetworkDir() // Load or create a test network var network *local.LocalNetwork - if len(persistentNetworkDir) > 0 { - tests.Outf("{{yellow}}Using a persistent network configured at %s{{/}}\n", persistentNetworkDir) - + if len(networkDir) > 0 { var err error - network, err = local.ReadNetwork(persistentNetworkDir) + network, err = local.ReadNetwork(networkDir) require.NoError(err) + tests.Outf("{{yellow}}Using an existing network configured at %s{{/}}\n", network.Dir) } else { network = StartLocalNetwork(flagVars.AvalancheGoExecPath(), DefaultNetworkDir) } @@ -90,7 +89,7 @@ func NewTestEnvironment(flagVars *FlagVars) *TestEnvironment { // 
Retrieve a random URI to naively attempt to spread API load across // nodes. -func (te *TestEnvironment) GetRandomNodeURI() testnet.NodeURI { +func (te *TestEnvironment) GetRandomNodeURI() tmpnet.NodeURI { r := rand.New(rand.NewSource(time.Now().Unix())) //#nosec G404 nodeURI := te.URIs[r.Intn(len(te.URIs))] tests.Outf("{{blue}} targeting node %s with URI: %s{{/}}\n", nodeURI.NodeID, nodeURI.URI) @@ -98,7 +97,7 @@ func (te *TestEnvironment) GetRandomNodeURI() testnet.NodeURI { } // Retrieve the network to target for testing. -func (te *TestEnvironment) GetNetwork() testnet.Network { +func (te *TestEnvironment) GetNetwork() tmpnet.Network { network, err := local.ReadNetwork(te.NetworkDir) te.require.NoError(err) return network @@ -124,7 +123,7 @@ func (te *TestEnvironment) NewKeychain(count int) *secp256k1fx.Keychain { } // Create a new private network that is not shared with other tests. -func (te *TestEnvironment) NewPrivateNetwork() testnet.Network { +func (te *TestEnvironment) NewPrivateNetwork() tmpnet.Network { // Load the shared network to retrieve its path and exec path sharedNetwork, err := local.ReadNetwork(te.NetworkDir) te.require.NoError(err) diff --git a/tests/fixture/e2e/flags.go b/tests/fixture/e2e/flags.go index c7838cb7c761..23952b5dcd91 100644 --- a/tests/fixture/e2e/flags.go +++ b/tests/fixture/e2e/flags.go @@ -8,28 +8,31 @@ import ( "fmt" "os" - "github.com/ava-labs/avalanchego/tests/fixture/testnet/local" + "github.com/ava-labs/avalanchego/tests/fixture/tmpnet/local" ) type FlagVars struct { - avalancheGoExecPath string - persistentNetworkDir string - usePersistentNetwork bool + avalancheGoExecPath string + networkDir string + useExistingNetwork bool } -func (v *FlagVars) PersistentNetworkDir() string { - if v.usePersistentNetwork && len(v.persistentNetworkDir) == 0 { - return os.Getenv(local.NetworkDirEnvName) +func (v *FlagVars) NetworkDir() string { + if !v.useExistingNetwork { + return "" } - return v.persistentNetworkDir + if 
len(v.networkDir) > 0 { + return v.networkDir + } + return os.Getenv(local.NetworkDirEnvName) } func (v *FlagVars) AvalancheGoExecPath() string { return v.avalancheGoExecPath } -func (v *FlagVars) UsePersistentNetwork() bool { - return v.usePersistentNetwork +func (v *FlagVars) UseExistingNetwork() bool { + return v.useExistingNetwork } func RegisterFlags() *FlagVars { @@ -38,19 +41,19 @@ func RegisterFlags() *FlagVars { &vars.avalancheGoExecPath, "avalanchego-path", os.Getenv(local.AvalancheGoPathEnvName), - fmt.Sprintf("avalanchego executable path (required if not using a persistent network). Also possible to configure via the %s env variable.", local.AvalancheGoPathEnvName), + fmt.Sprintf("avalanchego executable path (required if not using an existing network). Also possible to configure via the %s env variable.", local.AvalancheGoPathEnvName), ) flag.StringVar( - &vars.persistentNetworkDir, + &vars.networkDir, "network-dir", "", - fmt.Sprintf("[optional] the dir containing the configuration of a persistent network to target for testing. Useful for speeding up test development. Also possible to configure via the %s env variable.", local.NetworkDirEnvName), + fmt.Sprintf("[optional] the dir containing the configuration of an existing network to target for testing. Will only be used if --use-existing-network is specified. 
Also possible to configure via the %s env variable.", local.NetworkDirEnvName), ) flag.BoolVar( - &vars.usePersistentNetwork, - "use-persistent-network", + &vars.useExistingNetwork, + "use-existing-network", false, - "[optional] whether to target the persistent network identified by --network-dir.", + "[optional] whether to target the existing network identified by --network-dir.", ) return &vars diff --git a/tests/fixture/e2e/helpers.go b/tests/fixture/e2e/helpers.go index 15da611324e0..8b7eb5260b8c 100644 --- a/tests/fixture/e2e/helpers.go +++ b/tests/fixture/e2e/helpers.go @@ -22,8 +22,8 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/tests" - "github.com/ava-labs/avalanchego/tests/fixture/testnet" - "github.com/ava-labs/avalanchego/tests/fixture/testnet/local" + "github.com/ava-labs/avalanchego/tests/fixture/tmpnet" + "github.com/ava-labs/avalanchego/tests/fixture/tmpnet/local" "github.com/ava-labs/avalanchego/vms/platformvm/txs/executor" "github.com/ava-labs/avalanchego/vms/secp256k1fx" "github.com/ava-labs/avalanchego/wallet/subnet/primary" @@ -62,7 +62,7 @@ const ( ) // Create a new wallet for the provided keychain against the specified node URI. -func NewWallet(keychain *secp256k1fx.Keychain, nodeURI testnet.NodeURI) primary.Wallet { +func NewWallet(keychain *secp256k1fx.Keychain, nodeURI tmpnet.NodeURI) primary.Wallet { tests.Outf("{{blue}} initializing a new wallet for node %s with URI: %s {{/}}\n", nodeURI.NodeID, nodeURI.URI) baseWallet, err := primary.MakeWallet(DefaultContext(), &primary.WalletConfig{ URI: nodeURI.URI, @@ -81,7 +81,7 @@ func NewWallet(keychain *secp256k1fx.Keychain, nodeURI testnet.NodeURI) primary. } // Create a new eth client targeting the specified node URI. 
-func NewEthClient(nodeURI testnet.NodeURI) ethclient.Client { +func NewEthClient(nodeURI tmpnet.NodeURI) ethclient.Client { tests.Outf("{{blue}} initializing a new eth client for node %s with URI: %s {{/}}\n", nodeURI.NodeID, nodeURI.URI) nodeAddress := strings.Split(nodeURI.URI, "//")[1] uri := fmt.Sprintf("ws://%s/ext/bc/C/ws", nodeAddress) @@ -128,7 +128,7 @@ func Eventually(condition func() bool, waitFor time.Duration, tick time.Duration // Add an ephemeral node that is only intended to be used by a single test. Its ID and // URI are not intended to be returned from the Network instance to minimize // accessibility from other tests. -func AddEphemeralNode(network testnet.Network, flags testnet.FlagsMap) testnet.Node { +func AddEphemeralNode(network tmpnet.Network, flags tmpnet.FlagsMap) tmpnet.Node { require := require.New(ginkgo.GinkgoT()) node, err := network.AddEphemeralNode(ginkgo.GinkgoWriter, flags) @@ -145,11 +145,11 @@ func AddEphemeralNode(network testnet.Network, flags testnet.FlagsMap) testnet.N } // Wait for the given node to report healthy. -func WaitForHealthy(node testnet.Node) { +func WaitForHealthy(node tmpnet.Node) { // Need to use explicit context (vs DefaultContext()) to support use with DeferCleanup ctx, cancel := context.WithTimeout(context.Background(), DefaultTimeout) defer cancel() - require.NoError(ginkgo.GinkgoT(), testnet.WaitForHealthy(ctx, node)) + require.NoError(ginkgo.GinkgoT(), tmpnet.WaitForHealthy(ctx, node)) } // Sends an eth transaction, waits for the transaction receipt to be issued @@ -197,7 +197,7 @@ func WithSuggestedGasPrice(ethClient ethclient.Client) common.Option { } // Verify that a new node can bootstrap into the network. 
-func CheckBootstrapIsPossible(network testnet.Network) { +func CheckBootstrapIsPossible(network tmpnet.Network) { require := require.New(ginkgo.GinkgoT()) if len(os.Getenv(SkipBootstrapChecksEnvName)) > 0 { @@ -210,7 +210,7 @@ func CheckBootstrapIsPossible(network testnet.Network) { // checking for bootstrap implicitly on teardown via a function registered // with ginkgo.DeferCleanup. It's not possible to call DeferCleanup from // within a function called by DeferCleanup. - node, err := network.AddEphemeralNode(ginkgo.GinkgoWriter, testnet.FlagsMap{}) + node, err := network.AddEphemeralNode(ginkgo.GinkgoWriter, tmpnet.FlagsMap{}) require.NoError(err) defer func() { @@ -234,8 +234,8 @@ func StartLocalNetwork(avalancheGoExecPath string, networkDir string) *local.Loc ExecPath: avalancheGoExecPath, }, }, - testnet.DefaultNodeCount, - testnet.DefaultFundedKeyCount, + tmpnet.DefaultNodeCount, + tmpnet.DefaultFundedKeyCount, ) require.NoError(err) ginkgo.DeferCleanup(func() { diff --git a/tests/fixture/testnet/README.md b/tests/fixture/testnet/README.md deleted file mode 100644 index ef2e5fb4df75..000000000000 --- a/tests/fixture/testnet/README.md +++ /dev/null @@ -1,8 +0,0 @@ -# Test Network Fixture - -This package contains configuration and interfaces that are -independent of a given orchestration mechanism -(e.g. [local](local/README.md)). The intent is to enable tests to be -written against the interfaces defined in this package and for -implementation-specific details of test network orchestration to be -limited to test setup and teardown. diff --git a/tests/fixture/tmpnet/README.md b/tests/fixture/tmpnet/README.md new file mode 100644 index 000000000000..ca48d553105e --- /dev/null +++ b/tests/fixture/tmpnet/README.md @@ -0,0 +1,20 @@ +# tmpnet (temporary network fixture) + +This package contains configuration and interfaces that are +independent of a given orchestration mechanism +(e.g. [local](local/README.md)). 
The intent is to enable tests to be +written against the interfaces defined in this package and for +implementation-specific details of test network orchestration to be +limited to test setup and teardown. + +## What's in a name? + +The name of this package was originally `testnet` and its cli was +`testnetctl`. This name was chosen in ignorance that `testnet` +commonly refers to a persistent blockchain network used for testing. + +To avoid confusion, the name was changed to `tmpnet` and its cli +`tmpnetctl`. `tmpnet` is short for `temporary network` since the +networks it deploys are likely to live for a limited duration in +support of the development and testing of avalanchego and its related +repositories. diff --git a/tests/fixture/testnet/cmd/main.go b/tests/fixture/tmpnet/cmd/main.go similarity index 86% rename from tests/fixture/testnet/cmd/main.go rename to tests/fixture/tmpnet/cmd/main.go index 92dc846edca8..a9f5c1865291 100644 --- a/tests/fixture/testnet/cmd/main.go +++ b/tests/fixture/tmpnet/cmd/main.go @@ -13,8 +13,8 @@ import ( "github.com/spf13/cobra" - "github.com/ava-labs/avalanchego/tests/fixture/testnet" - "github.com/ava-labs/avalanchego/tests/fixture/testnet/local" + "github.com/ava-labs/avalanchego/tests/fixture/tmpnet" + "github.com/ava-labs/avalanchego/tests/fixture/tmpnet/local" "github.com/ava-labs/avalanchego/version" ) @@ -27,8 +27,8 @@ var ( func main() { rootCmd := &cobra.Command{ - Use: "testnetctl", - Short: "testnetctl commands", + Use: "tmpnetctl", + Short: "tmpnetctl commands", } versionCmd := &cobra.Command{ @@ -84,7 +84,7 @@ func main() { return err } - fmt.Fprintf(os.Stdout, "\nConfigure testnetctl to target this network by default with one of the following statements:") + fmt.Fprintf(os.Stdout, "\nConfigure tmpnetctl to target this network by default with one of the following statements:") fmt.Fprintf(os.Stdout, "\n - source %s\n", network.EnvFilePath()) fmt.Fprintf(os.Stdout, " - %s\n", network.EnvFileContents()) 
fmt.Fprintf(os.Stdout, " - export %s=%s\n", local.NetworkDirEnvName, latestSymlinkPath) @@ -94,8 +94,8 @@ func main() { } startNetworkCmd.PersistentFlags().StringVar(&rootDir, "root-dir", os.Getenv(local.RootDirEnvName), "The path to the root directory for local networks") startNetworkCmd.PersistentFlags().StringVar(&execPath, "avalanchego-path", os.Getenv(local.AvalancheGoPathEnvName), "The path to an avalanchego binary") - startNetworkCmd.PersistentFlags().Uint8Var(&nodeCount, "node-count", testnet.DefaultNodeCount, "Number of nodes the network should initially consist of") - startNetworkCmd.PersistentFlags().Uint8Var(&fundedKeyCount, "funded-key-count", testnet.DefaultFundedKeyCount, "Number of funded keys the network should start with") + startNetworkCmd.PersistentFlags().Uint8Var(&nodeCount, "node-count", tmpnet.DefaultNodeCount, "Number of nodes the network should initially consist of") + startNetworkCmd.PersistentFlags().Uint8Var(&fundedKeyCount, "funded-key-count", tmpnet.DefaultFundedKeyCount, "Number of funded keys the network should start with") rootCmd.AddCommand(startNetworkCmd) var networkDir string @@ -117,7 +117,7 @@ func main() { rootCmd.AddCommand(stopNetworkCmd) if err := rootCmd.Execute(); err != nil { - fmt.Fprintf(os.Stderr, "testnetctl failed: %v\n", err) + fmt.Fprintf(os.Stderr, "tmpnetctl failed: %v\n", err) os.Exit(1) } os.Exit(0) diff --git a/tests/fixture/testnet/common.go b/tests/fixture/tmpnet/common.go similarity index 98% rename from tests/fixture/testnet/common.go rename to tests/fixture/tmpnet/common.go index ab983e893e46..4b0281f45242 100644 --- a/tests/fixture/testnet/common.go +++ b/tests/fixture/tmpnet/common.go @@ -1,7 +1,7 @@ // Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
-package testnet +package tmpnet import ( "context" diff --git a/tests/fixture/testnet/config.go b/tests/fixture/tmpnet/config.go similarity index 99% rename from tests/fixture/testnet/config.go rename to tests/fixture/tmpnet/config.go index 425aa646a690..f504eb84d20d 100644 --- a/tests/fixture/testnet/config.go +++ b/tests/fixture/tmpnet/config.go @@ -1,7 +1,7 @@ // Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. -package testnet +package tmpnet import ( "encoding/base64" diff --git a/tests/fixture/testnet/interfaces.go b/tests/fixture/tmpnet/interfaces.go similarity index 97% rename from tests/fixture/testnet/interfaces.go rename to tests/fixture/tmpnet/interfaces.go index 2c1479ec48bd..2fd03cbc2a98 100644 --- a/tests/fixture/testnet/interfaces.go +++ b/tests/fixture/tmpnet/interfaces.go @@ -1,7 +1,7 @@ // Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. -package testnet +package tmpnet import ( "context" diff --git a/tests/fixture/testnet/local/README.md b/tests/fixture/tmpnet/local/README.md similarity index 86% rename from tests/fixture/testnet/local/README.md rename to tests/fixture/tmpnet/local/README.md index fdfbbdb4d58b..91af35a9b805 100644 --- a/tests/fixture/testnet/local/README.md +++ b/tests/fixture/tmpnet/local/README.md @@ -4,7 +4,7 @@ This package implements a simple orchestrator for the avalanchego nodes of a local network. Configuration is stored on disk, and nodes run as independent processes whose process details are also written to disk. Using the filesystem to store configuration and process details -allows for the `testnetctl` cli and e2e test fixture to orchestrate +allows for the `tmpnetctl` cli and e2e test fixture to orchestrate the same local networks without the use of an rpc daemon. ## Package details @@ -27,36 +27,36 @@ abstractions. 
## Usage -### Via testnetctl +### Via tmpnetctl -A local network can be managed by the `testnetctl` cli tool: +A local network can be managed by the `tmpnetctl` cli tool: ```bash # From the root of the avalanchego repo -# Build the testnetctl binary -$ ./scripts/build_testnetctl.sh +# Build the tmpnetctl binary +$ ./scripts/build_tmpnetctl.sh # Start a new network -$ ./build/testnetctl start-network --avalanchego-path=/path/to/avalanchego +$ ./build/tmpnetctl start-network --avalanchego-path=/path/to/avalanchego ... -Started network 1000 @ /home/me/.testnetctl/networks/1000 +Started network 1000 @ /home/me/.tmpnet/networks/1000 -Configure testnetctl to target this network by default with one of the following statements: - - source /home/me/.testnetctl/networks/1000/network.env - - export TESTNETCTL_NETWORK_DIR=/home/me/.testnetctl/networks/1000 - - export TESTNETCTL_NETWORK_DIR=/home/me/.testnetctl/networks/latest +Configure tmpnetctl to target this network by default with one of the following statements: + - source /home/me/.tmpnet/networks/1000/network.env + - export TMPNET_NETWORK_DIR=/home/me/.tmpnet/networks/1000 + - export TMPNET_NETWORK_DIR=/home/me/.tmpnet/networks/latest # Stop the network -$ ./build/testnetctl stop-network --network-dir=/path/to/network +$ ./build/tmpnetctl stop-network --network-dir=/path/to/network ``` Note the export of the path ending in `latest`. This is a symlink that -set to the last network created by `testnetctl start-network`. Setting -the `TESTNETCTL_NETWORK_DIR` env var to this symlink ensures that -`testnetctl` commands and e2e execution with -`--use-persistent-network` will target the most recently deployed -local network. +is set to the last network created by `tmpnetctl start-network`. Setting +the `TMPNET_NETWORK_DIR` env var to this symlink ensures that +`tmpnetctl` commands and e2e execution with +`--use-existing-network` will target the most recently deployed local +network. 
### Via code @@ -66,7 +66,7 @@ A local network can be managed in code: network, _ := local.StartNetwork( ctx, // Context used to limit duration of waiting for network health ginkgo.GinkgoWriter, // Writer to report progress of network start - "", // Use default root dir (~/.testnetctl) + "", // Use default root dir (~/.tmpnet) &local.LocalNetwork{ LocalConfig: local.LocalConfig{ ExecPath: "/path/to/avalanchego", // Defining the avalanchego exec path is required @@ -121,7 +121,7 @@ tests](../../../e2e/e2e_test.go). By default, nodes in a local network will be started with staking and API ports set to `0` to ensure that ports will be dynamically -chosen. The testnet fixture discovers the ports used by a given node +chosen. The tmpnet fixture discovers the ports used by a given node by reading the `[base-data-dir]/process.json` file written by avalanchego on node start. The use of dynamic ports supports testing with many local networks without having to manually select compatible @@ -133,7 +133,7 @@ A local network relies on configuration written to disk in the following structu ``` HOME -└── .testnetctl // Root path for tool +└── .tmpnet // Root path for the temporary network fixture └── networks // Default parent directory for local networks └── 1000 // The networkID is used to name the network dir and starts at 1000 ├── NodeID-37E8UK3x2YFsHE3RdALmfWcppcZ1eTuj9 // The ID of a node is the name of its data dir @@ -189,10 +189,10 @@ TODO(marun) Enable configuration of X-Chain and P-Chain. ### Network env -A shell script that sets the `TESTNETCTL_NETWORK_DIR` env var to the +A shell script that sets the `TMPNET_NETWORK_DIR` env var to the path of the network is stored at `[network-dir]/network.env`. Sourcing this file (i.e. `source network.env`) in a shell will configure ginkgo -e2e and the `testnetctl` cli to target the network path specified in +e2e and the `tmpnetctl` cli to target the network path specified in the env var. 
### Node configuration diff --git a/tests/fixture/testnet/local/config.go b/tests/fixture/tmpnet/local/config.go similarity index 79% rename from tests/fixture/testnet/local/config.go rename to tests/fixture/tmpnet/local/config.go index 317975a2395c..70ef9a443185 100644 --- a/tests/fixture/testnet/local/config.go +++ b/tests/fixture/tmpnet/local/config.go @@ -7,15 +7,15 @@ import ( "time" "github.com/ava-labs/avalanchego/config" - "github.com/ava-labs/avalanchego/tests/fixture/testnet" + "github.com/ava-labs/avalanchego/tests/fixture/tmpnet" ) const ( // Constants defining the names of shell variables whose value can // configure local network orchestration. AvalancheGoPathEnvName = "AVALANCHEGO_PATH" - NetworkDirEnvName = "TESTNETCTL_NETWORK_DIR" - RootDirEnvName = "TESTNETCTL_ROOT_DIR" + NetworkDirEnvName = "TMPNET_NETWORK_DIR" + RootDirEnvName = "TMPNET_ROOT_DIR" DefaultNetworkStartTimeout = 2 * time.Minute DefaultNodeInitTimeout = 10 * time.Second @@ -23,9 +23,9 @@ const ( ) // A set of flags appropriate for local testing. -func LocalFlags() testnet.FlagsMap { +func LocalFlags() tmpnet.FlagsMap { // Supply only non-default configuration to ensure that default values will be used. - return testnet.FlagsMap{ + return tmpnet.FlagsMap{ config.NetworkPeerListGossipFreqKey: "250ms", config.NetworkMaxReconnectDelayKey: "1s", config.PublicIPKey: "127.0.0.1", @@ -37,16 +37,16 @@ func LocalFlags() testnet.FlagsMap { config.IndexEnabledKey: true, config.LogDisplayLevelKey: "INFO", config.LogLevelKey: "DEBUG", - config.MinStakeDurationKey: testnet.DefaultMinStakeDuration.String(), + config.MinStakeDurationKey: tmpnet.DefaultMinStakeDuration.String(), } } // C-Chain config for local testing. -func LocalCChainConfig() testnet.FlagsMap { +func LocalCChainConfig() tmpnet.FlagsMap { // Supply only non-default configuration to ensure that default // values will be used. Available C-Chain configuration options are // defined in the `github.com/ava-labs/coreth/evm` package. 
- return testnet.FlagsMap{ + return tmpnet.FlagsMap{ "log-level": "trace", } } diff --git a/tests/fixture/testnet/local/network.go b/tests/fixture/tmpnet/local/network.go similarity index 95% rename from tests/fixture/testnet/local/network.go rename to tests/fixture/tmpnet/local/network.go index 836a1489c2dd..70411d4afcde 100644 --- a/tests/fixture/testnet/local/network.go +++ b/tests/fixture/tmpnet/local/network.go @@ -18,7 +18,7 @@ import ( "github.com/ava-labs/avalanchego/config" "github.com/ava-labs/avalanchego/genesis" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/tests/fixture/testnet" + "github.com/ava-labs/avalanchego/tests/fixture/tmpnet" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/perms" @@ -48,7 +48,7 @@ func GetDefaultRootDir() (string, error) { if err != nil { return "", err } - return filepath.Join(homeDir, ".testnetctl", "networks"), nil + return filepath.Join(homeDir, ".tmpnet", "networks"), nil } // Find the next available network ID by attempting to create a @@ -83,7 +83,7 @@ func FindNextNetworkID(rootDir string) (uint32, string, error) { // Defines the configuration required for a local network (i.e. one composed of local processes). type LocalNetwork struct { - testnet.NetworkConfig + tmpnet.NetworkConfig LocalConfig // Nodes with local configuration @@ -94,13 +94,13 @@ type LocalNetwork struct { } // Returns the configuration of the network in backend-agnostic form. -func (ln *LocalNetwork) GetConfig() testnet.NetworkConfig { +func (ln *LocalNetwork) GetConfig() tmpnet.NetworkConfig { return ln.NetworkConfig } // Returns the nodes of the network in backend-agnostic form. 
-func (ln *LocalNetwork) GetNodes() []testnet.Node { - nodes := make([]testnet.Node, 0, len(ln.Nodes)) +func (ln *LocalNetwork) GetNodes() []tmpnet.Node { + nodes := make([]tmpnet.Node, 0, len(ln.Nodes)) for _, node := range ln.Nodes { nodes = append(nodes, node) } @@ -108,12 +108,12 @@ func (ln *LocalNetwork) GetNodes() []testnet.Node { } // Adds a backend-agnostic ephemeral node to the network -func (ln *LocalNetwork) AddEphemeralNode(w io.Writer, flags testnet.FlagsMap) (testnet.Node, error) { +func (ln *LocalNetwork) AddEphemeralNode(w io.Writer, flags tmpnet.FlagsMap) (tmpnet.Node, error) { if flags == nil { - flags = testnet.FlagsMap{} + flags = tmpnet.FlagsMap{} } return ln.AddLocalNode(w, &LocalNode{ - NodeConfig: testnet.NodeConfig{ + NodeConfig: tmpnet.NodeConfig{ Flags: flags, }, }, true /* isEphemeral */) @@ -298,7 +298,7 @@ func (ln *LocalNetwork) PopulateNodeConfig(node *LocalNode, nodeParentDir string // Set values common to all nodes flags.SetDefaults(ln.DefaultFlags) - flags.SetDefaults(testnet.FlagsMap{ + flags.SetDefaults(tmpnet.FlagsMap{ config.GenesisFileKey: ln.GetGenesisPath(), config.ChainConfigDirKey: ln.GetChainConfigDir(), }) @@ -385,7 +385,7 @@ func (ln *LocalNetwork) WaitForHealthy(ctx context.Context, w io.Writer) error { } healthy, err := node.IsHealthy(ctx) - if err != nil && !errors.Is(err, testnet.ErrNotRunning) { + if err != nil && !errors.Is(err, tmpnet.ErrNotRunning) { return err } if !healthy { @@ -409,14 +409,14 @@ func (ln *LocalNetwork) WaitForHealthy(ctx context.Context, w io.Writer) error { // Retrieve API URIs for all running primary validator nodes. URIs for // ephemeral nodes are not returned. -func (ln *LocalNetwork) GetURIs() []testnet.NodeURI { - uris := make([]testnet.NodeURI, 0, len(ln.Nodes)) +func (ln *LocalNetwork) GetURIs() []tmpnet.NodeURI { + uris := make([]tmpnet.NodeURI, 0, len(ln.Nodes)) for _, node := range ln.Nodes { // Only append URIs that are not empty. 
A node may have an // empty URI if it was not running at the time // node.ReadProcessContext() was called. if len(node.URI) > 0 { - uris = append(uris, testnet.NodeURI{ + uris = append(uris, tmpnet.NodeURI{ NodeID: node.NodeID, URI: node.URI, }) @@ -458,7 +458,7 @@ func (ln *LocalNetwork) ReadGenesis() error { } func (ln *LocalNetwork) WriteGenesis() error { - bytes, err := testnet.DefaultJSONMarshal(ln.Genesis) + bytes, err := tmpnet.DefaultJSONMarshal(ln.Genesis) if err != nil { return fmt.Errorf("failed to marshal genesis: %w", err) } @@ -477,7 +477,7 @@ func (ln *LocalNetwork) GetCChainConfigPath() string { } func (ln *LocalNetwork) ReadCChainConfig() error { - chainConfig, err := testnet.ReadFlagsMap(ln.GetCChainConfigPath(), "C-Chain config") + chainConfig, err := tmpnet.ReadFlagsMap(ln.GetCChainConfigPath(), "C-Chain config") if err != nil { return err } @@ -496,7 +496,7 @@ func (ln *LocalNetwork) WriteCChainConfig() error { // Used to marshal/unmarshal persistent local network defaults. 
type localDefaults struct { - Flags testnet.FlagsMap + Flags tmpnet.FlagsMap ExecPath string FundedKeys []*secp256k1.PrivateKey } @@ -526,7 +526,7 @@ func (ln *LocalNetwork) WriteDefaults() error { ExecPath: ln.ExecPath, FundedKeys: ln.FundedKeys, } - bytes, err := testnet.DefaultJSONMarshal(defaults) + bytes, err := tmpnet.DefaultJSONMarshal(defaults) if err != nil { return fmt.Errorf("failed to marshal defaults: %w", err) } diff --git a/tests/fixture/testnet/local/network_test.go b/tests/fixture/tmpnet/local/network_test.go similarity index 100% rename from tests/fixture/testnet/local/network_test.go rename to tests/fixture/tmpnet/local/network_test.go diff --git a/tests/fixture/testnet/local/node.go b/tests/fixture/tmpnet/local/node.go similarity index 95% rename from tests/fixture/testnet/local/node.go rename to tests/fixture/tmpnet/local/node.go index 2de516825677..908d3fd5f474 100644 --- a/tests/fixture/testnet/local/node.go +++ b/tests/fixture/tmpnet/local/node.go @@ -23,7 +23,7 @@ import ( "github.com/ava-labs/avalanchego/config" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/node" - "github.com/ava-labs/avalanchego/tests/fixture/testnet" + "github.com/ava-labs/avalanchego/tests/fixture/tmpnet" "github.com/ava-labs/avalanchego/utils/perms" ) @@ -42,7 +42,7 @@ type LocalConfig struct { // Stores the configuration and process details of a node in a local network. type LocalNode struct { - testnet.NodeConfig + tmpnet.NodeConfig LocalConfig node.NodeProcessContext @@ -51,8 +51,8 @@ type LocalNode struct { func NewLocalNode(dataDir string) *LocalNode { return &LocalNode{ - NodeConfig: testnet.NodeConfig{ - Flags: testnet.FlagsMap{ + NodeConfig: tmpnet.NodeConfig{ + Flags: tmpnet.FlagsMap{ config.DataDirKey: dataDir, }, }, @@ -76,7 +76,7 @@ func (n *LocalNode) GetID() ids.NodeID { } // Retrieve backend-agnostic node configuration. 
-func (n *LocalNode) GetConfig() testnet.NodeConfig { +func (n *LocalNode) GetConfig() tmpnet.NodeConfig { return n.NodeConfig } @@ -98,11 +98,11 @@ func (n *LocalNode) ReadConfig() error { if err != nil { return fmt.Errorf("failed to read local node config: %w", err) } - flags := testnet.FlagsMap{} + flags := tmpnet.FlagsMap{} if err := json.Unmarshal(bytes, &flags); err != nil { return fmt.Errorf("failed to unmarshal local node config: %w", err) } - config := testnet.NodeConfig{Flags: flags} + config := tmpnet.NodeConfig{Flags: flags} if err := config.EnsureNodeID(); err != nil { return err } @@ -115,7 +115,7 @@ func (n *LocalNode) WriteConfig() error { return fmt.Errorf("failed to create node dir: %w", err) } - bytes, err := testnet.DefaultJSONMarshal(n.Flags) + bytes, err := tmpnet.DefaultJSONMarshal(n.Flags) if err != nil { return fmt.Errorf("failed to marshal local node config: %w", err) } @@ -265,7 +265,7 @@ func (n *LocalNode) Stop() error { } // Wait for the node process to stop - ticker := time.NewTicker(testnet.DefaultNodeTickerInterval) + ticker := time.NewTicker(tmpnet.DefaultNodeTickerInterval) defer ticker.Stop() ctx, cancel := context.WithTimeout(context.Background(), DefaultNodeStopTimeout) defer cancel() @@ -295,7 +295,7 @@ func (n *LocalNode) IsHealthy(ctx context.Context) (bool, error) { return false, fmt.Errorf("failed to determine process status: %w", err) } if proc == nil { - return false, testnet.ErrNotRunning + return false, tmpnet.ErrNotRunning } // Check that the node is reporting healthy @@ -321,7 +321,7 @@ func (n *LocalNode) IsHealthy(ctx context.Context) (bool, error) { } func (n *LocalNode) WaitForProcessContext(ctx context.Context) error { - ticker := time.NewTicker(testnet.DefaultNodeTickerInterval) + ticker := time.NewTicker(tmpnet.DefaultNodeTickerInterval) defer ticker.Stop() ctx, cancel := context.WithTimeout(ctx, DefaultNodeInitTimeout) diff --git a/tests/fixture/testnet/local/node_test.go 
b/tests/fixture/tmpnet/local/node_test.go similarity index 100% rename from tests/fixture/testnet/local/node_test.go rename to tests/fixture/tmpnet/local/node_test.go diff --git a/utils/sampler/weighted_array.go b/utils/sampler/weighted_array.go index 0db1dda17af9..b2dcb593b79b 100644 --- a/utils/sampler/weighted_array.go +++ b/utils/sampler/weighted_array.go @@ -19,8 +19,8 @@ type weightedArrayElement struct { } // Note that this sorts in order of decreasing weight. -func (e weightedArrayElement) Less(other weightedArrayElement) bool { - return e.cumulativeWeight > other.cumulativeWeight +func (e weightedArrayElement) Compare(other weightedArrayElement) int { + return utils.Compare(other.cumulativeWeight, e.cumulativeWeight) } // Sampling is performed by executing a modified binary search over the provided diff --git a/utils/sampler/weighted_array_test.go b/utils/sampler/weighted_array_test.go index e10583633436..0f44869b7dee 100644 --- a/utils/sampler/weighted_array_test.go +++ b/utils/sampler/weighted_array_test.go @@ -4,24 +4,39 @@ package sampler import ( + "fmt" "testing" "github.com/stretchr/testify/require" ) -func TestWeightedArrayElementLess(t *testing.T) { - require := require.New(t) - - var elt1, elt2 weightedArrayElement - require.False(elt1.Less(elt2)) - require.False(elt2.Less(elt1)) - - elt1 = weightedArrayElement{ - cumulativeWeight: 1, +func TestWeightedArrayElementCompare(t *testing.T) { + tests := []struct { + a weightedArrayElement + b weightedArrayElement + expected int + }{ + { + a: weightedArrayElement{}, + b: weightedArrayElement{}, + expected: 0, + }, + { + a: weightedArrayElement{ + cumulativeWeight: 1, + }, + b: weightedArrayElement{ + cumulativeWeight: 2, + }, + expected: 1, + }, } - elt2 = weightedArrayElement{ - cumulativeWeight: 2, + for _, test := range tests { + t.Run(fmt.Sprintf("%d_%d_%d", test.a.cumulativeWeight, test.b.cumulativeWeight, test.expected), func(t *testing.T) { + require := require.New(t) + + 
require.Equal(test.expected, test.a.Compare(test.b)) + require.Equal(-test.expected, test.b.Compare(test.a)) + }) } - require.False(elt1.Less(elt2)) - require.True(elt2.Less(elt1)) } diff --git a/utils/sampler/weighted_heap.go b/utils/sampler/weighted_heap.go index 4b7fb84df482..ef5a8feb6e9f 100644 --- a/utils/sampler/weighted_heap.go +++ b/utils/sampler/weighted_heap.go @@ -19,17 +19,16 @@ type weightedHeapElement struct { index int } -func (e weightedHeapElement) Less(other weightedHeapElement) bool { +// Compare the elements. Weight is in decreasing order. Index is in increasing +// order. +func (e weightedHeapElement) Compare(other weightedHeapElement) int { // By accounting for the initial index of the weights, this results in a // stable sort. We do this rather than using `sort.Stable` because of the // reported change in performance of the sort used. - if e.weight > other.weight { - return true + if weightCmp := utils.Compare(other.weight, e.weight); weightCmp != 0 { + return weightCmp } - if e.weight < other.weight { - return false - } - return e.index < other.index + return utils.Compare(e.index, other.index) } // Sampling is performed by executing a search over a tree of elements in the diff --git a/utils/sampler/weighted_heap_test.go b/utils/sampler/weighted_heap_test.go index 3187c14fa10a..eb9ff46ab276 100644 --- a/utils/sampler/weighted_heap_test.go +++ b/utils/sampler/weighted_heap_test.go @@ -23,57 +23,44 @@ func TestWeightedHeapInitialize(t *testing.T) { } } -func TestWeightedHeapElementLess(t *testing.T) { +func TestWeightedHeapElementCompare(t *testing.T) { type test struct { name string elt1 weightedHeapElement elt2 weightedHeapElement - expected bool + expected int } tests := []test{ { name: "all same", elt1: weightedHeapElement{}, elt2: weightedHeapElement{}, - expected: false, + expected: 0, }, { - name: "first lower weight", + name: "lower weight", elt1: weightedHeapElement{}, elt2: weightedHeapElement{ weight: 1, }, - expected: false, + 
expected: 1, }, { - name: "first higher weight", - elt1: weightedHeapElement{ - weight: 1, - }, - elt2: weightedHeapElement{}, - expected: true, - }, - { - name: "first higher index", + name: "higher index", elt1: weightedHeapElement{ index: 1, }, elt2: weightedHeapElement{}, - expected: false, - }, - { - name: "second higher index", - elt1: weightedHeapElement{}, - elt2: weightedHeapElement{ - index: 1, - }, - expected: true, + expected: 1, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - require.Equal(t, tt.expected, tt.elt1.Less(tt.elt2)) + require := require.New(t) + + require.Equal(tt.expected, tt.elt1.Compare(tt.elt2)) + require.Equal(-tt.expected, tt.elt2.Compare(tt.elt1)) }) } } diff --git a/utils/sampler/weighted_linear.go b/utils/sampler/weighted_linear.go index a5d0e3b16711..268ba9ea2652 100644 --- a/utils/sampler/weighted_linear.go +++ b/utils/sampler/weighted_linear.go @@ -19,8 +19,8 @@ type weightedLinearElement struct { } // Note that this sorts in order of decreasing cumulative weight. 
-func (e weightedLinearElement) Less(other weightedLinearElement) bool { - return e.cumulativeWeight > other.cumulativeWeight +func (e weightedLinearElement) Compare(other weightedLinearElement) int { + return utils.Compare(other.cumulativeWeight, e.cumulativeWeight) } // Sampling is performed by executing a linear search over the provided elements diff --git a/utils/sampler/weighted_linear_test.go b/utils/sampler/weighted_linear_test.go index b34035017b4a..6757158ed8e6 100644 --- a/utils/sampler/weighted_linear_test.go +++ b/utils/sampler/weighted_linear_test.go @@ -4,24 +4,39 @@ package sampler import ( + "fmt" "testing" "github.com/stretchr/testify/require" ) -func TestWeightedLinearElementLess(t *testing.T) { - require := require.New(t) - - var elt1, elt2 weightedLinearElement - require.False(elt1.Less(elt2)) - require.False(elt2.Less(elt1)) - - elt1 = weightedLinearElement{ - cumulativeWeight: 1, +func TestWeightedLinearElementCompare(t *testing.T) { + tests := []struct { + a weightedLinearElement + b weightedLinearElement + expected int + }{ + { + a: weightedLinearElement{}, + b: weightedLinearElement{}, + expected: 0, + }, + { + a: weightedLinearElement{ + cumulativeWeight: 1, + }, + b: weightedLinearElement{ + cumulativeWeight: 2, + }, + expected: 1, + }, } - elt2 = weightedLinearElement{ - cumulativeWeight: 2, + for _, test := range tests { + t.Run(fmt.Sprintf("%d_%d_%d", test.a.cumulativeWeight, test.b.cumulativeWeight, test.expected), func(t *testing.T) { + require := require.New(t) + + require.Equal(test.expected, test.a.Compare(test.b)) + require.Equal(-test.expected, test.b.Compare(test.a)) + }) } - require.False(elt1.Less(elt2)) - require.True(elt2.Less(elt1)) } diff --git a/utils/set/sampleable_set.go b/utils/set/sampleable_set.go index 0d22f40fbf8a..8c4c02461bd9 100644 --- a/utils/set/sampleable_set.go +++ b/utils/set/sampleable_set.go @@ -176,7 +176,7 @@ func (s *SampleableSet[_]) MarshalJSON() ([]byte, error) { } } // Sort for determinism - 
utils.SortBytes(elementBytes) + slices.SortFunc(elementBytes, bytes.Compare) // Build the JSON var ( diff --git a/utils/set/set.go b/utils/set/set.go index 29eb8fe11bd4..41d8bb712f10 100644 --- a/utils/set/set.go +++ b/utils/set/set.go @@ -9,6 +9,7 @@ import ( stdjson "encoding/json" "golang.org/x/exp/maps" + "golang.org/x/exp/slices" "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/json" @@ -118,27 +119,6 @@ func (s Set[T]) List() []T { return maps.Keys(s) } -// CappedList returns a list of length at most [size]. -// Size should be >= 0. If size < 0, returns nil. -func (s Set[T]) CappedList(size int) []T { - if size < 0 { - return nil - } - if l := s.Len(); l < size { - size = l - } - i := 0 - elts := make([]T, size) - for elt := range s { - if i >= size { - break - } - elts[i] = elt - i++ - } - return elts -} - // Equals returns true if the sets contain the same elements func (s Set[T]) Equals(other Set[T]) bool { return maps.Equal(s, other) @@ -182,7 +162,7 @@ func (s Set[_]) MarshalJSON() ([]byte, error) { i++ } // Sort for determinism - utils.SortBytes(eltBytes) + slices.SortFunc(eltBytes, bytes.Compare) // Build the JSON var ( @@ -205,7 +185,7 @@ func (s Set[_]) MarshalJSON() ([]byte, error) { return jsonBuf.Bytes(), errs.Err } -// Returns an element. If the set is empty, returns false +// Returns a random element. 
If the set is empty, returns false func (s *Set[T]) Peek() (T, bool) { for elt := range *s { return elt, true diff --git a/utils/set/set_test.go b/utils/set/set_test.go index bcba36944adf..4e0a3d1fa3ed 100644 --- a/utils/set/set_test.go +++ b/utils/set/set_test.go @@ -87,35 +87,6 @@ func TestOf(t *testing.T) { } } -func TestSetCappedList(t *testing.T) { - require := require.New(t) - s := Set[int]{} - - id := 0 - - require.Empty(s.CappedList(0)) - - s.Add(id) - - require.Empty(s.CappedList(0)) - require.Len(s.CappedList(1), 1) - require.Equal(s.CappedList(1)[0], id) - require.Len(s.CappedList(2), 1) - require.Equal(s.CappedList(2)[0], id) - - id2 := 1 - s.Add(id2) - - require.Empty(s.CappedList(0)) - require.Len(s.CappedList(1), 1) - require.Len(s.CappedList(2), 2) - require.Len(s.CappedList(3), 2) - gotList := s.CappedList(2) - require.Contains(gotList, id) - require.Contains(gotList, id2) - require.NotEqual(gotList[0], gotList[1]) -} - func TestSetClear(t *testing.T) { require := require.New(t) diff --git a/utils/sorting.go b/utils/sorting.go index 74f24abeb69f..cbb982a7f63b 100644 --- a/utils/sorting.go +++ b/utils/sorting.go @@ -12,32 +12,23 @@ import ( "github.com/ava-labs/avalanchego/utils/hashing" ) -// TODO can we handle sorting where the Less function relies on a codec? +// TODO can we handle sorting where the Compare function relies on a codec? type Sortable[T any] interface { - Less(T) bool + Compare(T) int } // Sorts the elements of [s]. func Sort[T Sortable[T]](s []T) { - slices.SortFunc(s, T.Less) + slices.SortFunc(s, T.Compare) } // Sorts the elements of [s] based on their hashes. func SortByHash[T ~[]byte](s []T) { - slices.SortFunc(s, func(i, j T) bool { + slices.SortFunc(s, func(i, j T) int { iHash := hashing.ComputeHash256(i) jHash := hashing.ComputeHash256(j) - return bytes.Compare(iHash, jHash) == -1 - }) -} - -// Sorts a 2D byte slice. -// Each byte slice is not sorted internally; the byte slices are sorted relative -// to one another. 
-func SortBytes[T ~[]byte](s []T) { - slices.SortFunc(s, func(i, j T) bool { - return bytes.Compare(i, j) == -1 + return bytes.Compare(iHash, jHash) }) } @@ -54,7 +45,7 @@ func IsSortedBytes[T ~[]byte](s []T) bool { // Returns true iff the elements in [s] are unique and sorted. func IsSortedAndUnique[T Sortable[T]](s []T) bool { for i := 0; i < len(s)-1; i++ { - if !s[i].Less(s[i+1]) { + if s[i].Compare(s[i+1]) >= 0 { return false } } @@ -87,3 +78,21 @@ func IsSortedAndUniqueByHash[T ~[]byte](s []T) bool { } return true } + +// Compare returns +// +// -1 if x is less than y, +// 0 if x equals y, +// 1 if x is greater than y. +// +// TODO: Remove after updating to go1.21. +func Compare[T constraints.Ordered](x, y T) int { + switch { + case x < y: + return -1 + case x > y: + return 1 + default: + return 0 + } +} diff --git a/utils/sorting_test.go b/utils/sorting_test.go index 464959dd9588..019834907686 100644 --- a/utils/sorting_test.go +++ b/utils/sorting_test.go @@ -4,9 +4,7 @@ package utils import ( - "math/rand" "testing" - "time" "github.com/stretchr/testify/require" ) @@ -15,8 +13,15 @@ var _ Sortable[sortable] = sortable(0) type sortable int -func (s sortable) Less(other sortable) bool { - return s < other +func (s sortable) Compare(other sortable) int { + switch { + case s < other: + return -1 + case s > other: + return 1 + default: + return 0 + } } func TestSortSliceSortable(t *testing.T) { @@ -59,23 +64,6 @@ func TestSortSliceSortable(t *testing.T) { require.Equal([]sortable{1, 2, 3}, s) } -func TestSortBytesIsSortedBytes(t *testing.T) { - require := require.New(t) - - seed := time.Now().UnixNano() - t.Log("Seed: ", seed) - rand := rand.New(rand.NewSource(seed)) //#nosec G404 - - slices := make([][]byte, 1024) - for j := 0; j < len(slices); j++ { - slices[j] = make([]byte, 32) - _, _ = rand.Read(slices[j]) - } - require.False(IsSortedBytes(slices)) - SortBytes(slices) - require.True(IsSortedBytes(slices)) -} - func TestIsSortedAndUniqueSortable(t 
*testing.T) { require := require.New(t) diff --git a/version/compatibility.json b/version/compatibility.json index c3b35ed515ec..d34dfb1a5a28 100644 --- a/version/compatibility.json +++ b/version/compatibility.json @@ -1,7 +1,8 @@ { "30": [ "v1.10.15", - "v1.10.16" + "v1.10.16", + "v1.10.17" ], "29": [ "v1.10.13", diff --git a/version/constants.go b/version/constants.go index a9bc3d2a6f91..f7c27f641395 100644 --- a/version/constants.go +++ b/version/constants.go @@ -22,7 +22,7 @@ var ( Current = &Semantic{ Major: 1, Minor: 10, - Patch: 16, + Patch: 17, } CurrentApp = &Application{ Major: Current.Major, diff --git a/vms/avm/block/executor/manager.go b/vms/avm/block/executor/manager.go index 48eea701bbd9..4ee6c37046ec 100644 --- a/vms/avm/block/executor/manager.go +++ b/vms/avm/block/executor/manager.go @@ -43,8 +43,8 @@ type Manager interface { // preferred state. This should *not* be used to verify transactions in a block. VerifyTx(tx *txs.Tx) error - // VerifyUniqueInputs verifies that the inputs are not duplicated in the - // provided blk or any of its ancestors pinned in memory. + // VerifyUniqueInputs returns nil iff no blocks in the inclusive + // ancestry of [blkID] consume an input in [inputs]. 
VerifyUniqueInputs(blkID ids.ID, inputs set.Set[ids.ID]) error } diff --git a/vms/avm/genesis.go b/vms/avm/genesis.go index 506d2465d691..20ed5c6ba474 100644 --- a/vms/avm/genesis.go +++ b/vms/avm/genesis.go @@ -19,6 +19,6 @@ type GenesisAsset struct { txs.CreateAssetTx `serialize:"true"` } -func (g *GenesisAsset) Less(other *GenesisAsset) bool { - return g.Alias < other.Alias +func (g *GenesisAsset) Compare(other *GenesisAsset) int { + return utils.Compare(g.Alias, other.Alias) } diff --git a/vms/avm/genesis_test.go b/vms/avm/genesis_test.go index 10c7aac40295..8c26e2a13dae 100644 --- a/vms/avm/genesis_test.go +++ b/vms/avm/genesis_test.go @@ -4,24 +4,39 @@ package avm import ( + "fmt" "testing" "github.com/stretchr/testify/require" ) -func TestGenesisAssetLess(t *testing.T) { - require := require.New(t) - - var g1, g2 GenesisAsset - require.False(g1.Less(&g2)) - require.False(g2.Less(&g1)) - - g1 = GenesisAsset{ - Alias: "a", +func TestGenesisAssetCompare(t *testing.T) { + tests := []struct { + a *GenesisAsset + b *GenesisAsset + expected int + }{ + { + a: &GenesisAsset{}, + b: &GenesisAsset{}, + expected: 0, + }, + { + a: &GenesisAsset{ + Alias: "a", + }, + b: &GenesisAsset{ + Alias: "aa", + }, + expected: -1, + }, } - g2 = GenesisAsset{ - Alias: "aa", + for _, test := range tests { + t.Run(fmt.Sprintf("%s_%s_%d", test.a.Alias, test.b.Alias, test.expected), func(t *testing.T) { + require := require.New(t) + + require.Equal(test.expected, test.a.Compare(test.b)) + require.Equal(-test.expected, test.b.Compare(test.a)) + }) } - require.True(g1.Less(&g2)) - require.False(g2.Less(&g1)) } diff --git a/vms/avm/txs/initial_state.go b/vms/avm/txs/initial_state.go index a50c88c0a294..5df1bc3fd8b6 100644 --- a/vms/avm/txs/initial_state.go +++ b/vms/avm/txs/initial_state.go @@ -59,8 +59,8 @@ func (is *InitialState) Verify(c codec.Manager, numFxs int) error { return nil } -func (is *InitialState) Less(other *InitialState) bool { - return is.FxIndex < other.FxIndex +func (is 
*InitialState) Compare(other *InitialState) int { + return utils.Compare(is.FxIndex, other.FxIndex) } func (is *InitialState) Sort(c codec.Manager) { diff --git a/vms/avm/txs/initial_state_test.go b/vms/avm/txs/initial_state_test.go index f54f54b3b9ed..15cf6eb284e1 100644 --- a/vms/avm/txs/initial_state_test.go +++ b/vms/avm/txs/initial_state_test.go @@ -5,6 +5,7 @@ package txs import ( "errors" + "fmt" "testing" "github.com/stretchr/testify/require" @@ -159,14 +160,31 @@ func TestInitialStateVerifyUnsortedOutputs(t *testing.T) { require.NoError(is.Verify(m, numFxs)) } -func TestInitialStateLess(t *testing.T) { - require := require.New(t) - - var is1, is2 InitialState - require.False(is1.Less(&is2)) - require.False(is2.Less(&is1)) +func TestInitialStateCompare(t *testing.T) { + tests := []struct { + a *InitialState + b *InitialState + expected int + }{ + { + a: &InitialState{}, + b: &InitialState{}, + expected: 0, + }, + { + a: &InitialState{ + FxIndex: 1, + }, + b: &InitialState{}, + expected: 1, + }, + } + for _, test := range tests { + t.Run(fmt.Sprintf("%d_%d_%d", test.a.FxIndex, test.b.FxIndex, test.expected), func(t *testing.T) { + require := require.New(t) - is1.FxIndex = 1 - require.False(is1.Less(&is2)) - require.True(is2.Less(&is1)) + require.Equal(test.expected, test.a.Compare(test.b)) + require.Equal(-test.expected, test.b.Compare(test.a)) + }) + } } diff --git a/vms/avm/txs/operation.go b/vms/avm/txs/operation.go index 4b4cb27aa46b..ded7671e618a 100644 --- a/vms/avm/txs/operation.go +++ b/vms/avm/txs/operation.go @@ -48,16 +48,16 @@ type operationAndCodec struct { codec codec.Manager } -func (o *operationAndCodec) Less(other *operationAndCodec) bool { +func (o *operationAndCodec) Compare(other *operationAndCodec) int { oBytes, err := o.codec.Marshal(CodecVersion, o.op) if err != nil { - return false + return 0 } otherBytes, err := o.codec.Marshal(CodecVersion, other.op) if err != nil { - return false + return 0 } - return bytes.Compare(oBytes, 
otherBytes) == -1 + return bytes.Compare(oBytes, otherBytes) } func SortOperations(ops []*Operation, c codec.Manager) { diff --git a/vms/components/avax/transferables.go b/vms/components/avax/transferables.go index 3134ac68ff4a..44bafeef8d6c 100644 --- a/vms/components/avax/transferables.go +++ b/vms/components/avax/transferables.go @@ -161,8 +161,8 @@ func (in *TransferableInput) Verify() error { } } -func (in *TransferableInput) Less(other *TransferableInput) bool { - return in.UTXOID.Less(&other.UTXOID) +func (in *TransferableInput) Compare(other *TransferableInput) int { + return in.UTXOID.Compare(&other.UTXOID) } type innerSortTransferableInputsWithSigners struct { diff --git a/vms/components/avax/utxo_id.go b/vms/components/avax/utxo_id.go index 26fe8b83fb98..5b81f2f091a9 100644 --- a/vms/components/avax/utxo_id.go +++ b/vms/components/avax/utxo_id.go @@ -91,16 +91,11 @@ func (utxo *UTXOID) Verify() error { } } -func (utxo *UTXOID) Less(other *UTXOID) bool { +func (utxo *UTXOID) Compare(other *UTXOID) int { utxoID, utxoIndex := utxo.InputSource() otherID, otherIndex := other.InputSource() - - switch bytes.Compare(utxoID[:], otherID[:]) { - case -1: - return true - case 0: - return utxoIndex < otherIndex - default: - return false + if txIDComp := bytes.Compare(utxoID[:], otherID[:]); txIDComp != 0 { + return txIDComp } + return utils.Compare(utxoIndex, otherIndex) } diff --git a/vms/components/avax/utxo_id_test.go b/vms/components/avax/utxo_id_test.go index 5652fa1afa69..e21ef620428b 100644 --- a/vms/components/avax/utxo_id_test.go +++ b/vms/components/avax/utxo_id_test.go @@ -50,56 +50,43 @@ func TestUTXOID(t *testing.T) { require.Equal(utxoID.InputID(), newUTXOID.InputID()) } -func TestUTXOIDLess(t *testing.T) { +func TestUTXOIDCompare(t *testing.T) { type test struct { name string id1 UTXOID id2 UTXOID - expected bool + expected int } tests := []*test{ { name: "same", id1: UTXOID{}, id2: UTXOID{}, - expected: false, + expected: 0, }, { - name: "first id 
smaller", + name: "id smaller", id1: UTXOID{}, id2: UTXOID{ TxID: ids.ID{1}, }, - expected: true, + expected: -1, }, { - name: "first id larger", - id1: UTXOID{ - TxID: ids.ID{1}, - }, - id2: UTXOID{}, - expected: false, - }, - { - name: "first index smaller", + name: "index smaller", id1: UTXOID{}, id2: UTXOID{ OutputIndex: 1, }, - expected: true, - }, - { - name: "first index larger", - id1: UTXOID{ - OutputIndex: 1, - }, - id2: UTXOID{}, - expected: false, + expected: -1, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - require.Equal(t, tt.expected, tt.id1.Less(&tt.id2)) + require := require.New(t) + + require.Equal(tt.expected, tt.id1.Compare(&tt.id2)) + require.Equal(-tt.expected, tt.id2.Compare(&tt.id1)) }) } } diff --git a/vms/platformvm/api/static_service.go b/vms/platformvm/api/static_service.go index ea1cfcb2d63f..7f37b247b34b 100644 --- a/vms/platformvm/api/static_service.go +++ b/vms/platformvm/api/static_service.go @@ -50,30 +50,25 @@ type UTXO struct { } // TODO can we define this on *UTXO? 
-func (utxo UTXO) Less(other UTXO) bool { - if utxo.Locktime < other.Locktime { - return true - } else if utxo.Locktime > other.Locktime { - return false +func (utxo UTXO) Compare(other UTXO) int { + if locktimeCmp := utils.Compare(utxo.Locktime, other.Locktime); locktimeCmp != 0 { + return locktimeCmp } - - if utxo.Amount < other.Amount { - return true - } else if utxo.Amount > other.Amount { - return false + if amountCmp := utils.Compare(utxo.Amount, other.Amount); amountCmp != 0 { + return amountCmp } utxoAddr, err := bech32ToID(utxo.Address) if err != nil { - return false + return 0 } otherAddr, err := bech32ToID(other.Address) if err != nil { - return false + return 0 } - return utxoAddr.Less(otherAddr) + return utxoAddr.Compare(otherAddr) } // TODO: Refactor APIStaker, APIValidators and merge them together for diff --git a/vms/platformvm/api/static_service_test.go b/vms/platformvm/api/static_service_test.go index 8bcbf4e766db..402a5fc7c766 100644 --- a/vms/platformvm/api/static_service_test.go +++ b/vms/platformvm/api/static_service_test.go @@ -237,7 +237,7 @@ func TestBuildGenesisReturnsSortedValidators(t *testing.T) { require.Len(validators, 3) } -func TestUTXOLess(t *testing.T) { +func TestUTXOCompare(t *testing.T) { var ( smallerAddr = ids.ShortID{} largerAddr = ids.ShortID{1} @@ -251,72 +251,49 @@ func TestUTXOLess(t *testing.T) { name string utxo1 UTXO utxo2 UTXO - expected bool + expected int } tests := []test{ { name: "both empty", utxo1: UTXO{}, utxo2: UTXO{}, - expected: false, + expected: 0, }, { - name: "first locktime smaller", + name: "locktime smaller", utxo1: UTXO{}, utxo2: UTXO{ Locktime: 1, }, - expected: true, + expected: -1, }, { - name: "first locktime larger", - utxo1: UTXO{ - Locktime: 1, - }, - utxo2: UTXO{}, - expected: false, - }, - { - name: "first amount smaller", + name: "amount smaller", utxo1: UTXO{}, utxo2: UTXO{ Amount: 1, }, - expected: true, + expected: -1, }, { - name: "first amount larger", - utxo1: UTXO{ - Amount: 1, - }, 
- utxo2: UTXO{}, - expected: false, - }, - { - name: "first address smaller", + name: "address smaller", utxo1: UTXO{ Address: smallerAddrStr, }, utxo2: UTXO{ Address: largerAddrStr, }, - expected: true, - }, - { - name: "first address larger", - utxo1: UTXO{ - Address: largerAddrStr, - }, - utxo2: UTXO{ - Address: smallerAddrStr, - }, - expected: false, + expected: -1, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - require.Equal(t, tt.expected, tt.utxo1.Less(tt.utxo2)) + require := require.New(t) + + require.Equal(tt.expected, tt.utxo1.Compare(tt.utxo2)) + require.Equal(-tt.expected, tt.utxo2.Compare(tt.utxo1)) }) } } diff --git a/vms/platformvm/block/builder/builder.go b/vms/platformvm/block/builder/builder.go index b8476e19b032..79c05992bf53 100644 --- a/vms/platformvm/block/builder/builder.go +++ b/vms/platformvm/block/builder/builder.go @@ -126,28 +126,11 @@ func (b *builder) buildBlock() (block.Block, error) { return nil, fmt.Errorf("%w: %s", state.ErrMissingParentState, preferredID) } - timestamp := b.txExecutorBackend.Clk.Time() - if parentTime := preferred.Timestamp(); parentTime.After(timestamp) { - timestamp = parentTime - } - // [timestamp] = max(now, parentTime) - - nextStakerChangeTime, err := txexecutor.GetNextStakerChangeTime(preferredState) + timestamp, timeWasCapped, err := txexecutor.NextBlockTime(preferredState, b.txExecutorBackend.Clk) if err != nil { return nil, fmt.Errorf("could not calculate next staker change time: %w", err) } - // timeWasCapped means that [timestamp] was reduced to - // [nextStakerChangeTime]. It is used as a flag for [buildApricotBlock] to - // be willing to issue an advanceTimeTx. It is also used as a flag for - // [buildBanffBlock] to force the issuance of an empty block to advance - // the time forward; if there are no available transactions. 
- timeWasCapped := !timestamp.Before(nextStakerChangeTime) - if timeWasCapped { - timestamp = nextStakerChangeTime - } - // [timestamp] = min(max(now, parentTime), nextStakerChangeTime) - return buildBlock( b, preferredID, @@ -254,6 +237,7 @@ func buildBlock( parentID, height, rewardValidatorTx, + []*txs.Tx{}, // TODO: Populate with StandardBlock txs ) } diff --git a/vms/platformvm/block/builder/builder_test.go b/vms/platformvm/block/builder/builder_test.go index 04abf5d65b71..434d5b7b2552 100644 --- a/vms/platformvm/block/builder/builder_test.go +++ b/vms/platformvm/block/builder/builder_test.go @@ -15,26 +15,22 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/timer/mockable" - "github.com/ava-labs/avalanchego/vms/components/avax" - "github.com/ava-labs/avalanchego/vms/components/verify" + "github.com/ava-labs/avalanchego/utils/units" "github.com/ava-labs/avalanchego/vms/platformvm/block" + "github.com/ava-labs/avalanchego/vms/platformvm/reward" "github.com/ava-labs/avalanchego/vms/platformvm/state" "github.com/ava-labs/avalanchego/vms/platformvm/txs" "github.com/ava-labs/avalanchego/vms/platformvm/txs/mempool" - "github.com/ava-labs/avalanchego/vms/secp256k1fx" blockexecutor "github.com/ava-labs/avalanchego/vms/platformvm/block/executor" txbuilder "github.com/ava-labs/avalanchego/vms/platformvm/txs/builder" txexecutor "github.com/ava-labs/avalanchego/vms/platformvm/txs/executor" ) -var errTestingDropped = errors.New("testing dropped") - -// shows that a locally generated CreateChainTx can be added to mempool and then -// removed by inclusion in a block func TestBlockBuilderAddLocalTx(t *testing.T) { require := require.New(t) @@ -44,17 +40,24 @@ func TestBlockBuilderAddLocalTx(t *testing.T) { 
require.NoError(shutdownEnvironment(env)) }() - // add a tx to it - tx := getValidTx(env.txBuilder, t) + // Create a valid transaction + tx, err := env.txBuilder.NewCreateChainTx( + testSubnet1.ID(), + nil, + constants.AVMID, + nil, + "chain name", + []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, + ids.ShortEmpty, + ) + require.NoError(err) txID := tx.ID() - env.sender.SendAppGossipF = func(context.Context, []byte) error { - return nil - } + // Issue the transaction require.NoError(env.network.IssueTx(context.Background(), tx)) require.True(env.mempool.Has(txID)) - // show that build block include that tx and removes it from mempool + // [BuildBlock] should build a block with the transaction blkIntf, err := env.Builder.BuildBlock(context.Background()) require.NoError(err) @@ -63,7 +66,9 @@ func TestBlockBuilderAddLocalTx(t *testing.T) { require.Len(blk.Txs(), 1) require.Equal(txID, blk.Txs()[0].ID()) + // Mempool should not contain the transaction or have marked it as dropped require.False(env.mempool.Has(txID)) + require.NoError(env.mempool.GetDropReason(txID)) } func TestPreviouslyDroppedTxsCanBeReAddedToMempool(t *testing.T) { @@ -75,45 +80,67 @@ func TestPreviouslyDroppedTxsCanBeReAddedToMempool(t *testing.T) { require.NoError(shutdownEnvironment(env)) }() - // create candidate tx - tx := getValidTx(env.txBuilder, t) + // Create a valid transaction + tx, err := env.txBuilder.NewCreateChainTx( + testSubnet1.ID(), + nil, + constants.AVMID, + nil, + "chain name", + []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, + ids.ShortEmpty, + ) + require.NoError(err) txID := tx.ID() - // A tx simply added to mempool is obviously not marked as dropped - require.NoError(env.mempool.Add(tx)) + // Issue the transaction + require.NoError(env.network.IssueTx(context.Background(), tx)) require.True(env.mempool.Has(txID)) + + // Transaction should not be marked as dropped when added to the mempool reason := 
env.mempool.GetDropReason(txID) require.NoError(reason) - // When a tx is marked as dropped, it is still available to allow re-issuance + // Mark the transaction as dropped + errTestingDropped := errors.New("testing dropped") env.mempool.MarkDropped(txID, errTestingDropped) - require.True(env.mempool.Has(txID)) // still available reason = env.mempool.GetDropReason(txID) require.ErrorIs(reason, errTestingDropped) - // A previously dropped tx, popped then re-added to mempool, - // is not dropped anymore + // Dropped transactions should still be in the mempool + require.True(env.mempool.Has(txID)) + + // Remove the transaction from the mempool env.mempool.Remove([]*txs.Tx{tx}) - require.NoError(env.mempool.Add(tx)) + // Issue the transaction again + require.NoError(env.network.IssueTx(context.Background(), tx)) require.True(env.mempool.Has(txID)) + + // When issued again, the mempool should not be marked as dropped reason = env.mempool.GetDropReason(txID) require.NoError(reason) } func TestNoErrorOnUnexpectedSetPreferenceDuringBootstrapping(t *testing.T) { + require := require.New(t) + env := newEnvironment(t) env.ctx.Lock.Lock() env.isBootstrapped.Set(false) - env.ctx.Log = logging.NoWarn{} defer func() { - require.NoError(t, shutdownEnvironment(env)) + require.NoError(shutdownEnvironment(env)) }() - require.False(t, env.blkManager.SetPreference(ids.GenerateTestID())) // should not panic + require.True(env.blkManager.SetPreference(ids.GenerateTestID())) // should not panic } func TestGetNextStakerToReward(t *testing.T) { + var ( + now = time.Now() + txID = ids.GenerateTestID() + ) + type test struct { name string timestamp time.Time @@ -123,10 +150,6 @@ func TestGetNextStakerToReward(t *testing.T) { expectedErr error } - var ( - now = time.Now() - txID = ids.GenerateTestID() - ) tests := []test{ { name: "end of time", @@ -295,49 +318,35 @@ func TestGetNextStakerToReward(t *testing.T) { } func TestBuildBlock(t *testing.T) { + env := newEnvironment(t) + 
env.ctx.Lock.Lock() + defer func() { + require.NoError(t, shutdownEnvironment(env)) + }() + var ( - parentID = ids.GenerateTestID() - height = uint64(1337) - output = &avax.TransferableOutput{ - Asset: avax.Asset{ID: ids.GenerateTestID()}, - Out: &secp256k1fx.TransferOutput{ - OutputOwners: secp256k1fx.OutputOwners{ - Addrs: []ids.ShortID{ids.GenerateTestShortID()}, - }, - }, - } - now = time.Now() + now = env.backend.Clk.Time() + parentID = ids.GenerateTestID() + height = uint64(1337) parentTimestamp = now.Add(-2 * time.Second) - transactions = []*txs.Tx{{ - Unsigned: &txs.AddValidatorTx{ - BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ - Ins: []*avax.TransferableInput{{ - Asset: avax.Asset{ID: ids.GenerateTestID()}, - In: &secp256k1fx.TransferInput{ - Input: secp256k1fx.Input{ - SigIndices: []uint32{0}, - }, - }, - }}, - Outs: []*avax.TransferableOutput{output}, - }}, - Validator: txs.Validator{ - // Shouldn't be dropped - Start: uint64(now.Add(2 * txexecutor.SyncBound).Unix()), - }, - StakeOuts: []*avax.TransferableOutput{output}, - RewardsOwner: &secp256k1fx.OutputOwners{ - Addrs: []ids.ShortID{ids.GenerateTestShortID()}, - }, - }, - Creds: []verify.Verifiable{ - &secp256k1fx.Credential{ - Sigs: [][secp256k1.SignatureLen]byte{{1, 3, 3, 7}}, - }, - }, - }} - stakerTxID = ids.GenerateTestID() + stakerTxID = ids.GenerateTestID() + + defaultValidatorStake = 100 * units.MilliAvax + validatorStartTime = now.Add(2 * txexecutor.SyncBound) + validatorEndTime = validatorStartTime.Add(360 * 24 * time.Hour) + ) + + tx, err := env.txBuilder.NewAddValidatorTx( + defaultValidatorStake, + uint64(validatorStartTime.Unix()), + uint64(validatorEndTime.Unix()), + ids.GenerateTestNodeID(), + preFundedKeys[0].PublicKey().Address(), + reward.PercentDenominator, + []*secp256k1.PrivateKey{preFundedKeys[0]}, + preFundedKeys[0].PublicKey().Address(), ) + require.NoError(t, err) type test struct { name string @@ -357,7 +366,7 @@ func TestBuildBlock(t *testing.T) { // The tx builder should be 
asked to build a reward tx txBuilder := txbuilder.NewMockBuilder(ctrl) - txBuilder.EXPECT().NewRewardValidatorTx(stakerTxID).Return(transactions[0], nil) + txBuilder.EXPECT().NewRewardValidatorTx(stakerTxID).Return(tx, nil) return &builder{ Mempool: mempool, @@ -388,7 +397,8 @@ func TestBuildBlock(t *testing.T) { parentTimestamp, parentID, height, - transactions[0], + tx, + []*txs.Tx{}, ) require.NoError(err) return expectedBlk @@ -403,7 +413,7 @@ func TestBuildBlock(t *testing.T) { // There are txs. mempool.EXPECT().DropExpiredStakerTxs(gomock.Any()).Return([]ids.ID{}) mempool.EXPECT().HasTxs().Return(true) - mempool.EXPECT().PeekTxs(targetBlockSize).Return(transactions) + mempool.EXPECT().PeekTxs(targetBlockSize).Return([]*txs.Tx{tx}) return &builder{ Mempool: mempool, } @@ -435,7 +445,7 @@ func TestBuildBlock(t *testing.T) { parentTimestamp, parentID, height, - transactions, + []*txs.Tx{tx}, ) require.NoError(err) return expectedBlk @@ -553,7 +563,7 @@ func TestBuildBlock(t *testing.T) { // There is a tx. mempool.EXPECT().DropExpiredStakerTxs(gomock.Any()).Return([]ids.ID{}) mempool.EXPECT().HasTxs().Return(true) - mempool.EXPECT().PeekTxs(targetBlockSize).Return([]*txs.Tx{transactions[0]}) + mempool.EXPECT().PeekTxs(targetBlockSize).Return([]*txs.Tx{tx}) clk := &mockable.Clock{} clk.Set(now) @@ -591,7 +601,7 @@ func TestBuildBlock(t *testing.T) { parentTimestamp, parentID, height, - []*txs.Tx{transactions[0]}, + []*txs.Tx{tx}, ) require.NoError(err) return expectedBlk @@ -607,7 +617,7 @@ func TestBuildBlock(t *testing.T) { // There is a staker tx. 
mempool.EXPECT().DropExpiredStakerTxs(gomock.Any()).Return([]ids.ID{}) mempool.EXPECT().HasTxs().Return(true) - mempool.EXPECT().PeekTxs(targetBlockSize).Return([]*txs.Tx{transactions[0]}) + mempool.EXPECT().PeekTxs(targetBlockSize).Return([]*txs.Tx{tx}) clk := &mockable.Clock{} clk.Set(now) @@ -645,7 +655,7 @@ func TestBuildBlock(t *testing.T) { parentTimestamp, parentID, height, - []*txs.Tx{transactions[0]}, + []*txs.Tx{tx}, ) require.NoError(err) return expectedBlk diff --git a/vms/platformvm/block/builder/helpers_test.go b/vms/platformvm/block/builder/helpers_test.go index de37d08ff0dd..36a6822ce3e2 100644 --- a/vms/platformvm/block/builder/helpers_test.go +++ b/vms/platformvm/block/builder/helpers_test.go @@ -437,17 +437,3 @@ func shutdownEnvironment(env *environment) error { env.baseDB.Close(), ) } - -func getValidTx(txBuilder txbuilder.Builder, t *testing.T) *txs.Tx { - tx, err := txBuilder.NewCreateChainTx( - testSubnet1.ID(), - nil, - constants.AVMID, - nil, - "chain name", - []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, - ids.ShortEmpty, - ) - require.NoError(t, err) - return tx -} diff --git a/vms/platformvm/block/executor/backend.go b/vms/platformvm/block/executor/backend.go index fd0e75d0f664..4d915047f560 100644 --- a/vms/platformvm/block/executor/backend.go +++ b/vms/platformvm/block/executor/backend.go @@ -4,15 +4,19 @@ package executor import ( + "errors" "time" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" + "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/vms/platformvm/block" "github.com/ava-labs/avalanchego/vms/platformvm/state" "github.com/ava-labs/avalanchego/vms/platformvm/txs/mempool" ) +var errConflictingParentTxs = errors.New("block contains a transaction that conflicts with a transaction in a parent block") + // Shared fields used by visitors. 
type backend struct { mempool.Mempool @@ -95,3 +99,28 @@ func (b *backend) getTimestamp(blkID ids.ID) time.Time { // so we just return the chain time. return b.state.GetTimestamp() } + +// verifyUniqueInputs returns nil iff no blocks in the inclusive +// ancestry of [blkID] consume an input in [inputs]. +func (b *backend) verifyUniqueInputs(blkID ids.ID, inputs set.Set[ids.ID]) error { + if inputs.Len() == 0 { + return nil + } + + // Check for conflicts in ancestors. + for { + state, ok := b.blkIDToState[blkID] + if !ok { + // The parent state isn't pinned in memory. + // This means the parent must be accepted already. + return nil + } + + if state.inputs.Overlaps(inputs) { + return errConflictingParentTxs + } + + blk := state.statelessBlock + blkID = blk.Parent() + } +} diff --git a/vms/platformvm/block/executor/manager.go b/vms/platformvm/block/executor/manager.go index 9af9cbce2c4a..ebecbf968e5f 100644 --- a/vms/platformvm/block/executor/manager.go +++ b/vms/platformvm/block/executor/manager.go @@ -107,9 +107,9 @@ func (m *manager) NewBlock(blk block.Block) snowman.Block { } } -func (m *manager) SetPreference(blockID ids.ID) (updated bool) { - updated = m.preferred == blockID - m.preferred = blockID +func (m *manager) SetPreference(blkID ids.ID) bool { + updated := m.preferred != blkID + m.preferred = blkID return updated } diff --git a/vms/platformvm/block/executor/manager_test.go b/vms/platformvm/block/executor/manager_test.go index 8ee784c4f9f1..ce887d987992 100644 --- a/vms/platformvm/block/executor/manager_test.go +++ b/vms/platformvm/block/executor/manager_test.go @@ -72,3 +72,18 @@ func TestManagerLastAccepted(t *testing.T) { require.Equal(t, lastAcceptedID, manager.LastAccepted()) } + +func TestManagerSetPreference(t *testing.T) { + require := require.New(t) + + initialPreference := ids.GenerateTestID() + manager := &manager{ + preferred: initialPreference, + } + require.False(manager.SetPreference(initialPreference)) + + newPreference := 
ids.GenerateTestID() + require.True(manager.SetPreference(newPreference)) + require.False(manager.SetPreference(newPreference)) + require.True(manager.SetPreference(initialPreference)) +} diff --git a/vms/platformvm/block/executor/proposal_block_test.go b/vms/platformvm/block/executor/proposal_block_test.go index b69708f8ca9f..77c36c08dec3 100644 --- a/vms/platformvm/block/executor/proposal_block_test.go +++ b/vms/platformvm/block/executor/proposal_block_test.go @@ -258,6 +258,7 @@ func TestBanffProposalBlockTimeVerification(t *testing.T) { parentID, banffParentBlk.Height(), blkTx, + []*txs.Tx{}, ) require.NoError(err) @@ -287,6 +288,7 @@ func TestBanffProposalBlockTimeVerification(t *testing.T) { parentID, banffParentBlk.Height()+1, blkTx, + []*txs.Tx{}, ) require.NoError(err) @@ -304,6 +306,7 @@ func TestBanffProposalBlockTimeVerification(t *testing.T) { parentID, banffParentBlk.Height()+1, blkTx, + []*txs.Tx{}, ) require.NoError(err) @@ -321,6 +324,7 @@ func TestBanffProposalBlockTimeVerification(t *testing.T) { parentID, banffParentBlk.Height()+1, blkTx, + []*txs.Tx{}, ) require.NoError(err) @@ -342,6 +346,7 @@ func TestBanffProposalBlockTimeVerification(t *testing.T) { parentID, banffParentBlk.Height()+1, invalidTx, + []*txs.Tx{}, ) require.NoError(err) @@ -357,6 +362,7 @@ func TestBanffProposalBlockTimeVerification(t *testing.T) { parentID, banffParentBlk.Height()+1, blkTx, + []*txs.Tx{}, ) require.NoError(err) @@ -373,6 +379,7 @@ func TestBanffProposalBlockTimeVerification(t *testing.T) { parentID, banffParentBlk.Height()+1, blkTx, + []*txs.Tx{}, ) require.NoError(err) @@ -662,6 +669,7 @@ func TestBanffProposalBlockUpdateStakers(t *testing.T) { parentBlk.ID(), parentBlk.Height()+1, s0RewardTx, + []*txs.Tx{}, ) require.NoError(err) @@ -819,6 +827,7 @@ func TestBanffProposalBlockRemoveSubnetValidator(t *testing.T) { parentBlk.ID(), parentBlk.Height()+1, s0RewardTx, + []*txs.Tx{}, ) require.NoError(err) propBlk := env.blkManager.NewBlock(statelessProposalBlock) 
@@ -931,6 +940,7 @@ func TestBanffProposalBlockTrackedSubnet(t *testing.T) { parentBlk.ID(), parentBlk.Height()+1, s0RewardTx, + []*txs.Tx{}, ) require.NoError(err) propBlk := env.blkManager.NewBlock(statelessProposalBlock) @@ -1017,6 +1027,7 @@ func TestBanffProposalBlockDelegatorStakerWeight(t *testing.T) { parentBlk.ID(), parentBlk.Height()+1, s0RewardTx, + []*txs.Tx{}, ) require.NoError(err) propBlk := env.blkManager.NewBlock(statelessProposalBlock) @@ -1108,6 +1119,7 @@ func TestBanffProposalBlockDelegatorStakerWeight(t *testing.T) { parentBlk.ID(), parentBlk.Height()+1, s0RewardTx, + []*txs.Tx{}, ) require.NoError(err) @@ -1198,6 +1210,7 @@ func TestBanffProposalBlockDelegatorStakers(t *testing.T) { parentBlk.ID(), parentBlk.Height()+1, s0RewardTx, + []*txs.Tx{}, ) require.NoError(err) propBlk := env.blkManager.NewBlock(statelessProposalBlock) @@ -1288,6 +1301,7 @@ func TestBanffProposalBlockDelegatorStakers(t *testing.T) { parentBlk.ID(), parentBlk.Height()+1, s0RewardTx, + []*txs.Tx{}, ) require.NoError(err) propBlk = env.blkManager.NewBlock(statelessProposalBlock) diff --git a/vms/platformvm/block/executor/rejector_test.go b/vms/platformvm/block/executor/rejector_test.go index 3ccd9c0d66b1..5e06a885fd5e 100644 --- a/vms/platformvm/block/executor/rejector_test.go +++ b/vms/platformvm/block/executor/rejector_test.go @@ -44,6 +44,7 @@ func TestRejectBlock(t *testing.T) { }, Creds: []verify.Verifiable{}, }, + []*txs.Tx{}, ) }, rejectFunc: func(r *rejector, b block.Block) error { diff --git a/vms/platformvm/block/executor/verifier.go b/vms/platformvm/block/executor/verifier.go index c4f25f992da8..abc2aa4e257c 100644 --- a/vms/platformvm/block/executor/verifier.go +++ b/vms/platformvm/block/executor/verifier.go @@ -9,7 +9,6 @@ import ( "github.com/ava-labs/avalanchego/chains/atomic" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/set" "github.com/ava-labs/avalanchego/vms/platformvm/block" 
"github.com/ava-labs/avalanchego/vms/platformvm/state" "github.com/ava-labs/avalanchego/vms/platformvm/status" @@ -26,7 +25,6 @@ var ( errIncorrectBlockHeight = errors.New("incorrect block height") errChildBlockEarlierThanParent = errors.New("proposed timestamp before current chain time") errConflictingBatchTxs = errors.New("block contains conflicting transactions") - errConflictingParentTxs = errors.New("block contains a transaction that conflicts with a transaction in a parent block") errOptionBlockTimestampNotMatchingParent = errors.New("option block proposed timestamp not matching parent block one") ) @@ -203,7 +201,7 @@ func (v *verifier) ApricotAtomicBlock(b *block.ApricotAtomicBlock) error { atomicExecutor.OnAccept.AddTx(b.Tx, status.Committed) - if err := v.verifyUniqueInputs(b, atomicExecutor.Inputs); err != nil { + if err := v.verifyUniqueInputs(b.Parent(), atomicExecutor.Inputs); err != nil { return err } @@ -441,7 +439,7 @@ func (v *verifier) standardBlock( } } - if err := v.verifyUniqueInputs(b, blkState.inputs); err != nil { + if err := v.verifyUniqueInputs(b.Parent(), blkState.inputs); err != nil { return err } @@ -461,28 +459,3 @@ func (v *verifier) standardBlock( v.Mempool.Remove(b.Transactions) return nil } - -// verifyUniqueInputs verifies that the inputs of the given block are not -// duplicated in any of the parent blocks pinned in memory. -func (v *verifier) verifyUniqueInputs(block block.Block, inputs set.Set[ids.ID]) error { - if inputs.Len() == 0 { - return nil - } - - // Check for conflicts in ancestors. - for { - parentID := block.Parent() - parentState, ok := v.blkIDToState[parentID] - if !ok { - // The parent state isn't pinned in memory. - // This means the parent must be accepted already. 
- return nil - } - - if parentState.inputs.Overlaps(inputs) { - return errConflictingParentTxs - } - - block = parentState.statelessBlock - } -} diff --git a/vms/platformvm/block/parse_test.go b/vms/platformvm/block/parse_test.go index 906824effd96..799310aa2f51 100644 --- a/vms/platformvm/block/parse_test.go +++ b/vms/platformvm/block/parse_test.go @@ -25,12 +25,12 @@ func TestStandardBlocks(t *testing.T) { blkTimestamp := time.Now() parentID := ids.ID{'p', 'a', 'r', 'e', 'n', 't', 'I', 'D'} height := uint64(2022) - txs, err := testDecisionTxs() + decisionTxs, err := testDecisionTxs() require.NoError(err) for _, cdc := range []codec.Manager{Codec, GenesisCodec} { // build block - apricotStandardBlk, err := NewApricotStandardBlock(parentID, height, txs) + apricotStandardBlk, err := NewApricotStandardBlock(parentID, height, decisionTxs) require.NoError(err) // parse block @@ -44,10 +44,10 @@ func TestStandardBlocks(t *testing.T) { require.Equal(apricotStandardBlk.Height(), parsed.Height()) require.IsType(&ApricotStandardBlock{}, parsed) - require.Equal(txs, parsed.Txs()) + require.Equal(decisionTxs, parsed.Txs()) // check that banff standard block can be built and parsed - banffStandardBlk, err := NewBanffStandardBlock(blkTimestamp, parentID, height, txs) + banffStandardBlk, err := NewBanffStandardBlock(blkTimestamp, parentID, height, decisionTxs) require.NoError(err) // parse block @@ -61,7 +61,7 @@ func TestStandardBlocks(t *testing.T) { require.Equal(banffStandardBlk.Height(), parsed.Height()) require.IsType(&BanffStandardBlock{}, parsed) parsedBanffStandardBlk := parsed.(*BanffStandardBlock) - require.Equal(txs, parsedBanffStandardBlk.Txs()) + require.Equal(decisionTxs, parsedBanffStandardBlk.Txs()) // timestamp check for banff blocks only require.Equal(banffStandardBlk.Timestamp(), parsedBanffStandardBlk.Timestamp()) @@ -77,7 +77,9 @@ func TestProposalBlocks(t *testing.T) { blkTimestamp := time.Now() parentID := ids.ID{'p', 'a', 'r', 'e', 'n', 't', 'I', 'D'} 
height := uint64(2022) - tx, err := testProposalTx() + proposalTx, err := testProposalTx() + require.NoError(err) + decisionTxs, err := testDecisionTxs() require.NoError(err) for _, cdc := range []codec.Manager{Codec, GenesisCodec} { @@ -85,7 +87,7 @@ func TestProposalBlocks(t *testing.T) { apricotProposalBlk, err := NewApricotProposalBlock( parentID, height, - tx, + proposalTx, ) require.NoError(err) @@ -101,14 +103,15 @@ func TestProposalBlocks(t *testing.T) { require.IsType(&ApricotProposalBlock{}, parsed) parsedApricotProposalBlk := parsed.(*ApricotProposalBlock) - require.Equal([]*txs.Tx{tx}, parsedApricotProposalBlk.Txs()) + require.Equal([]*txs.Tx{proposalTx}, parsedApricotProposalBlk.Txs()) // check that banff proposal block can be built and parsed banffProposalBlk, err := NewBanffProposalBlock( blkTimestamp, parentID, height, - tx, + proposalTx, + []*txs.Tx{}, ) require.NoError(err) @@ -119,17 +122,47 @@ func TestProposalBlocks(t *testing.T) { // compare content require.Equal(banffProposalBlk.ID(), parsed.ID()) require.Equal(banffProposalBlk.Bytes(), parsed.Bytes()) - require.Equal(banffProposalBlk.Parent(), banffProposalBlk.Parent()) + require.Equal(banffProposalBlk.Parent(), parsed.Parent()) require.Equal(banffProposalBlk.Height(), parsed.Height()) require.IsType(&BanffProposalBlock{}, parsed) parsedBanffProposalBlk := parsed.(*BanffProposalBlock) - require.Equal([]*txs.Tx{tx}, parsedBanffProposalBlk.Txs()) + require.Equal([]*txs.Tx{proposalTx}, parsedBanffProposalBlk.Txs()) // timestamp check for banff blocks only require.Equal(banffProposalBlk.Timestamp(), parsedBanffProposalBlk.Timestamp()) // backward compatibility check require.Equal(parsedApricotProposalBlk.Txs(), parsedBanffProposalBlk.Txs()) + + // check that banff proposal block with decisionTxs can be built and parsed + banffProposalBlkWithDecisionTxs, err := NewBanffProposalBlock( + blkTimestamp, + parentID, + height, + proposalTx, + decisionTxs, + ) + require.NoError(err) + + // parse block + 
parsed, err = Parse(cdc, banffProposalBlkWithDecisionTxs.Bytes()) + require.NoError(err) + + // compare content + require.Equal(banffProposalBlkWithDecisionTxs.ID(), parsed.ID()) + require.Equal(banffProposalBlkWithDecisionTxs.Bytes(), parsed.Bytes()) + require.Equal(banffProposalBlkWithDecisionTxs.Parent(), parsed.Parent()) + require.Equal(banffProposalBlkWithDecisionTxs.Height(), parsed.Height()) + require.IsType(&BanffProposalBlock{}, parsed) + parsedBanffProposalBlkWithDecisionTxs := parsed.(*BanffProposalBlock) + + l := len(decisionTxs) + expectedTxs := make([]*txs.Tx, l+1) + copy(expectedTxs, decisionTxs) + expectedTxs[l] = proposalTx + require.Equal(expectedTxs, parsedBanffProposalBlkWithDecisionTxs.Txs()) + + require.Equal(banffProposalBlkWithDecisionTxs.Timestamp(), parsedBanffProposalBlkWithDecisionTxs.Timestamp()) } } @@ -224,7 +257,7 @@ func TestAtomicBlock(t *testing.T) { require := require.New(t) parentID := ids.ID{'p', 'a', 'r', 'e', 'n', 't', 'I', 'D'} height := uint64(2022) - tx, err := testAtomicTx() + atomicTx, err := testAtomicTx() require.NoError(err) for _, cdc := range []codec.Manager{Codec, GenesisCodec} { @@ -232,7 +265,7 @@ func TestAtomicBlock(t *testing.T) { atomicBlk, err := NewApricotAtomicBlock( parentID, height, - tx, + atomicTx, ) require.NoError(err) @@ -248,7 +281,7 @@ func TestAtomicBlock(t *testing.T) { require.IsType(&ApricotAtomicBlock{}, parsed) parsedAtomicBlk := parsed.(*ApricotAtomicBlock) - require.Equal([]*txs.Tx{tx}, parsedAtomicBlk.Txs()) + require.Equal([]*txs.Tx{atomicTx}, parsedAtomicBlk.Txs()) } } diff --git a/vms/platformvm/block/proposal_block.go b/vms/platformvm/block/proposal_block.go index 05e23b649949..1986218aa0a5 100644 --- a/vms/platformvm/block/proposal_block.go +++ b/vms/platformvm/block/proposal_block.go @@ -28,6 +28,18 @@ type BanffProposalBlock struct { ApricotProposalBlock `serialize:"true"` } +func (b *BanffProposalBlock) initialize(bytes []byte) error { + if err := 
b.ApricotProposalBlock.initialize(bytes); err != nil { + return err + } + for _, tx := range b.Transactions { + if err := tx.Initialize(txs.Codec); err != nil { + return fmt.Errorf("failed to initialize tx: %w", err) + } + } + return nil +} + func (b *BanffProposalBlock) InitCtx(ctx *snow.Context) { for _, tx := range b.Transactions { tx.Unsigned.InitCtx(ctx) @@ -39,6 +51,14 @@ func (b *BanffProposalBlock) Timestamp() time.Time { return time.Unix(int64(b.Time), 0) } +func (b *BanffProposalBlock) Txs() []*txs.Tx { + l := len(b.Transactions) + txs := make([]*txs.Tx, l+1) + copy(txs, b.Transactions) + txs[l] = b.Tx + return txs +} + func (b *BanffProposalBlock) Visit(v Visitor) error { return v.BanffProposalBlock(b) } @@ -47,16 +67,18 @@ func NewBanffProposalBlock( timestamp time.Time, parentID ids.ID, height uint64, - tx *txs.Tx, + proposalTx *txs.Tx, + decisionTxs []*txs.Tx, ) (*BanffProposalBlock, error) { blk := &BanffProposalBlock{ - Time: uint64(timestamp.Unix()), + Transactions: decisionTxs, + Time: uint64(timestamp.Unix()), ApricotProposalBlock: ApricotProposalBlock{ CommonBlock: CommonBlock{ PrntID: parentID, Hght: height, }, - Tx: tx, + Tx: proposalTx, }, } return blk, initialize(blk) diff --git a/vms/platformvm/block/proposal_block_test.go b/vms/platformvm/block/proposal_block_test.go index 9c1038c51c98..bdb65e4a2404 100644 --- a/vms/platformvm/block/proposal_block_test.go +++ b/vms/platformvm/block/proposal_block_test.go @@ -10,53 +10,70 @@ import ( "github.com/stretchr/testify/require" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/vms/components/avax" - "github.com/ava-labs/avalanchego/vms/components/verify" "github.com/ava-labs/avalanchego/vms/platformvm/txs" - "github.com/ava-labs/avalanchego/vms/secp256k1fx" ) func TestNewBanffProposalBlock(t *testing.T) { - require := require.New(t) - timestamp := time.Now().Truncate(time.Second) parentID := ids.GenerateTestID() height := uint64(1337) + proposalTx, err := testProposalTx() + 
require.NoError(t, err) + decisionTxs, err := testDecisionTxs() + require.NoError(t, err) + + type test struct { + name string + proposalTx *txs.Tx + decisionTxs []*txs.Tx + } - tx := &txs.Tx{ - Unsigned: &txs.AddValidatorTx{ - BaseTx: txs.BaseTx{ - BaseTx: avax.BaseTx{ - Ins: []*avax.TransferableInput{}, - Outs: []*avax.TransferableOutput{}, - }, - }, - StakeOuts: []*avax.TransferableOutput{}, - Validator: txs.Validator{}, - RewardsOwner: &secp256k1fx.OutputOwners{ - Addrs: []ids.ShortID{}, - }, + tests := []test{ + { + name: "no decision txs", + proposalTx: proposalTx, + decisionTxs: []*txs.Tx{}, + }, + { + name: "decision txs", + proposalTx: proposalTx, + decisionTxs: decisionTxs, }, - Creds: []verify.Verifiable{}, } - require.NoError(tx.Initialize(txs.Codec)) - blk, err := NewBanffProposalBlock( - timestamp, - parentID, - height, - tx, - ) - require.NoError(err) + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + require := require.New(t) - // Make sure the block and tx are initialized - require.NotEmpty(blk.Bytes()) - require.NotEmpty(blk.Tx.Bytes()) - require.NotEqual(ids.Empty, blk.Tx.ID()) - require.Equal(tx.Bytes(), blk.Tx.Bytes()) - require.Equal(timestamp, blk.Timestamp()) - require.Equal(parentID, blk.Parent()) - require.Equal(height, blk.Height()) + blk, err := NewBanffProposalBlock( + timestamp, + parentID, + height, + test.proposalTx, + test.decisionTxs, + ) + require.NoError(err) + + require.NotEmpty(blk.Bytes()) + require.Equal(parentID, blk.Parent()) + require.Equal(height, blk.Height()) + require.Equal(timestamp, blk.Timestamp()) + + l := len(test.decisionTxs) + expectedTxs := make([]*txs.Tx, l+1) + copy(expectedTxs, test.decisionTxs) + expectedTxs[l] = test.proposalTx + + blkTxs := blk.Txs() + require.Equal(expectedTxs, blkTxs) + for i, blkTx := range blkTxs { + expectedTx := expectedTxs[i] + require.NotEmpty(blkTx.Bytes()) + require.NotEqual(ids.Empty, blkTx.ID()) + require.Equal(expectedTx.Bytes(), blkTx.Bytes()) + } + }) 
+ } } func TestNewApricotProposalBlock(t *testing.T) { @@ -64,37 +81,28 @@ func TestNewApricotProposalBlock(t *testing.T) { parentID := ids.GenerateTestID() height := uint64(1337) - - tx := &txs.Tx{ - Unsigned: &txs.AddValidatorTx{ - BaseTx: txs.BaseTx{ - BaseTx: avax.BaseTx{ - Ins: []*avax.TransferableInput{}, - Outs: []*avax.TransferableOutput{}, - }, - }, - StakeOuts: []*avax.TransferableOutput{}, - Validator: txs.Validator{}, - RewardsOwner: &secp256k1fx.OutputOwners{ - Addrs: []ids.ShortID{}, - }, - }, - Creds: []verify.Verifiable{}, - } - require.NoError(tx.Initialize(txs.Codec)) + proposalTx, err := testProposalTx() + require.NoError(err) blk, err := NewApricotProposalBlock( parentID, height, - tx, + proposalTx, ) require.NoError(err) - // Make sure the block and tx are initialized require.NotEmpty(blk.Bytes()) - require.NotEmpty(blk.Tx.Bytes()) - require.NotEqual(ids.Empty, blk.Tx.ID()) - require.Equal(tx.Bytes(), blk.Tx.Bytes()) require.Equal(parentID, blk.Parent()) require.Equal(height, blk.Height()) + + expectedTxs := []*txs.Tx{proposalTx} + + blkTxs := blk.Txs() + require.Equal(blkTxs, expectedTxs) + for i, blkTx := range blkTxs { + expectedTx := expectedTxs[i] + require.NotEmpty(blkTx.Bytes()) + require.NotEqual(ids.Empty, blkTx.ID()) + require.Equal(expectedTx.Bytes(), blkTx.Bytes()) + } } diff --git a/vms/platformvm/block/standard_block.go b/vms/platformvm/block/standard_block.go index a088a9eab696..a3a7ee6fed39 100644 --- a/vms/platformvm/block/standard_block.go +++ b/vms/platformvm/block/standard_block.go @@ -58,7 +58,7 @@ func (b *ApricotStandardBlock) initialize(bytes []byte) error { b.CommonBlock.initialize(bytes) for _, tx := range b.Transactions { if err := tx.Initialize(txs.Codec); err != nil { - return fmt.Errorf("failed to sign block: %w", err) + return fmt.Errorf("failed to initialize tx: %w", err) } } return nil diff --git a/vms/platformvm/state/diff.go b/vms/platformvm/state/diff.go index 1aafcf079969..d509fa69e0dd 100644 --- 
a/vms/platformvm/state/diff.go +++ b/vms/platformvm/state/diff.go @@ -47,10 +47,8 @@ type diff struct { subnetOwners map[ids.ID]fx.Owner // Subnet ID --> Tx that transforms the subnet transformedSubnets map[ids.ID]*txs.Tx - cachedSubnets []*txs.Tx - addedChains map[ids.ID][]*txs.Tx - cachedChains map[ids.ID][]*txs.Tx + addedChains map[ids.ID][]*txs.Tx addedRewardUTXOs map[ids.ID][]*avax.UTXO @@ -259,41 +257,8 @@ func (d *diff) GetPendingStakerIterator() (StakerIterator, error) { return d.pendingStakerDiffs.GetStakerIterator(parentIterator), nil } -func (d *diff) GetSubnets() ([]*txs.Tx, error) { - if len(d.addedSubnets) == 0 { - parentState, ok := d.stateVersions.GetState(d.parentID) - if !ok { - return nil, fmt.Errorf("%w: %s", ErrMissingParentState, d.parentID) - } - return parentState.GetSubnets() - } - - if len(d.cachedSubnets) != 0 { - return d.cachedSubnets, nil - } - - parentState, ok := d.stateVersions.GetState(d.parentID) - if !ok { - return nil, fmt.Errorf("%w: %s", ErrMissingParentState, d.parentID) - } - subnets, err := parentState.GetSubnets() - if err != nil { - return nil, err - } - newSubnets := make([]*txs.Tx, len(subnets)+len(d.addedSubnets)) - copy(newSubnets, subnets) - for i, subnet := range d.addedSubnets { - newSubnets[i+len(subnets)] = subnet - } - d.cachedSubnets = newSubnets - return newSubnets, nil -} - func (d *diff) AddSubnet(createSubnetTx *txs.Tx) { d.addedSubnets = append(d.addedSubnets, createSubnetTx) - if d.cachedSubnets != nil { - d.cachedSubnets = append(d.cachedSubnets, createSubnetTx) - } } func (d *diff) GetSubnetOwner(subnetID ids.ID) (fx.Owner, error) { @@ -339,48 +304,6 @@ func (d *diff) AddSubnetTransformation(transformSubnetTxIntf *txs.Tx) { } } -func (d *diff) GetChains(subnetID ids.ID) ([]*txs.Tx, error) { - addedChains := d.addedChains[subnetID] - if len(addedChains) == 0 { - // No chains have been added to this subnet - parentState, ok := d.stateVersions.GetState(d.parentID) - if !ok { - return nil, fmt.Errorf("%w: 
%s", ErrMissingParentState, d.parentID) - } - return parentState.GetChains(subnetID) - } - - // There have been chains added to the requested subnet - - if d.cachedChains == nil { - // This is the first time we are going to be caching the subnet chains - d.cachedChains = make(map[ids.ID][]*txs.Tx) - } - - cachedChains, cached := d.cachedChains[subnetID] - if cached { - return cachedChains, nil - } - - // This chain wasn't cached yet - parentState, ok := d.stateVersions.GetState(d.parentID) - if !ok { - return nil, fmt.Errorf("%w: %s", ErrMissingParentState, d.parentID) - } - chains, err := parentState.GetChains(subnetID) - if err != nil { - return nil, err - } - - newChains := make([]*txs.Tx, len(chains)+len(addedChains)) - copy(newChains, chains) - for i, chain := range addedChains { - newChains[i+len(chains)] = chain - } - d.cachedChains[subnetID] = newChains - return newChains, nil -} - func (d *diff) AddChain(createChainTx *txs.Tx) { tx := createChainTx.Unsigned.(*txs.CreateChainTx) if d.addedChains == nil { @@ -390,12 +313,6 @@ func (d *diff) AddChain(createChainTx *txs.Tx) { } else { d.addedChains[tx.SubnetID] = append(d.addedChains[tx.SubnetID], createChainTx) } - - cachedChains, cached := d.cachedChains[tx.SubnetID] - if !cached { - return - } - d.cachedChains[tx.SubnetID] = append(cachedChains, createChainTx) } func (d *diff) GetTx(txID ids.ID) (*txs.Tx, status.Status, error) { @@ -425,18 +342,6 @@ func (d *diff) AddTx(tx *txs.Tx, status status.Status) { } } -func (d *diff) GetRewardUTXOs(txID ids.ID) ([]*avax.UTXO, error) { - if utxos, exists := d.addedRewardUTXOs[txID]; exists { - return utxos, nil - } - - parentState, ok := d.stateVersions.GetState(d.parentID) - if !ok { - return nil, fmt.Errorf("%w: %s", ErrMissingParentState, d.parentID) - } - return parentState.GetRewardUTXOs(txID) -} - func (d *diff) AddRewardUTXO(txID ids.ID, utxo *avax.UTXO) { if d.addedRewardUTXOs == nil { d.addedRewardUTXOs = make(map[ids.ID][]*avax.UTXO) diff --git 
a/vms/platformvm/state/diff_test.go b/vms/platformvm/state/diff_test.go index 9b833f8482a8..50c87b2d3a53 100644 --- a/vms/platformvm/state/diff_test.go +++ b/vms/platformvm/state/diff_test.go @@ -250,15 +250,28 @@ func TestDiffSubnet(t *testing.T) { require := require.New(t) ctrl := gomock.NewController(t) - state := NewMockState(ctrl) - // Called in NewDiff - state.EXPECT().GetTimestamp().Return(time.Now()).Times(1) + state, _ := newInitializedState(require) + + // Initialize parent with one subnet + parentStateCreateSubnetTx := &txs.Tx{ + Unsigned: &txs.CreateSubnetTx{ + Owner: fx.NewMockOwner(ctrl), + }, + } + state.AddSubnet(parentStateCreateSubnetTx) + + // Verify parent returns one subnet + subnets, err := state.GetSubnets() + require.NoError(err) + require.Equal([]*txs.Tx{ + parentStateCreateSubnetTx, + }, subnets) states := NewMockVersions(ctrl) lastAcceptedID := ids.GenerateTestID() states.EXPECT().GetState(lastAcceptedID).Return(state, true).AnyTimes() - d, err := NewDiff(lastAcceptedID, states) + diff, err := NewDiff(lastAcceptedID, states) require.NoError(err) // Put a subnet @@ -267,60 +280,67 @@ func TestDiffSubnet(t *testing.T) { Owner: fx.NewMockOwner(ctrl), }, } - d.AddSubnet(createSubnetTx) + diff.AddSubnet(createSubnetTx) - // Assert that we get the subnet back - // [state] returns 1 subnet. 
- parentStateCreateSubnetTx := &txs.Tx{ - Unsigned: &txs.CreateSubnetTx{ - Owner: fx.NewMockOwner(ctrl), - }, - } - state.EXPECT().GetSubnets().Return([]*txs.Tx{parentStateCreateSubnetTx}, nil).Times(1) - gotSubnets, err := d.GetSubnets() + // Apply diff to parent state + require.NoError(diff.Apply(state)) + + // Verify parent now returns two subnets + subnets, err = state.GetSubnets() require.NoError(err) - require.Len(gotSubnets, 2) - require.Equal(gotSubnets[0], parentStateCreateSubnetTx) - require.Equal(gotSubnets[1], createSubnetTx) + require.Equal([]*txs.Tx{ + parentStateCreateSubnetTx, + createSubnetTx, + }, subnets) } func TestDiffChain(t *testing.T) { require := require.New(t) ctrl := gomock.NewController(t) - state := NewMockState(ctrl) - // Called in NewDiff - state.EXPECT().GetTimestamp().Return(time.Now()).Times(1) + state, _ := newInitializedState(require) + subnetID := ids.GenerateTestID() + + // Initialize parent with one chain + parentStateCreateChainTx := &txs.Tx{ + Unsigned: &txs.CreateChainTx{ + SubnetID: subnetID, + }, + } + state.AddChain(parentStateCreateChainTx) + + // Verify parent returns one chain + chains, err := state.GetChains(subnetID) + require.NoError(err) + require.Equal([]*txs.Tx{ + parentStateCreateChainTx, + }, chains) states := NewMockVersions(ctrl) lastAcceptedID := ids.GenerateTestID() states.EXPECT().GetState(lastAcceptedID).Return(state, true).AnyTimes() - d, err := NewDiff(lastAcceptedID, states) + diff, err := NewDiff(lastAcceptedID, states) require.NoError(err) // Put a chain - subnetID := ids.GenerateTestID() createChainTx := &txs.Tx{ Unsigned: &txs.CreateChainTx{ - SubnetID: subnetID, + SubnetID: subnetID, // note this is the same subnet as [parentStateCreateChainTx] }, } - d.AddChain(createChainTx) + diff.AddChain(createChainTx) - // Assert that we get the chain back - // [state] returns 1 chain. 
- parentStateCreateChainTx := &txs.Tx{ - Unsigned: &txs.CreateChainTx{ - SubnetID: subnetID, // note this is the same subnet as [createChainTx] - }, - } - state.EXPECT().GetChains(subnetID).Return([]*txs.Tx{parentStateCreateChainTx}, nil).Times(1) - gotChains, err := d.GetChains(subnetID) + // Apply diff to parent state + require.NoError(diff.Apply(state)) + + // Verify parent now returns two chains + chains, err = state.GetChains(subnetID) require.NoError(err) - require.Len(gotChains, 2) - require.Equal(parentStateCreateChainTx, gotChains[0]) - require.Equal(createChainTx, gotChains[1]) + require.Equal([]*txs.Tx{ + parentStateCreateChainTx, + createChainTx, + }, chains) } func TestDiffTx(t *testing.T) { @@ -377,45 +397,46 @@ func TestDiffRewardUTXO(t *testing.T) { require := require.New(t) ctrl := gomock.NewController(t) - state := NewMockState(ctrl) - // Called in NewDiff - state.EXPECT().GetTimestamp().Return(time.Now()).Times(1) + state, _ := newInitializedState(require) + + txID := ids.GenerateTestID() + + // Initialize parent with one reward UTXO + parentRewardUTXO := &avax.UTXO{ + UTXOID: avax.UTXOID{TxID: txID}, + } + state.AddRewardUTXO(txID, parentRewardUTXO) + + // Verify parent returns the reward UTXO + rewardUTXOs, err := state.GetRewardUTXOs(txID) + require.NoError(err) + require.Equal([]*avax.UTXO{ + parentRewardUTXO, + }, rewardUTXOs) states := NewMockVersions(ctrl) lastAcceptedID := ids.GenerateTestID() states.EXPECT().GetState(lastAcceptedID).Return(state, true).AnyTimes() - d, err := NewDiff(lastAcceptedID, states) + diff, err := NewDiff(lastAcceptedID, states) require.NoError(err) // Put a reward UTXO - txID := ids.GenerateTestID() rewardUTXO := &avax.UTXO{ UTXOID: avax.UTXOID{TxID: txID}, } - d.AddRewardUTXO(txID, rewardUTXO) + diff.AddRewardUTXO(txID, rewardUTXO) - { - // Assert that we get the UTXO back - gotRewardUTXOs, err := d.GetRewardUTXOs(txID) - require.NoError(err) - require.Len(gotRewardUTXOs, 1) - require.Equal(rewardUTXO, 
gotRewardUTXOs[0]) - } + // Apply diff to parent state + require.NoError(diff.Apply(state)) - { - // Assert that we can get a UTXO from the parent state - // [state] returns 1 UTXO. - txID2 := ids.GenerateTestID() - parentRewardUTXO := &avax.UTXO{ - UTXOID: avax.UTXOID{TxID: txID2}, - } - state.EXPECT().GetRewardUTXOs(txID2).Return([]*avax.UTXO{parentRewardUTXO}, nil).Times(1) - gotParentRewardUTXOs, err := d.GetRewardUTXOs(txID2) - require.NoError(err) - require.Len(gotParentRewardUTXOs, 1) - require.Equal(parentRewardUTXO, gotParentRewardUTXOs[0]) - } + // Verify parent now returns two reward UTXOs + rewardUTXOs, err = state.GetRewardUTXOs(txID) + require.NoError(err) + require.Equal([]*avax.UTXO{ + parentRewardUTXO, + rewardUTXO, + }, rewardUTXOs) } func TestDiffUTXO(t *testing.T) { @@ -496,25 +517,6 @@ func assertChainsEqual(t *testing.T, expected, actual Chain) { require.NoError(err) require.Equal(expectedCurrentSupply, actualCurrentSupply) - - expectedSubnets, expectedErr := expected.GetSubnets() - actualSubnets, actualErr := actual.GetSubnets() - require.Equal(expectedErr, actualErr) - if expectedErr == nil { - require.Equal(expectedSubnets, actualSubnets) - - for _, subnet := range expectedSubnets { - subnetID := subnet.ID() - - expectedChains, expectedErr := expected.GetChains(subnetID) - actualChains, actualErr := actual.GetChains(subnetID) - require.Equal(expectedErr, actualErr) - if expectedErr != nil { - continue - } - require.Equal(expectedChains, actualChains) - } - } } func TestDiffSubnetOwner(t *testing.T) { diff --git a/vms/platformvm/state/mock_state.go b/vms/platformvm/state/mock_state.go index 465b3fda2bac..8a3aac7d81f1 100644 --- a/vms/platformvm/state/mock_state.go +++ b/vms/platformvm/state/mock_state.go @@ -180,21 +180,6 @@ func (mr *MockChainMockRecorder) DeleteUTXO(arg0 interface{}) *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteUTXO", reflect.TypeOf((*MockChain)(nil).DeleteUTXO), arg0) } -// GetChains mocks 
base method. -func (m *MockChain) GetChains(arg0 ids.ID) ([]*txs.Tx, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetChains", arg0) - ret0, _ := ret[0].([]*txs.Tx) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetChains indicates an expected call of GetChains. -func (mr *MockChainMockRecorder) GetChains(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetChains", reflect.TypeOf((*MockChain)(nil).GetChains), arg0) -} - // GetCurrentDelegatorIterator mocks base method. func (m *MockChain) GetCurrentDelegatorIterator(arg0 ids.ID, arg1 ids.NodeID) (StakerIterator, error) { m.ctrl.T.Helper() @@ -315,21 +300,6 @@ func (mr *MockChainMockRecorder) GetPendingValidator(arg0, arg1 interface{}) *go return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPendingValidator", reflect.TypeOf((*MockChain)(nil).GetPendingValidator), arg0, arg1) } -// GetRewardUTXOs mocks base method. -func (m *MockChain) GetRewardUTXOs(arg0 ids.ID) ([]*avax.UTXO, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetRewardUTXOs", arg0) - ret0, _ := ret[0].([]*avax.UTXO) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetRewardUTXOs indicates an expected call of GetRewardUTXOs. -func (mr *MockChainMockRecorder) GetRewardUTXOs(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetRewardUTXOs", reflect.TypeOf((*MockChain)(nil).GetRewardUTXOs), arg0) -} - // GetSubnetOwner mocks base method. func (m *MockChain) GetSubnetOwner(arg0 ids.ID) (fx.Owner, error) { m.ctrl.T.Helper() @@ -360,21 +330,6 @@ func (mr *MockChainMockRecorder) GetSubnetTransformation(arg0 interface{}) *gomo return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSubnetTransformation", reflect.TypeOf((*MockChain)(nil).GetSubnetTransformation), arg0) } -// GetSubnets mocks base method. 
-func (m *MockChain) GetSubnets() ([]*txs.Tx, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetSubnets") - ret0, _ := ret[0].([]*txs.Tx) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetSubnets indicates an expected call of GetSubnets. -func (mr *MockChainMockRecorder) GetSubnets() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSubnets", reflect.TypeOf((*MockChain)(nil).GetSubnets)) -} - // GetTimestamp mocks base method. func (m *MockChain) GetTimestamp() time.Time { m.ctrl.T.Helper() @@ -687,21 +642,6 @@ func (mr *MockDiffMockRecorder) DeleteUTXO(arg0 interface{}) *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteUTXO", reflect.TypeOf((*MockDiff)(nil).DeleteUTXO), arg0) } -// GetChains mocks base method. -func (m *MockDiff) GetChains(arg0 ids.ID) ([]*txs.Tx, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetChains", arg0) - ret0, _ := ret[0].([]*txs.Tx) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetChains indicates an expected call of GetChains. -func (mr *MockDiffMockRecorder) GetChains(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetChains", reflect.TypeOf((*MockDiff)(nil).GetChains), arg0) -} - // GetCurrentDelegatorIterator mocks base method. func (m *MockDiff) GetCurrentDelegatorIterator(arg0 ids.ID, arg1 ids.NodeID) (StakerIterator, error) { m.ctrl.T.Helper() @@ -822,21 +762,6 @@ func (mr *MockDiffMockRecorder) GetPendingValidator(arg0, arg1 interface{}) *gom return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPendingValidator", reflect.TypeOf((*MockDiff)(nil).GetPendingValidator), arg0, arg1) } -// GetRewardUTXOs mocks base method. 
-func (m *MockDiff) GetRewardUTXOs(arg0 ids.ID) ([]*avax.UTXO, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetRewardUTXOs", arg0) - ret0, _ := ret[0].([]*avax.UTXO) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetRewardUTXOs indicates an expected call of GetRewardUTXOs. -func (mr *MockDiffMockRecorder) GetRewardUTXOs(arg0 interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetRewardUTXOs", reflect.TypeOf((*MockDiff)(nil).GetRewardUTXOs), arg0) -} - // GetSubnetOwner mocks base method. func (m *MockDiff) GetSubnetOwner(arg0 ids.ID) (fx.Owner, error) { m.ctrl.T.Helper() @@ -867,21 +792,6 @@ func (mr *MockDiffMockRecorder) GetSubnetTransformation(arg0 interface{}) *gomoc return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSubnetTransformation", reflect.TypeOf((*MockDiff)(nil).GetSubnetTransformation), arg0) } -// GetSubnets mocks base method. -func (m *MockDiff) GetSubnets() ([]*txs.Tx, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetSubnets") - ret0, _ := ret[0].([]*txs.Tx) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// GetSubnets indicates an expected call of GetSubnets. -func (mr *MockDiffMockRecorder) GetSubnets() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSubnets", reflect.TypeOf((*MockDiff)(nil).GetSubnets)) -} - // GetTimestamp mocks base method. 
func (m *MockDiff) GetTimestamp() time.Time { m.ctrl.T.Helper() diff --git a/vms/platformvm/state/staker.go b/vms/platformvm/state/staker.go index 37bc512e36cb..2488e4aff79e 100644 --- a/vms/platformvm/state/staker.go +++ b/vms/platformvm/state/staker.go @@ -83,7 +83,7 @@ func (s *Staker) Less(than *Staker) bool { return bytes.Compare(s.TxID[:], than.TxID[:]) == -1 } -func NewCurrentStaker(txID ids.ID, staker txs.Staker, potentialReward uint64) (*Staker, error) { +func NewCurrentStaker(txID ids.ID, staker txs.ScheduledStaker, potentialReward uint64) (*Staker, error) { publicKey, _, err := staker.PublicKey() if err != nil { return nil, err @@ -103,7 +103,7 @@ func NewCurrentStaker(txID ids.ID, staker txs.Staker, potentialReward uint64) (* }, nil } -func NewPendingStaker(txID ids.ID, staker txs.Staker) (*Staker, error) { +func NewPendingStaker(txID ids.ID, staker txs.ScheduledStaker) (*Staker, error) { publicKey, _, err := staker.PublicKey() if err != nil { return nil, err diff --git a/vms/platformvm/state/staker_test.go b/vms/platformvm/state/staker_test.go index 747f442e5eda..9482e33b793a 100644 --- a/vms/platformvm/state/staker_test.go +++ b/vms/platformvm/state/staker_test.go @@ -144,11 +144,11 @@ func TestNewCurrentStaker(t *testing.T) { subnetID := ids.GenerateTestID() weight := uint64(12345) startTime := time.Now() - endTime := time.Now() + endTime := startTime.Add(time.Hour) potentialReward := uint64(54321) currentPriority := txs.SubnetPermissionedValidatorCurrentPriority - stakerTx := txs.NewMockStaker(ctrl) + stakerTx := txs.NewMockScheduledStaker(ctrl) stakerTx.EXPECT().NodeID().Return(nodeID) stakerTx.EXPECT().PublicKey().Return(publicKey, true, nil) stakerTx.EXPECT().SubnetID().Return(subnetID) @@ -192,7 +192,7 @@ func TestNewPendingStaker(t *testing.T) { endTime := time.Now() pendingPriority := txs.SubnetPermissionedValidatorPendingPriority - stakerTx := txs.NewMockStaker(ctrl) + stakerTx := txs.NewMockScheduledStaker(ctrl) 
stakerTx.EXPECT().NodeID().Return(nodeID) stakerTx.EXPECT().PublicKey().Return(publicKey, true, nil) stakerTx.EXPECT().SubnetID().Return(subnetID) diff --git a/vms/platformvm/state/state.go b/vms/platformvm/state/state.go index 4b2b59cf2f70..36a49428020c 100644 --- a/vms/platformvm/state/state.go +++ b/vms/platformvm/state/state.go @@ -106,10 +106,8 @@ type Chain interface { GetCurrentSupply(subnetID ids.ID) (uint64, error) SetCurrentSupply(subnetID ids.ID, cs uint64) - GetRewardUTXOs(txID ids.ID) ([]*avax.UTXO, error) AddRewardUTXO(txID ids.ID, utxo *avax.UTXO) - GetSubnets() ([]*txs.Tx, error) AddSubnet(createSubnetTx *txs.Tx) GetSubnetOwner(subnetID ids.ID) (fx.Owner, error) @@ -118,7 +116,6 @@ type Chain interface { GetSubnetTransformation(subnetID ids.ID) (*txs.Tx, error) AddSubnetTransformation(transformSubnetTx *txs.Tx) - GetChains(subnetID ids.ID) ([]*txs.Tx, error) AddChain(createChainTx *txs.Tx) GetTx(txID ids.ID) (*txs.Tx, status.Status, error) @@ -140,6 +137,10 @@ type State interface { GetBlockIDAtHeight(height uint64) (ids.ID, error) + GetRewardUTXOs(txID ids.ID) ([]*avax.UTXO, error) + GetSubnets() ([]*txs.Tx, error) + GetChains(subnetID ids.ID) ([]*txs.Tx, error) + // ApplyValidatorWeightDiffs iterates from [startHeight] towards the genesis // block until it has applied all of the diffs up to and including // [endHeight]. Applying the diffs modifies [validators]. @@ -1319,9 +1320,13 @@ func (s *state) syncGenesis(genesisBlk block.Block, genesis *genesis.Genesis) er // Persist primary network validator set at genesis for _, vdrTx := range genesis.Validators { - validatorTx, ok := vdrTx.Unsigned.(txs.ValidatorTx) + // We expect genesis validator txs to be either AddValidatorTx or + // AddPermissionlessValidatorTx. 
+ // + // TODO: Enforce stricter type check + validatorTx, ok := vdrTx.Unsigned.(txs.ScheduledStaker) if !ok { - return fmt.Errorf("expected tx type txs.ValidatorTx but got %T", vdrTx.Unsigned) + return fmt.Errorf("expected a scheduled staker but got %T", vdrTx.Unsigned) } stakeAmount := validatorTx.Weight() @@ -1447,7 +1452,12 @@ func (s *state) loadCurrentValidators() error { } tx, _, err := s.GetTx(txID) if err != nil { - return err + return fmt.Errorf("failed loading validator transaction txID %s, %w", txID, err) + } + + stakerTx, ok := tx.Unsigned.(txs.ScheduledStaker) + if !ok { + return fmt.Errorf("expected tx type txs.Staker but got %T", tx.Unsigned) } metadataBytes := validatorIt.Value() @@ -1460,11 +1470,6 @@ func (s *state) loadCurrentValidators() error { return err } - stakerTx, ok := tx.Unsigned.(txs.Staker) - if !ok { - return fmt.Errorf("expected tx type txs.Staker but got %T", tx.Unsigned) - } - staker, err := NewCurrentStaker(txID, stakerTx, metadata.PotentialReward) if err != nil { return err @@ -1491,17 +1496,18 @@ func (s *state) loadCurrentValidators() error { return err } - stakerTx, ok := tx.Unsigned.(txs.Staker) + stakerTx, ok := tx.Unsigned.(txs.ScheduledStaker) if !ok { return fmt.Errorf("expected tx type txs.Staker but got %T", tx.Unsigned) } metadataBytes := subnetValidatorIt.Value() + startTime := stakerTx.StartTime() metadata := &validatorMetadata{ txID: txID, // use the start time as the fallback value // in case it's not stored in the database - LastUpdated: uint64(stakerTx.StartTime().Unix()), + LastUpdated: uint64(startTime.Unix()), } if err := parseValidatorMetadata(metadataBytes, metadata); err != nil { return err @@ -1537,6 +1543,11 @@ func (s *state) loadCurrentValidators() error { return err } + stakerTx, ok := tx.Unsigned.(txs.ScheduledStaker) + if !ok { + return fmt.Errorf("expected tx type txs.Staker but got %T", tx.Unsigned) + } + metadata := &delegatorMetadata{ txID: txID, } @@ -1545,11 +1556,6 @@ func (s *state) 
loadCurrentValidators() error { return err } - stakerTx, ok := tx.Unsigned.(txs.Staker) - if !ok { - return fmt.Errorf("expected tx type txs.Staker but got %T", tx.Unsigned) - } - staker, err := NewCurrentStaker(txID, stakerTx, metadata.PotentialReward) if err != nil { return err @@ -1594,7 +1600,7 @@ func (s *state) loadPendingValidators() error { return err } - stakerTx, ok := tx.Unsigned.(txs.Staker) + stakerTx, ok := tx.Unsigned.(txs.ScheduledStaker) if !ok { return fmt.Errorf("expected tx type txs.Staker but got %T", tx.Unsigned) } @@ -1629,7 +1635,7 @@ func (s *state) loadPendingValidators() error { return err } - stakerTx, ok := tx.Unsigned.(txs.Staker) + stakerTx, ok := tx.Unsigned.(txs.ScheduledStaker) if !ok { return fmt.Errorf("expected tx type txs.Staker but got %T", tx.Unsigned) } diff --git a/vms/platformvm/state/state_test.go b/vms/platformvm/state/state_test.go index 3c36310c0576..1d0e02938f1c 100644 --- a/vms/platformvm/state/state_test.go +++ b/vms/platformvm/state/state_test.go @@ -617,7 +617,7 @@ func TestParsedStateBlock(t *testing.T) { Unsigned: &txs.RewardValidatorTx{ TxID: ids.GenerateTestID(), }, - }) + }, []*txs.Tx{}) require.NoError(err) blks = append(blks, blk) } diff --git a/vms/platformvm/txs/add_delegator_tx.go b/vms/platformvm/txs/add_delegator_tx.go index 4f6fbe395b02..af328cc693bf 100644 --- a/vms/platformvm/txs/add_delegator_tx.go +++ b/vms/platformvm/txs/add_delegator_tx.go @@ -19,7 +19,8 @@ import ( ) var ( - _ DelegatorTx = (*AddDelegatorTx)(nil) + _ DelegatorTx = (*AddDelegatorTx)(nil) + _ ScheduledStaker = (*AddDelegatorTx)(nil) errDelegatorWeightMismatch = errors.New("delegator weight is not equal to total stake weight") errStakeMustBeAVAX = errors.New("stake must be AVAX") diff --git a/vms/platformvm/txs/add_permissionless_delegator_tx.go b/vms/platformvm/txs/add_permissionless_delegator_tx.go index 43db685d7629..346a80dd61f7 100644 --- a/vms/platformvm/txs/add_permissionless_delegator_tx.go +++ 
b/vms/platformvm/txs/add_permissionless_delegator_tx.go @@ -17,7 +17,10 @@ import ( "github.com/ava-labs/avalanchego/vms/secp256k1fx" ) -var _ DelegatorTx = (*AddPermissionlessDelegatorTx)(nil) +var ( + _ DelegatorTx = (*AddPermissionlessDelegatorTx)(nil) + _ ScheduledStaker = (*AddPermissionlessDelegatorTx)(nil) +) // AddPermissionlessDelegatorTx is an unsigned addPermissionlessDelegatorTx type AddPermissionlessDelegatorTx struct { diff --git a/vms/platformvm/txs/add_permissionless_validator_tx.go b/vms/platformvm/txs/add_permissionless_validator_tx.go index 8f313ae000b9..34b13129ade8 100644 --- a/vms/platformvm/txs/add_permissionless_validator_tx.go +++ b/vms/platformvm/txs/add_permissionless_validator_tx.go @@ -21,7 +21,8 @@ import ( ) var ( - _ ValidatorTx = (*AddPermissionlessValidatorTx)(nil) + _ ValidatorTx = (*AddPermissionlessValidatorTx)(nil) + _ ScheduledStaker = (*AddPermissionlessValidatorTx)(nil) errEmptyNodeID = errors.New("validator nodeID cannot be empty") errNoStake = errors.New("no stake") diff --git a/vms/platformvm/txs/add_subnet_validator_tx.go b/vms/platformvm/txs/add_subnet_validator_tx.go index 0ac3474e1bd6..53fd43562c02 100644 --- a/vms/platformvm/txs/add_subnet_validator_tx.go +++ b/vms/platformvm/txs/add_subnet_validator_tx.go @@ -14,7 +14,8 @@ import ( ) var ( - _ StakerTx = (*AddSubnetValidatorTx)(nil) + _ StakerTx = (*AddSubnetValidatorTx)(nil) + _ ScheduledStaker = (*AddSubnetValidatorTx)(nil) errAddPrimaryNetworkValidator = errors.New("can't add primary network validator with AddSubnetValidatorTx") ) diff --git a/vms/platformvm/txs/add_validator_tx.go b/vms/platformvm/txs/add_validator_tx.go index be6a93c2e42b..a0e82dba7c77 100644 --- a/vms/platformvm/txs/add_validator_tx.go +++ b/vms/platformvm/txs/add_validator_tx.go @@ -19,7 +19,8 @@ import ( ) var ( - _ ValidatorTx = (*AddValidatorTx)(nil) + _ ValidatorTx = (*AddValidatorTx)(nil) + _ ScheduledStaker = (*AddValidatorTx)(nil) errTooManyShares = fmt.Errorf("a staker can only 
require at most %d shares from delegators", reward.PercentDenominator) ) diff --git a/vms/platformvm/txs/executor/staker_tx_verification.go b/vms/platformvm/txs/executor/staker_tx_verification.go index 7fae0e78a85b..8a0c6046d67f 100644 --- a/vms/platformvm/txs/executor/staker_tx_verification.go +++ b/vms/platformvm/txs/executor/staker_tx_verification.go @@ -7,6 +7,7 @@ import ( "errors" "fmt" "math" + "time" "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/ids" @@ -92,7 +93,6 @@ func verifyAddValidatorTx( } duration := tx.Validator.Duration() - switch { case tx.Validator.Wght < backend.Config.MinValidatorStake: // Ensure validator is staking at least the minimum amount @@ -123,16 +123,12 @@ func verifyAddValidatorTx( return outs, nil } - currentTimestamp := chainState.GetTimestamp() - // Ensure the proposed validator starts after the current time - startTime := tx.StartTime() - if !currentTimestamp.Before(startTime) { - return nil, fmt.Errorf( - "%w: %s >= %s", - ErrTimestampNotBeforeStartTime, - currentTimestamp, - startTime, - ) + var ( + currentTimestamp = chainState.GetTimestamp() + startTime = tx.StartTime() + ) + if err := verifyStakerStartTime(currentTimestamp, startTime); err != nil { + return nil, err } _, err := GetValidator(chainState, constants.PrimaryNetworkID, tx.Validator.NodeID) @@ -165,14 +161,9 @@ func verifyAddValidatorTx( return nil, fmt.Errorf("%w: %w", ErrFlowCheckFailed, err) } - // Make sure the tx doesn't start too far in the future. This is done last - // to allow the verifier visitor to explicitly check for this error. - maxStartTime := currentTimestamp.Add(MaxFutureStartTime) - if startTime.After(maxStartTime) { - return nil, ErrFutureStakeTime - } - - return outs, nil + // verifyStakerStartsSoon is checked last to allow + // the verifier visitor to explicitly check for this error. 
+ return outs, verifyStakerStartsSoon(currentTimestamp, startTime) } // verifyAddSubnetValidatorTx carries out the validation for an @@ -203,16 +194,12 @@ func verifyAddSubnetValidatorTx( return nil } - currentTimestamp := chainState.GetTimestamp() - // Ensure the proposed validator starts after the current timestamp - validatorStartTime := tx.StartTime() - if !currentTimestamp.Before(validatorStartTime) { - return fmt.Errorf( - "%w: %s >= %s", - ErrTimestampNotBeforeStartTime, - currentTimestamp, - validatorStartTime, - ) + var ( + currentTimestamp = chainState.GetTimestamp() + startTime = tx.StartTime() + ) + if err := verifyStakerStartTime(currentTimestamp, startTime); err != nil { + return err } _, err := GetValidator(chainState, tx.SubnetValidator.Subnet, tx.Validator.NodeID) @@ -255,14 +242,9 @@ func verifyAddSubnetValidatorTx( return fmt.Errorf("%w: %w", ErrFlowCheckFailed, err) } - // Make sure the tx doesn't start too far in the future. This is done last - // to allow the verifier visitor to explicitly check for this error. - maxStartTime := currentTimestamp.Add(MaxFutureStartTime) - if validatorStartTime.After(maxStartTime) { - return ErrFutureStakeTime - } - - return nil + // verifyStakerStartsSoon is checked last to allow + // the verifier visitor to explicitly check for this error. + return verifyStakerStartsSoon(currentTimestamp, startTime) } // Returns the representation of [tx.NodeID] validating [tx.Subnet]. 
@@ -372,16 +354,12 @@ func verifyAddDelegatorTx( return outs, nil } - currentTimestamp := chainState.GetTimestamp() - // Ensure the proposed validator starts after the current timestamp - validatorStartTime := tx.StartTime() - if !currentTimestamp.Before(validatorStartTime) { - return nil, fmt.Errorf( - "%w: %s >= %s", - ErrTimestampNotBeforeStartTime, - currentTimestamp, - validatorStartTime, - ) + var ( + currentTimestamp = chainState.GetTimestamp() + startTime = tx.StartTime() + ) + if err := verifyStakerStartTime(currentTimestamp, startTime); err != nil { + return nil, err } primaryNetworkValidator, err := GetValidator(chainState, constants.PrimaryNetworkID, tx.Validator.NodeID) @@ -438,14 +416,9 @@ func verifyAddDelegatorTx( return nil, fmt.Errorf("%w: %w", ErrFlowCheckFailed, err) } - // Make sure the tx doesn't start too far in the future. This is done last - // to allow the verifier visitor to explicitly check for this error. - maxStartTime := currentTimestamp.Add(MaxFutureStartTime) - if validatorStartTime.After(maxStartTime) { - return nil, ErrFutureStakeTime - } - - return outs, nil + // verifyStakerStartsSoon is checked last to allow + // the verifier visitor to explicitly check for this error. 
+ return outs, verifyStakerStartsSoon(currentTimestamp, startTime) } // verifyAddPermissionlessValidatorTx carries out the validation for an @@ -465,16 +438,12 @@ func verifyAddPermissionlessValidatorTx( return nil } - currentTimestamp := chainState.GetTimestamp() - // Ensure the proposed validator starts after the current time - startTime := tx.StartTime() - if !currentTimestamp.Before(startTime) { - return fmt.Errorf( - "%w: %s >= %s", - ErrTimestampNotBeforeStartTime, - currentTimestamp, - startTime, - ) + var ( + currentTimestamp = chainState.GetTimestamp() + startTime = tx.StartTime() + ) + if err := verifyStakerStartTime(currentTimestamp, startTime); err != nil { + return err } validatorRules, err := getValidatorRules(backend, chainState, tx.Subnet) @@ -482,8 +451,10 @@ func verifyAddPermissionlessValidatorTx( return err } - duration := tx.Validator.Duration() - stakedAssetID := tx.StakeOuts[0].AssetID() + var ( + duration = tx.Validator.Duration() + stakedAssetID = tx.StakeOuts[0].AssetID() + ) switch { case tx.Validator.Wght < validatorRules.minValidatorStake: // Ensure validator is staking at least the minimum amount @@ -562,14 +533,9 @@ func verifyAddPermissionlessValidatorTx( return fmt.Errorf("%w: %w", ErrFlowCheckFailed, err) } - // Make sure the tx doesn't start too far in the future. This is done last - // to allow the verifier visitor to explicitly check for this error. - maxStartTime := currentTimestamp.Add(MaxFutureStartTime) - if startTime.After(maxStartTime) { - return ErrFutureStakeTime - } - - return nil + // verifyStakerStartsSoon is checked last to allow + // the verifier visitor to explicitly check for this error. 
+ return verifyStakerStartsSoon(currentTimestamp, startTime) } // verifyAddPermissionlessDelegatorTx carries out the validation for an @@ -589,15 +555,12 @@ func verifyAddPermissionlessDelegatorTx( return nil } - currentTimestamp := chainState.GetTimestamp() - // Ensure the proposed validator starts after the current timestamp - startTime := tx.StartTime() - if !currentTimestamp.Before(startTime) { - return fmt.Errorf( - "chain timestamp (%s) not before validator's start time (%s)", - currentTimestamp, - startTime, - ) + var ( + currentTimestamp = chainState.GetTimestamp() + startTime = tx.StartTime() + ) + if err := verifyStakerStartTime(currentTimestamp, startTime); err != nil { + return err } delegatorRules, err := getDelegatorRules(backend, chainState, tx.Subnet) @@ -605,8 +568,10 @@ func verifyAddPermissionlessDelegatorTx( return err } - duration := tx.Validator.Duration() - stakedAssetID := tx.StakeOuts[0].AssetID() + var ( + duration = tx.Validator.Duration() + stakedAssetID = tx.StakeOuts[0].AssetID() + ) switch { case tx.Validator.Wght < delegatorRules.minDelegatorStake: // Ensure delegator is staking at least the minimum amount @@ -706,14 +671,9 @@ func verifyAddPermissionlessDelegatorTx( return fmt.Errorf("%w: %w", ErrFlowCheckFailed, err) } - // Make sure the tx doesn't start too far in the future. This is done last - // to allow the verifier visitor to explicitly check for this error. - maxStartTime := currentTimestamp.Add(MaxFutureStartTime) - if startTime.After(maxStartTime) { - return ErrFutureStakeTime - } - - return nil + // verifyStakerStartsSoon is checked last to allow + // the verifier visitor to explicitly check for this error. + return verifyStakerStartsSoon(currentTimestamp, startTime) } // Returns an error if the given tx is invalid. 
@@ -762,3 +722,25 @@ func verifyTransferSubnetOwnershipTx( return nil } + +// Ensure the proposed validator starts after the current time +func verifyStakerStartTime(chainTime, stakerTime time.Time) error { + if !chainTime.Before(stakerTime) { + return fmt.Errorf( + "%w: %s >= %s", + ErrTimestampNotBeforeStartTime, + chainTime, + stakerTime, + ) + } + return nil +} + +func verifyStakerStartsSoon(chainTime, stakerStartTime time.Time) error { + // Make sure the tx doesn't start too far in the future. + maxStartTime := chainTime.Add(MaxFutureStartTime) + if stakerStartTime.After(maxStartTime) { + return ErrFutureStakeTime + } + return nil +} diff --git a/vms/platformvm/txs/executor/standard_tx_executor.go b/vms/platformvm/txs/executor/standard_tx_executor.go index 63069cb5d5d5..b9930075875a 100644 --- a/vms/platformvm/txs/executor/standard_tx_executor.go +++ b/vms/platformvm/txs/executor/standard_tx_executor.go @@ -290,13 +290,11 @@ func (e *StandardTxExecutor) AddValidatorTx(tx *txs.AddValidatorTx) error { return err } - txID := e.Tx.ID() - newStaker, err := state.NewPendingStaker(txID, tx) - if err != nil { + if err := e.putStaker(tx); err != nil { return err } - e.State.PutPendingValidator(newStaker) + txID := e.Tx.ID() avax.Consume(e.State, tx.Ins) avax.Produce(e.State, txID, tx.Outs) @@ -321,16 +319,13 @@ func (e *StandardTxExecutor) AddSubnetValidatorTx(tx *txs.AddSubnetValidatorTx) return err } - txID := e.Tx.ID() - newStaker, err := state.NewPendingStaker(txID, tx) - if err != nil { + if err := e.putStaker(tx); err != nil { return err } - e.State.PutPendingValidator(newStaker) + txID := e.Tx.ID() avax.Consume(e.State, tx.Ins) avax.Produce(e.State, txID, tx.Outs) - return nil } @@ -344,16 +339,13 @@ func (e *StandardTxExecutor) AddDelegatorTx(tx *txs.AddDelegatorTx) error { return err } - txID := e.Tx.ID() - newStaker, err := state.NewPendingStaker(txID, tx) - if err != nil { + if err := e.putStaker(tx); err != nil { return err } - 
e.State.PutPendingDelegator(newStaker) + txID := e.Tx.ID() avax.Consume(e.State, tx.Ins) avax.Produce(e.State, txID, tx.Outs) - return nil } @@ -444,13 +436,11 @@ func (e *StandardTxExecutor) AddPermissionlessValidatorTx(tx *txs.AddPermissionl return err } - txID := e.Tx.ID() - newStaker, err := state.NewPendingStaker(txID, tx) - if err != nil { + if err := e.putStaker(tx); err != nil { return err } - e.State.PutPendingValidator(newStaker) + txID := e.Tx.ID() avax.Consume(e.State, tx.Ins) avax.Produce(e.State, txID, tx.Outs) @@ -478,16 +468,13 @@ func (e *StandardTxExecutor) AddPermissionlessDelegatorTx(tx *txs.AddPermissionl return err } - txID := e.Tx.ID() - newStaker, err := state.NewPendingStaker(txID, tx) - if err != nil { + if err := e.putStaker(tx); err != nil { return err } - e.State.PutPendingDelegator(newStaker) + txID := e.Tx.ID() avax.Consume(e.State, tx.Ins) avax.Produce(e.State, txID, tx.Outs) - return nil } @@ -511,7 +498,6 @@ func (e *StandardTxExecutor) TransferSubnetOwnershipTx(tx *txs.TransferSubnetOwn txID := e.Tx.ID() avax.Consume(e.State, tx.Ins) avax.Produce(e.State, txID, tx.Outs) - return nil } @@ -539,9 +525,29 @@ func (e *StandardTxExecutor) BaseTx(tx *txs.BaseTx) error { return err } + txID := e.Tx.ID() // Consume the UTXOS avax.Consume(e.State, tx.Ins) // Produce the UTXOS - avax.Produce(e.State, e.Tx.ID(), tx.Outs) + avax.Produce(e.State, txID, tx.Outs) + return nil +} + +// Creates the staker as defined in [stakerTx] and adds it to [e.State]. 
+func (e *StandardTxExecutor) putStaker(stakerTx txs.ScheduledStaker) error { + txID := e.Tx.ID() + staker, err := state.NewPendingStaker(txID, stakerTx) + if err != nil { + return err + } + + switch priority := staker.Priority; { + case priority.IsPendingValidator(): + e.State.PutPendingValidator(staker) + case priority.IsPendingDelegator(): + e.State.PutPendingDelegator(staker) + default: + return fmt.Errorf("staker %s, unexpected priority %d", staker.TxID, priority) + } return nil } diff --git a/vms/platformvm/txs/executor/tx_mempool_verifier.go b/vms/platformvm/txs/executor/tx_mempool_verifier.go index f6eff499c2ec..378b742ae484 100644 --- a/vms/platformvm/txs/executor/tx_mempool_verifier.go +++ b/vms/platformvm/txs/executor/tx_mempool_verifier.go @@ -9,6 +9,7 @@ import ( "time" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/utils/timer/mockable" "github.com/ava-labs/avalanchego/vms/platformvm/state" "github.com/ava-labs/avalanchego/vms/platformvm/txs" ) @@ -108,7 +109,7 @@ func (v *MempoolTxVerifier) standardBaseState() (state.Diff, error) { return nil, err } - nextBlkTime, err := v.nextBlockTime(state) + nextBlkTime, _, err := NextBlockTime(state, v.Clk) if err != nil { return nil, err } @@ -123,20 +124,26 @@ func (v *MempoolTxVerifier) standardBaseState() (state.Diff, error) { return state, nil } -func (v *MempoolTxVerifier) nextBlockTime(state state.Diff) (time.Time, error) { +func NextBlockTime(state state.Chain, clk *mockable.Clock) (time.Time, bool, error) { var ( - parentTime = state.GetTimestamp() - nextBlkTime = v.Clk.Time() + timestamp = clk.Time() + parentTime = state.GetTimestamp() ) - if parentTime.After(nextBlkTime) { - nextBlkTime = parentTime + if parentTime.After(timestamp) { + timestamp = parentTime } + // [timestamp] = max(now, parentTime) + nextStakerChangeTime, err := GetNextStakerChangeTime(state) if err != nil { - return time.Time{}, fmt.Errorf("could not calculate next staker change time: %w", err) + return 
time.Time{}, false, fmt.Errorf("failed getting next staker change time: %w", err) } - if !nextBlkTime.Before(nextStakerChangeTime) { - nextBlkTime = nextStakerChangeTime + + // timeWasCapped means that [timestamp] was reduced to [nextStakerChangeTime] + timeWasCapped := !timestamp.Before(nextStakerChangeTime) + if timeWasCapped { + timestamp = nextStakerChangeTime } - return nextBlkTime, nil + // [timestamp] = min(max(now, parentTime), nextStakerChangeTime) + return timestamp, timeWasCapped, nil } diff --git a/vms/platformvm/txs/mempool/mempool.go b/vms/platformvm/txs/mempool/mempool.go index ce0d6a96f071..bd219910bd9f 100644 --- a/vms/platformvm/txs/mempool/mempool.go +++ b/vms/platformvm/txs/mempool/mempool.go @@ -280,7 +280,7 @@ func (m *mempool) DropExpiredStakerTxs(minStartTime time.Time) []ids.ID { txIter := m.unissuedTxs.NewIterator() for txIter.Next() { tx := txIter.Value() - stakerTx, ok := tx.Unsigned.(txs.Staker) + stakerTx, ok := tx.Unsigned.(txs.ScheduledStaker) if !ok { continue } diff --git a/vms/platformvm/txs/mock_scheduled_staker.go b/vms/platformvm/txs/mock_scheduled_staker.go new file mode 100644 index 000000000000..ce1a22b4eda0 --- /dev/null +++ b/vms/platformvm/txs/mock_scheduled_staker.go @@ -0,0 +1,151 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: github.com/ava-labs/avalanchego/vms/platformvm/txs (interfaces: ScheduledStaker) + +// Package txs is a generated GoMock package. +package txs + +import ( + reflect "reflect" + time "time" + + ids "github.com/ava-labs/avalanchego/ids" + bls "github.com/ava-labs/avalanchego/utils/crypto/bls" + gomock "go.uber.org/mock/gomock" +) + +// MockScheduledStaker is a mock of ScheduledStaker interface. +type MockScheduledStaker struct { + ctrl *gomock.Controller + recorder *MockScheduledStakerMockRecorder +} + +// MockScheduledStakerMockRecorder is the mock recorder for MockScheduledStaker. 
+type MockScheduledStakerMockRecorder struct { + mock *MockScheduledStaker +} + +// NewMockScheduledStaker creates a new mock instance. +func NewMockScheduledStaker(ctrl *gomock.Controller) *MockScheduledStaker { + mock := &MockScheduledStaker{ctrl: ctrl} + mock.recorder = &MockScheduledStakerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockScheduledStaker) EXPECT() *MockScheduledStakerMockRecorder { + return m.recorder +} + +// CurrentPriority mocks base method. +func (m *MockScheduledStaker) CurrentPriority() Priority { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CurrentPriority") + ret0, _ := ret[0].(Priority) + return ret0 +} + +// CurrentPriority indicates an expected call of CurrentPriority. +func (mr *MockScheduledStakerMockRecorder) CurrentPriority() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CurrentPriority", reflect.TypeOf((*MockScheduledStaker)(nil).CurrentPriority)) +} + +// EndTime mocks base method. +func (m *MockScheduledStaker) EndTime() time.Time { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "EndTime") + ret0, _ := ret[0].(time.Time) + return ret0 +} + +// EndTime indicates an expected call of EndTime. +func (mr *MockScheduledStakerMockRecorder) EndTime() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EndTime", reflect.TypeOf((*MockScheduledStaker)(nil).EndTime)) +} + +// NodeID mocks base method. +func (m *MockScheduledStaker) NodeID() ids.NodeID { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "NodeID") + ret0, _ := ret[0].(ids.NodeID) + return ret0 +} + +// NodeID indicates an expected call of NodeID. +func (mr *MockScheduledStakerMockRecorder) NodeID() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NodeID", reflect.TypeOf((*MockScheduledStaker)(nil).NodeID)) +} + +// PendingPriority mocks base method. 
+func (m *MockScheduledStaker) PendingPriority() Priority { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PendingPriority") + ret0, _ := ret[0].(Priority) + return ret0 +} + +// PendingPriority indicates an expected call of PendingPriority. +func (mr *MockScheduledStakerMockRecorder) PendingPriority() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PendingPriority", reflect.TypeOf((*MockScheduledStaker)(nil).PendingPriority)) +} + +// PublicKey mocks base method. +func (m *MockScheduledStaker) PublicKey() (*bls.PublicKey, bool, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PublicKey") + ret0, _ := ret[0].(*bls.PublicKey) + ret1, _ := ret[1].(bool) + ret2, _ := ret[2].(error) + return ret0, ret1, ret2 +} + +// PublicKey indicates an expected call of PublicKey. +func (mr *MockScheduledStakerMockRecorder) PublicKey() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PublicKey", reflect.TypeOf((*MockScheduledStaker)(nil).PublicKey)) +} + +// StartTime mocks base method. +func (m *MockScheduledStaker) StartTime() time.Time { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "StartTime") + ret0, _ := ret[0].(time.Time) + return ret0 +} + +// StartTime indicates an expected call of StartTime. +func (mr *MockScheduledStakerMockRecorder) StartTime() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StartTime", reflect.TypeOf((*MockScheduledStaker)(nil).StartTime)) +} + +// SubnetID mocks base method. +func (m *MockScheduledStaker) SubnetID() ids.ID { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SubnetID") + ret0, _ := ret[0].(ids.ID) + return ret0 +} + +// SubnetID indicates an expected call of SubnetID. 
+func (mr *MockScheduledStakerMockRecorder) SubnetID() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SubnetID", reflect.TypeOf((*MockScheduledStaker)(nil).SubnetID)) +} + +// Weight mocks base method. +func (m *MockScheduledStaker) Weight() uint64 { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Weight") + ret0, _ := ret[0].(uint64) + return ret0 +} + +// Weight indicates an expected call of Weight. +func (mr *MockScheduledStakerMockRecorder) Weight() *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Weight", reflect.TypeOf((*MockScheduledStaker)(nil).Weight)) +} diff --git a/vms/platformvm/txs/mock_staker.go b/vms/platformvm/txs/mock_staker.go index e01ca66cf9e3..f74c2534ca39 100644 --- a/vms/platformvm/txs/mock_staker.go +++ b/vms/platformvm/txs/mock_staker.go @@ -81,20 +81,6 @@ func (mr *MockStakerMockRecorder) NodeID() *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "NodeID", reflect.TypeOf((*MockStaker)(nil).NodeID)) } -// PendingPriority mocks base method. -func (m *MockStaker) PendingPriority() Priority { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "PendingPriority") - ret0, _ := ret[0].(Priority) - return ret0 -} - -// PendingPriority indicates an expected call of PendingPriority. -func (mr *MockStakerMockRecorder) PendingPriority() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PendingPriority", reflect.TypeOf((*MockStaker)(nil).PendingPriority)) -} - // PublicKey mocks base method. func (m *MockStaker) PublicKey() (*bls.PublicKey, bool, error) { m.ctrl.T.Helper() @@ -111,20 +97,6 @@ func (mr *MockStakerMockRecorder) PublicKey() *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PublicKey", reflect.TypeOf((*MockStaker)(nil).PublicKey)) } -// StartTime mocks base method. 
-func (m *MockStaker) StartTime() time.Time { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "StartTime") - ret0, _ := ret[0].(time.Time) - return ret0 -} - -// StartTime indicates an expected call of StartTime. -func (mr *MockStakerMockRecorder) StartTime() *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "StartTime", reflect.TypeOf((*MockStaker)(nil).StartTime)) -} - // SubnetID mocks base method. func (m *MockStaker) SubnetID() ids.ID { m.ctrl.T.Helper() diff --git a/vms/platformvm/txs/staker_tx.go b/vms/platformvm/txs/staker_tx.go index 049d3519375f..1cdcdcc337a8 100644 --- a/vms/platformvm/txs/staker_tx.go +++ b/vms/platformvm/txs/staker_tx.go @@ -48,9 +48,13 @@ type Staker interface { // PublicKey returns the BLS public key registered by this transaction. If // there was no key registered by this transaction, it will return false. PublicKey() (*bls.PublicKey, bool, error) - StartTime() time.Time EndTime() time.Time Weight() uint64 - PendingPriority() Priority CurrentPriority() Priority } + +type ScheduledStaker interface { + Staker + StartTime() time.Time + PendingPriority() Priority +} diff --git a/vms/platformvm/validator_set_property_test.go b/vms/platformvm/validator_set_property_test.go index 2ac0d4358d7d..189e220679ff 100644 --- a/vms/platformvm/validator_set_property_test.go +++ b/vms/platformvm/validator_set_property_test.go @@ -373,7 +373,6 @@ func addPrimaryValidatorWithoutBLSKey(vm *VM, data *validatorInputData) (*state. 
} func internalAddValidator(vm *VM, signedTx *txs.Tx) (*state.Staker, error) { - stakerTx := signedTx.Unsigned.(txs.StakerTx) if err := vm.Network.IssueTx(context.Background(), signedTx); err != nil { return nil, fmt.Errorf("could not add tx to mempool: %w", err) } @@ -393,6 +392,7 @@ func internalAddValidator(vm *VM, signedTx *txs.Tx) (*state.Staker, error) { } // move time ahead, promoting the validator to current + stakerTx := signedTx.Unsigned.(txs.ScheduledStaker) currentTime := stakerTx.StartTime() vm.clock.Set(currentTime) vm.state.SetTimestamp(currentTime) diff --git a/vms/platformvm/warp/signature_test.go b/vms/platformvm/warp/signature_test.go index b3eaa88bbfe8..c721eb62938d 100644 --- a/vms/platformvm/warp/signature_test.go +++ b/vms/platformvm/warp/signature_test.go @@ -39,8 +39,8 @@ type testValidator struct { vdr *Validator } -func (v *testValidator) Less(o *testValidator) bool { - return v.vdr.Less(o.vdr) +func (v *testValidator) Compare(o *testValidator) int { + return v.vdr.Compare(o.vdr) } func newTestValidator() *testValidator { diff --git a/vms/platformvm/warp/validator.go b/vms/platformvm/warp/validator.go index 42ff34e7cb5e..5e193ae1815e 100644 --- a/vms/platformvm/warp/validator.go +++ b/vms/platformvm/warp/validator.go @@ -39,8 +39,8 @@ type Validator struct { NodeIDs []ids.NodeID } -func (v *Validator) Less(o *Validator) bool { - return bytes.Compare(v.PublicKeyBytes, o.PublicKeyBytes) < 0 +func (v *Validator) Compare(o *Validator) int { + return bytes.Compare(v.PublicKeyBytes, o.PublicKeyBytes) } // GetCanonicalValidatorSet returns the validator set of [subnetID] at diff --git a/vms/proposervm/batched_vm_test.go b/vms/proposervm/batched_vm_test.go index 684d91ceb3df..c55c5ffc13b4 100644 --- a/vms/proposervm/batched_vm_test.go +++ b/vms/proposervm/batched_vm_test.go @@ -1020,12 +1020,14 @@ func initTestRemoteProposerVM( proVM := New( coreVM, - proBlkStartTime, - 0, - DefaultMinBlockDelay, - DefaultNumHistoricalBlocks, - pTestSigner, - 
pTestCert, + Config{ + ActivationTime: proBlkStartTime, + MinimumPChainHeight: 0, + MinBlkDelay: DefaultMinBlockDelay, + NumHistoricalBlocks: DefaultNumHistoricalBlocks, + StakingLeafSigner: pTestSigner, + StakingCertLeaf: pTestCert, + }, ) valState := &validators.TestState{ diff --git a/vms/proposervm/block.go b/vms/proposervm/block.go index 489d325f8f70..fdea5464edb8 100644 --- a/vms/proposervm/block.go +++ b/vms/proposervm/block.go @@ -124,12 +124,11 @@ func (p *postForkCommonComponents) Verify( // If the node is currently syncing - we don't assume that the P-chain has // been synced up to this point yet. if p.vm.consensusState == snow.NormalOp { - childID := child.ID() currentPChainHeight, err := p.vm.ctx.ValidatorState.GetCurrentHeight(ctx) if err != nil { p.vm.ctx.Log.Error("block verification failed", zap.String("reason", "failed to get current P-Chain height"), - zap.Stringer("blkID", childID), + zap.Stringer("blkID", child.ID()), zap.Error(err), ) return err @@ -142,18 +141,11 @@ func (p *postForkCommonComponents) Verify( ) } - childHeight := child.Height() - proposerID := child.Proposer() - minDelay, err := p.vm.Windower.Delay(ctx, childHeight, parentPChainHeight, proposerID, proposer.MaxVerifyWindows) + delay, err := p.verifyBlockDelay(ctx, parentTimestamp, parentPChainHeight, child) if err != nil { return err } - delay := childTimestamp.Sub(parentTimestamp) - if delay < minDelay { - return errProposerWindowNotStarted - } - // Verify the signature of the node shouldHaveProposer := delay < proposer.MaxVerifyDelay if err := child.SignedBlock.Verify(shouldHaveProposer, p.vm.ctx.ChainID); err != nil { @@ -161,9 +153,8 @@ func (p *postForkCommonComponents) Verify( } p.vm.ctx.Log.Debug("verified post-fork block", - zap.Stringer("blkID", childID), + zap.Stringer("blkID", child.ID()), zap.Time("parentTimestamp", parentTimestamp), - zap.Duration("minDelay", minDelay), zap.Time("blockTimestamp", childTimestamp), ) } @@ -202,37 +193,15 @@ func (p 
*postForkCommonComponents) buildChild( return nil, err } - delay := newTimestamp.Sub(parentTimestamp) - if delay < proposer.MaxBuildDelay { - parentHeight := p.innerBlk.Height() - proposerID := p.vm.ctx.NodeID - minDelay, err := p.vm.Windower.Delay(ctx, parentHeight+1, parentPChainHeight, proposerID, proposer.MaxBuildWindows) - if err != nil { - p.vm.ctx.Log.Error("unexpected build block failure", - zap.String("reason", "failed to calculate required timestamp delay"), - zap.Stringer("parentID", parentID), - zap.Error(err), - ) - return nil, err - } - - if delay < minDelay { - // It's not our turn to propose a block yet. This is likely caused - // by having previously notified the consensus engine to attempt to - // build a block on top of a block that is no longer the preferred - // block. - p.vm.ctx.Log.Debug("build block dropped", - zap.Time("parentTimestamp", parentTimestamp), - zap.Duration("minDelay", minDelay), - zap.Time("blockTimestamp", newTimestamp), - ) - - // In case the inner VM only issued one pendingTxs message, we - // should attempt to re-handle that once it is our turn to build the - // block. 
- p.vm.notifyInnerBlockReady() - return nil, errProposerWindowNotStarted - } + shouldBuildUnsignedBlock, err := p.shouldBuildUnsignedBlock( + ctx, + parentID, + parentTimestamp, + parentPChainHeight, + newTimestamp, + ) + if err != nil { + return nil, err } var innerBlock snowman.Block @@ -249,7 +218,7 @@ func (p *postForkCommonComponents) buildChild( // Build the child var statelessChild block.SignedBlock - if delay >= proposer.MaxVerifyDelay { + if shouldBuildUnsignedBlock { statelessChild, err = block.BuildUnsigned( parentID, newTimestamp, @@ -261,10 +230,10 @@ func (p *postForkCommonComponents) buildChild( parentID, newTimestamp, pChainHeight, - p.vm.stakingCertLeaf, + p.vm.StakingCertLeaf, innerBlock.Bytes(), p.vm.ctx.ChainID, - p.vm.stakingLeafSigner, + p.vm.StakingLeafSigner, ) } if err != nil { @@ -334,3 +303,75 @@ func verifyIsNotOracleBlock(ctx context.Context, b snowman.Block) error { return err } } + +func (p *postForkCommonComponents) verifyBlockDelay( + ctx context.Context, + parentTimestamp time.Time, + parentPChainHeight uint64, + blk *postForkBlock, +) (time.Duration, error) { + var ( + blkTimestamp = blk.Timestamp() + childHeight = blk.Height() + proposerID = blk.Proposer() + ) + minDelay, err := p.vm.Windower.Delay(ctx, childHeight, parentPChainHeight, proposerID, proposer.MaxVerifyWindows) + if err != nil { + return 0, err + } + + delay := blkTimestamp.Sub(parentTimestamp) + if delay < minDelay { + return 0, errProposerWindowNotStarted + } + + return delay, nil +} + +func (p *postForkCommonComponents) shouldBuildUnsignedBlock( + ctx context.Context, + parentID ids.ID, + parentTimestamp time.Time, + parentPChainHeight uint64, + newTimestamp time.Time, +) (bool, error) { + delay := newTimestamp.Sub(parentTimestamp) + if delay >= proposer.MaxBuildDelay { + // time for any node to build an unsigned block + return true, nil + } + + parentHeight := p.innerBlk.Height() + proposerID := p.vm.ctx.NodeID + minDelay, err := p.vm.Windower.Delay(ctx, 
parentHeight+1, parentPChainHeight, proposerID, proposer.MaxBuildWindows) + if err != nil { + p.vm.ctx.Log.Error("unexpected build block failure", + zap.String("reason", "failed to calculate required timestamp delay"), + zap.Stringer("parentID", parentID), + zap.Error(err), + ) + return false, err + } + + if delay >= minDelay { + // it's time for this node to propose a block. It'll be signed or unsigned + // depending on the delay + return delay >= proposer.MaxVerifyDelay, nil + } + + // It's not our turn to propose a block yet. This is likely caused + // by having previously notified the consensus engine to attempt to + // build a block on top of a block that is no longer the preferred + // block. + p.vm.ctx.Log.Debug("build block dropped", + zap.Time("parentTimestamp", parentTimestamp), + zap.Duration("minDelay", minDelay), + zap.Time("blockTimestamp", newTimestamp), + ) + + // In case the inner VM only issued one pendingTxs message, we + // should attempt to re-handle that once it is our turn to build the + // block. 
+ p.vm.notifyInnerBlockReady() + return false, errProposerWindowNotStarted +} diff --git a/vms/proposervm/block_test.go b/vms/proposervm/block_test.go index 04ac66ecf34e..5cca837a7a84 100644 --- a/vms/proposervm/block_test.go +++ b/vms/proposervm/block_test.go @@ -60,15 +60,17 @@ func TestPostForkCommonComponents_buildChild(t *testing.T) { pk, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) require.NoError(err) vm := &VM{ + Config: Config{ + StakingCertLeaf: &staking.Certificate{}, + StakingLeafSigner: pk, + }, ChainVM: innerVM, blockBuilderVM: innerBlockBuilderVM, ctx: &snow.Context{ ValidatorState: vdrState, Log: logging.NoLog{}, }, - Windower: windower, - stakingCertLeaf: &staking.Certificate{}, - stakingLeafSigner: pk, + Windower: windower, } blk := &postForkCommonComponents{ diff --git a/vms/proposervm/config.go b/vms/proposervm/config.go new file mode 100644 index 000000000000..96645c9489a8 --- /dev/null +++ b/vms/proposervm/config.go @@ -0,0 +1,32 @@ +// Copyright (C) 2019-2023, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package proposervm + +import ( + "crypto" + "time" + + "github.com/ava-labs/avalanchego/staking" +) + +type Config struct { + // Time at which proposerVM activates its congestion control mechanism + ActivationTime time.Time + + // Minimal P-chain height referenced upon block building + MinimumPChainHeight uint64 + + // Configurable minimal delay among blocks issued consecutively + MinBlkDelay time.Duration + + // Maximal number of block indexed. + // Zero signals all blocks are indexed. 
+ NumHistoricalBlocks uint64 + + // Block signer + StakingLeafSigner crypto.Signer + + // Block certificate + StakingCertLeaf *staking.Certificate +} diff --git a/vms/proposervm/height_indexed_vm.go b/vms/proposervm/height_indexed_vm.go index 99b911c5be64..6c8d6967ee14 100644 --- a/vms/proposervm/height_indexed_vm.go +++ b/vms/proposervm/height_indexed_vm.go @@ -136,7 +136,7 @@ func (vm *VM) storeHeightEntry(height uint64, blkID ids.ID) error { zap.Uint64("height", height), ) - if vm.numHistoricalBlocks == 0 { + if vm.NumHistoricalBlocks == 0 { return nil } @@ -145,13 +145,13 @@ func (vm *VM) storeHeightEntry(height uint64, blkID ids.ID) error { // is why <= is used rather than <. This prevents the user from only storing // the last accepted block, which can never be safe due to the non-atomic // commits between the proposervm database and the innerVM's database. - if blocksSinceFork <= vm.numHistoricalBlocks { + if blocksSinceFork <= vm.NumHistoricalBlocks { return nil } // Note: heightToDelete is >= forkHeight, so it is guaranteed not to // underflow. - heightToDelete := height - vm.numHistoricalBlocks - 1 + heightToDelete := height - vm.NumHistoricalBlocks - 1 blockToDelete, err := vm.State.GetBlockIDAtHeight(heightToDelete) if err == database.ErrNotFound { // Block may have already been deleted. This can happen due to a @@ -180,7 +180,7 @@ func (vm *VM) storeHeightEntry(height uint64, blkID ids.ID) error { // TODO: Support async deletion of old blocks. func (vm *VM) pruneOldBlocks() error { - if vm.numHistoricalBlocks == 0 { + if vm.NumHistoricalBlocks == 0 { return nil } @@ -194,7 +194,7 @@ func (vm *VM) pruneOldBlocks() error { // // Note: vm.lastAcceptedHeight is guaranteed to be >= height, so the // subtraction can never underflow. 
- for vm.lastAcceptedHeight-height > vm.numHistoricalBlocks { + for vm.lastAcceptedHeight-height > vm.NumHistoricalBlocks { blockToDelete, err := vm.State.GetBlockIDAtHeight(height) if err != nil { return err diff --git a/vms/proposervm/post_fork_block_test.go b/vms/proposervm/post_fork_block_test.go index 659bdd1e5fd3..25dfd4f63e8f 100644 --- a/vms/proposervm/post_fork_block_test.go +++ b/vms/proposervm/post_fork_block_test.go @@ -70,10 +70,10 @@ func TestOracle_PostForkBlock_ImplementsInterface(t *testing.T) { ids.Empty, // refer unknown parent time.Time{}, 0, // pChainHeight, - proVM.stakingCertLeaf, + proVM.StakingCertLeaf, innerOracleBlk.Bytes(), proVM.ctx.ChainID, - proVM.stakingLeafSigner, + proVM.StakingLeafSigner, ) require.NoError(err) proBlk = postForkBlock{ @@ -155,10 +155,10 @@ func TestBlockVerify_PostForkBlock_ParentChecks(t *testing.T) { ids.Empty, // refer unknown parent childCoreBlk.Timestamp(), pChainHeight, - proVM.stakingCertLeaf, + proVM.StakingCertLeaf, childCoreBlk.Bytes(), proVM.ctx.ChainID, - proVM.stakingLeafSigner, + proVM.StakingLeafSigner, ) require.NoError(err) childProBlk := postForkBlock{ @@ -260,10 +260,10 @@ func TestBlockVerify_PostForkBlock_TimestampChecks(t *testing.T) { prntProBlk.ID(), childCoreBlk.Timestamp(), pChainHeight, - proVM.stakingCertLeaf, + proVM.StakingCertLeaf, childCoreBlk.Bytes(), proVM.ctx.ChainID, - proVM.stakingLeafSigner, + proVM.StakingLeafSigner, ) require.NoError(err) childProBlk := postForkBlock{ @@ -287,10 +287,10 @@ func TestBlockVerify_PostForkBlock_TimestampChecks(t *testing.T) { prntProBlk.ID(), beforeWinStart, pChainHeight, - proVM.stakingCertLeaf, + proVM.StakingCertLeaf, childCoreBlk.Bytes(), proVM.ctx.ChainID, - proVM.stakingLeafSigner, + proVM.StakingLeafSigner, ) require.NoError(err) childProBlk.SignedBlock = childSlb @@ -305,10 +305,10 @@ func TestBlockVerify_PostForkBlock_TimestampChecks(t *testing.T) { prntProBlk.ID(), atWindowStart, pChainHeight, - proVM.stakingCertLeaf, + 
proVM.StakingCertLeaf, childCoreBlk.Bytes(), proVM.ctx.ChainID, - proVM.stakingLeafSigner, + proVM.StakingLeafSigner, ) require.NoError(err) childProBlk.SignedBlock = childSlb @@ -322,10 +322,10 @@ func TestBlockVerify_PostForkBlock_TimestampChecks(t *testing.T) { prntProBlk.ID(), afterWindowStart, pChainHeight, - proVM.stakingCertLeaf, + proVM.StakingCertLeaf, childCoreBlk.Bytes(), proVM.ctx.ChainID, - proVM.stakingLeafSigner, + proVM.StakingLeafSigner, ) require.NoError(err) childProBlk.SignedBlock = childSlb @@ -350,10 +350,10 @@ func TestBlockVerify_PostForkBlock_TimestampChecks(t *testing.T) { prntProBlk.ID(), afterSubWinEnd, pChainHeight, - proVM.stakingCertLeaf, + proVM.StakingCertLeaf, childCoreBlk.Bytes(), proVM.ctx.ChainID, - proVM.stakingLeafSigner, + proVM.StakingLeafSigner, ) require.NoError(err) childProBlk.SignedBlock = childSlb @@ -431,10 +431,10 @@ func TestBlockVerify_PostForkBlock_PChainHeightChecks(t *testing.T) { prntProBlk.ID(), childCoreBlk.Timestamp(), prntBlkPChainHeight-1, - proVM.stakingCertLeaf, + proVM.StakingCertLeaf, childCoreBlk.Bytes(), proVM.ctx.ChainID, - proVM.stakingLeafSigner, + proVM.StakingLeafSigner, ) require.NoError(err) childProBlk := postForkBlock{ @@ -611,10 +611,10 @@ func TestBlockVerify_PostForkBlockBuiltOnOption_PChainHeightChecks(t *testing.T) parentBlk.ID(), childCoreBlk.Timestamp(), prntBlkPChainHeight-1, - proVM.stakingCertLeaf, + proVM.StakingCertLeaf, childCoreBlk.Bytes(), proVM.ctx.ChainID, - proVM.stakingLeafSigner, + proVM.StakingLeafSigner, ) require.NoError(err) childProBlk := postForkBlock{ @@ -986,10 +986,10 @@ func TestBlockVerify_PostForkBlock_ShouldBePostForkOption(t *testing.T) { postForkOracleBlk.ID(), postForkOracleBlk.Timestamp().Add(proposer.WindowDuration), postForkOracleBlk.PChainHeight(), - proVM.stakingCertLeaf, + proVM.StakingCertLeaf, oracleCoreBlk.opts[0].Bytes(), proVM.ctx.ChainID, - proVM.stakingLeafSigner, + proVM.StakingLeafSigner, ) require.NoError(err) diff --git 
a/vms/proposervm/post_fork_option_test.go b/vms/proposervm/post_fork_option_test.go index 8e93e2aad05f..e142b391a348 100644 --- a/vms/proposervm/post_fork_option_test.go +++ b/vms/proposervm/post_fork_option_test.go @@ -660,12 +660,14 @@ func TestOptionTimestampValidity(t *testing.T) { ctx := proVM.ctx proVM = New( coreVM, - time.Time{}, - 0, - DefaultMinBlockDelay, - DefaultNumHistoricalBlocks, - pTestSigner, - pTestCert, + Config{ + ActivationTime: time.Time{}, + MinimumPChainHeight: 0, + MinBlkDelay: DefaultMinBlockDelay, + NumHistoricalBlocks: DefaultNumHistoricalBlocks, + StakingLeafSigner: pTestSigner, + StakingCertLeaf: pTestCert, + }, ) coreVM.InitializeF = func( diff --git a/vms/proposervm/pre_fork_block.go b/vms/proposervm/pre_fork_block.go index ed665e473910..8e952d9d7758 100644 --- a/vms/proposervm/pre_fork_block.go +++ b/vms/proposervm/pre_fork_block.go @@ -97,7 +97,7 @@ func (b *preForkBlock) getInnerBlk() snowman.Block { func (b *preForkBlock) verifyPreForkChild(ctx context.Context, child *preForkBlock) error { parentTimestamp := b.Timestamp() - if !parentTimestamp.Before(b.vm.activationTime) { + if !parentTimestamp.Before(b.vm.ActivationTime) { if err := verifyIsOracleBlock(ctx, b.Block); err != nil { return err } @@ -135,7 +135,7 @@ func (b *preForkBlock) verifyPostForkChild(ctx context.Context, child *postForkB currentPChainHeight, ) } - if childPChainHeight < b.vm.minimumPChainHeight { + if childPChainHeight < b.vm.MinimumPChainHeight { return errPChainHeightTooLow } @@ -150,7 +150,7 @@ func (b *preForkBlock) verifyPostForkChild(ctx context.Context, child *postForkB // if the *preForkBlock is the last *preForkBlock before activation takes effect // (its timestamp is at or after the activation time) parentTimestamp := b.Timestamp() - if parentTimestamp.Before(b.vm.activationTime) { + if parentTimestamp.Before(b.vm.ActivationTime) { return errProposersNotActivated } @@ -181,7 +181,7 @@ func (*preForkBlock) verifyPostForkOption(context.Context, 
*postForkOption) erro func (b *preForkBlock) buildChild(ctx context.Context) (Block, error) { parentTimestamp := b.Timestamp() - if parentTimestamp.Before(b.vm.activationTime) { + if parentTimestamp.Before(b.vm.ActivationTime) { // The chain hasn't forked yet innerBlock, err := b.vm.ChainVM.BuildBlock(ctx) if err != nil { @@ -210,7 +210,7 @@ func (b *preForkBlock) buildChild(ctx context.Context) (Block, error) { // The child's P-Chain height is proposed as the optimal P-Chain height that // is at least the minimum height - pChainHeight, err := b.vm.optimalPChainHeight(ctx, b.vm.minimumPChainHeight) + pChainHeight, err := b.vm.optimalPChainHeight(ctx, b.vm.MinimumPChainHeight) if err != nil { b.vm.ctx.Log.Error("unexpected build block failure", zap.String("reason", "failed to calculate optimal P-chain height"), diff --git a/vms/proposervm/pre_fork_block_test.go b/vms/proposervm/pre_fork_block_test.go index 1366482a0d9b..4308f9ba05f1 100644 --- a/vms/proposervm/pre_fork_block_test.go +++ b/vms/proposervm/pre_fork_block_test.go @@ -342,10 +342,10 @@ func TestBlockVerify_BlocksBuiltOnPreForkGenesis(t *testing.T) { coreGenBlk.ID(), coreBlk.Timestamp(), 0, // pChainHeight - proVM.stakingCertLeaf, + proVM.StakingCertLeaf, coreBlk.Bytes(), proVM.ctx.ChainID, - proVM.stakingLeafSigner, + proVM.StakingLeafSigner, ) require.NoError(err) postForkChild := &postForkBlock{ @@ -740,10 +740,10 @@ func TestBlockVerify_ForkBlockIsOracleBlockButChildrenAreSigned(t *testing.T) { firstBlock.ID(), // refer unknown parent firstBlock.Timestamp(), 0, // pChainHeight, - proVM.stakingCertLeaf, + proVM.StakingCertLeaf, coreBlk.opts[0].Bytes(), proVM.ctx.ChainID, - proVM.stakingLeafSigner, + proVM.StakingLeafSigner, ) require.NoError(err) @@ -798,7 +798,7 @@ func TestPreForkBlock_BuildBlockWithContext(t *testing.T) { // Should call BuildBlock since proposervm is not activated innerBlk.EXPECT().Timestamp().Return(time.Time{}) - vm.activationTime = mockable.MaxTime + vm.ActivationTime = 
mockable.MaxTime gotChild, err = blk.buildChild(context.Background()) require.NoError(err) diff --git a/vms/proposervm/proposer/validators.go b/vms/proposervm/proposer/validators.go index ba60a088003a..89ed964d5983 100644 --- a/vms/proposervm/proposer/validators.go +++ b/vms/proposervm/proposer/validators.go @@ -15,6 +15,6 @@ type validatorData struct { weight uint64 } -func (d validatorData) Less(other validatorData) bool { - return d.id.Less(other.id) +func (d validatorData) Compare(other validatorData) int { + return d.id.Compare(other.id) } diff --git a/vms/proposervm/proposer/validators_test.go b/vms/proposervm/proposer/validators_test.go index 2f7913d01e2e..8be1f4c23d99 100644 --- a/vms/proposervm/proposer/validators_test.go +++ b/vms/proposervm/proposer/validators_test.go @@ -4,6 +4,7 @@ package proposer import ( + "fmt" "testing" "github.com/stretchr/testify/require" @@ -11,16 +12,31 @@ import ( "github.com/ava-labs/avalanchego/ids" ) -func TestValidatorDataLess(t *testing.T) { - require := require.New(t) - - var v1, v2 validatorData - require.False(v1.Less(v2)) - require.False(v2.Less(v1)) +func TestValidatorDataCompare(t *testing.T) { + tests := []struct { + a validatorData + b validatorData + expected int + }{ + { + a: validatorData{}, + b: validatorData{}, + expected: 0, + }, + { + a: validatorData{ + id: ids.BuildTestNodeID([]byte{1}), + }, + b: validatorData{}, + expected: 1, + }, + } + for _, test := range tests { + t.Run(fmt.Sprintf("%s_%s_%d", test.a.id, test.b.id, test.expected), func(t *testing.T) { + require := require.New(t) - v1 = validatorData{ - id: ids.BuildTestNodeID([]byte{1}), + require.Equal(test.expected, test.a.Compare(test.b)) + require.Equal(-test.expected, test.b.Compare(test.a)) + }) } - require.False(v1.Less(v2)) - require.True(v2.Less(v1)) } diff --git a/vms/proposervm/state_syncable_vm_test.go b/vms/proposervm/state_syncable_vm_test.go index 826f8b877987..b2295d50017b 100644 --- a/vms/proposervm/state_syncable_vm_test.go +++ 
b/vms/proposervm/state_syncable_vm_test.go @@ -70,12 +70,14 @@ func helperBuildStateSyncTestObjects(t *testing.T) (*fullVM, *VM) { // create the VM vm := New( innerVM, - time.Time{}, - 0, - DefaultMinBlockDelay, - DefaultNumHistoricalBlocks, - pTestSigner, - pTestCert, + Config{ + ActivationTime: time.Time{}, + MinimumPChainHeight: 0, + MinBlkDelay: DefaultMinBlockDelay, + NumHistoricalBlocks: DefaultNumHistoricalBlocks, + StakingLeafSigner: pTestSigner, + StakingCertLeaf: pTestCert, + }, ) ctx := snow.DefaultContextTest() @@ -186,10 +188,10 @@ func TestStateSyncGetOngoingSyncStateSummary(t *testing.T) { vm.preferred, innerBlk.Timestamp(), 100, // pChainHeight, - vm.stakingCertLeaf, + vm.StakingCertLeaf, innerBlk.Bytes(), vm.ctx.ChainID, - vm.stakingLeafSigner, + vm.StakingLeafSigner, ) require.NoError(err) proBlk := &postForkBlock{ @@ -271,10 +273,10 @@ func TestStateSyncGetLastStateSummary(t *testing.T) { vm.preferred, innerBlk.Timestamp(), 100, // pChainHeight, - vm.stakingCertLeaf, + vm.StakingCertLeaf, innerBlk.Bytes(), vm.ctx.ChainID, - vm.stakingLeafSigner, + vm.StakingLeafSigner, ) require.NoError(err) proBlk := &postForkBlock{ @@ -359,10 +361,10 @@ func TestStateSyncGetStateSummary(t *testing.T) { vm.preferred, innerBlk.Timestamp(), 100, // pChainHeight, - vm.stakingCertLeaf, + vm.StakingCertLeaf, innerBlk.Bytes(), vm.ctx.ChainID, - vm.stakingLeafSigner, + vm.StakingLeafSigner, ) require.NoError(err) proBlk := &postForkBlock{ @@ -432,10 +434,10 @@ func TestParseStateSummary(t *testing.T) { vm.preferred, innerBlk.Timestamp(), 100, // pChainHeight, - vm.stakingCertLeaf, + vm.StakingCertLeaf, innerBlk.Bytes(), vm.ctx.ChainID, - vm.stakingLeafSigner, + vm.StakingLeafSigner, ) require.NoError(err) proBlk := &postForkBlock{ @@ -487,10 +489,10 @@ func TestStateSummaryAccept(t *testing.T) { vm.preferred, innerBlk.Timestamp(), 100, // pChainHeight, - vm.stakingCertLeaf, + vm.StakingCertLeaf, innerBlk.Bytes(), vm.ctx.ChainID, - vm.stakingLeafSigner, + 
vm.StakingLeafSigner, ) require.NoError(err) @@ -567,10 +569,10 @@ func TestStateSummaryAcceptOlderBlock(t *testing.T) { vm.preferred, innerBlk.Timestamp(), 100, // pChainHeight, - vm.stakingCertLeaf, + vm.StakingCertLeaf, innerBlk.Bytes(), vm.ctx.ChainID, - vm.stakingLeafSigner, + vm.StakingLeafSigner, ) require.NoError(err) proBlk := &postForkBlock{ diff --git a/vms/proposervm/vm.go b/vms/proposervm/vm.go index a7bb897932d3..ae9afe6562cd 100644 --- a/vms/proposervm/vm.go +++ b/vms/proposervm/vm.go @@ -5,7 +5,6 @@ package proposervm import ( "context" - "crypto" "errors" "fmt" "time" @@ -26,7 +25,6 @@ import ( "github.com/ava-labs/avalanchego/snow/consensus/snowman" "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/snow/engine/snowman/block" - "github.com/ava-labs/avalanchego/staking" "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/math" @@ -86,19 +84,11 @@ func cachedBlockSize(_ ids.ID, blk snowman.Block) int { type VM struct { block.ChainVM + Config blockBuilderVM block.BuildBlockWithContextChainVM batchedVM block.BatchedChainVM ssVM block.StateSyncableVM - activationTime time.Time - minimumPChainHeight uint64 - minBlkDelay time.Duration - numHistoricalBlocks uint64 - // block signer - stakingLeafSigner crypto.Signer - // block certificate - stakingCertLeaf *staking.Certificate - state.State hIndexer indexer.HeightIndexer @@ -138,28 +128,17 @@ type VM struct { // timestamps are only specific to the second. 
func New( vm block.ChainVM, - activationTime time.Time, - minimumPChainHeight uint64, - minBlkDelay time.Duration, - numHistoricalBlocks uint64, - stakingLeafSigner crypto.Signer, - stakingCertLeaf *staking.Certificate, + config Config, ) *VM { blockBuilderVM, _ := vm.(block.BuildBlockWithContextChainVM) batchedVM, _ := vm.(block.BatchedChainVM) ssVM, _ := vm.(block.StateSyncableVM) return &VM{ ChainVM: vm, + Config: config, blockBuilderVM: blockBuilderVM, batchedVM: batchedVM, ssVM: ssVM, - - activationTime: activationTime, - minimumPChainHeight: minimumPChainHeight, - minBlkDelay: minBlkDelay, - numHistoricalBlocks: numHistoricalBlocks, - stakingLeafSigner: stakingLeafSigner, - stakingCertLeaf: stakingCertLeaf, } } @@ -373,9 +352,7 @@ func (vm *VM) SetPreference(ctx context.Context, preferred ids.ID) error { // validators can specify. This delay may be an issue for high performance, // custom VMs. Until the P-chain is modified to target a specific block // time, ProposerMinBlockDelay can be configured in the subnet config. 
- if minDelay < vm.minBlkDelay { - minDelay = vm.minBlkDelay - } + minDelay = math.Max(minDelay, vm.MinBlkDelay) preferredTime := blk.Timestamp() nextStartTime := preferredTime.Add(minDelay) @@ -418,7 +395,7 @@ func (vm *VM) repair(ctx context.Context) error { return err } - if vm.numHistoricalBlocks != 0 { + if vm.NumHistoricalBlocks != 0 { vm.ctx.Log.Fatal("block height index must be valid when pruning historical blocks") return errHeightIndexInvalidWhilePruning } diff --git a/vms/proposervm/vm_regression_test.go b/vms/proposervm/vm_regression_test.go index 0a27c43e112a..fba3f5974332 100644 --- a/vms/proposervm/vm_regression_test.go +++ b/vms/proposervm/vm_regression_test.go @@ -46,13 +46,16 @@ func TestProposerVMInitializeShouldFailIfInnerVMCantVerifyItsHeightIndex(t *test proVM := New( innerVM, - time.Time{}, - 0, - DefaultMinBlockDelay, - DefaultNumHistoricalBlocks, - pTestSigner, - pTestCert, + Config{ + ActivationTime: time.Time{}, + MinimumPChainHeight: 0, + MinBlkDelay: DefaultMinBlockDelay, + NumHistoricalBlocks: DefaultNumHistoricalBlocks, + StakingLeafSigner: pTestSigner, + StakingCertLeaf: pTestCert, + }, ) + defer func() { // avoids leaking goroutines require.NoError(proVM.Shutdown(context.Background())) diff --git a/vms/proposervm/vm_test.go b/vms/proposervm/vm_test.go index fb8672d8b2f4..d3e2282f25c0 100644 --- a/vms/proposervm/vm_test.go +++ b/vms/proposervm/vm_test.go @@ -134,12 +134,14 @@ func initTestProposerVM( proVM := New( coreVM, - proBlkStartTime, - minPChainHeight, - DefaultMinBlockDelay, - DefaultNumHistoricalBlocks, - pTestSigner, - pTestCert, + Config{ + ActivationTime: proBlkStartTime, + MinimumPChainHeight: minPChainHeight, + MinBlkDelay: DefaultMinBlockDelay, + NumHistoricalBlocks: DefaultNumHistoricalBlocks, + StakingLeafSigner: pTestSigner, + StakingCertLeaf: pTestCert, + }, ) valState := &validators.TestState{ @@ -526,10 +528,10 @@ func TestCoreBlockFailureCauseProposerBlockParseFailure(t *testing.T) { proVM.preferred, 
innerBlk.Timestamp(), 100, // pChainHeight, - proVM.stakingCertLeaf, + proVM.StakingCertLeaf, innerBlk.Bytes(), proVM.ctx.ChainID, - proVM.stakingLeafSigner, + proVM.StakingLeafSigner, ) require.NoError(err) proBlk := postForkBlock{ @@ -570,10 +572,10 @@ func TestTwoProBlocksWrappingSameCoreBlockCanBeParsed(t *testing.T) { proVM.preferred, innerBlk.Timestamp(), 100, // pChainHeight, - proVM.stakingCertLeaf, + proVM.StakingCertLeaf, innerBlk.Bytes(), proVM.ctx.ChainID, - proVM.stakingLeafSigner, + proVM.StakingLeafSigner, ) require.NoError(err) proBlk1 := postForkBlock{ @@ -589,10 +591,10 @@ func TestTwoProBlocksWrappingSameCoreBlockCanBeParsed(t *testing.T) { proVM.preferred, innerBlk.Timestamp(), 200, // pChainHeight, - proVM.stakingCertLeaf, + proVM.StakingCertLeaf, innerBlk.Bytes(), proVM.ctx.ChainID, - proVM.stakingLeafSigner, + proVM.StakingLeafSigner, ) require.NoError(err) proBlk2 := postForkBlock{ @@ -880,12 +882,14 @@ func TestExpiredBuildBlock(t *testing.T) { proVM := New( coreVM, - time.Time{}, - 0, - DefaultMinBlockDelay, - DefaultNumHistoricalBlocks, - pTestSigner, - pTestCert, + Config{ + ActivationTime: time.Time{}, + MinimumPChainHeight: 0, + MinBlkDelay: DefaultMinBlockDelay, + NumHistoricalBlocks: DefaultNumHistoricalBlocks, + StakingLeafSigner: pTestSigner, + StakingCertLeaf: pTestCert, + }, ) valState := &validators.TestState{ @@ -1224,12 +1228,14 @@ func TestInnerVMRollback(t *testing.T) { proVM := New( coreVM, - time.Time{}, - 0, - DefaultMinBlockDelay, - DefaultNumHistoricalBlocks, - pTestSigner, - pTestCert, + Config{ + ActivationTime: time.Time{}, + MinimumPChainHeight: 0, + MinBlkDelay: DefaultMinBlockDelay, + NumHistoricalBlocks: DefaultNumHistoricalBlocks, + StakingLeafSigner: pTestSigner, + StakingCertLeaf: pTestCert, + }, ) require.NoError(proVM.Initialize( @@ -1311,12 +1317,14 @@ func TestInnerVMRollback(t *testing.T) { proVM = New( coreVM, - time.Time{}, - 0, - DefaultMinBlockDelay, - DefaultNumHistoricalBlocks, - pTestSigner, - 
pTestCert, + Config{ + ActivationTime: time.Time{}, + MinimumPChainHeight: 0, + MinBlkDelay: DefaultMinBlockDelay, + NumHistoricalBlocks: DefaultNumHistoricalBlocks, + StakingLeafSigner: pTestSigner, + StakingCertLeaf: pTestCert, + }, ) require.NoError(proVM.Initialize( @@ -1803,12 +1811,14 @@ func TestRejectedHeightNotIndexed(t *testing.T) { proVM := New( coreVM, - time.Time{}, - 0, - DefaultMinBlockDelay, - DefaultNumHistoricalBlocks, - pTestSigner, - pTestCert, + Config{ + ActivationTime: time.Time{}, + MinimumPChainHeight: 0, + MinBlkDelay: DefaultMinBlockDelay, + NumHistoricalBlocks: DefaultNumHistoricalBlocks, + StakingLeafSigner: pTestSigner, + StakingCertLeaf: pTestCert, + }, ) valState := &validators.TestState{ @@ -2010,12 +2020,14 @@ func TestRejectedOptionHeightNotIndexed(t *testing.T) { proVM := New( coreVM, - time.Time{}, - 0, - DefaultMinBlockDelay, - DefaultNumHistoricalBlocks, - pTestSigner, - pTestCert, + Config{ + ActivationTime: time.Time{}, + MinimumPChainHeight: 0, + MinBlkDelay: DefaultMinBlockDelay, + NumHistoricalBlocks: DefaultNumHistoricalBlocks, + StakingLeafSigner: pTestSigner, + StakingCertLeaf: pTestCert, + }, ) valState := &validators.TestState{ @@ -2173,12 +2185,14 @@ func TestVMInnerBlkCache(t *testing.T) { innerVM := mocks.NewMockChainVM(ctrl) vm := New( innerVM, - time.Time{}, // fork is active - 0, // minimum P-Chain height - DefaultMinBlockDelay, - DefaultNumHistoricalBlocks, - pTestSigner, - pTestCert, + Config{ + ActivationTime: time.Time{}, + MinimumPChainHeight: 0, + MinBlkDelay: DefaultMinBlockDelay, + NumHistoricalBlocks: DefaultNumHistoricalBlocks, + StakingLeafSigner: pTestSigner, + StakingCertLeaf: pTestCert, + }, ) innerVM.EXPECT().Initialize( @@ -2229,10 +2243,10 @@ func TestVMInnerBlkCache(t *testing.T) { ids.GenerateTestID(), // parent time.Time{}, // timestamp 1, // pChainHeight, - vm.stakingCertLeaf, // cert + vm.StakingCertLeaf, // cert blkNearTipInnerBytes, // inner blk bytes vm.ctx.ChainID, // chain ID - 
vm.stakingLeafSigner, // key + vm.StakingLeafSigner, // key ) require.NoError(err) @@ -2402,12 +2416,14 @@ func TestVM_VerifyBlockWithContext(t *testing.T) { innerVM := mocks.NewMockChainVM(ctrl) vm := New( innerVM, - time.Time{}, // fork is active - 0, // minimum P-Chain height - DefaultMinBlockDelay, - DefaultNumHistoricalBlocks, - pTestSigner, - pTestCert, + Config{ + ActivationTime: time.Time{}, + MinimumPChainHeight: 0, + MinBlkDelay: DefaultMinBlockDelay, + NumHistoricalBlocks: DefaultNumHistoricalBlocks, + StakingLeafSigner: pTestSigner, + StakingCertLeaf: pTestCert, + }, ) // make sure that DBs are compressed correctly @@ -2613,12 +2629,14 @@ func TestHistoricalBlockDeletion(t *testing.T) { proVM := New( coreVM, - time.Time{}, - 0, - DefaultMinBlockDelay, - DefaultNumHistoricalBlocks, - pTestSigner, - pTestCert, + Config{ + ActivationTime: time.Time{}, + MinimumPChainHeight: 0, + MinBlkDelay: DefaultMinBlockDelay, + NumHistoricalBlocks: DefaultNumHistoricalBlocks, + StakingLeafSigner: pTestSigner, + StakingCertLeaf: pTestCert, + }, ) require.NoError(proVM.Initialize( @@ -2711,12 +2729,14 @@ func TestHistoricalBlockDeletion(t *testing.T) { numHistoricalBlocks := uint64(2) proVM = New( coreVM, - time.Time{}, - 0, - DefaultMinBlockDelay, - numHistoricalBlocks, - pTestSigner, - pTestCert, + Config{ + ActivationTime: time.Time{}, + MinimumPChainHeight: 0, + MinBlkDelay: DefaultMinBlockDelay, + NumHistoricalBlocks: numHistoricalBlocks, + StakingLeafSigner: pTestSigner, + StakingCertLeaf: pTestCert, + }, ) require.NoError(proVM.Initialize( @@ -2753,12 +2773,14 @@ func TestHistoricalBlockDeletion(t *testing.T) { newNumHistoricalBlocks := numHistoricalBlocks + 2 proVM = New( coreVM, - time.Time{}, - 0, - DefaultMinBlockDelay, - newNumHistoricalBlocks, - pTestSigner, - pTestCert, + Config{ + ActivationTime: time.Time{}, + MinimumPChainHeight: 0, + MinBlkDelay: DefaultMinBlockDelay, + NumHistoricalBlocks: newNumHistoricalBlocks, + StakingLeafSigner: pTestSigner, + 
StakingCertLeaf: pTestCert, + }, ) require.NoError(proVM.Initialize( diff --git a/x/archivedb/key_test.go b/x/archivedb/key_test.go index d56dca5f37fc..18343e725bc0 100644 --- a/x/archivedb/key_test.go +++ b/x/archivedb/key_test.go @@ -21,9 +21,7 @@ func TestNaturalDescSortingForSameKey(t *testing.T) { entry := [][]byte{key0, key1, key2, key3} expected := [][]byte{key3, key2, key1, key0} - slices.SortFunc(entry, func(i, j []byte) bool { - return bytes.Compare(i, j) < 0 - }) + slices.SortFunc(entry, bytes.Compare) require.Equal(t, expected, entry) } @@ -37,9 +35,7 @@ func TestSortingDifferentPrefix(t *testing.T) { entry := [][]byte{key0, key1, key2, key3} expected := [][]byte{key1, key0, key3, key2} - slices.SortFunc(entry, func(i, j []byte) bool { - return bytes.Compare(i, j) < 0 - }) + slices.SortFunc(entry, bytes.Compare) require.Equal(t, expected, entry) } diff --git a/x/merkledb/key.go b/x/merkledb/key.go index d65d9b74a0a6..78e35f59924c 100644 --- a/x/merkledb/key.go +++ b/x/merkledb/key.go @@ -11,6 +11,8 @@ import ( "golang.org/x/exp/maps" "golang.org/x/exp/slices" + + "github.com/ava-labs/avalanchego/utils" ) var ( @@ -164,12 +166,19 @@ func (k Key) Length() int { // Greater returns true if current Key is greater than other Key func (k Key) Greater(other Key) bool { - return k.value > other.value || (k.value == other.value && k.length > other.length) + return k.Compare(other) == 1 } // Less will return true if current Key is less than other Key func (k Key) Less(other Key) bool { - return k.value < other.value || (k.value == other.value && k.length < other.length) + return k.Compare(other) == -1 +} + +func (k Key) Compare(other Key) int { + if valueCmp := utils.Compare(k.value, other.value); valueCmp != 0 { + return valueCmp + } + return utils.Compare(k.length, other.length) } // Extend returns a new Key that is the in-order aggregation of Key [k] with [keys] diff --git a/x/merkledb/view_iterator.go b/x/merkledb/view_iterator.go index 
fac213bf350b..66f4712daf7a 100644 --- a/x/merkledb/view_iterator.go +++ b/x/merkledb/view_iterator.go @@ -41,8 +41,8 @@ func (t *trieView) NewIteratorWithStartAndPrefix(start, prefix []byte) database. } // sort [changes] so they can be merged with the parent trie's state - slices.SortFunc(changes, func(a, b KeyChange) bool { - return bytes.Compare(a.Key, b.Key) == -1 + slices.SortFunc(changes, func(a, b KeyChange) int { + return bytes.Compare(a.Key, b.Key) }) return &viewIterator{ diff --git a/x/sync/sync_test.go b/x/sync/sync_test.go index af908c9d941c..71871e95db56 100644 --- a/x/sync/sync_test.go +++ b/x/sync/sync_test.go @@ -696,11 +696,11 @@ func TestFindNextKeyRandom(t *testing.T) { } // Sort in ascending order by key prefix. - serializedPathLess := func(i, j keyAndID) bool { - return i.key.Less(j.key) + serializedPathCompare := func(i, j keyAndID) int { + return i.key.Compare(j.key) } - slices.SortFunc(remoteKeyIDs, serializedPathLess) - slices.SortFunc(localKeyIDs, serializedPathLess) + slices.SortFunc(remoteKeyIDs, serializedPathCompare) + slices.SortFunc(localKeyIDs, serializedPathCompare) // Filter out keys that are before the last received key findBounds := func(keyIDs []keyAndID) (int, int) { @@ -738,7 +738,7 @@ func TestFindNextKeyRandom(t *testing.T) { for i := 0; i < len(remoteKeyIDs) && i < len(localKeyIDs); i++ { // See if the keys are different. 
smaller, bigger := remoteKeyIDs[i], localKeyIDs[i] - if serializedPathLess(localKeyIDs[i], remoteKeyIDs[i]) { + if serializedPathCompare(localKeyIDs[i], remoteKeyIDs[i]) == -1 { smaller, bigger = localKeyIDs[i], remoteKeyIDs[i] } @@ -1194,8 +1194,6 @@ func generateTrieWithMinKeyLen(t *testing.T, r *rand.Rand, count int, minKeyLen } i++ } - slices.SortFunc(allKeys, func(a, b []byte) bool { - return bytes.Compare(a, b) < 0 - }) + slices.SortFunc(allKeys, bytes.Compare) return db, allKeys, batch.Write() } diff --git a/x/sync/workheap_test.go b/x/sync/workheap_test.go index 0a3262a9310f..826011dbdebd 100644 --- a/x/sync/workheap_test.go +++ b/x/sync/workheap_test.go @@ -4,6 +4,7 @@ package sync import ( + "bytes" "math/rand" "testing" "time" @@ -13,7 +14,6 @@ import ( "golang.org/x/exp/slices" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/maybe" ) @@ -199,7 +199,7 @@ func TestWorkHeapMergeInsertRandom(t *testing.T) { _, _ = rand.Read(bound) bounds = append(bounds, bound) } - utils.SortBytes(bounds) + slices.SortFunc(bounds, bytes.Compare) // Note that start < end for all ranges. // It is possible but extremely unlikely that