diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index a98c58139a..a39bac1ac4 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -7,4 +7,4 @@ # global owners are only requested if there isn't a more specific # codeowner specified below. For this reason, the global codeowners # are often repeated in package-level definitions. -* @quantumexplorer @lklimek @shotonoff +* @quantumexplorer @lklimek @shumkov diff --git a/.github/actions/bls/action.yml b/.github/actions/bls/action.yml index 4fa4f2ca38..ff0c6c0e65 100644 --- a/.github/actions/bls/action.yml +++ b/.github/actions/bls/action.yml @@ -15,7 +15,7 @@ runs: steps: - uses: actions/setup-go@v2 with: - go-version: "1.19" + go-version: "1.22" - uses: actions/checkout@v2 with: submodules: true diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 5e824e8ee1..70ff467c10 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -18,11 +18,10 @@ updates: directory: "/" schedule: interval: weekly - target-branch: "v0.35.x" + target-branch: "master" open-pull-requests-limit: 10 reviewers: - - shotonoff + - shumkov - lklimek - - iammadab labels: - dependencies diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 4b41672941..fc64049a83 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -21,9 +21,9 @@ jobs: goos: ["linux"] timeout-minutes: 55 steps: - - uses: actions/setup-go@v3.5.0 + - uses: actions/setup-go@v5.0.1 with: - go-version: "1.19" + go-version: "1.22" - uses: actions/checkout@v4 - uses: technote-space/get-diff-action@v6 with: @@ -52,9 +52,9 @@ jobs: needs: build timeout-minutes: 5 steps: - - uses: actions/setup-go@v3.5.0 + - uses: actions/setup-go@v5.0.1 with: - go-version: "1.19" + go-version: "1.22" - uses: actions/checkout@v4 - uses: technote-space/get-diff-action@v6 with: @@ -77,9 +77,9 @@ jobs: needs: build timeout-minutes: 5 steps: - - uses: actions/setup-go@v3.5.0 + - uses: actions/setup-go@v5.0.1 with: - go-version: "1.19" + go-version: "1.22" - uses: actions/checkout@v4 - uses: technote-space/get-diff-action@v6 with: diff --git a/.github/workflows/check-generated.yml b/.github/workflows/check-generated.yml index d5523b6f9d..3f555aed1e 100644 --- a/.github/workflows/check-generated.yml +++ b/.github/workflows/check-generated.yml @@ -21,9 +21,9 @@ jobs: check-mocks: runs-on: ubuntu-latest steps: - - uses: actions/setup-go@v3.5.0 + - uses: actions/setup-go@v5.0.1 with: - go-version: "1.19" + go-version: "1.22" - uses: actions/checkout@v4 @@ -31,7 +31,7 @@ jobs: run: | set -euo pipefail - readonly MOCKERY=2.33.2 # N.B. no leading "v" + readonly MOCKERY=2.41.0 # N.B. 
no leading "v" curl -sL "https://github.com/vektra/mockery/releases/download/v${MOCKERY}/mockery_${MOCKERY}_Linux_x86_64.tar.gz" | tar -C /usr/local/bin -xzf - make mockery 2>/dev/null @@ -47,9 +47,9 @@ jobs: check-proto: runs-on: ubuntu-latest steps: - - uses: actions/setup-go@v3.5.0 + - uses: actions/setup-go@v5.0.1 with: - go-version: "1.19" + go-version: "1.22" - uses: actions/checkout@v4 with: diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index aff9dc0f66..1089da4b2c 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -9,6 +9,15 @@ on: type: string description: "Docker tag" required: false + + platforms: + type: choice + description: "Image architecture to build" + default: "linux/amd64,linux/arm64" + options: + - "linux/amd64,linux/arm64" + - "linux/amd64" + - "linux/arm64" release: types: - published @@ -16,6 +25,8 @@ on: jobs: build: runs-on: ubuntu-22.04 + env: + PLATFORMS: ${{ github.event_name == 'release' && 'linux/amd64,linux/arm64' || github.event.inputs.platforms }} steps: - uses: actions/checkout@v4 @@ -25,11 +36,11 @@ jobs: platforms: all - name: Set up Docker Build - uses: docker/setup-buildx-action@v2.4.1 + uses: docker/setup-buildx-action@v3.3.0 - name: Login to DockerHub if: ${{ github.event_name != 'pull_request' }} - uses: docker/login-action@v2.0.0 + uses: docker/login-action@v3.2.0 with: username: ${{ secrets.DOCKERHUB_USERNAME }} password: ${{ secrets.DOCKERHUB_TOKEN }} @@ -55,7 +66,7 @@ jobs: - name: Set Docker tags and labels id: docker_meta - uses: docker/metadata-action@v3 + uses: docker/metadata-action@v5 with: images: dashpay/tenderdash tags: | @@ -75,11 +86,11 @@ jobs: # on Docker Hub, as Github caches are not available for other branches. - name: Build and cache image with dependencies id: docker_bls - uses: docker/build-push-action@v4.0.0 + uses: docker/build-push-action@v6.0.0 with: context: . file: ./DOCKER/Dockerfile - platforms: linux/amd64,linux/arm64 + platforms: ${{ env.PLATFORMS }} target: base push: false cache-from: | @@ -91,11 +102,11 @@ jobs: - name: Publish to Docker Hub id: docker_build - uses: docker/build-push-action@v4.0.0 + uses: docker/build-push-action@v6.0.0 with: context: . 
file: ./DOCKER/Dockerfile - platforms: linux/amd64,linux/arm64 + platforms: ${{ env.PLATFORMS }} push: ${{ github.event_name != 'pull_request' }} tags: ${{ steps.docker_meta.outputs.tags }} labels: ${{ steps.docker_meta.outputs.labels }} diff --git a/.github/workflows/docs-deployment.yml b/.github/workflows/docs-deployment.yml index 64dba8c761..2d822f9334 100644 --- a/.github/workflows/docs-deployment.yml +++ b/.github/workflows/docs-deployment.yml @@ -37,7 +37,7 @@ jobs: run: | git config --global --add safe.directory "$PWD" make build-docs - - uses: actions/upload-artifact@v3 + - uses: actions/upload-artifact@v4 with: name: build-output path: ~/output/ @@ -50,7 +50,7 @@ jobs: contents: write steps: - uses: actions/checkout@v4 - - uses: actions/download-artifact@v3 + - uses: actions/download-artifact@v4 with: name: build-output path: ~/output diff --git a/.github/workflows/e2e.yml b/.github/workflows/e2e.yml index 15afabe990..6d615da90e 100644 --- a/.github/workflows/e2e.yml +++ b/.github/workflows/e2e.yml @@ -20,18 +20,18 @@ jobs: fail-fast: true matrix: testnet: ["dashcore", "rotate"] - timeout-minutes: 25 + timeout-minutes: 30 env: FULLNODE_PUBKEY_KEEP: false CGO_LDFLAGS: "-L/usr/local/lib -ldashbls -lrelic_s -lmimalloc-secure -lgmp" CGO_CXXFLAGS: "-I/usr/local/include" steps: - - uses: actions/setup-go@v3.5.0 + - uses: actions/setup-go@v5.0.1 with: - go-version: "1.19" + go-version: "1.22" - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v2.4.1 + uses: docker/setup-buildx-action@v3.3.0 - uses: actions/checkout@v4 with: @@ -54,7 +54,7 @@ jobs: if: "github.event_name != 'pull_request' || env.GIT_DIFF != ''" - name: Build E2E Docker image - uses: docker/build-push-action@v4.0.0 + uses: docker/build-push-action@v6.0.0 with: context: . 
file: test/e2e/docker/Dockerfile @@ -76,7 +76,7 @@ jobs: if: ${{ failure() }} - name: Upload ${{ matrix.testnet }} logs - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: ${{ matrix.testnet }}.log path: test/e2e/${{ matrix.testnet }}.log diff --git a/.github/workflows/janitor.yml b/.github/workflows/janitor.yml index ceb21941d1..f943026750 100644 --- a/.github/workflows/janitor.yml +++ b/.github/workflows/janitor.yml @@ -10,7 +10,7 @@ jobs: runs-on: ubuntu-latest timeout-minutes: 3 steps: - - uses: styfle/cancel-workflow-action@0.10.0 + - uses: styfle/cancel-workflow-action@0.12.1 with: workflow_id: 1041851,1401230,2837803 access_token: ${{ github.token }} diff --git a/.github/workflows/jepsen.yml b/.github/workflows/jepsen.yml index 5a947f928c..793193138b 100644 --- a/.github/workflows/jepsen.yml +++ b/.github/workflows/jepsen.yml @@ -58,7 +58,7 @@ jobs: run: docker exec -i jepsen-control bash -c 'source /root/.bashrc; cd /jepsen/tendermint; lein run test --nemesis ${{ github.event.inputs.nemesis }} --workload ${{ github.event.inputs.workload }} --concurrency ${{ github.event.inputs.concurrency }} --tendermint-url ${{ github.event.inputs.tendermintUrl }} --merkleeyes-url ${{ github.event.inputs.merkleeyesUrl }} --time-limit ${{ github.event.inputs.timeLimit }} ${{ github.event.inputs.dupOrSuperByzValidators }}' - name: Archive results - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: results path: tendermint/store/latest diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 846eef16b3..b2384eb179 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -11,6 +11,8 @@ on: pull_request: paths: - "**.go" + - ".golangci.yml" + - ".github/linters/*" push: branches: - master @@ -28,15 +30,16 @@ jobs: - uses: actions/checkout@v4 with: submodules: true - - uses: actions/setup-go@v3.5.0 + - uses: actions/setup-go@v5.0.1 with: - go-version: "^1.19" + go-version: "^1.22" - uses: technote-space/get-diff-action@v6 with: PATTERNS: | **/**.go go.mod go.sum + .golangci.yml - name: Install dependencies run: sudo apt-get update && sudo apt-get install -y libpcap-dev @@ -45,12 +48,12 @@ jobs: name: Install BLS library if: "env.GIT_DIFF != ''" - - uses: golangci/golangci-lint-action@v3.4.0 + - uses: golangci/golangci-lint-action@v6.0.1 with: # Required: the version of golangci-lint is required and # must be specified without patch version: we always use the # latest patch version. 
- version: v1.46 + version: v1.55 args: --timeout 10m github-token: ${{ secrets.github_token }} if: env.GIT_DIFF diff --git a/.github/workflows/proto-lint.yml b/.github/workflows/proto-lint.yml index 65b894272e..aefeac122e 100644 --- a/.github/workflows/proto-lint.yml +++ b/.github/workflows/proto-lint.yml @@ -14,7 +14,7 @@ jobs: timeout-minutes: 5 steps: - uses: actions/checkout@v4 - - uses: bufbuild/buf-setup-action@v1.14.0 + - uses: bufbuild/buf-setup-action@v1.33.0 - uses: bufbuild/buf-lint-action@v1 with: input: "proto" diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 0992fad38d..b982c6c528 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -12,21 +12,21 @@ jobs: with: fetch-depth: 0 - - uses: actions/setup-go@v3.5.0 + - uses: actions/setup-go@v5.0.1 with: - go-version: '1.19' + go-version: "1.22" - name: Build - uses: goreleaser/goreleaser-action@v4 + uses: goreleaser/goreleaser-action@v6 if: ${{ github.event_name == 'pull_request' }} with: version: latest - args: build --skip-validate # skip validate skips initial sanity checks in order to be able to fully run + args: build --skip-validate # skip validate skips initial sanity checks in order to be able to fully run - run: echo https://github.com/tendermint/tendermint/blob/${GITHUB_REF#refs/tags/}/CHANGELOG.md#${GITHUB_REF#refs/tags/} > ../release_notes.md - name: Release - uses: goreleaser/goreleaser-action@v4 + uses: goreleaser/goreleaser-action@v6 if: startsWith(github.ref, 'refs/tags/') with: version: latest diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml index d5dbfbee1d..1e16964c60 100644 --- a/.github/workflows/stale.yml +++ b/.github/workflows/stale.yml @@ -7,7 +7,7 @@ jobs: stale: runs-on: ubuntu-latest steps: - - uses: actions/stale@v7 + - uses: actions/stale@v9 with: repo-token: ${{ secrets.GITHUB_TOKEN }} stale-pr-message: "This pull request has been automatically marked as stale because it has not had diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 5d98b88e38..b84ea85ac1 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -16,9 +16,9 @@ jobs: matrix: part: ["00", "01", "02", "03", "04", "05"] steps: - - uses: actions/setup-go@v3.5.0 + - uses: actions/setup-go@v5.0.1 with: - go-version: "1.19" + go-version: "1.22" - uses: actions/checkout@v4 - uses: technote-space/get-diff-action@v6 with: diff --git a/.golangci.yml b/.golangci.yml index b0c13b706a..955e41347e 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -2,8 +2,7 @@ linters: enable: - asciicheck - bodyclose - - deadcode - - depguard + # - depguard - dogsled - dupl - errcheck @@ -30,14 +29,11 @@ linters: - nakedret - nolintlint - prealloc - - staticcheck - - structcheck - stylecheck # - typecheck - unconvert # - unparam - unused - - varcheck # - whitespace # - wsl @@ -53,7 +49,19 @@ linters-settings: max-blank-identifiers: 3 golint: min-confidence: 0 + goconst: + min-len: 5 + min-occurrences: 5 + ignore-tests: true maligned: suggest-new: true misspell: locale: US + revive: + rules: + # https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md#unused-parameter + - name: unused-parameter + severity: warning + disabled: false + arguments: + - allowRegex: "^_" diff --git a/CHANGELOG.md b/CHANGELOG.md index 0ee2d1966e..7643efbe99 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,9 +1,110 @@ +## [1.0.0] - 2024-06-26 + +### Bug Fixes + +- Panic in ordered_map Key() (#721) +- Race condition when adding new channel to NodeInfo 
(#735) +- [**breaking**] E2e tests are flaky due to slow app state processing (#745) +- Cancel previous mempool run when starting new one (#760) +- Valid/locked block incorrectly marked as not timely (#762) +- Handle ValidatorSetUpdate with no validator changes (#774) +- Ignore abci section on seeds (#785) +- Abci valset update abci does not need a public key on replay (#786) +- [**breaking**] Limit mempool gossip rate on a per-peer basis (#787) +- Router.chDesc concurrent map iteration and write in pqueue (#794) +- Ineffective PROXY_APP and ABCI env in entrypoint (#805) + +### Features + +- Allow delaying transactions in ResponsePrepareProposal (#717) +- New vote extenison type THRESHOLD_RAW (#715) +- Env var ABCI changes abci option in config.toml (#742) +- Route abci requests depending on request type (#734) +- Add request result to prometheus stats (#743) +- Support timeouts in abci calls (#749) +- Allow configuration of check TX timeout for rpc and p2p tx broadcast (#750) +- Channel enqueue timeout and improved router cleanup (#754) +- Channels with limit of send and recv rate (#753) +- Proposer-based app version (#769) +- [**breaking**] Limit concurrent gRPC connections (#775) + +### Miscellaneous Tasks + +- Update CODEOWNERS (#736) +- [**breaking**] Remove ExecTxResult.GasWanted (#740) +- Regenerate mocks (#744) +- Don't use go-deadlock in clist due to performance issues (#747) +- Fix linter warnings in v0.14-dev (#748) +- Minor improvements: logging, comments, locks (#726) +- Update changelog and version to 0.14.0-dev.4 (#763) +- Update changelog and version to 0.14.0-dev.6 (#778) +- Update changelog and version to 0.14.0-dev.7 (#789) +- Add Warn log level and decrease verbosity of some logs (#790) +- Detect quorum hash mismatch when verifying commit (#791) +- Update changelog and version to 0.14.0-dev.8 (#792) +- Update changelog and version to 1.0.0-dev.2 (#806) + +### Refactor + +- Remove QuorumSingsVerifier (#727) +- Relaxed locking of mempool (#737) +- [**breaking**] Remove deprecated config fields (#755) +- Remove not needed commit timeout and unused LastPrecommits (#751) +- Tune channel priorities and move channel definitions to p2p/channel_params.go (#759) +- Use tmsync.Waker when txs are available (#761) +- [**breaking**] Move Events from FinalizeBlock to ResponseProcessProposal (#770) + +### Testing + +- Add parallel grpc execution test (#758) +- Fix flaky TestTooFarInTheFutureProposal (#768) +- Fix flaky TestEmitNewValidBlockEventOnCommitWithoutBlock (#772) + +### Build + +- Bump actions/setup-go from 3.5.0 to 5.0.0 +- Bump actions/stale from 7 to 9 +- Update golang to 1.21 (#716) +- Bump actions/download-artifact from 3 to 4 (#720) +- Bump actions/upload-artifact from 3 to 4 (#719) +- Manual docker image build platform selection (#722) +- Bump bufbuild/buf-setup-action from 1.27.0 to 1.29.0 (#729) +- Bump styfle/cancel-workflow-action from 0.12.0 to 0.12.1 (#728) +- Fix depguard linter configuration (#730) +- Simplify build process (#732) +- Fix docker platforms for releases (#733) +- Bump golangci/golangci-lint-action from 3.7.0 to 4.0.0 (#738) +- Bump go to 1.22, alpine to 3.19, mockery to 2.41.0 (#741) +- Bump docker/setup-buildx-action from 3.0.0 to 3.1.0 (#746) +- Bump docker/build-push-action from 5.0.0 to 5.2.0 (#756) +- Bump bufbuild/buf-setup-action from 1.29.0 to 1.30.0 (#757) +- Bump docker/setup-buildx-action from 3.1.0 to 3.2.0 (#765) +- Bump docker/login-action from 3.0.0 to 3.1.0 (#767) +- Bump docker/build-push-action from 5.2.0 to 5.3.0 (#766) +- Bump 
docker/setup-buildx-action from 3.2.0 to 3.3.0 (#776) +- Bump bufbuild/buf-setup-action from 1.30.0 to 1.30.1 (#777) +- Bump golangci/golangci-lint-action from 4.0.0 to 5.3.0 (#784) +- Bump actions/setup-go from 5.0.0 to 5.0.1 (#783) +- Bump bufbuild/buf-setup-action from 1.30.1 to 1.31.0 (#780) +- Bump golangci/golangci-lint-action from 5.3.0 to 6.0.1 (#788) +- Bump bufbuild/buf-setup-action from 1.31.0 to 1.32.0 (#793) +- Bump bufbuild/buf-setup-action from 1.32.0 to 1.32.2 (#796) +- Bump docker/login-action from 3.1.0 to 3.2.0 (#797) +- Bump docker/build-push-action from 5.3.0 to 5.4.0 (#799) +- Bump docker/build-push-action from 5.4.0 to 6.0.0 (#801) +- Bump bufbuild/buf-setup-action from 1.32.2 to 1.33.0 (#800) +- Bump goreleaser/goreleaser-action from 5 to 6 (#798) + ## [0.13.4] - 2023-12-11 ### Bug Fixes - Ordered map race condition (#708) +### Miscellaneous Tasks + +- Update changelog and version to 0.13.4 + ### Performance - Increase web socket channels capacity (#709) @@ -19,16 +120,30 @@ - Update changelog and version to 0.13.3 +### Build + +- Bump styfle/cancel-workflow-action from 0.10.0 to 0.12.0 (#696) +- Bump bufbuild/buf-setup-action from 1.14.0 to 1.27.0 (#695) + ## [0.13.2] - 2023-10-09 ### Bug Fixes +- Log-file-path setting does not work (#691) - Log-file-path setting does not work (#691) ### Miscellaneous Tasks - Update changelog and version to 0.13.2 +### Build + +- Bump goreleaser/goreleaser-action from 4 to 5 +- Bump docker/metadata-action from 3 to 5 +- Bump docker/login-action from 2.0.0 to 3.0.0 +- Bump docker/build-push-action from 4.0.0 to 5.0.0 +- Bump docker/setup-buildx-action from 2.4.1 to 3.0.0 + ## [0.13.1] - 2023-09-14 ### Bug Fixes diff --git a/DOCKER/Dockerfile b/DOCKER/Dockerfile index 10f46d8c0f..75ea89ad6e 100644 --- a/DOCKER/Dockerfile +++ b/DOCKER/Dockerfile @@ -4,8 +4,8 @@ # * compile - builds final binaries # * image - creates final image of minimal size -ARG ALIPNE_VERSION=3.17 -ARG GOLANG_VERSION=1.19 +ARG ALIPNE_VERSION=3.19 +ARG GOLANG_VERSION=1.22 ################################# # STAGE 1: install dependencies # ################################# @@ -13,15 +13,14 @@ FROM golang:${GOLANG_VERSION}-alpine${ALIPNE_VERSION} AS base RUN apk update && \ apk upgrade && \ - apk --no-cache add bash git gmp-dev sudo cmake build-base python3-dev libpcap-dev leveldb-dev && \ + apk --no-cache add bash git gmp-dev sudo cmake build-base libpcap-dev leveldb-dev && \ rm -rf /var/cache/apk/* WORKDIR /src/bls # Install BLS library COPY third_party ./third_party -RUN ./third_party/bls-signatures/build.sh && \ - ./third_party/bls-signatures/install.sh +RUN ./third_party/bls-signatures/build.sh ##################################### # STAGE 2: install golang libraries # @@ -94,7 +93,7 @@ COPY --from=compile /src/tenderdash/build/tenderdash /src/tenderdash/build/abcid # You can overwrite these before the first run to influence # config.json and genesis.json. Additionally, you can override # CMD to add parameters to `tenderdash node`. -ENV PROXY_APP=kvstore MONIKER=dockernode CHAIN_ID=dockerchain +ENV PROXY_APP=kvstore MONIKER=dockernode CHAIN_ID=dockerchain ABCI="" COPY ./DOCKER/docker-entrypoint.sh /usr/local/bin/ diff --git a/DOCKER/docker-entrypoint.sh b/DOCKER/docker-entrypoint.sh index bd080c46f5..ba612ff8f2 100755 --- a/DOCKER/docker-entrypoint.sh +++ b/DOCKER/docker-entrypoint.sh @@ -6,7 +6,7 @@ if [ ! 
-d "$TMHOME/config" ]; then tenderdash init validator sed -i \ - -e "s/^proxy-app\s*=.*/proxy-app = \"$PROXY_APP\"/" \ + -e "s/^address\s*=.*/address = \"$PROXY_APP\"/" \ -e "s/^moniker\s*=.*/moniker = \"$MONIKER\"/" \ -e 's/^addr-book-strict\s*=.*/addr-book-strict = false/' \ -e 's/^timeout-commit\s*=.*/timeout-commit = "500ms"/' \ @@ -15,8 +15,14 @@ if [ ! -d "$TMHOME/config" ]; then -e 's/^prometheus\s*=.*/prometheus = true/' \ "$TMHOME/config/config.toml" + if [ -n "$ABCI" ]; then + sed -i \ + -e "s/^transport\s*=.*/transport = \"$ABCI\"/" \ + "$TMHOME/config/config.toml" + fi + jq ".chain_id = \"$CHAIN_ID\" | .consensus_params.block.time_iota_ms = \"500\"" \ - "$TMHOME/config/genesis.json" > "$TMHOME/config/genesis.json.new" + "$TMHOME/config/genesis.json" >"$TMHOME/config/genesis.json.new" mv "$TMHOME/config/genesis.json.new" "$TMHOME/config/genesis.json" fi diff --git a/Makefile b/Makefile index 67c61379ae..8cf9474354 100644 --- a/Makefile +++ b/Makefile @@ -95,9 +95,10 @@ endif # allow users to pass additional flags via the conventional LDFLAGS variable LD_FLAGS += $(LDFLAGS) -all: build install +all: build build: build-bls build-binary .PHONY: build + install: install-bls .PHONY: all @@ -113,7 +114,7 @@ build-bls: .PHONY: build-bls install-bls: build-bls - @sudo third_party/bls-signatures/install.sh + @third_party/bls-signatures/install.sh .PHONY: install-bls ############################################################################### diff --git a/README.md b/README.md index b3b166f0f4..f485db6d79 100644 --- a/README.md +++ b/README.md @@ -57,7 +57,7 @@ to notify you of vulnerabilities and fixes in Tendermint Core. You can subscribe | Requirement | Notes | |-------------|------------------| -| Go version | Go1.17 or higher | +| Go version | Go1.22 or higher | ### Install diff --git a/abci/client/client.go b/abci/client/client.go index c5afa771e2..d0be4641c1 100644 --- a/abci/client/client.go +++ b/abci/client/client.go @@ -7,6 +7,7 @@ import ( sync "github.com/sasha-s/go-deadlock" "github.com/dashpay/tenderdash/abci/types" + "github.com/dashpay/tenderdash/config" "github.com/dashpay/tenderdash/libs/log" "github.com/dashpay/tenderdash/libs/service" ) @@ -36,12 +37,17 @@ type Client interface { // NewClient returns a new ABCI client of the specified transport type. // It returns an error if the transport is not "socket" or "grpc" -func NewClient(logger log.Logger, addr, transport string, mustConnect bool) (Client, error) { +func NewClient(logger log.Logger, cfg config.AbciConfig, mustConnect bool) (Client, error) { + transport := cfg.Transport + addr := cfg.Address + switch transport { case "socket": return NewSocketClient(logger, addr, mustConnect), nil case "grpc": - return NewGRPCClient(logger, addr, mustConnect), nil + return NewGRPCClient(logger, addr, cfg.GrpcConcurrency, mustConnect), nil + case "routed": + return NewRoutedClientWithAddr(logger, addr, mustConnect) default: return nil, fmt.Errorf("unknown abci transport %s", transport) } @@ -51,22 +57,25 @@ type requestAndResponse struct { *types.Request *types.Response - mtx sync.Mutex + mtx sync.Mutex + // context for the request; we check if it's not expired before sending + ctx context.Context signal chan struct{} } -func makeReqRes(req *types.Request) *requestAndResponse { +func makeReqRes(ctx context.Context, req *types.Request) *requestAndResponse { return &requestAndResponse{ Request: req, Response: nil, + ctx: ctx, signal: make(chan struct{}), } } // markDone marks the ReqRes object as done. 
-func (r *requestAndResponse) markDone() { - r.mtx.Lock() - defer r.mtx.Unlock() +func (reqResp *requestAndResponse) markDone() { + reqResp.mtx.Lock() + defer reqResp.mtx.Unlock() - close(r.signal) + close(reqResp.signal) } diff --git a/abci/client/grpc_client.go b/abci/client/grpc_client.go index 91e848f7bb..0ffed9b399 100644 --- a/abci/client/grpc_client.go +++ b/abci/client/grpc_client.go @@ -4,6 +4,7 @@ import ( "context" "errors" "net" + "strings" "time" sync "github.com/sasha-s/go-deadlock" @@ -30,6 +31,14 @@ type grpcClient struct { mtx sync.Mutex addr string err error + + // map between method name (in grpc format, for example `/tendermint.abci.ABCIApplication/Echo`) + // and a channel that will be used to limit the number of concurrent requests for that method. + // + // If the value is nil, no limit is enforced. + // + // Not thread-safe, only modify this before starting the client. + concurrency map[string]chan struct{} } var _ Client = (*grpcClient)(nil) @@ -37,29 +46,115 @@ var _ Client = (*grpcClient)(nil) // NewGRPCClient creates a gRPC client, which will connect to addr upon the // start. Note Client#Start returns an error if connection is unsuccessful and // mustConnect is true. -func NewGRPCClient(logger log.Logger, addr string, mustConnect bool) Client { +func NewGRPCClient(logger log.Logger, addr string, concurrency map[string]uint16, mustConnect bool) Client { cli := &grpcClient{ logger: logger, addr: addr, mustConnect: mustConnect, + concurrency: make(map[string]chan struct{}, 20), } cli.BaseService = *service.NewBaseService(logger, "grpcClient", cli) + cli.SetMaxConcurrentStreams(concurrency) + return cli } -func dialerFunc(ctx context.Context, addr string) (net.Conn, error) { +func methodID(method string) string { + if pos := strings.LastIndex(method, "/"); pos > 0 { + method = method[pos+1:] + } + + return strings.ToLower(method) +} + +// SetMaxConcurrentStreams sets the maximum number of concurrent streams to be +// allowed on this client. +// +// Not thread-safe, only use this before starting the client. +// +// If limit is 0, no limit is enforced. +func (cli *grpcClient) SetMaxConcurrentStreamsForMethod(method string, n uint16) { + if cli.IsRunning() { + panic("cannot set max concurrent streams after starting the client") + } + var ch chan struct{} + if n != 0 { + ch = make(chan struct{}, n) + } + + cli.mtx.Lock() + cli.concurrency[methodID(method)] = ch + cli.mtx.Unlock() +} + +// SetMaxConcurrentStreams sets the maximum number of concurrent streams to be +// allowed on this client. +// # Arguments +// +// * `methods` - A map between method name (in grpc format, for example `/tendermint.abci.ABCIApplication/Echo`) +// and the maximum number of concurrent streams to be allowed for that method. +// +// Special method name "*" can be used to set the default limit for methods not explicitly listed. +// +// If the value is 0, no limit is enforced. +// +// Not thread-safe, only use this before starting the client. +func (cli *grpcClient) SetMaxConcurrentStreams(methods map[string]uint16) { + for method, n := range methods { + cli.SetMaxConcurrentStreamsForMethod(method, n) + } +} + +func dialerFunc(_ctx context.Context, addr string) (net.Conn, error) { return tmnet.Connect(addr) } +// rateLimit blocks until the client is allowed to send a request. +// It returns a function that should be called after the request is done. +// +// method should be the method name in grpc format, for example `/tendermint.abci.ABCIApplication/Echo`. 
+// Special method name "*" can be used to define the default limit. +// If no limit is set for the method, the default limit is used. +func (cli *grpcClient) rateLimit(method string) context.CancelFunc { + ch := cli.concurrency[methodID(method)] + // handle default + if ch == nil { + ch = cli.concurrency["*"] + } + if ch == nil { + return func() {} + } + + cli.logger.Trace("grpcClient rateLimit", "addr", cli.addr) + ch <- struct{}{} + return func() { <-ch } +} + +func (cli *grpcClient) unaryClientInterceptor(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { + done := cli.rateLimit(method) + defer done() + + return invoker(ctx, method, req, reply, cc, opts...) +} + +func (cli *grpcClient) streamClientInterceptor(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) { + done := cli.rateLimit(method) + defer done() + + return streamer(ctx, desc, cc, method, opts...) +} + func (cli *grpcClient) OnStart(ctx context.Context) error { timer := time.NewTimer(0) defer timer.Stop() RETRY_LOOP: for { - conn, err := grpc.Dial(cli.addr, + conn, err := grpc.NewClient(cli.addr, grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithContextDialer(dialerFunc), + grpc.WithChainUnaryInterceptor(cli.unaryClientInterceptor), + grpc.WithChainStreamInterceptor(cli.streamClientInterceptor), ) if err != nil { if cli.mustConnect { @@ -122,7 +217,7 @@ func (cli *grpcClient) Error() error { //---------------------------------------- -func (cli *grpcClient) Flush(ctx context.Context) error { return nil } +func (cli *grpcClient) Flush(_ctx context.Context) error { return nil } func (cli *grpcClient) Echo(ctx context.Context, msg string) (*types.ResponseEcho, error) { return cli.client.Echo(ctx, types.ToRequestEcho(msg).GetEcho(), grpc.WaitForReady(true)) diff --git a/abci/client/grpc_client_test.go b/abci/client/grpc_client_test.go new file mode 100644 index 0000000000..551b8aeb55 --- /dev/null +++ b/abci/client/grpc_client_test.go @@ -0,0 +1,164 @@ +package abciclient + +import ( + "context" + "fmt" + "sync" + "sync/atomic" + "testing" + "time" + + "github.com/fortytw2/leaktest" + "github.com/stretchr/testify/assert" + + abciserver "github.com/dashpay/tenderdash/abci/server" + "github.com/dashpay/tenderdash/abci/types" + "github.com/dashpay/tenderdash/libs/log" + "github.com/dashpay/tenderdash/libs/service" +) + +// TestGRPCClientServerParallel tests that gRPC client and server can handle multiple parallel requests +func TestGRPCClientServerParallel(t *testing.T) { + const ( + timeout = 1 * time.Second + tick = 10 * time.Millisecond + ) + + type testCase struct { + threads int + infoConcurrency uint16 + defautConcurrency uint16 + } + + testCases := []testCase{ + {threads: 1, infoConcurrency: 1}, + {threads: 2, infoConcurrency: 1}, + {threads: 2, infoConcurrency: 2}, + {threads: 5, infoConcurrency: 0}, + {threads: 5, infoConcurrency: 0, defautConcurrency: 2}, + {threads: 5, infoConcurrency: 1}, + {threads: 5, infoConcurrency: 2}, + {threads: 5, infoConcurrency: 5}, + } + + logger := log.NewNopLogger() + + for _, tc := range testCases { + t.Run(fmt.Sprintf("t_%d-i_%d,d_%d", tc.threads, tc.infoConcurrency, tc.defautConcurrency), func(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + + app := &mockApplication{t: t, concurrencyLimit: int32(tc.infoConcurrency)} + + 
socket := t.TempDir() + "/grpc_test" + limits := map[string]uint16{ + "/tendermint.abci.ABCIApplication/Info": tc.infoConcurrency, + "*": tc.defautConcurrency, + } + + client, _, err := makeGRPCClientServer(ctx, t, logger, app, socket, limits) + if err != nil { + t.Fatal(err) + } + + // we'll use that mutex to ensure threads don't finish before we check status + app.mtx.Lock() + + // done will be used to wait for all threads to finish + var done sync.WaitGroup + + for i := 0; i < tc.threads; i++ { + done.Add(1) + thread := uint64(i) + go func() { + // we use BlockVersion for logging purposes, so we put thread id there + _, _ = client.Info(ctx, &types.RequestInfo{BlockVersion: thread}) + done.Done() + }() + } + + expectThreads := int32(tc.infoConcurrency) + if expectThreads == 0 { + expectThreads = int32(tc.defautConcurrency) + } + if expectThreads == 0 { + expectThreads = int32(tc.threads) + } + + // wait for all threads to start execution + assert.Eventually(t, func() bool { + return app.running.Load() == expectThreads + }, timeout, tick, "not all threads started in time") + + // ensure no other threads will start + time.Sleep(2 * tick) + + // unlock the mutex so that threads can finish their execution + app.mtx.Unlock() + + // wait for all threads to really finish + done.Wait() + }) + } +} + +func makeGRPCClientServer( + ctx context.Context, + t *testing.T, + logger log.Logger, + app types.Application, + name string, + concurrency map[string]uint16, +) (Client, service.Service, error) { + ctx, cancel := context.WithCancel(ctx) + t.Cleanup(cancel) + t.Cleanup(leaktest.Check(t)) + + // Start the listener + socket := fmt.Sprintf("unix://%s.sock", name) + + server := abciserver.NewGRPCServer(logger.With("module", "abci-server"), socket, app) + + if err := server.Start(ctx); err != nil { + cancel() + return nil, nil, err + } + + client := NewGRPCClient(logger.With("module", "abci-client"), socket, concurrency, true) + + if err := client.Start(ctx); err != nil { + cancel() + return nil, nil, err + } + return client, server, nil +} + +// mockApplication that will decrease mockApplication.started when called Info, and then wait until +// mtx is unlocked before it finishes +type mockApplication struct { + types.BaseApplication + mtx sync.Mutex + + running atomic.Int32 + // concurrencyLimit of concurrent requests + concurrencyLimit int32 + + t *testing.T +} + +func (m *mockApplication) Info(_ctx context.Context, req *types.RequestInfo) (res *types.ResponseInfo, err error) { + m.t.Logf("Info %d called", req.BlockVersion) + running := m.running.Add(1) + defer m.running.Add(-1) + + if m.concurrencyLimit > 0 { + assert.LessOrEqual(m.t, running, m.concurrencyLimit, "too many requests running in parallel") + } + + // we will wait here until all expected threads are running + m.mtx.Lock() + defer m.mtx.Unlock() + m.t.Logf("Info %d finished", req.BlockVersion) + + return &types.ResponseInfo{}, nil +} diff --git a/abci/client/mocks/client.go b/abci/client/mocks/client.go index 984e97ef7e..433a0cb60d 100644 --- a/abci/client/mocks/client.go +++ b/abci/client/mocks/client.go @@ -18,6 +18,10 @@ type Client struct { func (_m *Client) ApplySnapshotChunk(_a0 context.Context, _a1 *types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for ApplySnapshotChunk") + } + var r0 *types.ResponseApplySnapshotChunk var r1 error if rf, ok := ret.Get(0).(func(context.Context, *types.RequestApplySnapshotChunk) 
(*types.ResponseApplySnapshotChunk, error)); ok { @@ -44,6 +48,10 @@ func (_m *Client) ApplySnapshotChunk(_a0 context.Context, _a1 *types.RequestAppl func (_m *Client) CheckTx(_a0 context.Context, _a1 *types.RequestCheckTx) (*types.ResponseCheckTx, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for CheckTx") + } + var r0 *types.ResponseCheckTx var r1 error if rf, ok := ret.Get(0).(func(context.Context, *types.RequestCheckTx) (*types.ResponseCheckTx, error)); ok { @@ -70,6 +78,10 @@ func (_m *Client) CheckTx(_a0 context.Context, _a1 *types.RequestCheckTx) (*type func (_m *Client) Echo(_a0 context.Context, _a1 string) (*types.ResponseEcho, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for Echo") + } + var r0 *types.ResponseEcho var r1 error if rf, ok := ret.Get(0).(func(context.Context, string) (*types.ResponseEcho, error)); ok { @@ -96,6 +108,10 @@ func (_m *Client) Echo(_a0 context.Context, _a1 string) (*types.ResponseEcho, er func (_m *Client) Error() error { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Error") + } + var r0 error if rf, ok := ret.Get(0).(func() error); ok { r0 = rf() @@ -110,6 +126,10 @@ func (_m *Client) Error() error { func (_m *Client) ExtendVote(_a0 context.Context, _a1 *types.RequestExtendVote) (*types.ResponseExtendVote, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for ExtendVote") + } + var r0 *types.ResponseExtendVote var r1 error if rf, ok := ret.Get(0).(func(context.Context, *types.RequestExtendVote) (*types.ResponseExtendVote, error)); ok { @@ -136,6 +156,10 @@ func (_m *Client) ExtendVote(_a0 context.Context, _a1 *types.RequestExtendVote) func (_m *Client) FinalizeBlock(_a0 context.Context, _a1 *types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for FinalizeBlock") + } + var r0 *types.ResponseFinalizeBlock var r1 error if rf, ok := ret.Get(0).(func(context.Context, *types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error)); ok { @@ -162,6 +186,10 @@ func (_m *Client) FinalizeBlock(_a0 context.Context, _a1 *types.RequestFinalizeB func (_m *Client) Flush(_a0 context.Context) error { ret := _m.Called(_a0) + if len(ret) == 0 { + panic("no return value specified for Flush") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context) error); ok { r0 = rf(_a0) @@ -176,6 +204,10 @@ func (_m *Client) Flush(_a0 context.Context) error { func (_m *Client) Info(_a0 context.Context, _a1 *types.RequestInfo) (*types.ResponseInfo, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for Info") + } + var r0 *types.ResponseInfo var r1 error if rf, ok := ret.Get(0).(func(context.Context, *types.RequestInfo) (*types.ResponseInfo, error)); ok { @@ -202,6 +234,10 @@ func (_m *Client) Info(_a0 context.Context, _a1 *types.RequestInfo) (*types.Resp func (_m *Client) InitChain(_a0 context.Context, _a1 *types.RequestInitChain) (*types.ResponseInitChain, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for InitChain") + } + var r0 *types.ResponseInitChain var r1 error if rf, ok := ret.Get(0).(func(context.Context, *types.RequestInitChain) (*types.ResponseInitChain, error)); ok { @@ -228,6 +264,10 @@ func (_m *Client) InitChain(_a0 context.Context, _a1 *types.RequestInitChain) (* func (_m *Client) IsRunning() bool { ret := 
_m.Called() + if len(ret) == 0 { + panic("no return value specified for IsRunning") + } + var r0 bool if rf, ok := ret.Get(0).(func() bool); ok { r0 = rf() @@ -242,6 +282,10 @@ func (_m *Client) IsRunning() bool { func (_m *Client) ListSnapshots(_a0 context.Context, _a1 *types.RequestListSnapshots) (*types.ResponseListSnapshots, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for ListSnapshots") + } + var r0 *types.ResponseListSnapshots var r1 error if rf, ok := ret.Get(0).(func(context.Context, *types.RequestListSnapshots) (*types.ResponseListSnapshots, error)); ok { @@ -268,6 +312,10 @@ func (_m *Client) ListSnapshots(_a0 context.Context, _a1 *types.RequestListSnaps func (_m *Client) LoadSnapshotChunk(_a0 context.Context, _a1 *types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for LoadSnapshotChunk") + } + var r0 *types.ResponseLoadSnapshotChunk var r1 error if rf, ok := ret.Get(0).(func(context.Context, *types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error)); ok { @@ -294,6 +342,10 @@ func (_m *Client) LoadSnapshotChunk(_a0 context.Context, _a1 *types.RequestLoadS func (_m *Client) OfferSnapshot(_a0 context.Context, _a1 *types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for OfferSnapshot") + } + var r0 *types.ResponseOfferSnapshot var r1 error if rf, ok := ret.Get(0).(func(context.Context, *types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error)); ok { @@ -320,6 +372,10 @@ func (_m *Client) OfferSnapshot(_a0 context.Context, _a1 *types.RequestOfferSnap func (_m *Client) PrepareProposal(_a0 context.Context, _a1 *types.RequestPrepareProposal) (*types.ResponsePrepareProposal, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for PrepareProposal") + } + var r0 *types.ResponsePrepareProposal var r1 error if rf, ok := ret.Get(0).(func(context.Context, *types.RequestPrepareProposal) (*types.ResponsePrepareProposal, error)); ok { @@ -346,6 +402,10 @@ func (_m *Client) PrepareProposal(_a0 context.Context, _a1 *types.RequestPrepare func (_m *Client) ProcessProposal(_a0 context.Context, _a1 *types.RequestProcessProposal) (*types.ResponseProcessProposal, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for ProcessProposal") + } + var r0 *types.ResponseProcessProposal var r1 error if rf, ok := ret.Get(0).(func(context.Context, *types.RequestProcessProposal) (*types.ResponseProcessProposal, error)); ok { @@ -372,6 +432,10 @@ func (_m *Client) ProcessProposal(_a0 context.Context, _a1 *types.RequestProcess func (_m *Client) Query(_a0 context.Context, _a1 *types.RequestQuery) (*types.ResponseQuery, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for Query") + } + var r0 *types.ResponseQuery var r1 error if rf, ok := ret.Get(0).(func(context.Context, *types.RequestQuery) (*types.ResponseQuery, error)); ok { @@ -398,6 +462,10 @@ func (_m *Client) Query(_a0 context.Context, _a1 *types.RequestQuery) (*types.Re func (_m *Client) Start(_a0 context.Context) error { ret := _m.Called(_a0) + if len(ret) == 0 { + panic("no return value specified for Start") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context) error); ok { r0 = rf(_a0) @@ -412,6 +480,10 @@ func (_m *Client) Start(_a0 
context.Context) error { func (_m *Client) VerifyVoteExtension(_a0 context.Context, _a1 *types.RequestVerifyVoteExtension) (*types.ResponseVerifyVoteExtension, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for VerifyVoteExtension") + } + var r0 *types.ResponseVerifyVoteExtension var r1 error if rf, ok := ret.Get(0).(func(context.Context, *types.RequestVerifyVoteExtension) (*types.ResponseVerifyVoteExtension, error)); ok { diff --git a/abci/client/routed_client.go b/abci/client/routed_client.go new file mode 100644 index 0000000000..fa1afc1bc3 --- /dev/null +++ b/abci/client/routed_client.go @@ -0,0 +1,377 @@ +package abciclient + +import ( + "context" + "fmt" + "reflect" + "runtime" + "strings" + "time" + + "github.com/hashicorp/go-multierror" + + "github.com/dashpay/tenderdash/abci/types" + "github.com/dashpay/tenderdash/config" + "github.com/dashpay/tenderdash/libs/log" + "github.com/dashpay/tenderdash/libs/service" +) + +type routedClient struct { + service.Service + logger log.Logger + routing Routing + defaultClient ClientInfo +} + +var _ Client = (*routedClient)(nil) + +type RequestType string +type Routing map[RequestType][]ClientInfo + +type ClientInfo struct { + Client + // ClientID is an unique, human-readable, client identifier + ClientID string +} + +// NewRoutedClientWithAddr returns a new ABCI client that routes requests to multiple +// underlying clients based on the request type. +// +// It takes a comma-separated list of routing rules, consisting of colon-separated: request type, transport, address. +// Special request type "*" is used for default client. +// +// Example: +// +// ``` +// +// "Info:socket:unix:///tmp/socket.1,Info:socket:unix:///tmp/socket.2,CheckTx:socket:unix:///tmp/socket.1,*:socket:unix:///tmp/socket.3" +// +// ``` +// +// # Arguments +// - `logger` - The logger to use for the client. +// - `addr` - comma-separated list of routing rules, consisting of request type, transport name and client address separated with colon. +// Special request type "*" is used for default client. 
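// A minimal sketch (not part of this patch) of wiring the new "routed" transport
// through the config-driven constructor from abci/client/client.go shown earlier in
// this diff, written from a caller's perspective. The socket paths are hypothetical;
// assumed imports: abciclient "github.com/dashpay/tenderdash/abci/client",
// "github.com/dashpay/tenderdash/config", "github.com/dashpay/tenderdash/libs/log".
func newRoutedClientSketch(logger log.Logger) (abciclient.Client, error) {
    cfg := config.AbciConfig{
        Transport: "routed",
        Address: "CheckTx:socket:unix:///tmp/query.sock," +
            "FinalizeBlock:socket:unix:///tmp/query.sock," +
            "FinalizeBlock:socket:unix:///tmp/consensus.sock," +
            "*:socket:unix:///tmp/default.sock",
    }
    // NewClient dispatches on cfg.Transport and, for "routed", parses the
    // comma-separated routing rules via NewRoutedClientWithAddr below.
    return abciclient.NewClient(logger, cfg, true)
}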
+func NewRoutedClientWithAddr(logger log.Logger, addr string, mustConnect bool) (Client, error) { + // Split the routing rules + routing := make(Routing) + clients := make(map[string]Client) + var defaultClient Client + + rules := strings.Split(addr, ",") + + for _, rule := range rules { + parts := strings.SplitN(rule, ":", 3) + if len(parts) != 3 { + return nil, fmt.Errorf("invalid routing rule: %s", rule) + } + requestType := strings.TrimSpace(parts[0]) + transport := strings.TrimSpace(parts[1]) + address := strings.TrimSpace(parts[2]) + + // Create a new client if it doesn't exist + clientName := fmt.Sprintf("%s:%s", transport, address) + if _, ok := clients[clientName]; !ok { + cfg := config.AbciConfig{Address: address, Transport: transport} + c, err := NewClient(logger, cfg, mustConnect) + if err != nil { + return nil, err + } + clients[clientName] = c + } + + // Add the client to the routing table + if requestType == "*" { + if defaultClient != nil { + return nil, fmt.Errorf("multiple default clients") + } + defaultClient = clients[clientName] + continue + } + + client := clients[clientName] + routing[RequestType(requestType)] = append(routing[RequestType(requestType)], ClientInfo{client, clientName}) + } + + if defaultClient == nil { + return nil, fmt.Errorf("no default client defined for routed client address %s", addr) + } + + return NewRoutedClient(logger, defaultClient, routing) +} + +// NewRoutedClient returns a new ABCI client that routes requests to the +// appropriate underlying client based on the request type. +// +// # Arguments +// +// - `logger` - The logger to use for the client. +// - `defaultClient` - The default client to use when no specific client is +// configured for a request type. +// - `routing` - The clients to route requests to. +// +// See docs of routedClient.delegate() for more details about implemented logic. +func NewRoutedClient(logger log.Logger, defaultClient Client, routing Routing) (Client, error) { + defaultClientID := "" + if s, ok := defaultClient.(fmt.Stringer); ok { + defaultClientID = fmt.Sprintf("DEFAULT:%s", s.String()) + } else { + defaultClientID = "DEFAULT" + } + + cli := &routedClient{ + logger: logger, + routing: routing, + defaultClient: ClientInfo{defaultClient, defaultClientID}, + } + + cli.Service = service.NewBaseService(logger, "RoutedClient", cli) + return cli, nil +} + +func (cli *routedClient) OnStart(ctx context.Context) error { + var errs error + for _, clients := range cli.routing { + for _, client := range clients { + if !client.IsRunning() { + if err := client.Start(ctx); err != nil { + errs = multierror.Append(errs, err) + } + } + } + } + + if !cli.defaultClient.IsRunning() { + if err := cli.defaultClient.Start(ctx); err != nil { + errs = multierror.Append(errs, err) + } + } + + return errs +} + +func (cli *routedClient) OnStop() { + for _, clients := range cli.routing { + for _, client := range clients { + if client.IsRunning() { + switch c := client.Client.(type) { + case *socketClient: + c.Stop() + case *localClient: + c.Stop() + case *grpcClient: + c.Stop() + } + } + } + } +} + +// delegate calls the given function on the appropriate client with the given +// arguments. +// +// It executes the given function on all clients configured in the routing table. +// If no client is configured for the given function, it calls the function on the +// default client. 
+// +// If more than one client is configured for the given function, it calls the +// function on each client in turn, and returns first result where any of returned +// values is non-zero. Results of subsequent calls are silently dropped. +// +// If all clients return only zero values, it returns response from last client, which is effectively +// also a zero value. +// +// If the function returns only 1 value, it assumes it is of type `error`. +// Otherwise it assumes response is `result, error`. +// +// When a function call returns an error, error is returned and remaining functions are NOT called. +func (cli *routedClient) delegate(args ...interface{}) (firstResult any, err error) { + // Get the caller function name; it will be our request type + pc, _, _, _ := runtime.Caller(1) + funcObj := runtime.FuncForPC(pc) + funcName := funcObj.Name() + // remove package name + funcName = funcName[strings.LastIndex(funcName, ".")+1:] + + clients, ok := cli.routing[RequestType(funcName)] + if !ok { + clients = []ClientInfo{cli.defaultClient} + cli.logger.Trace("no client found for method, falling back to default client", "method", funcName) + } + // client that provided first non-zero result + winner := "" + + startAll := time.Now() + + var ret any + for _, client := range clients { + start := time.Now() + + zerosReturned, results := cli.call(client, funcName, args...) + if ret, err = parseReturned(funcName, results); err != nil { + cli.logger.Error("abci client returned error", "client_id", client.ClientID, "err", err) + return ret, err + } + + // return first non-zero result + if !zerosReturned && firstResult == nil { + firstResult = ret + winner = client.ClientID + } + + cli.logger.Trace("routed ABCI request to a client", + "method", funcName, + "client_id", client.ClientID, + "nil", zerosReturned, + "took", time.Since(start).String()) + } + + cli.logger.Trace("routed ABCI request execution successful", + "method", funcName, + "client_id", winner, + "took", time.Since(startAll).String(), + "nil", firstResult == nil) + + if firstResult == nil { + firstResult = ret + } + + return firstResult, err +} + +// call calls the given function on the given client with the given arguments. +// It returns whether all returned values are zero, and these values itself. +func (cli *routedClient) call(client Client, funcName string, args ...interface{}) (onlyZeros bool, result []interface{}) { + method := reflect.ValueOf(client).MethodByName(funcName) + if !method.IsValid() { + panic(fmt.Sprintf("no method %s on client %T", funcName, client)) + } + + arguments := make([]reflect.Value, 0, len(args)) + for _, arg := range args { + arguments = append(arguments, reflect.ValueOf(arg)) + } + + values := method.Call(arguments) + + onlyZeros = true + + result = make([]interface{}, 0, len(values)) + for _, v := range values { + if !v.IsZero() { + onlyZeros = false + } + result = append(result, v.Interface()) + } + + return onlyZeros, result +} + +func parseReturned(funcName string, ret []interface{}) (any, error) { + switch len(ret) { + case 0: + // should never happen + return nil, fmt.Errorf("no result from any client for ABCI method %s", funcName) + case 1: + err, _ := ret[0].(error) + return nil, err + + case 2: + err, _ := ret[1].(error) + return ret[0], err + default: + panic(fmt.Sprintf("unexpected number of return values: %d", len(ret))) + } +} + +// Error returns an error if the client was stopped abruptly. 
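// A minimal sketch (not part of this patch) of building the Routing table for
// NewRoutedClient directly, from within this package; queryCli, consensusCli and
// defaultCli are hypothetical values that already satisfy the Client interface.
func newRoutedClientFromTableSketch(logger log.Logger, queryCli, consensusCli, defaultCli Client) (Client, error) {
    routing := Routing{
        // CheckTx goes only to the query client.
        "CheckTx": {{Client: queryCli, ClientID: "query"}},
        // FinalizeBlock fans out to both; per delegate() above, the first
        // non-zero response wins and later results are dropped.
        "FinalizeBlock": {
            {Client: queryCli, ClientID: "query"},
            {Client: consensusCli, ClientID: "consensus"},
        },
    }
    // Any request type missing from the table (for example Info) is delegated
    // to defaultCli, matching the fallback described in delegate() above.
    return NewRoutedClient(logger, defaultCli, routing)
}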
+func (cli *routedClient) Error() error { + var errs error + for _, clients := range cli.routing { + for _, client := range clients { + err := client.Error() + if err != nil { + errs = multierror.Append(errs, err) + } + } + } + + return errs +} + +/// Implement the Application interface + +func (cli *routedClient) Flush(ctx context.Context) error { + _, err := cli.delegate(ctx) + return err +} + +func (cli *routedClient) Echo(ctx context.Context, msg string) (*types.ResponseEcho, error) { + result, err := cli.delegate(ctx, msg) + return result.(*types.ResponseEcho), err +} + +func (cli *routedClient) Info(ctx context.Context, req *types.RequestInfo) (*types.ResponseInfo, error) { + result, err := cli.delegate(ctx, req) + return result.(*types.ResponseInfo), err +} + +func (cli *routedClient) CheckTx(ctx context.Context, req *types.RequestCheckTx) (*types.ResponseCheckTx, error) { + result, err := cli.delegate(ctx, req) + return result.(*types.ResponseCheckTx), err +} + +func (cli *routedClient) Query(ctx context.Context, req *types.RequestQuery) (*types.ResponseQuery, error) { + result, err := cli.delegate(ctx, req) + return result.(*types.ResponseQuery), err +} + +func (cli *routedClient) InitChain(ctx context.Context, req *types.RequestInitChain) (*types.ResponseInitChain, error) { + result, err := cli.delegate(ctx, req) + return result.(*types.ResponseInitChain), err +} + +func (cli *routedClient) ListSnapshots(ctx context.Context, req *types.RequestListSnapshots) (*types.ResponseListSnapshots, error) { + result, err := cli.delegate(ctx, req) + return result.(*types.ResponseListSnapshots), err +} + +func (cli *routedClient) OfferSnapshot(ctx context.Context, req *types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) { + result, err := cli.delegate(ctx, req) + return result.(*types.ResponseOfferSnapshot), err +} + +func (cli *routedClient) LoadSnapshotChunk(ctx context.Context, req *types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) { + result, err := cli.delegate(ctx, req) + return result.(*types.ResponseLoadSnapshotChunk), err +} + +func (cli *routedClient) ApplySnapshotChunk(ctx context.Context, req *types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) { + result, err := cli.delegate(ctx, req) + return result.(*types.ResponseApplySnapshotChunk), err +} + +func (cli *routedClient) PrepareProposal(ctx context.Context, req *types.RequestPrepareProposal) (*types.ResponsePrepareProposal, error) { + result, err := cli.delegate(ctx, req) + return result.(*types.ResponsePrepareProposal), err +} + +func (cli *routedClient) ProcessProposal(ctx context.Context, req *types.RequestProcessProposal) (*types.ResponseProcessProposal, error) { + result, err := cli.delegate(ctx, req) + return result.(*types.ResponseProcessProposal), err +} + +func (cli *routedClient) ExtendVote(ctx context.Context, req *types.RequestExtendVote) (*types.ResponseExtendVote, error) { + result, err := cli.delegate(ctx, req) + return result.(*types.ResponseExtendVote), err +} + +func (cli *routedClient) VerifyVoteExtension(ctx context.Context, req *types.RequestVerifyVoteExtension) (*types.ResponseVerifyVoteExtension, error) { + result, err := cli.delegate(ctx, req) + return result.(*types.ResponseVerifyVoteExtension), err +} + +func (cli *routedClient) FinalizeBlock(ctx context.Context, req *types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error) { + result, err := cli.delegate(ctx, req) + return result.(*types.ResponseFinalizeBlock), err +} diff --git 
a/abci/client/routed_client_test.go b/abci/client/routed_client_test.go new file mode 100644 index 0000000000..99883da542 --- /dev/null +++ b/abci/client/routed_client_test.go @@ -0,0 +1,152 @@ +package abciclient_test + +import ( + "context" + "fmt" + "sync" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + abciclient "github.com/dashpay/tenderdash/abci/client" + "github.com/dashpay/tenderdash/abci/server" + "github.com/dashpay/tenderdash/abci/types" + "github.com/dashpay/tenderdash/abci/types/mocks" + "github.com/dashpay/tenderdash/libs/log" +) + +// TestRouting tests the RoutedClient. +// +// Given 3 clients: defaultApp, consensusApp and queryApp: +// * when a request of type Info is made, it should be routed to defaultApp +// * when a request of type FinalizeBlock is made, it should be first routed to queryApp, then to consensusApp +// * when a request of type CheckTx is made, it should be routed to queryApp +// * when a request of type PrepareProposal is made, it should be routed to to consensusApp +func TestRouting(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + // infoMtx blocks Info until we finish the test + var infoMtx sync.Mutex + infoMtx.Lock() + infoExecuted := false + + logger := log.NewTestingLogger(t) + + defaultApp, defaultSocket := startApp(ctx, t, logger, "default") + defer defaultApp.AssertExpectations(t) + + defaultApp.On("Info", mock.Anything, mock.Anything).Return(&types.ResponseInfo{ + Data: "info", + }, nil).Run(func(_args mock.Arguments) { + t.Log("Info: before lock") + infoMtx.Lock() + defer infoMtx.Unlock() + t.Log("Info: after lock") + infoExecuted = true + }).Once() + + queryApp, querySocket := startApp(ctx, t, logger, "query") + defer queryApp.AssertExpectations(t) + queryApp.On("CheckTx", mock.Anything, mock.Anything).Return(&types.ResponseCheckTx{ + Priority: 1, + }, nil).Once() + queryApp.On("FinalizeBlock", mock.Anything, mock.Anything).Return(&types.ResponseFinalizeBlock{}, nil).Once() + + consensusApp, consensusSocket := startApp(ctx, t, logger, "consensus") + defer consensusApp.AssertExpectations(t) + consensusApp.On("PrepareProposal", mock.Anything, mock.Anything).Return(&types.ResponsePrepareProposal{ + AppHash: []byte("apphash"), + AppVersion: 1, + }, nil).Once() + consensusApp.On("FinalizeBlock", mock.Anything, mock.Anything).Return(&types.ResponseFinalizeBlock{ + RetainHeight: 1, + }, nil).Once() + + addr := fmt.Sprintf("CheckTx:socket:%s", querySocket) + + fmt.Sprintf(",FinalizeBlock:socket:%s,FinalizeBlock:socket:%s", querySocket, consensusSocket) + + fmt.Sprintf(",PrepareProposal:socket:%s", consensusSocket) + + fmt.Sprintf(",*:socket:%s", defaultSocket) + + logger.Info("configuring routed abci client with address", "addr", addr) + routedClient, err := abciclient.NewRoutedClientWithAddr(logger, addr, true) + assert.NoError(t, err) + err = routedClient.Start(ctx) + assert.NoError(t, err) + + // Test routing + wg := sync.WaitGroup{} + + // Info is called from separate thread, as we want it to block + // to see if we can execute other calls (on other clients) without blocking + wg.Add(1) + go func() { + // info is locked, so it should finish last + _, err := routedClient.Info(ctx, &types.RequestInfo{}) + require.NoError(t, err) + wg.Done() + }() + + // CheckTx + _, err = routedClient.CheckTx(ctx, &types.RequestCheckTx{}) + assert.NoError(t, err) + + // FinalizeBlock + _, err = routedClient.FinalizeBlock(ctx, 
&types.RequestFinalizeBlock{}) + assert.NoError(t, err) + + // PrepareProposal + _, err = routedClient.PrepareProposal(ctx, &types.RequestPrepareProposal{}) + assert.NoError(t, err) + + // unlock info + assert.False(t, infoExecuted) + infoMtx.Unlock() + wg.Wait() + assert.True(t, infoExecuted) +} + +func startApp(ctx context.Context, t *testing.T, logger log.Logger, id string) (*mocks.Application, string) { + app := mocks.NewApplication(t) + defer app.AssertExpectations(t) + + addr := fmt.Sprintf("unix://%s/%s", t.TempDir(), "/socket."+id) + + server, err := server.NewServer(logger, addr, "socket", app) + require.NoError(t, err) + err = server.Start(ctx) + require.NoError(t, err) + + return app, addr +} + +// TestRoutedClientGrpc tests that the RoutedClient correctly forwards requests to a gRPC server. +func TestRoutedClientGrpc(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond) + defer cancel() + + logger := log.NewTestingLogger(t) + + app := mocks.NewApplication(t) + defer app.AssertExpectations(t) + app.On("Echo", mock.Anything, mock.Anything).Return( + func(_ctx context.Context, msg *types.RequestEcho) (*types.ResponseEcho, error) { + return &types.ResponseEcho{Message: msg.Message}, nil + }).Maybe() + app.On("Info", mock.Anything, mock.Anything).Return(&types.ResponseInfo{}, nil).Once() + + grpcServer := server.NewGRPCServer(logger, "tcp://127.0.0.1:1234", app) + require.NoError(t, grpcServer.Start(ctx)) + + addr := "*:grpc:127.0.0.1:1234" + logger.Info("configuring routed abci client with address", "addr", addr) + client, err := abciclient.NewRoutedClientWithAddr(logger, addr, true) + require.NoError(t, err) + require.NoError(t, client.Start(ctx)) + + _, err = client.Info(ctx, &types.RequestInfo{}) + assert.NoError(t, err) + +} diff --git a/abci/client/socket_client.go b/abci/client/socket_client.go index 26adc2318f..0726bb85f9 100644 --- a/abci/client/socket_client.go +++ b/abci/client/socket_client.go @@ -28,6 +28,7 @@ type socketClient struct { mustConnect bool conn net.Conn + // Requests queue reqQueue chan *requestAndResponse mtx sync.Mutex @@ -99,6 +100,14 @@ func (cli *socketClient) OnStop() { cli.drainQueue() } +func (cli *socketClient) String() string { + if err := cli.Error(); err != nil { + return fmt.Sprintf("%T(%s):err=%s", cli, cli.addr, err.Error()) + } + + return fmt.Sprintf("%T(%s)", cli, cli.addr) +} + // Error returns an error if the client was stopped abruptly. func (cli *socketClient) Error() error { cli.mtx.Lock() @@ -108,37 +117,62 @@ func (cli *socketClient) Error() error { //---------------------------------------- +// Add the request to the pending messages queue. +// +// If the context `ctx` is canceled, return ctx.Err(). +func (cli *socketClient) enqueue(ctx context.Context, reqres *requestAndResponse) error { + select { + case <-ctx.Done(): + return ctx.Err() + case cli.reqQueue <- reqres: + return nil + } +} + +// Block until first request arrives, then return it. +// +// If the context `ctx` is canceled, return nil.
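The two routed-client tests above drive the client purely through its address string: one `RequestType:transport:address` entry per route, repeated entries fanning a call out in order, and `*` as the fallback. The standalone sketch below assembles the same kind of string; the helper name and socket paths are illustrative placeholders, and only `abciclient.NewRoutedClientWithAddr` (referenced in the trailing comment) comes from this diff.

```go
package main

import (
	"fmt"
	"strings"
)

// buildRoutedAddr assembles a routed-client address of the form
// "RequestType:transport:address,...,*:transport:address", the same shape
// passed to abciclient.NewRoutedClientWithAddr in the tests above.
// The function name and socket paths are illustrative placeholders.
func buildRoutedAddr(querySocket, consensusSocket, defaultSocket string) string {
	routes := []string{
		fmt.Sprintf("CheckTx:socket:%s", querySocket),
		// Repeating a request type fans the call out to several apps, in order.
		fmt.Sprintf("FinalizeBlock:socket:%s", querySocket),
		fmt.Sprintf("FinalizeBlock:socket:%s", consensusSocket),
		fmt.Sprintf("PrepareProposal:socket:%s", consensusSocket),
		// "*" is the fallback route for every other request type;
		// a gRPC backend would be expressed as "*:grpc:127.0.0.1:1234".
		fmt.Sprintf("*:socket:%s", defaultSocket),
	}
	return strings.Join(routes, ",")
}

func main() {
	addr := buildRoutedAddr(
		"unix:///tmp/query.sock",
		"unix:///tmp/consensus.sock",
		"unix:///tmp/default.sock",
	)
	// This string would then be handed to
	// abciclient.NewRoutedClientWithAddr(logger, addr, true).
	fmt.Println(addr)
}
```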
+func (cli *socketClient) dequeue(ctx context.Context) *requestAndResponse { + select { + case item := <-cli.reqQueue: + return item + case <-ctx.Done(): + return nil + } +} + func (cli *socketClient) sendRequestsRoutine(ctx context.Context, conn io.Writer) { bw := bufio.NewWriter(conn) - for { - select { - case <-ctx.Done(): + // dequeue will block until a message arrives + for reqres := cli.dequeue(ctx); reqres != nil && ctx.Err() == nil; reqres = cli.dequeue(ctx) { + if err := reqres.ctx.Err(); err != nil { + // request expired, skip it + cli.logger.Debug("abci.socketClient request expired, skipping", "req", reqres.Request.Value, "error", err) + continue + } + + // N.B. We must track request before sending it out, otherwise the + // server may reply before we do it, and the receiver will fail for an + // unsolicited reply. + cli.trackRequest(reqres) + + if err := types.WriteMessage(reqres.Request, bw); err != nil { + cli.stopForError(fmt.Errorf("write to buffer: %w", err)) return - case reqres := <-cli.reqQueue: - // N.B. We must enqueue before sending out the request, otherwise the - // server may reply before we do it, and the receiver will fail for an - // unsolicited reply. - cli.trackRequest(reqres) - - if err := types.WriteMessage(reqres.Request, bw); err != nil { - cli.stopForError(fmt.Errorf("write to buffer: %w", err)) - return - } + } - if err := bw.Flush(); err != nil { - cli.stopForError(fmt.Errorf("flush buffer: %w", err)) - return - } + if err := bw.Flush(); err != nil { + cli.stopForError(fmt.Errorf("flush buffer: %w", err)) + return } } + + cli.logger.Debug("context canceled, stopping sendRequestsRoutine") } func (cli *socketClient) recvResponseRoutine(ctx context.Context, conn io.Reader) { r := bufio.NewReader(conn) - for { - if ctx.Err() != nil { - return - } + for ctx.Err() == nil { res := &types.Response{} if err := types.ReadMessage(r, res); err != nil { @@ -158,6 +192,8 @@ func (cli *socketClient) recvResponseRoutine(ctx context.Context, conn io.Reader } } } + + cli.logger.Debug("context canceled, stopping recvResponseRoutine") } func (cli *socketClient) trackRequest(reqres *requestAndResponse) { @@ -201,15 +237,15 @@ func (cli *socketClient) doRequest(ctx context.Context, req *types.Request) (*ty return nil, errors.New("client has stopped") } - reqres := makeReqRes(req) - - select { - case cli.reqQueue <- reqres: - case <-ctx.Done(): - return nil, fmt.Errorf("can't queue req: %w", ctx.Err()) + reqres := makeReqRes(ctx, req) + if err := cli.enqueue(ctx, reqres); err != nil { + return nil, err } + // wait for response for our request select { + case <-reqres.ctx.Done(): + return nil, reqres.ctx.Err() case <-reqres.signal: if err := cli.Error(); err != nil { return nil, err diff --git a/abci/client/socket_client_test.go b/abci/client/socket_client_test.go new file mode 100644 index 0000000000..b17940a7a5 --- /dev/null +++ b/abci/client/socket_client_test.go @@ -0,0 +1,120 @@ +package abciclient + +import ( + "context" + "strconv" + "sync" + "sync/atomic" + "testing" + "time" + + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + "github.com/dashpay/tenderdash/abci/server" + "github.com/dashpay/tenderdash/abci/types" + "github.com/dashpay/tenderdash/abci/types/mocks" + "github.com/dashpay/tenderdash/libs/log" +) + +// TestSocketClientTimeout tests that the socket client times out correctly. 
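The refactored sendRequestsRoutine and doRequest above follow one pattern: race the shared request queue against the client-wide context when dequeuing, and honor each request's own context both before writing it out and while waiting for its response — which is what TestSocketClientTimeout below exercises. A minimal sketch of that pattern, using generic illustrative names rather than the tenderdash types:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

type request struct {
	ctx  context.Context // per-request deadline, re-checked at send time
	body string
}

// dequeue blocks until a request arrives or the client-wide context is
// canceled, mirroring socketClient.dequeue above (names are illustrative).
func dequeue(ctx context.Context, queue <-chan *request) *request {
	select {
	case r := <-queue:
		return r
	case <-ctx.Done():
		return nil
	}
}

func sendLoop(ctx context.Context, queue <-chan *request) {
	for r := dequeue(ctx, queue); r != nil && ctx.Err() == nil; r = dequeue(ctx, queue) {
		if err := r.ctx.Err(); err != nil {
			// The caller already gave up while the request sat in the queue:
			// skip it instead of writing a request nobody waits for.
			fmt.Println("skipping expired request:", r.body, "-", err)
			continue
		}
		fmt.Println("sending:", r.body)
	}
	fmt.Println("client context done, stopping send loop")
}

func main() {
	clientCtx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()

	queue := make(chan *request, 2)

	expiredCtx, expire := context.WithCancel(context.Background())
	expire() // simulate a request whose caller timed out while it was queued
	queue <- &request{ctx: expiredCtx, body: "expired"}
	queue <- &request{ctx: context.Background(), body: "fresh"}

	sendLoop(clientCtx, queue)
}
```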
+func TestSocketClientTimeout(t *testing.T) { + const ( + Success = 0 + FailDuringEnqueue = 1 + FailDuringProcessing = 2 + + baseTime = 10 * time.Millisecond + ) + type testCase struct { + name string + timeout time.Duration + enqueueSleep time.Duration + processingSleep time.Duration + expect int + } + testCases := []testCase{ + {name: "immediate", timeout: baseTime, enqueueSleep: 0, processingSleep: 0, expect: Success}, + {name: "small enqueue delay", timeout: 4 * baseTime, enqueueSleep: 1 * baseTime, processingSleep: 0, expect: Success}, + {name: "small processing delay", timeout: 4 * baseTime, enqueueSleep: 0, processingSleep: 1 * baseTime, expect: Success}, + {name: "within timeout", timeout: 4 * baseTime, enqueueSleep: 1 * baseTime, processingSleep: 1 * baseTime, expect: Success}, + {name: "timeout during enqueue", timeout: 3 * baseTime, enqueueSleep: 4 * baseTime, processingSleep: 1 * baseTime, expect: FailDuringEnqueue}, + {name: "timeout during processing", timeout: 4 * baseTime, enqueueSleep: 1 * baseTime, processingSleep: 4 * baseTime, expect: FailDuringProcessing}, + } + + logger := log.NewTestingLogger(t) + + for i, tc := range testCases { + i := i + tc := tc + t.Run(tc.name, func(t *testing.T) { + + // wait until all threads end, otherwise we'll get data race in t.Log() + wg := sync.WaitGroup{} + defer wg.Wait() + + mainCtx, cancel := context.WithTimeout(context.Background(), 1*time.Second) + defer cancel() + + socket := "unix://" + t.TempDir() + "/socket." + strconv.Itoa(i) + + checkTxExecuted := atomic.Bool{} + + app := mocks.NewApplication(t) + app.On("Echo", mock.Anything, mock.Anything).Return(&types.ResponseEcho{}, nil).Maybe() + app.On("Info", mock.Anything, mock.Anything).Run(func(_ mock.Arguments) { + wg.Add(1) + logger.Debug("Info before sleep") + time.Sleep(tc.enqueueSleep) + logger.Debug("Info after sleep") + wg.Done() + }).Return(&types.ResponseInfo{}, nil).Maybe() + app.On("CheckTx", mock.Anything, mock.Anything).Run(func(_ mock.Arguments) { + wg.Add(1) + logger.Debug("CheckTx before sleep") + checkTxExecuted.Store(true) + time.Sleep(tc.processingSleep) + logger.Debug("CheckTx after sleep") + wg.Done() + }).Return(&types.ResponseCheckTx{}, nil).Maybe() + + service, err := server.NewServer(logger, socket, "socket", app) + require.NoError(t, err) + svr := service.(*server.SocketServer) + err = svr.Start(mainCtx) + require.NoError(t, err) + defer svr.Stop() + + cli := NewSocketClient(logger, socket, true).(*socketClient) + + err = cli.Start(mainCtx) + require.NoError(t, err) + defer cli.Stop() + + reqCtx, reqCancel := context.WithTimeout(context.Background(), tc.timeout) + defer reqCancel() + // Info is here just to block for some time, so we don't want to enforce timeout on it + + wg.Add(1) + go func() { + _, _ = cli.Info(mainCtx, &types.RequestInfo{}) + wg.Done() + }() + + time.Sleep(1 * time.Millisecond) // ensure the goroutine has started + + _, err = cli.CheckTx(reqCtx, &types.RequestCheckTx{}) + switch tc.expect { + case Success: + require.NoError(t, err) + require.True(t, checkTxExecuted.Load()) + case FailDuringEnqueue: + require.Error(t, err) + require.False(t, checkTxExecuted.Load()) + case FailDuringProcessing: + require.Error(t, err) + require.True(t, checkTxExecuted.Load()) + } + }) + } +} diff --git a/abci/cmd/abci-cli/abci-cli.go b/abci/cmd/abci-cli/abci-cli.go index 6680a8aefe..efc2c85939 100644 --- a/abci/cmd/abci-cli/abci-cli.go +++ b/abci/cmd/abci-cli/abci-cli.go @@ -21,9 +21,11 @@ import ( "github.com/dashpay/tenderdash/abci/server" servertest 
"github.com/dashpay/tenderdash/abci/tests/server" "github.com/dashpay/tenderdash/abci/types" + "github.com/dashpay/tenderdash/config" "github.com/dashpay/tenderdash/libs/log" "github.com/dashpay/tenderdash/proto/tendermint/crypto" tmproto "github.com/dashpay/tenderdash/proto/tendermint/types" + pbversion "github.com/dashpay/tenderdash/proto/tendermint/version" "github.com/dashpay/tenderdash/version" ) @@ -54,7 +56,7 @@ func RootCmmand(logger log.Logger) *cobra.Command { Use: "abci-cli", Short: "the ABCI CLI tool wraps an ABCI client", Long: "the ABCI CLI tool wraps an ABCI client and is used for testing ABCI servers", - PersistentPreRunE: func(cmd *cobra.Command, args []string) (err error) { + PersistentPreRunE: func(cmd *cobra.Command, _args []string) (err error) { switch cmd.Use { case "kvstore", "version": @@ -63,7 +65,8 @@ func RootCmmand(logger log.Logger) *cobra.Command { if client == nil { var err error - client, err = abciclient.NewClient(logger.With("module", "abci-client"), flagAddress, flagAbci, false) + cfg := config.AbciConfig{Address: flagAddress, Transport: flagAbci} + client, err = abciclient.NewClient(logger.With("module", "abci-client"), cfg, false) if err != nil { return err } @@ -223,7 +226,7 @@ var versionCmd = &cobra.Command{ Short: "print ABCI console version", Long: "print ABCI console version", Args: cobra.ExactArgs(0), - RunE: func(cmd *cobra.Command, args []string) error { + RunE: func(_cmd *cobra.Command, _args []string) error { fmt.Println(version.ABCIVersion) return nil }, @@ -316,7 +319,7 @@ func compose(fs []func() error) error { return err } -func cmdTest(cmd *cobra.Command, args []string) error { +func cmdTest(cmd *cobra.Command, _args []string) error { ctx := cmd.Context() return compose( []func() error{ @@ -379,7 +382,7 @@ func cmdTest(cmd *cobra.Command, args []string) error { }) } -func cmdBatch(cmd *cobra.Command, args []string) error { +func cmdBatch(cmd *cobra.Command, _args []string) error { bufReader := bufio.NewReader(os.Stdin) LOOP: for { @@ -405,7 +408,7 @@ LOOP: return nil } -func cmdConsole(cmd *cobra.Command, args []string) error { +func cmdConsole(cmd *cobra.Command, _args []string) error { for { fmt.Printf("> ") bufReader := bufio.NewReader(os.Stdin) @@ -674,7 +677,7 @@ func cmdPrepareProposal(cmd *cobra.Command, args []string) error { existingTx := inTxArray(txsBytesArray, tx.Tx) if tx.Action == types.TxRecord_UNKNOWN || (existingTx && tx.Action == types.TxRecord_ADDED) || - (!existingTx && (tx.Action == types.TxRecord_UNMODIFIED || tx.Action == types.TxRecord_REMOVED)) { + (!existingTx && (tx.Action == types.TxRecord_UNMODIFIED || tx.Action == types.TxRecord_REMOVED || tx.Action == types.TxRecord_DELAYED)) { resps = append(resps, response{ Code: codeBad, Log: "Failed. 
Tx: " + string(tx.GetTx()) + " action: " + tx.Action.String(), @@ -697,8 +700,9 @@ func cmdProcessProposal(cmd *cobra.Command, args []string) error { panic(err) } res, err := client.ProcessProposal(cmd.Context(), &types.RequestProcessProposal{ - Height: height, - Txs: txsBytesArray, + Height: height, + Txs: txsBytesArray, + Version: &pbversion.Consensus{Block: version.BlockProtocol, App: kvstore.ProtocolVersion}, }) if err != nil { return err @@ -711,7 +715,7 @@ func cmdProcessProposal(cmd *cobra.Command, args []string) error { } func makeKVStoreCmd(logger log.Logger) func(*cobra.Command, []string) error { - return func(cmd *cobra.Command, args []string) error { + return func(cmd *cobra.Command, _args []string) error { // Create the application - in memory or persisted to disk var ( app types.Application diff --git a/abci/example/counter/counter.go b/abci/example/counter/counter.go index 293e828f64..7e34923753 100644 --- a/abci/example/counter/counter.go +++ b/abci/example/counter/counter.go @@ -79,6 +79,7 @@ func (app *Application) PrepareProposal(_ context.Context, req *types.RequestPre AppHash: make([]byte, tmcrypto.DefaultAppHashSize), CoreChainLockUpdate: app.lastCoreChainLock.ToProto(), TxResults: app.lastTxResults, + AppVersion: 1, } return &resp, nil } diff --git a/abci/example/example_test.go b/abci/example/example_test.go index d1f38df68d..c25ad1f9dc 100644 --- a/abci/example/example_test.go +++ b/abci/example/example_test.go @@ -7,7 +7,6 @@ import ( "net" "os" "testing" - "time" "github.com/stretchr/testify/require" @@ -21,19 +20,16 @@ import ( "github.com/dashpay/tenderdash/abci/types" "github.com/dashpay/tenderdash/libs/log" tmnet "github.com/dashpay/tenderdash/libs/net" + "github.com/dashpay/tenderdash/proto/tendermint/version" ) -func init() { - rand.Seed(time.Now().UnixNano()) -} - func TestKVStore(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() logger := log.NewNopLogger() t.Log("### Testing KVStore") - app, err := kvstore.NewMemoryApp(kvstore.WithLogger(logger)) + app, err := kvstore.NewMemoryApp(kvstore.WithLogger(logger), kvstore.WithAppVersion(0)) require.NoError(t, err) testBulk(ctx, t, logger, app) } @@ -78,7 +74,11 @@ func testBulk(ctx context.Context, t *testing.T, logger log.Logger, app types.Ap require.NoError(t, err) // Construct request - rpp := &types.RequestProcessProposal{Height: 1, Txs: make([][]byte, numDeliverTxs)} + rpp := &types.RequestProcessProposal{ + Height: 1, + Txs: make([][]byte, numDeliverTxs), + Version: &version.Consensus{App: 1}, + } for counter := 0; counter < numDeliverTxs; counter++ { rpp.Txs[counter] = []byte("test") } @@ -98,7 +98,7 @@ func testBulk(ctx context.Context, t *testing.T, logger log.Logger, app types.Ap //------------------------- // test grpc -func dialerFunc(ctx context.Context, addr string) (net.Conn, error) { +func dialerFunc(_ctx context.Context, addr string) (net.Conn, error) { return tmnet.Connect(addr) } @@ -116,7 +116,7 @@ func testGRPCSync(ctx context.Context, t *testing.T, logger log.Logger, app type t.Cleanup(server.Wait) // Connect to the socket - conn, err := grpc.Dial(socket, + conn, err := grpc.NewClient(socket, grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithContextDialer(dialerFunc), ) diff --git a/abci/example/kvstore/kvstore.go b/abci/example/kvstore/kvstore.go index 41e9c2e52d..608bc8be44 100644 --- a/abci/example/kvstore/kvstore.go +++ b/abci/example/kvstore/kvstore.go @@ -3,9 +3,9 @@ package kvstore import ( "bytes" "context" - "encoding/json" 
"errors" "fmt" + "io" "path" "strconv" "time" @@ -27,7 +27,9 @@ import ( "github.com/dashpay/tenderdash/version" ) -const ProtocolVersion uint64 = 0x12345678 +// ProtocolVersion defines initial protocol (app) version. +// App version is incremented on every block, to match current height. +const ProtocolVersion uint64 = 1 //--------------------------------------------------- @@ -85,6 +87,9 @@ type Application struct { offerSnapshot *offerSnapshot shouldCommitVerify bool + // appVersion returned in ResponsePrepareProposal. + // Special value of 0 means that it will be always set to current height. + appVersion uint64 } // WithCommitVerification enables commit verification @@ -95,6 +100,15 @@ func WithCommitVerification() OptFunc { } } +// WithAppVersion enables the application to enforce the app version to be equal to provided value. +// Special value of `0` means that app version will be always set to current block version. +func WithAppVersion(version uint64) OptFunc { + return func(app *Application) error { + app.appVersion = version + return nil + } +} + // WithValidatorSetUpdates defines initial validator set when creating Application func WithValidatorSetUpdates(validatorSetUpdates map[int64]abci.ValidatorSetUpdate) OptFunc { return func(app *Application) error { @@ -212,6 +226,7 @@ func newApplication(stateStore StoreFactory, opts ...OptFunc) (*Application, err verifyTx: verifyTx, execTx: execTx, shouldCommitVerify: false, + appVersion: ProtocolVersion, } for _, opt := range opts { @@ -228,7 +243,12 @@ func newApplication(stateStore StoreFactory, opts ...OptFunc) (*Application, err defer in.Close() if err := app.LastCommittedState.Load(in); err != nil { - return nil, fmt.Errorf("load state: %w", err) + // EOF means we most likely don't have any state yet + if !errors.Is(err, io.EOF) { + return nil, fmt.Errorf("load state: %w", err) + } else { + app.logger.Debug("no state found, using initial state") + } } app.snapshots, err = NewSnapshotStore(path.Join(app.cfg.Dir, "snapshots")) @@ -264,9 +284,9 @@ func (app *Application) InitChain(_ context.Context, req *abci.RequestInitChain) } // Overwrite state based on AppStateBytes + // Note this is not optimal from memory perspective; use chunked state sync instead if len(req.AppStateBytes) > 0 { - err := json.Unmarshal(req.AppStateBytes, &app.LastCommittedState) - if err != nil { + if err := app.LastCommittedState.Load(bytes.NewBuffer(req.AppStateBytes)); err != nil { return &abci.ResponseInitChain{}, err } } @@ -282,7 +302,7 @@ func (app *Application) InitChain(_ context.Context, req *abci.RequestInitChain) if !ok { consensusParams = types1.ConsensusParams{ Version: &types1.VersionParams{ - AppVersion: ProtocolVersion, + AppVersion: uint64(app.LastCommittedState.GetHeight()) + 1, }, } } @@ -341,6 +361,7 @@ func (app *Application) PrepareProposal(_ context.Context, req *abci.RequestPrep ConsensusParamUpdates: app.getConsensusParamsUpdate(req.Height), CoreChainLockUpdate: coreChainLock, ValidatorSetUpdate: app.getValidatorSetUpdate(req.Height), + AppVersion: app.appVersionForHeight(req.Height), } if app.cfg.PrepareProposalDelayMS != 0 { @@ -368,12 +389,22 @@ func (app *Application) ProcessProposal(_ context.Context, req *abci.RequestProc Status: abci.ResponseProcessProposal_REJECT, }, err } + + if req.Version.App != app.appVersionForHeight(req.Height) { + app.logger.Error("app version mismatch in process proposal request", + "version", req.Version.App, "expected", app.appVersionForHeight(req.Height), "height", roundState.GetHeight()) + return 
&abci.ResponseProcessProposal{ + Status: abci.ResponseProcessProposal_REJECT, + }, nil + } + resp := &abci.ResponseProcessProposal{ Status: abci.ResponseProcessProposal_ACCEPT, AppHash: roundState.GetAppHash(), TxResults: txResults, ConsensusParamUpdates: app.getConsensusParamsUpdate(req.Height), ValidatorSetUpdate: app.getValidatorSetUpdate(req.Height), + Events: []abci.Event{app.eventValUpdate(req.Height)}, } if app.cfg.ProcessProposalDelayMS != 0 { @@ -398,7 +429,8 @@ func (app *Application) FinalizeBlock(_ context.Context, req *abci.RequestFinali appHash := tmbytes.HexBytes(req.Block.Header.AppHash) roundState, ok := app.roundStates[roundKey(appHash, req.Height, req.Round)] if !ok { - return &abci.ResponseFinalizeBlock{}, fmt.Errorf("state with apphash %s not found", appHash) + return &abci.ResponseFinalizeBlock{}, fmt.Errorf("state with apphash %s at height %d round %d not found", + appHash, req.Height, req.Round) } if roundState.GetHeight() != req.Height { return &abci.ResponseFinalizeBlock{}, @@ -411,18 +443,15 @@ func (app *Application) FinalizeBlock(_ context.Context, req *abci.RequestFinali if app.shouldCommitVerify { vsu := app.getActiveValidatorSetUpdates() qsd := types.QuorumSignData{ - Block: makeBlockSignItem(req, btcjson.LLMQType_5_60, vsu.QuorumHash), - Extensions: makeVoteExtensionSignItems(req, btcjson.LLMQType_5_60, vsu.QuorumHash), + Block: makeBlockSignItem(req, btcjson.LLMQType_5_60, vsu.QuorumHash), + VoteExtensionSignItems: makeVoteExtensionSignItems(req, btcjson.LLMQType_5_60, vsu.QuorumHash), } err := app.verifyBlockCommit(qsd, req.Commit) if err != nil { return nil, err } } - events := []abci.Event{app.eventValUpdate(req.Height)} - resp := &abci.ResponseFinalizeBlock{ - Events: events, - } + resp := &abci.ResponseFinalizeBlock{} if app.RetainBlocks > 0 && app.LastCommittedState.GetHeight() >= app.RetainBlocks { resp.RetainHeight = app.LastCommittedState.GetHeight() - app.RetainBlocks + 1 } @@ -530,14 +559,15 @@ func (app *Application) ApplySnapshotChunk(_ context.Context, req *abci.RequestA } if app.offerSnapshot.isFull() { - chunks := app.offerSnapshot.bytes() - err := json.Unmarshal(chunks, &app.LastCommittedState) - if err != nil { + chunks := app.offerSnapshot.reader() + defer chunks.Close() + + if err := app.LastCommittedState.Load(chunks); err != nil { return &abci.ResponseApplySnapshotChunk{}, fmt.Errorf("cannot unmarshal state: %w", err) } + app.logger.Info("restored state snapshot", "height", app.LastCommittedState.GetHeight(), - "json", string(chunks), "apphash", app.LastCommittedState.GetAppHash(), "snapshot_height", app.offerSnapshot.snapshot.Height, "snapshot_apphash", app.offerSnapshot.appHash, @@ -549,6 +579,13 @@ func (app *Application) ApplySnapshotChunk(_ context.Context, req *abci.RequestA app.logger.Debug("ApplySnapshotChunk", "resp", resp) return resp, nil } +func (app *Application) appVersionForHeight(height int64) uint64 { + if app.appVersion == 0 { + return uint64(height) + } + + return app.appVersion +} func (app *Application) createSnapshot() error { height := app.LastCommittedState.GetHeight() @@ -574,10 +611,12 @@ func (app *Application) Info(_ context.Context, req *abci.RequestInfo) (*abci.Re app.mu.Lock() defer app.mu.Unlock() appHash := app.LastCommittedState.GetAppHash() + appVersion := app.appVersionForHeight(app.LastCommittedState.GetHeight() + 1) // we set app version to CURRENT height + resp := &abci.ResponseInfo{ Data: fmt.Sprintf("{\"appHash\":\"%s\"}", appHash.String()), Version: version.ABCIVersion, - AppVersion: 
ProtocolVersion, + AppVersion: appVersion, LastBlockHeight: app.LastCommittedState.GetHeight(), LastBlockAppHash: app.LastCommittedState.GetAppHash(), } diff --git a/abci/example/kvstore/kvstore_test.go b/abci/example/kvstore/kvstore_test.go index 0041c0e4ff..93b044a4c8 100644 --- a/abci/example/kvstore/kvstore_test.go +++ b/abci/example/kvstore/kvstore_test.go @@ -21,6 +21,7 @@ import ( "github.com/dashpay/tenderdash/libs/log" "github.com/dashpay/tenderdash/libs/service" tmproto "github.com/dashpay/tenderdash/proto/tendermint/types" + pbversion "github.com/dashpay/tenderdash/proto/tendermint/version" tmtypes "github.com/dashpay/tenderdash/types" "github.com/dashpay/tenderdash/version" ) @@ -48,13 +49,15 @@ func testKVStore(ctx context.Context, t *testing.T, app types.Application, tx [] require.ErrorContains(t, err, "duplicate PrepareProposal call") reqProcess := &types.RequestProcessProposal{ - Txs: [][]byte{tx}, - Height: height, + Txs: [][]byte{tx}, + Height: height, + Version: &pbversion.Consensus{App: uint64(height)}, } respProcess, err := app.ProcessProposal(ctx, reqProcess) require.NoError(t, err) require.Len(t, respProcess.TxResults, 1) require.False(t, respProcess.TxResults[0].IsErr(), respProcess.TxResults[0].Log) + require.Len(t, respProcess.Events, 1) // Duplicate ProcessProposal calls should return error _, err = app.ProcessProposal(ctx, reqProcess) @@ -62,9 +65,8 @@ func testKVStore(ctx context.Context, t *testing.T, app types.Application, tx [] reqFin := &types.RequestFinalizeBlock{Height: height} reqFin.Block, reqFin.BlockID = makeBlock(t, height, [][]byte{tx}, respPrep.AppHash) - respFin, err := app.FinalizeBlock(ctx, reqFin) + _, err = app.FinalizeBlock(ctx, reqFin) require.NoError(t, err) - require.Equal(t, 1, len(respFin.Events)) // repeating tx raises an error _, err = app.FinalizeBlock(ctx, reqFin) @@ -122,7 +124,9 @@ func TestPersistentKVStoreKV(t *testing.T) { dir := t.TempDir() logger := log.NewNopLogger() - kvstore, err := NewPersistentApp(DefaultConfig(dir), WithLogger(logger.With("module", "kvstore"))) + kvstore, err := NewPersistentApp(DefaultConfig(dir), + WithLogger(logger.With("module", "kvstore")), + WithAppVersion(0)) require.NoError(t, err) key := testKey @@ -136,7 +140,8 @@ func TestPersistentKVStoreKV(t *testing.T) { data, err := os.ReadFile(path.Join(dir, "state.json")) require.NoError(t, err) - assert.Contains(t, string(data), fmt.Sprintf(`"%s":"%s"`, key, value)) + + assert.Contains(t, string(data), fmt.Sprintf(`"key":"%s","value":"%s"`, key, value)) } func TestPersistentKVStoreInfo(t *testing.T) { @@ -156,7 +161,7 @@ func TestPersistentKVStoreInfo(t *testing.T) { dir := t.TempDir() logger := log.NewTestingLogger(t).With("module", "kvstore") - kvstore, err := NewPersistentApp(DefaultConfig(dir), WithLogger(logger)) + kvstore, err := NewPersistentApp(DefaultConfig(dir), WithLogger(logger), WithAppVersion(0)) require.NoError(t, err) defer kvstore.Close() @@ -240,7 +245,7 @@ func TestValUpdates(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - kvstore, err := NewMemoryApp() + kvstore, err := NewMemoryApp(WithAppVersion(0)) require.NoError(t, err) // init with some validators @@ -282,19 +287,20 @@ func makeApplyBlock( } respProcessProposal, err := kvstore.ProcessProposal(ctx, &types.RequestProcessProposal{ - Hash: hash, - Height: height, - Txs: txs, + Hash: hash, + Height: height, + Txs: txs, + Version: &pbversion.Consensus{App: uint64(height)}, }) require.NoError(t, err) require.NotZero(t, respProcessProposal) 
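Many of the test updates in this file (`Version: &pbversion.Consensus{App: uint64(height)}`, the `WithAppVersion(0)` option) follow from the app-version convention introduced in kvstore.go above: with `WithAppVersion(0)` the app version tracks the block height, `Info` advertises the version expected for the next block, and `ProcessProposal` rejects a header whose version does not match. A compact sketch of that rule, under standalone, illustrative names:

```go
package main

import "fmt"

// appVersionForHeight mirrors the kvstore rule introduced above: a configured
// app version of 0 means "track the block height", any other value is
// enforced as-is. Names here are standalone and illustrative.
func appVersionForHeight(configured uint64, height int64) uint64 {
	if configured == 0 {
		return uint64(height)
	}
	return configured
}

func main() {
	const configured = 0 // i.e. the kvstore.WithAppVersion(0) used in the tests
	lastCommittedHeight := int64(5)

	// Info advertises the app version expected for the NEXT block.
	fmt.Println("Info.AppVersion:", appVersionForHeight(configured, lastCommittedHeight+1)) // 6

	// ProcessProposal rejects a header whose version does not match the rule.
	proposalHeight, proposalAppVersion := int64(6), uint64(7)
	if proposalAppVersion != appVersionForHeight(configured, proposalHeight) {
		fmt.Println("REJECT: app version mismatch")
	}
}
```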
require.Equal(t, types.ResponseProcessProposal_ACCEPT, respProcessProposal.Status) + require.Len(t, respProcessProposal.Events, 1) rfb := &types.RequestFinalizeBlock{Hash: hash, Height: height} rfb.Block, rfb.BlockID = makeBlock(t, height, txs, respProcessProposal.AppHash) resFinalizeBlock, err := kvstore.FinalizeBlock(ctx, rfb) require.NoError(t, err) - require.Len(t, resFinalizeBlock.Events, 1) return respProcessProposal, resFinalizeBlock } @@ -352,7 +358,7 @@ func makeGRPCClientServer( return nil, nil, err } - client := abciclient.NewGRPCClient(logger.With("module", "abci-client"), socket, true) + client := abciclient.NewGRPCClient(logger.With("module", "abci-client"), socket, nil, true) if err := client.Start(ctx); err != nil { cancel() @@ -367,7 +373,9 @@ func TestClientServer(t *testing.T) { logger := log.NewTestingLogger(t) // set up socket app - kvstore, err := NewMemoryApp(WithLogger(logger.With("module", "app"))) + kvstore, err := NewMemoryApp( + WithLogger(logger.With("module", "app")), + WithAppVersion(0)) require.NoError(t, err) client, server, err := makeSocketClientServer(ctx, t, logger, kvstore, "kvstore-socket") @@ -379,7 +387,7 @@ func TestClientServer(t *testing.T) { runClientTests(ctx, t, client) // set up grpc app - kvstore, err = NewMemoryApp() + kvstore, err = NewMemoryApp(WithAppVersion(0)) require.NoError(t, err) gclient, gserver, err := makeGRPCClientServer(ctx, t, logger, kvstore, "/tmp/kvstore-grpc") @@ -405,20 +413,21 @@ func runClientTests(ctx context.Context, t *testing.T, client abciclient.Client) func testClient(ctx context.Context, t *testing.T, app abciclient.Client, height int64, tx []byte, key, value string) { rpp, err := app.ProcessProposal(ctx, &types.RequestProcessProposal{ - Txs: [][]byte{tx}, - Height: height, + Txs: [][]byte{tx}, + Height: height, + Version: &pbversion.Consensus{App: uint64(height)}, }) require.NoError(t, err) require.NotZero(t, rpp) require.Equal(t, 1, len(rpp.TxResults)) require.False(t, rpp.TxResults[0].IsErr()) + require.Len(t, rpp.Events, 1) rfb := &types.RequestFinalizeBlock{Height: height} rfb.Block, rfb.BlockID = makeBlock(t, height, [][]byte{tx}, rpp.AppHash) ar, err := app.FinalizeBlock(ctx, rfb) require.NoError(t, err) require.Zero(t, ar.RetainHeight) - require.Len(t, ar.Events, 1) info, err := app.Info(ctx, &types.RequestInfo{}) require.NoError(t, err) @@ -521,6 +530,7 @@ func newKvApp(ctx context.Context, t *testing.T, genesisHeight int64, opts ...Op WithValidatorSetUpdates(map[int64]types.ValidatorSetUpdate{ genesisHeight: RandValidatorSetUpdate(1), }), + WithAppVersion(0), } app, err := NewMemoryApp(append(defaultOpts, opts...)...) 
require.NoError(t, err) @@ -535,17 +545,17 @@ func newKvApp(ctx context.Context, t *testing.T, genesisHeight int64, opts ...Op return app } -func assertRespInfo(t *testing.T, expectHeight int64, expectAppHash tmbytes.HexBytes, actual types.ResponseInfo, msgs ...interface{}) { +func assertRespInfo(t *testing.T, expectLastBlockHeight int64, expectAppHash tmbytes.HexBytes, actual types.ResponseInfo, msgs ...interface{}) { t.Helper() if expectAppHash == nil { expectAppHash = make(tmbytes.HexBytes, tmcrypto.DefaultAppHashSize) } expected := types.ResponseInfo{ - LastBlockHeight: expectHeight, + LastBlockHeight: expectLastBlockHeight, LastBlockAppHash: expectAppHash, Version: version.ABCIVersion, - AppVersion: ProtocolVersion, + AppVersion: uint64(expectLastBlockHeight + 1), Data: fmt.Sprintf(`{"appHash":"%s"}`, expectAppHash.String()), } diff --git a/abci/example/kvstore/snapshots.go b/abci/example/kvstore/snapshots.go index 13eb2ebb2f..271d5cf021 100644 --- a/abci/example/kvstore/snapshots.go +++ b/abci/example/kvstore/snapshots.go @@ -1,12 +1,13 @@ -// nolint: gosec package kvstore import ( "bytes" + "crypto/sha256" "encoding/hex" "encoding/json" "errors" "fmt" + "io" "os" "path/filepath" @@ -85,7 +86,7 @@ func (s *SnapshotStore) saveMetadata() error { // save the file to a new file and move it to make saving atomic. newFile := filepath.Join(s.dir, "metadata.json.new") file := filepath.Join(s.dir, "metadata.json") - err = os.WriteFile(newFile, bz, 0644) // nolint: gosec + err = os.WriteFile(newFile, bz, 0644) //nolint:gosec if err != nil { return err } @@ -97,20 +98,31 @@ func (s *SnapshotStore) Create(state State) (abci.Snapshot, error) { s.Lock() defer s.Unlock() - bz, err := json.Marshal(state) + height := state.GetHeight() + + filename := filepath.Join(s.dir, fmt.Sprintf("%v.json", height)) + f, err := os.Create(filename) if err != nil { return abci.Snapshot{}, err } - height := state.GetHeight() + defer f.Close() + + hasher := sha256.New() + writer := io.MultiWriter(f, hasher) + + if err := state.Save(writer); err != nil { + f.Close() + // Cleanup incomplete file; ignore errors during cleanup + _ = os.Remove(filename) + return abci.Snapshot{}, err + } + snapshot := abci.Snapshot{ Height: uint64(height), Version: 1, - Hash: crypto.Checksum(bz), - } - err = os.WriteFile(filepath.Join(s.dir, fmt.Sprintf("%v.json", height)), bz, 0644) - if err != nil { - return abci.Snapshot{}, err + Hash: hasher.Sum(nil), } + s.metadata = append(s.metadata, snapshot) err = s.saveMetadata() if err != nil { @@ -216,6 +228,44 @@ func (s *offerSnapshot) bytes() []byte { return buf.Bytes() } +// reader returns a reader for the snapshot data. +func (s *offerSnapshot) reader() io.ReadCloser { + chunks := s.chunks.Values() + reader := &chunkedReader{chunks: chunks} + + return reader +} + +type chunkedReader struct { + chunks [][]byte + index int + offset int +} + +func (r *chunkedReader) Read(p []byte) (n int, err error) { + if r.chunks == nil { + return 0, io.EOF + } + for n < len(p) && r.index < len(r.chunks) { + copyCount := copy(p[n:], r.chunks[r.index][r.offset:]) + n += copyCount + r.offset += copyCount + if r.offset >= len(r.chunks[r.index]) { + r.index++ + r.offset = 0 + } + } + if r.index >= len(r.chunks) { + err = io.EOF + } + return +} + +func (r *chunkedReader) Close() error { + r.chunks = nil + return nil +} + // makeChunkItem returns the chunk at a given index from the full byte slice. 
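The `reader()`/`chunkedReader` pair added in snapshots.go above lets snapshot restore stream chunks into `State.Load` instead of concatenating them in memory first. The following standalone sketch reproduces the same chunk-stitching idea (simplified, without `Close`), only to show how a single reader — consumed here with `io.ReadAll`, or equally by a JSON decoder — can stitch the chunks together:

```go
package main

import (
	"fmt"
	"io"
)

// chunkedReader follows the same idea as the reader added in snapshots.go:
// expose a list of snapshot chunks as one io.Reader so state can be restored
// by streaming rather than by concatenating all chunks in memory first.
// This copy is simplified (no Close) and is for illustration only.
type chunkedReader struct {
	chunks [][]byte
	index  int
	offset int
}

func (r *chunkedReader) Read(p []byte) (n int, err error) {
	for n < len(p) && r.index < len(r.chunks) {
		c := copy(p[n:], r.chunks[r.index][r.offset:])
		n += c
		r.offset += c
		if r.offset >= len(r.chunks[r.index]) {
			r.index++
			r.offset = 0
		}
	}
	if r.index >= len(r.chunks) {
		err = io.EOF
	}
	return n, err
}

func main() {
	// Chunks as they might arrive via ApplySnapshotChunk; the payload shape
	// (a JSON header line followed by key/value lines) matches the new
	// kvState.Save format further down in this diff.
	chunks := [][]byte{
		[]byte("{\"height\":6531}\n"),
		[]byte("{\"key\":\"key1\",\"value\":\"value1\"}\n"),
	}
	data, err := io.ReadAll(&chunkedReader{chunks: chunks})
	fmt.Printf("%s(err=%v)\n", data, err)
}
```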
func makeChunkItem(chunks *ds.OrderedMap[string, []byte], chunkID []byte) chunkItem { chunkIDStr := hex.EncodeToString(chunkID) diff --git a/abci/example/kvstore/state.go b/abci/example/kvstore/state.go index fc4b2321c8..8a9e47602c 100644 --- a/abci/example/kvstore/state.go +++ b/abci/example/kvstore/state.go @@ -5,6 +5,7 @@ import ( "errors" "fmt" "io" + "net/url" dbm "github.com/tendermint/tm-db" @@ -19,8 +20,6 @@ import ( // Caller of State methods should do proper concurrency locking (eg. mutexes) type State interface { dbm.DB - json.Marshaler - json.Unmarshaler // Save writes full content of this state to some output Save(output io.Writer) error @@ -50,7 +49,7 @@ type State interface { } type kvState struct { - dbm.DB + dbm.DB `json:"-"` // Height of the state. Special value of 0 means zero state. Height int64 `json:"height"` InitialHeight int64 `json:"initial_height,omitempty"` @@ -171,7 +170,7 @@ func (state kvState) GetAppHash() tmbytes.HexBytes { return state.AppHash.Copy() } -func (state *kvState) UpdateAppHash(lastCommittedState State, txs types1.Txs, txResults []*types.ExecTxResult) error { +func (state *kvState) UpdateAppHash(lastCommittedState State, _txs types1.Txs, txResults []*types.ExecTxResult) error { // UpdateAppHash updates app hash for the current app state. txResultsHash, err := types.TxResultsHash(txResults) if err != nil { @@ -182,98 +181,99 @@ func (state *kvState) UpdateAppHash(lastCommittedState State, txs types1.Txs, tx return nil } +// Load state from the reader. +// It expects json-encoded kvState, followed by all items from the state. +// +// As a special case, io.EOF when reading the header means that the state is empty. func (state *kvState) Load(from io.Reader) error { if state == nil || state.DB == nil { return errors.New("cannot load into nil state") } - stateBytes, err := io.ReadAll(from) - if err != nil { - return fmt.Errorf("kvState read: %w", err) + // We reuse DB as we can use atomic batches to load items. 
+ newState := NewKvState(state.DB, state.InitialHeight).(*kvState) + + decoder := json.NewDecoder(from) + if err := decoder.Decode(&newState); err != nil { + return fmt.Errorf("error reading state header: %w", err) } - if len(stateBytes) == 0 { - return nil // NOOP + + // Load items to state DB + batch := newState.DB.NewBatch() + defer batch.Close() + + if err := resetDB(newState.DB, batch); err != nil { + return err } - err = json.Unmarshal(stateBytes, &state) - if err != nil { - return fmt.Errorf("kvState unmarshal: %w", err) + item := exportItem{} + var err error + for err = decoder.Decode(&item); err == nil; err = decoder.Decode(&item) { + key, err := url.QueryUnescape(item.Key) + if err != nil { + return fmt.Errorf("error restoring state item key %+v: %w", item, err) + } + value, err := url.QueryUnescape(item.Value) + if err != nil { + return fmt.Errorf("error restoring state item value %+v: %w", item, err) + } + + if err := batch.Set([]byte(key), []byte(value)); err != nil { + return fmt.Errorf("error restoring state item %+v: %w", item, err) + } } - return nil -} + if !errors.Is(err, io.EOF) { + return err + } -func (state kvState) Save(to io.Writer) error { - stateBytes, err := json.Marshal(state) - if err != nil { - return fmt.Errorf("kvState marshal: %w", err) + // commit changes + if err := batch.Write(); err != nil { + return fmt.Errorf("error finalizing restore batch: %w", err) } - _, err = to.Write(stateBytes) - if err != nil { - return fmt.Errorf("kvState write: %w", err) + // copy loaded values to the state + state.InitialHeight = newState.InitialHeight + state.Height = newState.Height + state.Round = newState.Round + state.AppHash = newState.AppHash + // apphash cannot be nil,zero-length + if len(state.AppHash) == 0 { + state.AppHash = make(tmbytes.HexBytes, crypto.DefaultAppHashSize) } return nil } -type StateExport struct { - Height *int64 `json:"height,omitempty"` - InitialHeight *int64 `json:"initial_height,omitempty"` - AppHash tmbytes.HexBytes `json:"app_hash,omitempty"` - Items map[string]string `json:"items,omitempty"` // we store items as string-encoded values -} +// Save saves state to the writer. +// First it puts json-encoded kvState, followed by all items from the state. +func (state kvState) Save(to io.Writer) error { + encoder := json.NewEncoder(to) + if err := encoder.Encode(state); err != nil { + return fmt.Errorf("kvState marshal: %w", err) + } -// MarshalJSON implements json.Marshaler -func (state kvState) MarshalJSON() ([]byte, error) { iter, err := state.DB.Iterator(nil, nil) if err != nil { - return nil, err + return fmt.Errorf("error creating state iterator: %w", err) } defer iter.Close() - height := state.Height - initialHeight := state.InitialHeight - apphash := state.GetAppHash() - - export := StateExport{ - Height: &height, - InitialHeight: &initialHeight, - AppHash: apphash, - Items: nil, - } - for ; iter.Valid(); iter.Next() { - if export.Items == nil { - export.Items = map[string]string{} + key := url.QueryEscape(string(iter.Key())) + value := url.QueryEscape(string(iter.Value())) + item := exportItem{Key: key, Value: value} + if err := encoder.Encode(item); err != nil { + return fmt.Errorf("error encoding state item %+v: %w", item, err) } - export.Items[string(iter.Key())] = string(iter.Value()) } - return json.Marshal(&export) + return nil } -// UnmarshalJSON implements json.Unmarshaler. -// Note that it unmarshals only existing (non-nil) values. -// If unmarshaled data contains a nil value (eg. 
is not present in json), these will stay intact. -func (state *kvState) UnmarshalJSON(data []byte) error { - - export := StateExport{} - if err := json.Unmarshal(data, &export); err != nil { - return err - } - - if export.Height != nil { - state.Height = *export.Height - } - if export.InitialHeight != nil { - state.InitialHeight = *export.InitialHeight - } - if export.AppHash != nil { - state.AppHash = export.AppHash - } - - return state.persistItems(export.Items) +type exportItem struct { + Key string `json:"key"` + Value string `json:"value"` } func (state *kvState) Close() error { @@ -282,23 +282,3 @@ func (state *kvState) Close() error { } return nil } - -func (state *kvState) persistItems(items map[string]string) error { - if items == nil { - return nil - } - batch := state.DB.NewBatch() - defer batch.Close() - - if len(items) > 0 { - if err := resetDB(state.DB, batch); err != nil { - return err - } - for key, value := range items { - if err := batch.Set([]byte(key), []byte(value)); err != nil { - return err - } - } - } - return batch.Write() -} diff --git a/abci/example/kvstore/state_test.go b/abci/example/kvstore/state_test.go index e2ba12102d..e65f14950e 100644 --- a/abci/example/kvstore/state_test.go +++ b/abci/example/kvstore/state_test.go @@ -1,7 +1,7 @@ package kvstore import ( - "encoding/json" + "bytes" "testing" "github.com/stretchr/testify/assert" @@ -27,14 +27,17 @@ func TestStateMarshalUnmarshal(t *testing.T) { assert.NoError(t, state.UpdateAppHash(state, nil, nil)) apphash := state.GetAppHash() - encoded, err := json.MarshalIndent(state, "", " ") + encoded := bytes.NewBuffer(nil) + err := state.Save(encoded) require.NoError(t, err) assert.NotEmpty(t, encoded) - t.Log(string(encoded)) + + t.Log("encoded:", encoded.String()) decoded := NewKvState(dbm.NewMemDB(), 1) - err = json.Unmarshal(encoded, &decoded) + err = decoded.Load(encoded) require.NoError(t, err) + decoded.Print() v1, err := decoded.Get([]byte("key1")) require.NoError(t, err) @@ -44,14 +47,14 @@ func TestStateMarshalUnmarshal(t *testing.T) { require.NoError(t, err) assert.EqualValues(t, []byte("value2"), v2) - v3, err := decoded.Get([]byte("key2")) + v3, err := decoded.Get(key3) require.NoError(t, err) - assert.EqualValues(t, []byte("value2"), v3) + assert.EqualValues(t, value3, v3) assert.EqualValues(t, apphash, decoded.GetAppHash()) } -func TestStateUnmarshal(t *testing.T) { +func TestStateLoad(t *testing.T) { const initialHeight = 12345678 zeroAppHash := make(tmbytes.HexBytes, crypto.DefaultAppHashSize) type keyVals struct { @@ -89,11 +92,10 @@ func TestStateUnmarshal(t *testing.T) { name: "full", encoded: []byte(`{ "height": 6531, - "app_hash": "1C9ECEC90E28D2461650418635878A5C91E49F47586ECF75F2B0CBB94E897112", - "items": { - "key1": "value1", - "key2": "value2" - }}`), + "app_hash": "1C9ECEC90E28D2461650418635878A5C91E49F47586ECF75F2B0CBB94E897112" + } + {"key":"key1","value":"value1"} + {"key":"key2","value":"value2"}`), expectHeight: 6531, expectAppHash: tmbytes.MustHexDecode("1C9ECEC90E28D2461650418635878A5C91E49F47586ECF75F2B0CBB94E897112"), expectKeyVals: []keyVals{ @@ -106,7 +108,7 @@ func TestStateUnmarshal(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { decoded := NewKvState(dbm.NewMemDB(), initialHeight) - err := json.Unmarshal(tc.encoded, &decoded) + err := decoded.Load(bytes.NewBuffer(tc.encoded)) if tc.expectDecodeError { require.Error(t, err, "decode error expected") } else { diff --git a/abci/example/kvstore/tx.go b/abci/example/kvstore/tx.go index 
53a4b1987f..18330adad9 100644 --- a/abci/example/kvstore/tx.go +++ b/abci/example/kvstore/tx.go @@ -19,8 +19,53 @@ type VerifyTxFunc func(tx types.Tx, typ abci.CheckTxType) (abci.ResponseCheckTx, // ExecTxFunc executes the transaction against some state type ExecTxFunc func(tx types.Tx, roundState State) (abci.ExecTxResult, error) +// Helper struct that controls size of added transactions +type TxRecords struct { + Size int64 + Limit int64 + Txs []*abci.TxRecord +} + +// Add new transaction if it fits the limit. +// +// Returns action that was taken, as some transactions may be delayed despite provided `tx.Action`. +// Errors when newly added transaction does not fit the limit. +func (txr *TxRecords) Add(tx *abci.TxRecord) (abci.TxRecord_TxAction, error) { + txSize := int64(len(tx.Tx)) + switch tx.Action { + case abci.TxRecord_ADDED: + if txr.Size+txSize > txr.Limit { + return abci.TxRecord_UNKNOWN, errors.New("new transaction cannot be added: over limit") + } + // add to txs + txr.Txs = append(txr.Txs, tx) + txr.Size += txSize + return tx.Action, nil + case abci.TxRecord_UNMODIFIED: + { + if txr.Size+txSize > txr.Limit { + // over limit, delaying + delay := abci.TxRecord{Tx: tx.Tx, Action: abci.TxRecord_DELAYED} + return txr.Add(&delay) + } + + // add to txs + txr.Txs = append(txr.Txs, tx) + txr.Size += txSize + return tx.Action, nil + } + + case abci.TxRecord_REMOVED, abci.TxRecord_DELAYED: + // remove from txs, not counted in size + txr.Txs = append(txr.Txs, tx) + return tx.Action, nil + default: + panic(fmt.Sprintf("unknown tx action: %v", tx.Action)) + } +} + func prepareTxs(req abci.RequestPrepareProposal) ([]*abci.TxRecord, error) { - return substPrepareTx(req.Txs, req.MaxTxBytes), nil + return substPrepareTx(req.Txs, req.MaxTxBytes) } func txRecords2Txs(txRecords []*abci.TxRecord) types.Txs { @@ -74,7 +119,7 @@ func execTx(tx types.Tx, roundState State) (abci.ExecTxResult, error) { // execPrepareTx is noop. tx data is considered as placeholder // and is substitute at the PrepareProposal. -func execPrepareTx(tx []byte) (abci.ExecTxResult, error) { +func execPrepareTx(_ []byte) (abci.ExecTxResult, error) { // noop return abci.ExecTxResult{}, nil } @@ -83,34 +128,45 @@ func execPrepareTx(tx []byte) (abci.ExecTxResult, error) { // proposal for transactions with the prefix stripped. // It marks all of the original transactions as 'REMOVED' so that // Tendermint will remove them from its mempool. -func substPrepareTx(txs [][]byte, maxTxBytes int64) []*abci.TxRecord { - trs := make([]*abci.TxRecord, 0, len(txs)) - var removed []*abci.TxRecord - var totalBytes int64 +func substPrepareTx(txs [][]byte, maxTxBytes int64) ([]*abci.TxRecord, error) { + trs := TxRecords{ + Size: 0, + Limit: maxTxBytes, + Txs: make([]*abci.TxRecord, 0, len(txs)+1), + } + for _, tx := range txs { action := abci.TxRecord_UNMODIFIED + + // As a special logic of this app, we replace tx with the prefix 'prepare' with one without the prefix. + // We need to preserve ordering of the transactions. 
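The `TxRecords` helper defined above centralizes the size accounting that `substPrepareTx` previously did inline: `ADDED` records must fit under `Limit`, `UNMODIFIED` records that no longer fit are downgraded to `DELAYED`, and `REMOVED`/`DELAYED` records are kept but not counted against the limit. A hypothetical usage sketch, assuming the `kvstore` and `abci` packages from this diff are importable:

```go
package main

import (
	"fmt"

	"github.com/dashpay/tenderdash/abci/example/kvstore"
	abci "github.com/dashpay/tenderdash/abci/types"
)

func main() {
	// Allow at most 10 bytes of ADDED/UNMODIFIED transactions in the proposal.
	trs := kvstore.TxRecords{Limit: 10}

	for _, tx := range [][]byte{[]byte("tx1"), []byte("tx2"), []byte("a-very-long-tx")} {
		action, err := trs.Add(&abci.TxRecord{Tx: tx, Action: abci.TxRecord_UNMODIFIED})
		fmt.Printf("%s -> %s (err=%v)\n", tx, action, err)
	}
	// Per the Add semantics above: tx1 and tx2 stay UNMODIFIED, while the
	// oversized transaction is downgraded to DELAYED instead of being rejected.
}
```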
if isPrepareTx(tx) { - // replace tx and add it as REMOVED - removed = append(removed, &abci.TxRecord{ - Tx: tx, - Action: abci.TxRecord_REMOVED, - }) - totalBytes -= int64(len(tx)) - - tx = bytes.TrimPrefix(tx, []byte(PreparePrefix)) - action = abci.TxRecord_ADDED + // add new tx in place of the old one + record := abci.TxRecord{ + Tx: bytes.TrimPrefix(tx, []byte(PreparePrefix)), + Action: abci.TxRecord_ADDED, + } + if _, err := trs.Add(&record); err != nil { + // cannot add new tx, so we cannot remove old one - just delay it and retry next time + action = abci.TxRecord_DELAYED + } else { + // old one can be removed from the mempool + action = abci.TxRecord_REMOVED + } } - totalBytes += int64(len(tx)) - if totalBytes > maxTxBytes { - break - } - trs = append(trs, &abci.TxRecord{ + + // Now we add the transaction to the list of transactions + transaction := &abci.TxRecord{ Tx: tx, Action: action, - }) + } + if _, err := trs.Add(transaction); err != nil { + // this should definitely not fail, as we don't add anything new + return nil, err + } } - return append(trs, removed...) + return trs.Txs, nil } const PreparePrefix = "prepare" diff --git a/abci/example/kvstore/verify.go b/abci/example/kvstore/verify.go index 5e81aa18a1..33408712fd 100644 --- a/abci/example/kvstore/verify.go +++ b/abci/example/kvstore/verify.go @@ -9,7 +9,6 @@ import ( abci "github.com/dashpay/tenderdash/abci/types" "github.com/dashpay/tenderdash/crypto/encoding" tmbytes "github.com/dashpay/tenderdash/libs/bytes" - types1 "github.com/dashpay/tenderdash/proto/tendermint/types" "github.com/dashpay/tenderdash/types" ) @@ -18,32 +17,20 @@ func (app *Application) verifyBlockCommit(qsd types.QuorumSignData, commit abci. if !bytes.Equal(commit.QuorumHash, vsu.QuorumHash) { return fmt.Errorf("mismatch quorum hashes got %X, want %X", commit.QuorumHash, vsu.QuorumHash) } - verifier := types.NewQuorumSignsVerifier(qsd) pubKey, err := encoding.PubKeyFromProto(vsu.ThresholdPublicKey) if err != nil { return err } - return verifier.Verify(pubKey, types.QuorumSigns{ - BlockSign: commit.BlockSignature, - ExtensionSigns: makeThresholdVoteExtensions(commit.ThresholdVoteExtensions), - }) -} -func makeThresholdVoteExtensions(pbVoteExtensions []*types1.VoteExtension) []types.ThresholdExtensionSign { - voteExtensions := types.VoteExtensionsFromProto(pbVoteExtensions) - var thresholdExtensionSigns []types.ThresholdExtensionSign - thresholdVoteExtensions, ok := voteExtensions[types1.VoteExtensionType_THRESHOLD_RECOVER] - if !ok { - return nil + extSigs := make([][]byte, 0, len(commit.ThresholdVoteExtensions)) + for _, ext := range commit.ThresholdVoteExtensions { + extSigs = append(extSigs, ext.Signature) } - thresholdExtensionSigns = make([]types.ThresholdExtensionSign, len(thresholdVoteExtensions)) - for i, voteExtension := range thresholdVoteExtensions { - thresholdExtensionSigns[i] = types.ThresholdExtensionSign{ - Extension: voteExtension.Extension, - ThresholdSignature: voteExtension.Signature, - } - } - return thresholdExtensionSigns + + return qsd.Verify(pubKey, types.QuorumSigns{ + BlockSign: commit.BlockSignature, + VoteExtensionSignatures: extSigs, + }) } func makeBlockSignItem( @@ -67,19 +54,14 @@ func makeVoteExtensionSignItems( req *abci.RequestFinalizeBlock, quorumType btcjson.LLMQType, quorumHash []byte, -) map[types1.VoteExtensionType][]types.SignItem { - items := make(map[types1.VoteExtensionType][]types.SignItem) - reqID := types.VoteExtensionRequestID(req.Height, req.Round) - protoExtensionsMap := 
types1.VoteExtensionsToMap(req.Commit.ThresholdVoteExtensions) - for t, exts := range protoExtensionsMap { - if items[t] == nil && len(exts) > 0 { - items[t] = make([]types.SignItem, len(exts)) - } - chainID := req.Block.Header.ChainID - for i, ext := range exts { - raw := types.VoteExtensionSignBytes(chainID, req.Height, req.Round, ext) - items[t][i] = types.NewSignItem(quorumType, quorumHash, reqID, raw) - } +) []types.SignItem { + + extensions := types.VoteExtensionsFromProto(req.Commit.ThresholdVoteExtensions...) + chainID := req.Block.Header.ChainID + + items, err := extensions.SignItems(chainID, quorumType, quorumHash, req.Height, req.Round) + if err != nil { + panic(fmt.Errorf("vote extension sign items: %w", err)) } return items } diff --git a/abci/example/kvstore/verify_test.go b/abci/example/kvstore/verify_test.go index cb381a9fc9..29c8a8207c 100644 --- a/abci/example/kvstore/verify_test.go +++ b/abci/example/kvstore/verify_test.go @@ -60,6 +60,7 @@ func TestVerifyBlockCommit(t *testing.T) { assert.Len(t, respPrep.TxRecords, 1) require.Equal(t, 1, len(respPrep.TxResults)) require.False(t, respPrep.TxResults[0].IsErr(), respPrep.TxResults[0].Log) + pbBlock, err := block.ToProto() require.NoError(t, err) blockID := block.BlockID(nil) @@ -70,9 +71,8 @@ func TestVerifyBlockCommit(t *testing.T) { Block: pbBlock, BlockID: &pbBlockID, } - respFb, err := kvstore.FinalizeBlock(ctx, reqFb) + _, err = kvstore.FinalizeBlock(ctx, reqFb) require.NoError(t, err) - require.Equal(t, 1, len(respFb.Events)) } type blockExecutor struct { diff --git a/abci/server/grpc_server.go b/abci/server/grpc_server.go index 00821a843c..21bb42b03e 100644 --- a/abci/server/grpc_server.go +++ b/abci/server/grpc_server.go @@ -74,6 +74,6 @@ func (app *gRPCApplication) Echo(_ context.Context, req *types.RequestEcho) (*ty return &types.ResponseEcho{Message: req.Message}, nil } -func (app *gRPCApplication) Flush(_ context.Context, req *types.RequestFlush) (*types.ResponseFlush, error) { +func (app *gRPCApplication) Flush(_ context.Context, _req *types.RequestFlush) (*types.ResponseFlush, error) { return &types.ResponseFlush{}, nil } diff --git a/abci/server/socket_server.go b/abci/server/socket_server.go index ac40a604bb..650d9828f2 100644 --- a/abci/server/socket_server.go +++ b/abci/server/socket_server.go @@ -106,7 +106,7 @@ func (s *SocketServer) acceptConnectionsRoutine(ctx context.Context) { if !s.IsRunning() { return // Ignore error from listener closing. 
} - s.logger.Error("Failed to accept connection", "err", err) + s.logger.Warn("Failed to accept connection", "err", err) continue } diff --git a/abci/tests/client_server_test.go b/abci/tests/client_server_test.go index f95c124e5b..d9536a9436 100644 --- a/abci/tests/client_server_test.go +++ b/abci/tests/client_server_test.go @@ -11,6 +11,7 @@ import ( abciclientent "github.com/dashpay/tenderdash/abci/client" "github.com/dashpay/tenderdash/abci/example/kvstore" abciserver "github.com/dashpay/tenderdash/abci/server" + "github.com/dashpay/tenderdash/config" "github.com/dashpay/tenderdash/libs/log" ) @@ -34,7 +35,8 @@ func TestClientServerNoAddrPrefix(t *testing.T) { assert.NoError(t, err, "expected no error on server.Start") t.Cleanup(server.Wait) - client, err := abciclientent.NewClient(logger, addr, transport, true) + cfg := config.AbciConfig{Address: addr, Transport: transport} + client, err := abciclientent.NewClient(logger, cfg, true) assert.NoError(t, err, "expected no error on NewClient") err = client.Start(ctx) assert.NoError(t, err, "expected no error on client.Start") diff --git a/abci/tests/server/client.go b/abci/tests/server/client.go index 162f1a0007..687a17f25f 100644 --- a/abci/tests/server/client.go +++ b/abci/tests/server/client.go @@ -91,7 +91,7 @@ func FinalizeBlock(ctx context.Context, client abciclient.Client, txBytes [][]by return nil } -func PrepareProposal(ctx context.Context, client abciclient.Client, txBytes [][]byte, codeExp []types.TxRecord_TxAction, dataExp []byte) error { +func PrepareProposal(ctx context.Context, client abciclient.Client, txBytes [][]byte, codeExp []types.TxRecord_TxAction, _dataExp []byte) error { res, _ := client.PrepareProposal(ctx, &types.RequestPrepareProposal{Txs: txBytes}) for i, tx := range res.TxRecords { if tx.Action != codeExp[i] { diff --git a/abci/tests/test_cli/ex1.abci.out b/abci/tests/test_cli/ex1.abci.out index bf0d354630..08f6069752 100644 --- a/abci/tests/test_cli/ex1.abci.out +++ b/abci/tests/test_cli/ex1.abci.out @@ -23,7 +23,6 @@ > finalize_block 1 0000000000000000000000000000000000000000000000000000000000000000 "abc" -> code: OK --> data.hex: 0x6576656E74733A3C747970653A2276616C5F757064617465732220617474726962757465733A3C6B65793A2273697A65222076616C75653A223022203E20617474726962757465733A3C6B65793A22686569676874222076616C75653A223122203E203E20 > info -> code: OK @@ -56,7 +55,6 @@ > finalize_block 2 3C868EF29ED961094EE2C48BDD78CDC83C7AA426E763024985AF9F0F569EEFBE "def=xyz" "ghi=123" -> code: OK --> data.hex: 0x6576656E74733A3C747970653A2276616C5F757064617465732220617474726962757465733A3C6B65793A2273697A65222076616C75653A223022203E20617474726962757465733A3C6B65793A22686569676874222076616C75653A223222203E203E20 > query "def" -> code: OK @@ -101,5 +99,4 @@ > finalize_block 3 243BA0AC2622E2E7ACFEF366E85E503ACEBA1C950357436B554CA253492420AB -> code: OK --> data.hex: 0x6576656E74733A3C747970653A2276616C5F757064617465732220617474726962757465733A3C6B65793A2273697A65222076616C75653A223022203E20617474726962757465733A3C6B65793A22686569676874222076616C75653A223322203E203E20 diff --git a/abci/types/application.go b/abci/types/application.go index 19b52612f3..fe7b60bca4 100644 --- a/abci/types/application.go +++ b/abci/types/application.go @@ -55,37 +55,37 @@ func NewBaseApplication() *BaseApplication { return &BaseApplication{} } -func (BaseApplication) Info(_ context.Context, req *RequestInfo) (*ResponseInfo, error) { +func (BaseApplication) Info(_ context.Context, _req *RequestInfo) (*ResponseInfo, error) { return &ResponseInfo{}, 
nil } -func (BaseApplication) CheckTx(_ context.Context, req *RequestCheckTx) (*ResponseCheckTx, error) { +func (BaseApplication) CheckTx(_ context.Context, _req *RequestCheckTx) (*ResponseCheckTx, error) { return &ResponseCheckTx{Code: CodeTypeOK}, nil } -func (BaseApplication) ExtendVote(_ context.Context, req *RequestExtendVote) (*ResponseExtendVote, error) { +func (BaseApplication) ExtendVote(_ context.Context, _req *RequestExtendVote) (*ResponseExtendVote, error) { return &ResponseExtendVote{}, nil } -func (BaseApplication) VerifyVoteExtension(_ context.Context, req *RequestVerifyVoteExtension) (*ResponseVerifyVoteExtension, error) { +func (BaseApplication) VerifyVoteExtension(_ context.Context, _req *RequestVerifyVoteExtension) (*ResponseVerifyVoteExtension, error) { return &ResponseVerifyVoteExtension{ Status: ResponseVerifyVoteExtension_ACCEPT, }, nil } -func (BaseApplication) Query(_ context.Context, req *RequestQuery) (*ResponseQuery, error) { +func (BaseApplication) Query(_ context.Context, _req *RequestQuery) (*ResponseQuery, error) { return &ResponseQuery{Code: CodeTypeOK}, nil } -func (BaseApplication) InitChain(_ context.Context, req *RequestInitChain) (*ResponseInitChain, error) { +func (BaseApplication) InitChain(_ context.Context, _req *RequestInitChain) (*ResponseInitChain, error) { return &ResponseInitChain{}, nil } -func (BaseApplication) ListSnapshots(_ context.Context, req *RequestListSnapshots) (*ResponseListSnapshots, error) { +func (BaseApplication) ListSnapshots(_ context.Context, _req *RequestListSnapshots) (*ResponseListSnapshots, error) { return &ResponseListSnapshots{}, nil } -func (BaseApplication) OfferSnapshot(_ context.Context, req *RequestOfferSnapshot) (*ResponseOfferSnapshot, error) { +func (BaseApplication) OfferSnapshot(_ context.Context, _req *RequestOfferSnapshot) (*ResponseOfferSnapshot, error) { return &ResponseOfferSnapshot{}, nil } @@ -93,7 +93,7 @@ func (BaseApplication) LoadSnapshotChunk(_ context.Context, _ *RequestLoadSnapsh return &ResponseLoadSnapshotChunk{}, nil } -func (BaseApplication) ApplySnapshotChunk(_ context.Context, req *RequestApplySnapshotChunk) (*ResponseApplySnapshotChunk, error) { +func (BaseApplication) ApplySnapshotChunk(_ context.Context, _req *RequestApplySnapshotChunk) (*ResponseApplySnapshotChunk, error) { return &ResponseApplySnapshotChunk{}, nil } @@ -111,8 +111,9 @@ func (BaseApplication) PrepareProposal(_ context.Context, req *RequestPreparePro }) } return &ResponsePrepareProposal{TxRecords: trs, - AppHash: make([]byte, crypto.DefaultAppHashSize), - TxResults: txResults(req.Txs), + AppHash: make([]byte, crypto.DefaultAppHashSize), + TxResults: txResults(req.Txs), + AppVersion: 1, }, nil } @@ -132,7 +133,7 @@ func (BaseApplication) ProcessProposal(_ context.Context, req *RequestProcessPro }, nil } -func (BaseApplication) FinalizeBlock(_ context.Context, req *RequestFinalizeBlock) (*ResponseFinalizeBlock, error) { +func (BaseApplication) FinalizeBlock(_ context.Context, _req *RequestFinalizeBlock) (*ResponseFinalizeBlock, error) { return &ResponseFinalizeBlock{}, nil } diff --git a/abci/types/mocks/application.go b/abci/types/mocks/application.go index e7767821d9..04ca3c5777 100644 --- a/abci/types/mocks/application.go +++ b/abci/types/mocks/application.go @@ -18,6 +18,10 @@ type Application struct { func (_m *Application) ApplySnapshotChunk(_a0 context.Context, _a1 *types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value 
specified for ApplySnapshotChunk") + } + var r0 *types.ResponseApplySnapshotChunk var r1 error if rf, ok := ret.Get(0).(func(context.Context, *types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error)); ok { @@ -44,6 +48,10 @@ func (_m *Application) ApplySnapshotChunk(_a0 context.Context, _a1 *types.Reques func (_m *Application) CheckTx(_a0 context.Context, _a1 *types.RequestCheckTx) (*types.ResponseCheckTx, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for CheckTx") + } + var r0 *types.ResponseCheckTx var r1 error if rf, ok := ret.Get(0).(func(context.Context, *types.RequestCheckTx) (*types.ResponseCheckTx, error)); ok { @@ -70,6 +78,10 @@ func (_m *Application) CheckTx(_a0 context.Context, _a1 *types.RequestCheckTx) ( func (_m *Application) ExtendVote(_a0 context.Context, _a1 *types.RequestExtendVote) (*types.ResponseExtendVote, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for ExtendVote") + } + var r0 *types.ResponseExtendVote var r1 error if rf, ok := ret.Get(0).(func(context.Context, *types.RequestExtendVote) (*types.ResponseExtendVote, error)); ok { @@ -96,6 +108,10 @@ func (_m *Application) ExtendVote(_a0 context.Context, _a1 *types.RequestExtendV func (_m *Application) FinalizeBlock(_a0 context.Context, _a1 *types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for FinalizeBlock") + } + var r0 *types.ResponseFinalizeBlock var r1 error if rf, ok := ret.Get(0).(func(context.Context, *types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error)); ok { @@ -122,6 +138,10 @@ func (_m *Application) FinalizeBlock(_a0 context.Context, _a1 *types.RequestFina func (_m *Application) Info(_a0 context.Context, _a1 *types.RequestInfo) (*types.ResponseInfo, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for Info") + } + var r0 *types.ResponseInfo var r1 error if rf, ok := ret.Get(0).(func(context.Context, *types.RequestInfo) (*types.ResponseInfo, error)); ok { @@ -148,6 +168,10 @@ func (_m *Application) Info(_a0 context.Context, _a1 *types.RequestInfo) (*types func (_m *Application) InitChain(_a0 context.Context, _a1 *types.RequestInitChain) (*types.ResponseInitChain, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for InitChain") + } + var r0 *types.ResponseInitChain var r1 error if rf, ok := ret.Get(0).(func(context.Context, *types.RequestInitChain) (*types.ResponseInitChain, error)); ok { @@ -174,6 +198,10 @@ func (_m *Application) InitChain(_a0 context.Context, _a1 *types.RequestInitChai func (_m *Application) ListSnapshots(_a0 context.Context, _a1 *types.RequestListSnapshots) (*types.ResponseListSnapshots, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for ListSnapshots") + } + var r0 *types.ResponseListSnapshots var r1 error if rf, ok := ret.Get(0).(func(context.Context, *types.RequestListSnapshots) (*types.ResponseListSnapshots, error)); ok { @@ -200,6 +228,10 @@ func (_m *Application) ListSnapshots(_a0 context.Context, _a1 *types.RequestList func (_m *Application) LoadSnapshotChunk(_a0 context.Context, _a1 *types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for LoadSnapshotChunk") + } + var r0 *types.ResponseLoadSnapshotChunk var r1 error if 
rf, ok := ret.Get(0).(func(context.Context, *types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error)); ok { @@ -226,6 +258,10 @@ func (_m *Application) LoadSnapshotChunk(_a0 context.Context, _a1 *types.Request func (_m *Application) OfferSnapshot(_a0 context.Context, _a1 *types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for OfferSnapshot") + } + var r0 *types.ResponseOfferSnapshot var r1 error if rf, ok := ret.Get(0).(func(context.Context, *types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error)); ok { @@ -252,6 +288,10 @@ func (_m *Application) OfferSnapshot(_a0 context.Context, _a1 *types.RequestOffe func (_m *Application) PrepareProposal(_a0 context.Context, _a1 *types.RequestPrepareProposal) (*types.ResponsePrepareProposal, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for PrepareProposal") + } + var r0 *types.ResponsePrepareProposal var r1 error if rf, ok := ret.Get(0).(func(context.Context, *types.RequestPrepareProposal) (*types.ResponsePrepareProposal, error)); ok { @@ -278,6 +318,10 @@ func (_m *Application) PrepareProposal(_a0 context.Context, _a1 *types.RequestPr func (_m *Application) ProcessProposal(_a0 context.Context, _a1 *types.RequestProcessProposal) (*types.ResponseProcessProposal, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for ProcessProposal") + } + var r0 *types.ResponseProcessProposal var r1 error if rf, ok := ret.Get(0).(func(context.Context, *types.RequestProcessProposal) (*types.ResponseProcessProposal, error)); ok { @@ -304,6 +348,10 @@ func (_m *Application) ProcessProposal(_a0 context.Context, _a1 *types.RequestPr func (_m *Application) Query(_a0 context.Context, _a1 *types.RequestQuery) (*types.ResponseQuery, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for Query") + } + var r0 *types.ResponseQuery var r1 error if rf, ok := ret.Get(0).(func(context.Context, *types.RequestQuery) (*types.ResponseQuery, error)); ok { @@ -330,6 +378,10 @@ func (_m *Application) Query(_a0 context.Context, _a1 *types.RequestQuery) (*typ func (_m *Application) VerifyVoteExtension(_a0 context.Context, _a1 *types.RequestVerifyVoteExtension) (*types.ResponseVerifyVoteExtension, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for VerifyVoteExtension") + } + var r0 *types.ResponseVerifyVoteExtension var r1 error if rf, ok := ret.Get(0).(func(context.Context, *types.RequestVerifyVoteExtension) (*types.ResponseVerifyVoteExtension, error)); ok { diff --git a/abci/types/types.go b/abci/types/types.go index 3d82902597..f9623c9709 100644 --- a/abci/types/types.go +++ b/abci/types/types.go @@ -6,6 +6,7 @@ import ( "fmt" "github.com/gogo/protobuf/jsonpb" + "github.com/rs/zerolog" "github.com/dashpay/tenderdash/crypto" cryptoenc "github.com/dashpay/tenderdash/crypto/encoding" @@ -234,10 +235,9 @@ func (m *ValidatorUpdate) UnmarshalJSON(b []byte) error { // non-deterministic fields. The input response is not modified. 
 func deterministicExecTxResult(response *ExecTxResult) *ExecTxResult {
 	return &ExecTxResult{
-		Code:      response.Code,
-		Data:      response.Data,
-		GasWanted: response.GasWanted,
-		GasUsed:   response.GasUsed,
+		Code:    response.Code,
+		Data:    response.Data,
+		GasUsed: response.GasUsed,
 	}
 }
 
@@ -272,10 +272,71 @@ func (m *ResponsePrepareProposal) Validate() error {
 	if !isValidApphash(m.AppHash) {
 		return fmt.Errorf("apphash (%X) of size %d is invalid", m.AppHash, len(m.AppHash))
 	}
+	if m.AppVersion == 0 {
+		return fmt.Errorf("app version cannot be 0")
+	}
 	return nil
 }
 
+type Misbehaviors []Misbehavior
+
+func (m Misbehaviors) MarshalZerologArray(e *zerolog.Array) {
+	for _, v := range m {
+		e.Interface(v)
+	}
+}
+
+type Txs [][]byte
+
+func (b Txs) MarshalZerologArray(a *zerolog.Array) {
+	for _, bs := range b {
+		a.Hex(crypto.Checksum(bs)[:8])
+	}
+}
+
+func (txr *TxRecord) MarshalZerologObject(e *zerolog.Event) {
+	e.Str("action", txr.Action.String())
+	e.Hex("tx", crypto.Checksum(txr.Tx)[:8])
+}
+
+func (r *RequestPrepareProposal) MarshalZerologObject(e *zerolog.Event) {
+	e.Int64("max_tx_bytes", r.MaxTxBytes)
+	e.Array("txs", Txs(r.Txs))
+	e.Interface("last_commit", r.LocalLastCommit)
+	e.Array("misbehavior", Misbehaviors(r.Misbehavior))
+	e.Time("proposed_time", r.Time)
+
+	e.Int64("height", r.Height)
+	e.Int32("round", r.Round)
+
+	e.Hex("next_validators_hash", r.NextValidatorsHash)
+	e.Uint32("core_chain_locked_height", r.CoreChainLockedHeight)
+	e.Hex("proposer_pro_tx_hash", r.ProposerProTxHash)
+	e.Uint64("proposed_app_version", r.ProposedAppVersion)
+	e.Str("version", r.Version.String())
+	e.Hex("quorum_hash", r.QuorumHash)
+}
+
+func (r *RequestProcessProposal) MarshalZerologObject(e *zerolog.Event) {
+	e.Array("txs", Txs(r.Txs))
+	e.Interface("last_commit", r.ProposedLastCommit.String())
+	e.Array("misbehavior", Misbehaviors(r.Misbehavior))
+	e.Time("proposed_time", r.Time)
+
+	e.Hex("block_hash", r.Hash)
+	e.Int64("height", r.Height)
+	e.Int32("round", r.Round)
+
+	e.Hex("next_validators_hash", r.NextValidatorsHash)
+	e.Uint32("core_chain_locked_height", r.CoreChainLockedHeight)
+	e.Interface("core_chain_lock_update", r.CoreChainLockUpdate)
+	e.Hex("proposer_pro_tx_hash", r.ProposerProTxHash)
+	e.Uint64("proposed_app_version", r.ProposedAppVersion)
+	e.Str("version", r.Version.String())
+	e.Hex("quorum_hash", r.QuorumHash)
+}
+
 func isValidApphash(apphash tmbytes.HexBytes) bool {
 	return len(apphash) == crypto.DefaultAppHashSize
 }
@@ -311,3 +372,23 @@ func (m *RequestFinalizeBlock) ToCanonicalVote() (types.CanonicalVote, error) {
 	}
 	return cv, nil
 }
+
+// ToVoteExtension converts to proto types.VoteExtension.
+// The Signature field will be nil, as ExtendVoteExtension doesn't have it.
+func (m *ExtendVoteExtension) ToVoteExtension() types.VoteExtension { + ve := types.VoteExtension{ + Type: m.Type, + Extension: m.Extension, + } + + // workaround for a bug in gogoproto + if m.XSignRequestId != nil { + src := m.GetSignRequestId() + + ve.XSignRequestId = &types.VoteExtension_SignRequestId{ + SignRequestId: bytes.Clone(src), + } + } + + return ve +} diff --git a/abci/types/types.pb.go b/abci/types/types.pb.go index 5b4fd87a45..7513d79ee8 100644 --- a/abci/types/types.pb.go +++ b/abci/types/types.pb.go @@ -231,6 +231,7 @@ const ( TxRecord_UNMODIFIED TxRecord_TxAction = 1 TxRecord_ADDED TxRecord_TxAction = 2 TxRecord_REMOVED TxRecord_TxAction = 3 + TxRecord_DELAYED TxRecord_TxAction = 4 ) var TxRecord_TxAction_name = map[int32]string{ @@ -238,6 +239,7 @@ var TxRecord_TxAction_name = map[int32]string{ 1: "UNMODIFIED", 2: "ADDED", 3: "REMOVED", + 4: "DELAYED", } var TxRecord_TxAction_value = map[string]int32{ @@ -245,6 +247,7 @@ var TxRecord_TxAction_value = map[string]int32{ "UNMODIFIED": 1, "ADDED": 2, "REMOVED": 3, + "DELAYED": 4, } func (x TxRecord_TxAction) String() string { @@ -599,7 +602,7 @@ var xxx_messageInfo_RequestFlush proto.InternalMessageInfo // // Used to sync Tenderdash with the application during a handshake that happens on startup. // The returned app_version will be included in the Header of every block. -// Tenderdsah expects last_block_app_hash and last_block_height to be updated during Commit, +// Tenderdash expects last_block_app_hash and last_block_height to be updated during Commit, // ensuring that Commit is never called twice for the same block height. type RequestInfo struct { Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` @@ -1195,7 +1198,7 @@ func (m *RequestApplySnapshotChunk) GetSender() string { // their propose timeout goes off. // - As a result of executing the prepared proposal, the Application may produce header events or transaction events. // The Application must keep those events until a block is decided and then pass them on to Tenderdash via -// `ResponseFinalizeBlock`. +// `ResponsePrepareProposal`. // - As a sanity check, Tenderdash will check the returned parameters for validity if the Application modified them. // In particular, `ResponsePrepareProposal.tx_records` will be deemed invalid if // - There is a duplicate transaction in the list. @@ -1267,6 +1270,7 @@ type RequestPrepareProposal struct { // Proposer's latest available app protocol version. ProposedAppVersion uint64 `protobuf:"varint,11,opt,name=proposed_app_version,json=proposedAppVersion,proto3" json:"proposed_app_version,omitempty"` // App and block version used to generate the block. + // App version included in the block can be modified by setting ResponsePrepareProposal.app_version. Version *version.Consensus `protobuf:"bytes,12,opt,name=version,proto3" json:"version,omitempty"` // quorum_hash contains hash of validator quorum that will sign the block QuorumHash []byte `protobuf:"bytes,13,opt,name=quorum_hash,json=quorumHash,proto3" json:"quorum_hash,omitempty"` @@ -1466,6 +1470,7 @@ type RequestProcessProposal struct { // Proposer's latest available app protocol version. ProposedAppVersion uint64 `protobuf:"varint,12,opt,name=proposed_app_version,json=proposedAppVersion,proto3" json:"proposed_app_version,omitempty"` // App and block version used to generate the block. + // App version MUST be verified by the app. 
Version *version.Consensus `protobuf:"bytes,13,opt,name=version,proto3" json:"version,omitempty"` // quorum_hash contains hash of validator quorum that will sign the block QuorumHash []byte `protobuf:"bytes,14,opt,name=quorum_hash,json=quorumHash,proto3" json:"quorum_hash,omitempty"` @@ -1817,8 +1822,8 @@ func (m *RequestVerifyVoteExtension) GetVoteExtensions() []*ExtendVoteExtension // - The application must execute the transactions in full, in the order they appear in `RequestFinalizeBlock.txs`, // before returning control to Tenderdash. Alternatively, it can commit the candidate state corresponding to the same block // previously executed via `PrepareProposal` or `ProcessProposal`. -// - `ResponseFinalizeBlock.tx_results[i].Code == 0` only if the _i_-th transaction is fully valid. -// - Application is expected to persist its state at the end of this call, before calling `ResponseFinalizeBlock`. +// - If ProcessProposal for the same arguments have succeeded, FinalizeBlock MUST always succeed. +// - Application is expected to persist its state at the end of this call, before returning `ResponseFinalizeBlock`. // - Later calls to `Query` can return proofs about the application state anchored // in this Merkle root hash. // - Use `ResponseFinalizeBlock.retain_height` with caution! If all nodes in the network remove historical @@ -2906,6 +2911,8 @@ type ResponsePrepareProposal struct { CoreChainLockUpdate *types1.CoreChainLock `protobuf:"bytes,5,opt,name=core_chain_lock_update,json=coreChainLockUpdate,proto3" json:"core_chain_lock_update,omitempty"` // Changes to validator set that will be applied at next height. ValidatorSetUpdate *ValidatorSetUpdate `protobuf:"bytes,6,opt,name=validator_set_update,json=validatorSetUpdate,proto3" json:"validator_set_update,omitempty"` + // Application version that was used to create the current proposal. + AppVersion uint64 `protobuf:"varint,7,opt,name=app_version,json=appVersion,proto3" json:"app_version,omitempty"` } func (m *ResponsePrepareProposal) Reset() { *m = ResponsePrepareProposal{} } @@ -2983,6 +2990,13 @@ func (m *ResponsePrepareProposal) GetValidatorSetUpdate() *ValidatorSetUpdate { return nil } +func (m *ResponsePrepareProposal) GetAppVersion() uint64 { + if m != nil { + return m.AppVersion + } + return 0 +} + type ResponseProcessProposal struct { // `enum` that signals if the application finds the proposal valid. Status ResponseProcessProposal_ProposalStatus `protobuf:"varint,1,opt,name=status,proto3,enum=tendermint.abci.ResponseProcessProposal_ProposalStatus" json:"status,omitempty"` @@ -2994,6 +3008,8 @@ type ResponseProcessProposal struct { ConsensusParamUpdates *types1.ConsensusParams `protobuf:"bytes,4,opt,name=consensus_param_updates,json=consensusParamUpdates,proto3" json:"consensus_param_updates,omitempty"` // Changes to validator set (set voting power to 0 to remove). ValidatorSetUpdate *ValidatorSetUpdate `protobuf:"bytes,5,opt,name=validator_set_update,json=validatorSetUpdate,proto3" json:"validator_set_update,omitempty"` + // Type & Key-Value events for indexing + Events []Event `protobuf:"bytes,6,rep,name=events,proto3" json:"events,omitempty"` } func (m *ResponseProcessProposal) Reset() { *m = ResponseProcessProposal{} } @@ -3064,13 +3080,26 @@ func (m *ResponseProcessProposal) GetValidatorSetUpdate() *ValidatorSetUpdate { return nil } -// Provides a vote extension for signing. 
Each field is mandatory for filling +func (m *ResponseProcessProposal) GetEvents() []Event { + if m != nil { + return m.Events + } + return nil +} + +// Provides a vote extension for signing. `type` and `extension` fields are mandatory for filling type ExtendVoteExtension struct { - // Vote extension type can be either DEFAULT or THRESHOLD_RECOVER. - // The Tenderdash supports only THRESHOLD_RECOVER at this moment. + // Vote extension type can be either DEFAULT, THRESHOLD_RECOVER or THRESHOLD_RECOVER_RAW. + // The Tenderdash supports only THRESHOLD_RECOVER and THRESHOLD_RECOVER_RAW at this moment. Type types1.VoteExtensionType `protobuf:"varint,1,opt,name=type,proto3,enum=tendermint.types.VoteExtensionType" json:"type,omitempty"` // Deterministic or (Non-Deterministic) extension provided by the sending validator's Application. + // + // For THRESHOLD_RECOVER_RAW, it MUST be 32 bytes. Extension []byte `protobuf:"bytes,2,opt,name=extension,proto3" json:"extension,omitempty"` + // Types that are valid to be assigned to XSignRequestId: + // + // *ExtendVoteExtension_SignRequestId + XSignRequestId isExtendVoteExtension_XSignRequestId `protobuf_oneof:"_sign_request_id"` } func (m *ExtendVoteExtension) Reset() { *m = ExtendVoteExtension{} } @@ -3106,6 +3135,25 @@ func (m *ExtendVoteExtension) XXX_DiscardUnknown() { var xxx_messageInfo_ExtendVoteExtension proto.InternalMessageInfo +type isExtendVoteExtension_XSignRequestId interface { + isExtendVoteExtension_XSignRequestId() + MarshalTo([]byte) (int, error) + Size() int +} + +type ExtendVoteExtension_SignRequestId struct { + SignRequestId []byte `protobuf:"bytes,3,opt,name=sign_request_id,json=signRequestId,proto3,oneof" json:"sign_request_id,omitempty"` +} + +func (*ExtendVoteExtension_SignRequestId) isExtendVoteExtension_XSignRequestId() {} + +func (m *ExtendVoteExtension) GetXSignRequestId() isExtendVoteExtension_XSignRequestId { + if m != nil { + return m.XSignRequestId + } + return nil +} + func (m *ExtendVoteExtension) GetType() types1.VoteExtensionType { if m != nil { return m.Type @@ -3120,6 +3168,20 @@ func (m *ExtendVoteExtension) GetExtension() []byte { return nil } +func (m *ExtendVoteExtension) GetSignRequestId() []byte { + if x, ok := m.GetXSignRequestId().(*ExtendVoteExtension_SignRequestId); ok { + return x.SignRequestId + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*ExtendVoteExtension) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*ExtendVoteExtension_SignRequestId)(nil), + } +} + type ResponseExtendVote struct { VoteExtensions []*ExtendVoteExtension `protobuf:"bytes,1,rep,name=vote_extensions,json=voteExtensions,proto3" json:"vote_extensions,omitempty"` } @@ -3209,8 +3271,6 @@ func (m *ResponseVerifyVoteExtension) GetStatus() ResponseVerifyVoteExtension_Ve } type ResponseFinalizeBlock struct { - // Type & Key-Value events for indexing - Events []Event `protobuf:"bytes,1,rep,name=events,proto3" json:"events,omitempty"` // Blocks below this height may be removed. Defaults to `0` (retain all). 
RetainHeight int64 `protobuf:"varint,2,opt,name=retain_height,json=retainHeight,proto3" json:"retain_height,omitempty"` } @@ -3248,13 +3308,6 @@ func (m *ResponseFinalizeBlock) XXX_DiscardUnknown() { var xxx_messageInfo_ResponseFinalizeBlock proto.InternalMessageInfo -func (m *ResponseFinalizeBlock) GetEvents() []Event { - if m != nil { - return m.Events - } - return nil -} - func (m *ResponseFinalizeBlock) GetRetainHeight() int64 { if m != nil { return m.RetainHeight @@ -3450,12 +3503,11 @@ func (m *EventAttribute) GetIndex() bool { // ExecTxResult contains results of executing one individual transaction. type ExecTxResult struct { - Code uint32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` - Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` - Log string `protobuf:"bytes,3,opt,name=log,proto3" json:"log,omitempty"` - Info string `protobuf:"bytes,4,opt,name=info,proto3" json:"info,omitempty"` - GasWanted int64 `protobuf:"varint,5,opt,name=gas_wanted,json=gasWanted,proto3" json:"gas_wanted,omitempty"` - GasUsed int64 `protobuf:"varint,6,opt,name=gas_used,json=gasUsed,proto3" json:"gas_used,omitempty"` + Code uint32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` + Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` + Log string `protobuf:"bytes,3,opt,name=log,proto3" json:"log,omitempty"` + Info string `protobuf:"bytes,4,opt,name=info,proto3" json:"info,omitempty"` + GasUsed int64 `protobuf:"varint,6,opt,name=gas_used,json=gasUsed,proto3" json:"gas_used,omitempty"` // Type & Key-Value events for indexing transactions (e.g. by account). Events []Event `protobuf:"bytes,7,rep,name=events,proto3" json:"events,omitempty"` Codespace string `protobuf:"bytes,8,opt,name=codespace,proto3" json:"codespace,omitempty"` @@ -3522,13 +3574,6 @@ func (m *ExecTxResult) GetInfo() string { return "" } -func (m *ExecTxResult) GetGasWanted() int64 { - if m != nil { - return m.GasWanted - } - return 0 -} - func (m *ExecTxResult) GetGasUsed() int64 { if m != nil { return m.GasUsed @@ -3798,6 +3843,21 @@ func (m *ValidatorUpdate) GetNodeAddress() string { return "" } +// ValidatorSetUpdate represents a change in the validator set. +// It can be used to add, remove, or update a validator. +// +// Validator set update consists of multiple ValidatorUpdate records, +// each of them can be used to add, remove, or update a validator, according to the +// following rules: +// +// 1. If a validator with the same public key already exists in the validator set +// and power is greater than 0, the existing validator will be updated with the new power. +// 2. If a validator with the same public key already exists in the validator set +// and power is 0, the existing validator will be removed from the validator set. +// 3. If a validator with the same public key does not exist in the validator set and the power is greater than 0, +// a new validator will be added to the validator set. +// 4. As a special case, if quorum hash has changed, all existing validators will be removed before applying +// the new validator set update. 
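+//
+// Illustrative only (a hedged sketch, not generated code): a ValidatorSetUpdate
+// that exercises the rules above. Field names such as ProTxHash, Power and
+// QuorumHash are assumed from the surrounding generated types; the hash and key
+// values are placeholders.
+//
+//	update := ValidatorSetUpdate{
+//		ValidatorUpdates: []ValidatorUpdate{
+//			{ProTxHash: proTxHashA, Power: 100}, // rule 1 or 3: update or add validator A
+//			{ProTxHash: proTxHashB, Power: 0},   // rule 2: remove validator B
+//		},
+//		ThresholdPublicKey: newThresholdKey,
+//		QuorumHash:         newQuorumHash, // rule 4: a new quorum hash replaces the whole set
+//	}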
 type ValidatorSetUpdate struct {
 	ValidatorUpdates   []ValidatorUpdate `protobuf:"bytes,1,rep,name=validator_updates,json=validatorUpdates,proto3" json:"validator_updates"`
 	ThresholdPublicKey crypto.PublicKey  `protobuf:"bytes,2,opt,name=threshold_public_key,json=thresholdPublicKey,proto3" json:"threshold_public_key"`
@@ -4275,233 +4335,237 @@ func init() {
 func init() { proto.RegisterFile("tendermint/abci/types.proto", fileDescriptor_252557cfdd89a31a) }
 
 var fileDescriptor_252557cfdd89a31a = []byte{
-	// 3618 bytes of a gzipped FileDescriptorProto
+	// 3682 bytes of a gzipped FileDescriptorProto
	[gzipped FileDescriptorProto bytes omitted: the generated descriptor grows from 3618 to 3682 bytes]
0xb1, 0x5a, + 0x9f, 0x56, 0xe4, 0xf1, 0x65, 0x55, 0x81, 0x89, 0xc5, 0x17, 0x9f, 0x58, 0x54, 0xc9, 0xcc, 0xa9, + 0xdf, 0x26, 0x17, 0xae, 0xdf, 0xbe, 0x09, 0xc8, 0xd6, 0x6d, 0xb5, 0x47, 0x82, 0xb7, 0x36, 0x38, + 0x57, 0xd8, 0x69, 0x67, 0x35, 0x63, 0x91, 0xf6, 0x9c, 0xd0, 0x8e, 0x43, 0x42, 0x97, 0x7a, 0x90, + 0x75, 0xc1, 0x87, 0xc5, 0x1f, 0x99, 0x4c, 0xaa, 0x53, 0x57, 0x21, 0xdb, 0xc7, 0xb6, 0x4a, 0xc3, + 0x1e, 0x03, 0xa3, 0xdc, 0xf6, 0xbd, 0x0f, 0x20, 0xef, 0x7b, 0x79, 0x43, 0x22, 0xe1, 0x7e, 0xe3, + 0x13, 0x31, 0x56, 0xcd, 0x7c, 0xfb, 0xfd, 0x7a, 0x62, 0x1f, 0x3f, 0x23, 0x9f, 0x92, 0x1b, 0xf5, + 0x66, 0xa3, 0xfe, 0x58, 0x14, 0xaa, 0xf9, 0x6f, 0xbf, 0x5f, 0xcf, 0xc8, 0x98, 0xd6, 0x38, 0xee, + 0x3d, 0x86, 0x72, 0x68, 0x07, 0x82, 0x0e, 0x1a, 0x41, 0x69, 0xfb, 0xf8, 0x70, 0x6f, 0xa7, 0x5e, + 0x6b, 0x35, 0x94, 0x93, 0x83, 0x56, 0x43, 0x14, 0xd0, 0x35, 0x58, 0xde, 0xdb, 0xf9, 0xeb, 0x66, + 0x4b, 0xa9, 0xef, 0xed, 0x34, 0xf6, 0x5b, 0x4a, 0xad, 0xd5, 0xaa, 0xd5, 0x1f, 0x8b, 0xf1, 0xfb, + 0xff, 0x03, 0x50, 0xae, 0x6d, 0xd5, 0x77, 0x6a, 0x86, 0xd1, 0xd3, 0xda, 0x2a, 0x75, 0xf7, 0x75, + 0x48, 0x52, 0xe8, 0x77, 0xea, 0x1b, 0xdc, 0xea, 0xf4, 0xc2, 0x15, 0x7a, 0x04, 0x29, 0x8a, 0x0a, + 0xa3, 0xe9, 0x8f, 0x72, 0xab, 0x33, 0x2a, 0x59, 0x64, 0x30, 0xf4, 0xdc, 0x4c, 0x7d, 0xa5, 0x5b, + 0x9d, 0x5e, 0xd8, 0x42, 0x7b, 0x90, 0x71, 0x00, 0xb7, 0x59, 0x4f, 0x67, 0xab, 0x33, 0xab, 0x4d, + 0x64, 0x6a, 0x0c, 0xb8, 0x9c, 0xfe, 0x80, 0xb7, 0x3a, 0xa3, 0xe4, 0x85, 0x64, 0xc8, 0x79, 0x58, + 0xf3, 0xec, 0xb7, 0xc4, 0xd5, 0x39, 0x4a, 0x70, 0xe8, 0x0b, 0x28, 0x06, 0xa1, 0xb9, 0xf9, 0x9e, + 0xf9, 0x56, 0xe7, 0x2c, 0x8f, 0x11, 0xfd, 0x41, 0x9c, 0x6e, 0xbe, 0x67, 0xbf, 0xd5, 0x39, 0xab, + 0x65, 0xe8, 0x4b, 0x58, 0x1a, 0xc7, 0xd1, 0xe6, 0x7f, 0x05, 0x5c, 0x5d, 0xa0, 0x7e, 0x86, 0xfa, + 0x80, 0x26, 0xe0, 0x6f, 0x0b, 0x3c, 0x0a, 0xae, 0x2e, 0x52, 0x4e, 0x43, 0x1d, 0x28, 0x87, 0xb1, + 0xa9, 0x79, 0x1f, 0x09, 0x57, 0xe7, 0x2e, 0xad, 0xb1, 0xaf, 0x04, 0x31, 0x92, 0x79, 0x1f, 0x0d, + 0x57, 0xe7, 0xae, 0xb4, 0xa1, 0x63, 0x00, 0xdf, 0xdd, 0x76, 0x8e, 0x47, 0xc4, 0xd5, 0x79, 0x6a, + 0x6e, 0xc8, 0x80, 0xe5, 0x49, 0x97, 0xd9, 0x45, 0xde, 0x14, 0x57, 0x17, 0x2a, 0xc5, 0x11, 0x7b, + 0x0e, 0xde, 0x4b, 0xe7, 0x7b, 0x63, 0x5c, 0x9d, 0xb3, 0x26, 0xb7, 0xb5, 0xf5, 0xc3, 0x8b, 0x55, + 0xe1, 0xc7, 0x17, 0xab, 0xc2, 0x4f, 0x2f, 0x56, 0x85, 0xef, 0x7e, 0x5e, 0x8d, 0xfd, 0xf8, 0xf3, + 0x6a, 0xec, 0xbf, 0x7e, 0x5e, 0x8d, 0xfd, 0xcd, 0xdd, 0x73, 0xcd, 0xee, 0x0e, 0x4f, 0x37, 0xda, + 0x7a, 0x9f, 0xfe, 0xc5, 0xc3, 0x50, 0x2f, 0x37, 0x99, 0x4e, 0xd2, 0xf2, 0xfd, 0x91, 0xe4, 0x34, + 0x4d, 0x63, 0xdd, 0x83, 0x3f, 0x04, 0x00, 0x00, 0xff, 0xff, 0xf2, 0x22, 0x4b, 0xdf, 0x68, 0x32, 0x00, 0x00, } @@ -7205,6 +7269,11 @@ func (m *ResponsePrepareProposal) MarshalToSizedBuffer(dAtA []byte) (int, error) _ = i var l int _ = l + if m.AppVersion != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.AppVersion)) + i-- + dAtA[i] = 0x38 + } if m.ValidatorSetUpdate != nil { { size, err := m.ValidatorSetUpdate.MarshalToSizedBuffer(dAtA[:i]) @@ -7299,6 +7368,20 @@ func (m *ResponseProcessProposal) MarshalToSizedBuffer(dAtA []byte) (int, error) _ = i var l int _ = l + if len(m.Events) > 0 { + for iNdEx := len(m.Events) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Events[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + } if m.ValidatorSetUpdate != nil { { size, err := m.ValidatorSetUpdate.MarshalToSizedBuffer(dAtA[:i]) @@ -7372,6 +7455,15 @@ func (m 
*ExtendVoteExtension) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.XSignRequestId != nil { + { + size := m.XSignRequestId.Size() + i -= size + if _, err := m.XSignRequestId.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + } + } if len(m.Extension) > 0 { i -= len(m.Extension) copy(dAtA[i:], m.Extension) @@ -7387,6 +7479,22 @@ func (m *ExtendVoteExtension) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *ExtendVoteExtension_SignRequestId) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExtendVoteExtension_SignRequestId) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.SignRequestId != nil { + i -= len(m.SignRequestId) + copy(dAtA[i:], m.SignRequestId) + i = encodeVarintTypes(dAtA, i, uint64(len(m.SignRequestId))) + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} func (m *ResponseExtendVote) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -7477,20 +7585,6 @@ func (m *ResponseFinalizeBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x10 } - if len(m.Events) > 0 { - for iNdEx := len(m.Events) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Events[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } return len(dAtA) - i, nil } @@ -7687,11 +7781,6 @@ func (m *ExecTxResult) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x30 } - if m.GasWanted != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.GasWanted)) - i-- - dAtA[i] = 0x28 - } if len(m.Info) > 0 { i -= len(m.Info) copy(dAtA[i:], m.Info) @@ -9266,6 +9355,9 @@ func (m *ResponsePrepareProposal) Size() (n int) { l = m.ValidatorSetUpdate.Size() n += 1 + l + sovTypes(uint64(l)) } + if m.AppVersion != 0 { + n += 1 + sovTypes(uint64(m.AppVersion)) + } return n } @@ -9296,6 +9388,12 @@ func (m *ResponseProcessProposal) Size() (n int) { l = m.ValidatorSetUpdate.Size() n += 1 + l + sovTypes(uint64(l)) } + if len(m.Events) > 0 { + for _, e := range m.Events { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } return n } @@ -9312,9 +9410,24 @@ func (m *ExtendVoteExtension) Size() (n int) { if l > 0 { n += 1 + l + sovTypes(uint64(l)) } + if m.XSignRequestId != nil { + n += m.XSignRequestId.Size() + } return n } +func (m *ExtendVoteExtension_SignRequestId) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.SignRequestId != nil { + l = len(m.SignRequestId) + n += 1 + l + sovTypes(uint64(l)) + } + return n +} func (m *ResponseExtendVote) Size() (n int) { if m == nil { return 0 @@ -9348,12 +9461,6 @@ func (m *ResponseFinalizeBlock) Size() (n int) { } var l int _ = l - if len(m.Events) > 0 { - for _, e := range m.Events { - l = e.Size() - n += 1 + l + sovTypes(uint64(l)) - } - } if m.RetainHeight != 0 { n += 1 + sovTypes(uint64(m.RetainHeight)) } @@ -9446,9 +9553,6 @@ func (m *ExecTxResult) Size() (n int) { if l > 0 { n += 1 + l + sovTypes(uint64(l)) } - if m.GasWanted != 0 { - n += 1 + sovTypes(uint64(m.GasWanted)) - } if m.GasUsed != 0 { n += 1 + sovTypes(uint64(m.GasUsed)) } @@ -15345,6 +15449,25 @@ func (m *ResponsePrepareProposal) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AppVersion", wireType) + } + m.AppVersion = 0 + for shift := uint(0); ; shift += 7 { + if 
shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.AppVersion |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skipTypes(dAtA[iNdEx:]) @@ -15554,6 +15677,40 @@ func (m *ResponseProcessProposal) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Events", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Events = append(m.Events, Event{}) + if err := m.Events[len(m.Events)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipTypes(dAtA[iNdEx:]) @@ -15657,6 +15814,39 @@ func (m *ExtendVoteExtension) Unmarshal(dAtA []byte) error { m.Extension = []byte{} } iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SignRequestId", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := make([]byte, postIndex-iNdEx) + copy(v, dAtA[iNdEx:postIndex]) + m.XSignRequestId = &ExtendVoteExtension_SignRequestId{v} + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipTypes(dAtA[iNdEx:]) @@ -15860,40 +16050,6 @@ func (m *ResponseFinalizeBlock) Unmarshal(dAtA []byte) error { return fmt.Errorf("proto: ResponseFinalizeBlock: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Events", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Events = append(m.Events, Event{}) - if err := m.Events[len(m.Events)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex case 2: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field RetainHeight", wireType) @@ -16501,25 +16657,6 @@ func (m *ExecTxResult) Unmarshal(dAtA []byte) error { } m.Info = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field GasWanted", wireType) - } - m.GasWanted = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - 
iNdEx++ - m.GasWanted |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } case 6: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field GasUsed", wireType) diff --git a/abci/types/types_test.go b/abci/types/types_test.go index b55ffe8bec..a46581c3fd 100644 --- a/abci/types/types_test.go +++ b/abci/types/types_test.go @@ -53,7 +53,6 @@ func TestHashDeterministicFieldsOnly(t *testing.T) { Data: []byte("transaction"), Log: "nondeterministic data: abc", Info: "nondeterministic data: abc", - GasWanted: 1000, GasUsed: 1000, Events: []abci.Event{}, Codespace: "nondeterministic.data.abc", @@ -63,7 +62,6 @@ func TestHashDeterministicFieldsOnly(t *testing.T) { Data: []byte("transaction"), Log: "nondeterministic data: def", Info: "nondeterministic data: def", - GasWanted: 1000, GasUsed: 1000, Events: []abci.Event{}, Codespace: "nondeterministic.data.def", diff --git a/cmd/abcidump/cmd/capture.go b/cmd/abcidump/cmd/capture.go index f05d359efe..af76ebdd8e 100644 --- a/cmd/abcidump/cmd/capture.go +++ b/cmd/abcidump/cmd/capture.go @@ -68,7 +68,7 @@ func (captureCmd *CaptureCmd) Command() *cobra.Command { } // RunE executes traffic capture -func (captureCmd *CaptureCmd) RunE(cmd *cobra.Command, args []string) error { +func (captureCmd *CaptureCmd) RunE(_cmd *cobra.Command, _args []string) error { logger.Debug("Starting packet capture", "port", captureCmd.Port) return captureCmd.capture() diff --git a/cmd/abcidump/cmd/cbor.go b/cmd/abcidump/cmd/cbor.go index 69d3769914..827260871e 100644 --- a/cmd/abcidump/cmd/cbor.go +++ b/cmd/abcidump/cmd/cbor.go @@ -54,7 +54,7 @@ func (cborCmd *CborCmd) Command() *cobra.Command { } // PreRunE parses command line arguments -func (cborCmd *CborCmd) PreRunE(_ *cobra.Command, args []string) (err error) { +func (cborCmd *CborCmd) PreRunE(_ *cobra.Command, _args []string) (err error) { if cborCmd.Input, err = loadInputData(cborCmd.InputData, cborCmd.InputFormat); err != nil { return err } @@ -142,7 +142,7 @@ func marshalMap(mapToMarshal map[interface{}]interface{}, indent []byte, out io. } // RunE executes main logic of this command -func (cborCmd *CborCmd) RunE(_ *cobra.Command, args []string) error { +func (cborCmd *CborCmd) RunE(_ *cobra.Command, _args []string) error { data, err := io.ReadAll(cborCmd.Input) if err != nil { return fmt.Errorf("cannot read data: %w", err) diff --git a/cmd/abcidump/cmd/parse.go b/cmd/abcidump/cmd/parse.go index 7739a7967a..b0c750f2bc 100644 --- a/cmd/abcidump/cmd/parse.go +++ b/cmd/abcidump/cmd/parse.go @@ -72,7 +72,7 @@ func (parseCmd *ParseCmd) Command() *cobra.Command { } // PreRunE parses command line arguments -func (parseCmd *ParseCmd) PreRunE(cmd *cobra.Command, args []string) (err error) { +func (parseCmd *ParseCmd) PreRunE(_cmd *cobra.Command, _args []string) (err error) { if parseCmd.Input, err = loadInputData(parseCmd.InputData, parseCmd.InputFormat); err != nil { return err } @@ -114,7 +114,7 @@ func loadInputData(input, format string) (reader io.Reader, err error) { } // RunE executes parsing logic -func (parseCmd *ParseCmd) RunE(cmd *cobra.Command, args []string) error { +func (parseCmd *ParseCmd) RunE(cmd *cobra.Command, _args []string) error { var err error parser := parser.NewParser(cmd.InOrStdin()) diff --git a/cmd/priv_val_server/main.go b/cmd/priv_val_server/main.go index 0866830269..9d32721102 100644 --- a/cmd/priv_val_server/main.go +++ b/cmd/priv_val_server/main.go @@ -161,7 +161,11 @@ func registerPrometheus(addr string, s *grpc.Server) *http.Server { // Initialize all metrics. 
grpcMetrics.InitializeMetrics(s) // create http server to serve prometheus - httpServer := &http.Server{Handler: promhttp.HandlerFor(reg, promhttp.HandlerOpts{}), Addr: addr} + httpServer := &http.Server{ + Handler: promhttp.HandlerFor(reg, promhttp.HandlerOpts{}), + Addr: addr, + ReadHeaderTimeout: 10 * time.Second, + } go func() { if err := httpServer.ListenAndServe(); err != nil { diff --git a/cmd/tenderdash/commands/compact.go b/cmd/tenderdash/commands/compact.go index 93903f3dad..a9360c0a01 100644 --- a/cmd/tenderdash/commands/compact.go +++ b/cmd/tenderdash/commands/compact.go @@ -27,7 +27,7 @@ the planned refactor to the storage engine. Currently, only GoLevelDB is supported. `, - RunE: func(cmd *cobra.Command, args []string) error { + RunE: func(_cmd *cobra.Command, _args []string) error { if cfg.DBBackend != "goleveldb" { return errors.New("compaction is currently only supported with goleveldb") } diff --git a/cmd/tenderdash/commands/debug/kill.go b/cmd/tenderdash/commands/debug/kill.go index c137475a98..4bfdb394cd 100644 --- a/cmd/tenderdash/commands/debug/kill.go +++ b/cmd/tenderdash/commands/debug/kill.go @@ -123,7 +123,7 @@ func killProc(pid int, dir string) error { // pipe STDERR output from tailing the Tendermint process to a file // // NOTE: This will only work on UNIX systems. - cmd := exec.Command("tail", "-f", fmt.Sprintf("/proc/%d/fd/2", pid)) // nolint: gosec + cmd := exec.Command("tail", "-f", fmt.Sprintf("/proc/%d/fd/2", pid)) //nolint:gosec outFile, err := os.Create(filepath.Join(dir, "stacktrace.out")) if err != nil { diff --git a/cmd/tenderdash/commands/debug/util.go b/cmd/tenderdash/commands/debug/util.go index 4bc169b4fa..445f37a7dd 100644 --- a/cmd/tenderdash/commands/debug/util.go +++ b/cmd/tenderdash/commands/debug/util.go @@ -67,7 +67,7 @@ func copyConfig(home, dir string) error { func dumpProfile(dir, addr, profile string, debug int) error { endpoint := fmt.Sprintf("%s/debug/pprof/%s?debug=%d", addr, profile, debug) - resp, err := http.Get(endpoint) // nolint: gosec + resp, err := http.Get(endpoint) //nolint:gosec if err != nil { return fmt.Errorf("failed to query for %s profile: %w", profile, err) } diff --git a/cmd/tenderdash/commands/gen_node_key.go b/cmd/tenderdash/commands/gen_node_key.go index 3141662058..a17a95a8a5 100644 --- a/cmd/tenderdash/commands/gen_node_key.go +++ b/cmd/tenderdash/commands/gen_node_key.go @@ -57,7 +57,7 @@ Seed phrase and optional password is read from standard input.`, return cmd } -func genNodeKeyFlagsPreRunE(cmd *cobra.Command, args []string) error { +func genNodeKeyFlagsPreRunE(_cmd *cobra.Command, _args []string) error { if useSeedPhrase && pemFile != "" { return fmt.Errorf("--%s cannot be be used with --%s", flagFromMnemonic, flagFromPem) } @@ -138,7 +138,7 @@ func readMnemonic(in io.Reader, out io.Writer) (mnemonic string, password string } // nodeKeyFromMnemonic reads BIP39 mnemonic and optional passphrase from stdin, and derives node key from it. 
-func nodeKeyFromMnemonic(cmd *cobra.Command, args []string) (types.NodeKey, error) { +func nodeKeyFromMnemonic(cmd *cobra.Command, _args []string) (types.NodeKey, error) { mnemonic, password, err := readMnemonic(cmd.InOrStdin(), cmd.OutOrStdout()) if err != nil { return types.NodeKey{}, err diff --git a/cmd/tenderdash/commands/init.go b/cmd/tenderdash/commands/init.go index 9cd4b28640..5af1f04add 100644 --- a/cmd/tenderdash/commands/init.go +++ b/cmd/tenderdash/commands/init.go @@ -59,7 +59,6 @@ func initFilesWithConfig(ctx context.Context, conf nodeConfig, logger log.Logger pv *privval.FilePV err error ) - if conf.Mode == config.ModeValidator { // private validator privValKeyFile := conf.PrivValidator.KeyFile() @@ -74,8 +73,8 @@ func initFilesWithConfig(ctx context.Context, conf nodeConfig, logger log.Logger "stateFile", privValStateFile) } else { pv = privval.GenFilePV(privValKeyFile, privValStateFile) - if err != nil { - return err + if pv == nil { + return fmt.Errorf("failed to create priv validator at %v", privValKeyFile) } if err := pv.Save(); err != nil { return err @@ -150,6 +149,5 @@ func initFilesWithConfig(ctx context.Context, conf nodeConfig, logger log.Logger return err } logger.Info("Generated config", "mode", conf.Mode) - return nil } diff --git a/cmd/tenderdash/commands/reindex_event.go b/cmd/tenderdash/commands/reindex_event.go index 961a60e635..43250430b8 100644 --- a/cmd/tenderdash/commands/reindex_event.go +++ b/cmd/tenderdash/commands/reindex_event.go @@ -202,9 +202,7 @@ func eventReIndex(cmd *cobra.Command, args eventReIndexArgs) error { NumTxs: int64(len(b.Txs)), ResultProcessProposal: *r.ProcessProposal, } - if r.FinalizeBlock != nil { - e.ResultFinalizeBlock = *r.FinalizeBlock - } + var batch *indexer.Batch if e.NumTxs > 0 { batch = indexer.NewBatch(e.NumTxs) diff --git a/cmd/tenderdash/commands/replay.go b/cmd/tenderdash/commands/replay.go index 57059ae975..6dbd20e838 100644 --- a/cmd/tenderdash/commands/replay.go +++ b/cmd/tenderdash/commands/replay.go @@ -13,8 +13,8 @@ func MakeReplayCommand(conf *config.Config, logger log.Logger) *cobra.Command { return &cobra.Command{ Use: "replay", Short: "Replay messages from WAL", - RunE: func(cmd *cobra.Command, args []string) error { - return consensus.RunReplayFile(cmd.Context(), logger, conf.BaseConfig, conf.Consensus, false) + RunE: func(cmd *cobra.Command, _args []string) error { + return consensus.RunReplayFile(cmd.Context(), logger, *conf, conf.Consensus, false) }, } } @@ -24,8 +24,8 @@ func MakeReplayConsoleCommand(conf *config.Config, logger log.Logger) *cobra.Com return &cobra.Command{ Use: "replay-console", Short: "Replay messages from WAL in a console", - RunE: func(cmd *cobra.Command, args []string) error { - return consensus.RunReplayFile(cmd.Context(), logger, conf.BaseConfig, conf.Consensus, true) + RunE: func(cmd *cobra.Command, _args []string) error { + return consensus.RunReplayFile(cmd.Context(), logger, *conf, conf.Consensus, true) }, } } diff --git a/cmd/tenderdash/commands/reset.go b/cmd/tenderdash/commands/reset.go index a8c491dcfe..30822ddcdd 100644 --- a/cmd/tenderdash/commands/reset.go +++ b/cmd/tenderdash/commands/reset.go @@ -146,7 +146,7 @@ func ResetState(dbDir string, logger log.Logger) error { // ResetFilePV loads the file private validator and resets the watermark to 0. If used on an existing network, // this can cause the node to double sign. // XXX: this is unsafe and should only suitable for testnets. 
-func ResetFilePV(privValKeyFile, privValStateFile string, logger log.Logger, keyType string) error { +func ResetFilePV(privValKeyFile, privValStateFile string, logger log.Logger, _keyType string) error { if _, err := os.Stat(privValKeyFile); err == nil { pv, err := privval.LoadFilePVEmptyState(privValKeyFile, privValStateFile) if err != nil { diff --git a/cmd/tenderdash/commands/run_node.go b/cmd/tenderdash/commands/run_node.go index 8abe55755c..820eb7f8fe 100644 --- a/cmd/tenderdash/commands/run_node.go +++ b/cmd/tenderdash/commands/run_node.go @@ -48,10 +48,10 @@ func AddNodeFlags(cmd *cobra.Command, conf *cfg.Config) { // abci flags cmd.Flags().String( "proxy-app", - conf.ProxyApp, + conf.Abci.Address, "proxy app address, or one of: 'kvstore',"+ " 'persistent_kvstore', 'e2e' or 'noop' for local testing.") - cmd.Flags().String("abci", conf.ABCI, "specify abci transport (socket | grpc)") + cmd.Flags().String("abci", conf.Abci.Transport, "specify abci transport (socket | grpc | routed)") // rpc flags cmd.Flags().String("rpc.laddr", conf.RPC.ListenAddress, "RPC listen address. Port required") @@ -98,7 +98,7 @@ func NewRunNodeCmd(nodeProvider cfg.ServiceProvider, conf *cfg.Config, logger lo Use: "start", Aliases: []string{"node", "run"}, Short: "Run the tendermint node", - RunE: func(cmd *cobra.Command, args []string) error { + RunE: func(cmd *cobra.Command, _args []string) error { if err := checkGenesisHash(conf); err != nil { return err } diff --git a/config/config.go b/config/config.go index 985236acef..82557e8412 100644 --- a/config/config.go +++ b/config/config.go @@ -12,6 +12,7 @@ import ( "time" "github.com/dashpay/dashd-go/btcjson" + "github.com/hashicorp/go-multierror" "github.com/dashpay/tenderdash/internal/test/factory" "github.com/dashpay/tenderdash/libs/log" @@ -66,6 +67,9 @@ type Config struct { // Top level options use an anonymous struct BaseConfig `mapstructure:",squash"` + // Options for ABCI application connectivity + Abci *AbciConfig `mapstructure:"abci"` + // Options for services RPC *RPCConfig `mapstructure:"rpc"` P2P *P2PConfig `mapstructure:"p2p"` @@ -80,6 +84,7 @@ type Config struct { // DefaultConfig returns a default configuration for a Tendermint node func DefaultConfig() *Config { return &Config{ + Abci: DefaultAbciConfig(), BaseConfig: DefaultBaseConfig(), RPC: DefaultRPCConfig(), P2P: DefaultP2PConfig(), @@ -102,6 +107,7 @@ func DefaultValidatorConfig() *Config { // TestConfig returns a configuration that can be used for testing func TestConfig() *Config { return &Config{ + Abci: TestAbciConfig(), BaseConfig: TestBaseConfig(), RPC: TestRPCConfig(), P2P: TestP2PConfig(), @@ -128,25 +134,36 @@ func (cfg *Config) SetRoot(root string) *Config { // ValidateBasic performs basic validation (checking param bounds, etc.) and // returns an error if any check fails. 
func (cfg *Config) ValidateBasic() error { + var errs error + if err := cfg.BaseConfig.ValidateBasic(); err != nil { - return err + errs = multierror.Append(errs, err) + } + + // ignore [abci] section on seed nodes + if cfg.Mode != ModeSeed { + if err := cfg.Abci.ValidateBasic(); err != nil { + errs = multierror.Append(errs, fmt.Errorf("error in [abci] section: %w", err)) + } } + if err := cfg.RPC.ValidateBasic(); err != nil { - return fmt.Errorf("error in [rpc] section: %w", err) + errs = multierror.Append(errs, fmt.Errorf("error in [rpc] section: %w", err)) } if err := cfg.Mempool.ValidateBasic(); err != nil { - return fmt.Errorf("error in [mempool] section: %w", err) + errs = multierror.Append(errs, fmt.Errorf("error in [mempool] section: %w", err)) } if err := cfg.StateSync.ValidateBasic(); err != nil { - return fmt.Errorf("error in [statesync] section: %w", err) + errs = multierror.Append(errs, fmt.Errorf("error in [statesync] section: %w", err)) } if err := cfg.Consensus.ValidateBasic(); err != nil { - return fmt.Errorf("error in [consensus] section: %w", err) + errs = multierror.Append(errs, fmt.Errorf("error in [consensus] section: %w", err)) } if err := cfg.Instrumentation.ValidateBasic(); err != nil { - return fmt.Errorf("error in [instrumentation] section: %w", err) + errs = multierror.Append(errs, fmt.Errorf("error in [instrumentation] section: %w", err)) } - return nil + + return errs } func (cfg *Config) DeprecatedFieldWarning() error { @@ -165,10 +182,6 @@ type BaseConfig struct { //nolint: maligned // This should be set in viper so it can unmarshal into this struct RootDir string `mapstructure:"home"` - // TCP or UNIX socket address of the ABCI application, - // or the name of an ABCI application compiled in with the Tendermint binary - ProxyApp string `mapstructure:"proxy-app"` - // A custom human readable name for this node Moniker string `mapstructure:"moniker"` @@ -223,9 +236,6 @@ type BaseConfig struct { //nolint: maligned // A JSON file containing the private key to use for p2p authenticated encryption NodeKey string `mapstructure:"node-key-file"` - // Mechanism to connect to the ABCI application: socket | grpc - ABCI string `mapstructure:"abci"` - // If true, query the ABCI app on connecting to a new peer // so the app can decide if we should keep the connection or not FilterPeers bool `mapstructure:"filter-peers"` // false @@ -240,25 +250,6 @@ func DefaultBaseConfig() BaseConfig { NodeKey: defaultNodeKeyPath, Mode: defaultMode, Moniker: defaultMoniker, - ProxyApp: "tcp://127.0.0.1:26658", - ABCI: "socket", - LogLevel: DefaultLogLevel, - LogFormat: log.LogFormatPlain, - FilterPeers: false, - DBBackend: "goleveldb", - DBPath: "data", - } -} - -// SingleNodeBaseConfig returns a default base configuration for a Tendermint node -func SingleNodeBaseConfig() BaseConfig { - return BaseConfig{ - Genesis: defaultGenesisJSONPath, - NodeKey: defaultNodeKeyPath, - Mode: defaultMode, - Moniker: defaultMoniker, - ProxyApp: "tcp://127.0.0.1:26658", - ABCI: "socket", LogLevel: DefaultLogLevel, LogFormat: log.LogFormatPlain, FilterPeers: false, @@ -272,7 +263,6 @@ func TestBaseConfig() BaseConfig { cfg := DefaultBaseConfig() cfg.chainID = factory.DefaultTestChainID cfg.Mode = ModeValidator - cfg.ProxyApp = "kvstore" cfg.DBBackend = "memdb" return cfg } @@ -441,6 +431,73 @@ func (cfg *PrivValidatorConfig) AreSecurityOptionsPresent() bool { } } +//----------------------------------------------------------------------------- +// AbciConfig + +// AbciConfig defines the configuration 
options for the ABCI client connection +type AbciConfig struct { + // TCP or UNIX socket address of the ABCI application, or routing rules for the routed ABCI client, + // or the name of an ABCI application compiled in with the Tendermint binary + Address string `mapstructure:"address"` + + // Transport protocol used to connect to the ABCI application: socket | grpc | routed + Transport string `mapstructure:"transport"` + + // Maximum number of simultaneous connections to the ABCI application + // per method. Maps method names, like "echo", to the number of concurrent requests. + // Special method name "*" can be used to set the default limit for methods not explicitly listed. + GrpcConcurrency map[string]uint16 `mapstructure:"grpc-concurrency"` + + // Other options should be empty + Other map[string]interface{} `mapstructure:",remain"` +} + +// DefaultAbciConfig returns a default ABCI configuration for a Tendermint node +func DefaultAbciConfig() *AbciConfig { + return &AbciConfig{ + Address: "tcp://127.0.0.1:26658", + Transport: "socket", + GrpcConcurrency: map[string]uint16{"*": 0}, + } +} + +// TestAbciConfig returns a configuration for testing the ABCI client +func TestAbciConfig() *AbciConfig { + cfg := DefaultAbciConfig() + cfg.Address = "kvstore" + return cfg +} + +// ValidateBasic performs basic validation (checking param bounds, etc.) and +// returns an error if any check fails. +func (cfg *AbciConfig) ValidateBasic() error { + var err error + + if cfg == nil { + err = multierror.Append(err, errors.New("[abci] config section is nil")) + return err + } + + for key := range cfg.Other { + err = multierror.Append(err, fmt.Errorf("unknown field: %s", key)) + } + + // empty transport and address is allowed, as it means we use the built-in app + if !(cfg.Transport == "" && cfg.Address == "") { + switch cfg.Transport { + case "socket", "grpc", "routed": + default: + err = multierror.Append(err, fmt.Errorf("unknown ABCI connection method: %v", cfg.Transport)) + } + + if len(cfg.Address) == 0 { + err = multierror.Append(err, errors.New("address cannot be empty")) + } + } + + return err +} + //----------------------------------------------------------------------------- // RPCConfig @@ -517,6 +574,13 @@ type RPCConfig struct { // See https://github.com/tendermint/tendermint/issues/3435 TimeoutBroadcastTxCommit time.Duration `mapstructure:"timeout-broadcast-tx-commit"` + // Timeout of transaction broadcast to mempool; 0 to disable. + // + // This setting affects the timeout of CheckTx operations used before + // adding a transaction to the mempool. If the operation takes longer, + // the transaction is rejected with an error.
+ TimeoutBroadcastTx time.Duration `mapstructure:"timeout-broadcast-tx"` + // Maximum size of request body, in bytes MaxBodyBytes int64 `mapstructure:"max-body-bytes"` @@ -564,6 +628,7 @@ func DefaultRPCConfig() *RPCConfig { EventLogMaxItems: 0, TimeoutBroadcastTxCommit: 10 * time.Second, + TimeoutBroadcastTx: 0, MaxBodyBytes: int64(1000000), // 1MB MaxHeaderBytes: 1 << 20, // same as the net/http default @@ -602,6 +667,9 @@ func (cfg *RPCConfig) ValidateBasic() error { if cfg.TimeoutBroadcastTxCommit < 0 { return errors.New("timeout-broadcast-tx-commit can't be negative") } + if cfg.TimeoutBroadcastTx < 0 { + return errors.New("timeout-broadcast-tx can't be negative") + } if cfg.MaxBodyBytes < 0 { return errors.New("max-body-bytes can't be negative") } @@ -788,6 +856,7 @@ type MempoolConfig struct { MaxTxsBytes int64 `mapstructure:"max-txs-bytes"` // Size of the cache (used to filter transactions we saw earlier) in transactions + // Should be much bigger than mempool size. CacheSize int `mapstructure:"cache-size"` // Do not remove invalid transactions from the cache (default: false) @@ -819,6 +888,28 @@ type MempoolConfig struct { // has existed in the mempool at least TTLNumBlocks number of blocks or if // it's insertion time into the mempool is beyond TTLDuration. TTLNumBlocks int64 `mapstructure:"ttl-num-blocks"` + + // TxSendRateLimit is the rate limit for sending transactions to peers, in + // transactions per second. If zero, the rate limiter is disabled. + // + // Default: 0 + TxSendRateLimit float64 `mapstructure:"tx-send-rate-limit"` + + // TxRecvRateLimit is the rate limit for receiving transactions from peers, in + // transactions per second. If zero, the rate limiter is disabled. + // + // Default: 0 + TxRecvRateLimit float64 `mapstructure:"tx-recv-rate-limit"` + + // TxEnqueueTimeout defines how long new mempool transaction will wait when internal + // processing queue is full (most likely due to busy CheckTx execution). + // Once the timeout is reached, the transaction will be silently dropped. + // If set to 0, the timeout is disabled and transactions will wait indefinitely. + TxEnqueueTimeout time.Duration `mapstructure:"tx-enqueue-timeout"` + + // Timeout of check TX operations received from other nodes. + // Use 0 to disable. + TimeoutCheckTx time.Duration `mapstructure:"timeout-check-tx"` } // DefaultMempoolConfig returns a default configuration for the Tendermint mempool. @@ -827,12 +918,13 @@ func DefaultMempoolConfig() *MempoolConfig { Broadcast: true, // Each signature verification takes .5ms, Size reduced until we implement // ABCI Recheck - Size: 5000, - MaxTxsBytes: 1024 * 1024 * 1024, // 1GB - CacheSize: 10000, - MaxTxBytes: 1024 * 1024, // 1MB - TTLDuration: 0 * time.Second, - TTLNumBlocks: 0, + Size: 5000, + MaxTxsBytes: 1024 * 1024 * 1024, // 1GB + CacheSize: 10000, + MaxTxBytes: 1024 * 1024, // 1MB + TTLDuration: 0 * time.Second, + TTLNumBlocks: 0, + TimeoutCheckTx: 0, } } @@ -864,6 +956,12 @@ func (cfg *MempoolConfig) ValidateBasic() error { if cfg.TTLNumBlocks < 0 { return errors.New("ttl-num-blocks can't be negative") } + if cfg.TxEnqueueTimeout < 0 { + return errors.New("tx-enqueue-timeout can't be negative") + } + if cfg.TimeoutCheckTx < 0 { + return errors.New("timeout-check-tx can't be negative") + } return nil } @@ -1049,20 +1147,6 @@ type ConsensusConfig struct { // If it is set to true, the consensus engine will proceed to the next height // as soon as the node has gathered votes from all of the validators on the network. 
UnsafeBypassCommitTimeoutOverride *bool `mapstructure:"unsafe-bypass-commit-timeout-override"` - - // Deprecated timeout parameters. These parameters are present in this struct - // so that they can be parsed so that validation can check if they have erroneously - // been included and provide a helpful error message. - // These fields should be completely removed in v0.37. - // See: https://github.com/tendermint/tendermint/issues/8188 - DeprecatedTimeoutPropose *interface{} `mapstructure:"timeout-propose"` - DeprecatedTimeoutProposeDelta *interface{} `mapstructure:"timeout-propose-delta"` - DeprecatedTimeoutPrevote *interface{} `mapstructure:"timeout-prevote"` - DeprecatedTimeoutPrevoteDelta *interface{} `mapstructure:"timeout-prevote-delta"` - DeprecatedTimeoutPrecommit *interface{} `mapstructure:"timeout-precommit"` - DeprecatedTimeoutPrecommitDelta *interface{} `mapstructure:"timeout-precommit-delta"` - DeprecatedTimeoutCommit *interface{} `mapstructure:"timeout-commit"` - DeprecatedSkipTimeoutCommit *interface{} `mapstructure:"skip-timeout-commit"` } // DefaultConsensusConfig returns a default configuration for the consensus service @@ -1141,30 +1225,7 @@ func (cfg *ConsensusConfig) ValidateBasic() error { func (cfg *ConsensusConfig) DeprecatedFieldWarning() error { var fields []string - if cfg.DeprecatedSkipTimeoutCommit != nil { - fields = append(fields, "skip-timeout-commit") - } - if cfg.DeprecatedTimeoutPropose != nil { - fields = append(fields, "timeout-propose") - } - if cfg.DeprecatedTimeoutProposeDelta != nil { - fields = append(fields, "timeout-propose-delta") - } - if cfg.DeprecatedTimeoutPrevote != nil { - fields = append(fields, "timeout-prevote") - } - if cfg.DeprecatedTimeoutPrevoteDelta != nil { - fields = append(fields, "timeout-prevote-delta") - } - if cfg.DeprecatedTimeoutPrecommit != nil { - fields = append(fields, "timeout-precommit") - } - if cfg.DeprecatedTimeoutPrecommitDelta != nil { - fields = append(fields, "timeout-precommit-delta") - } - if cfg.DeprecatedTimeoutCommit != nil { - fields = append(fields, "timeout-commit") - } + if cfg.DeprecatedQuorumType != 0 { fields = append(fields, "quorum-type") } diff --git a/config/config_test.go b/config/config_test.go index a86ab84636..5833f00e1b 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -34,6 +34,65 @@ func TestConfigValidateBasic(t *testing.T) { assert.Error(t, cfg.ValidateBasic()) } +func TestAbciConfigValidation(t *testing.T) { + type testCase struct { + name string + *AbciConfig + expectErr string // empty when no error, or error message to expect + } + // negative test cases that should fail on validator, but pass on seeds + invalidCases := []testCase{ + { + name: "no abci config", + expectErr: "", + }, + { + name: "unexpected data", + AbciConfig: &AbciConfig{Other: map[string]interface{}{"foo": "bar"}}, + expectErr: "", + }, + { + name: "invalid transport", + AbciConfig: &AbciConfig{ + Transport: "invalid", + Address: "tcp://127.0.0.1:1234", + }, + expectErr: "", + }, + { + name: "missing address", + AbciConfig: &AbciConfig{ + Transport: "invalid", + Address: "", + }, + expectErr: "", + }, + } + + for _, tc := range invalidCases { + tc := tc + + t.Run(tc.name+" on validator", func(t *testing.T) { + cfg := DefaultConfig() + cfg.Mode = ModeValidator + cfg.Abci = nil + + err := cfg.ValidateBasic() + if tc.expectErr != "" { + assert.ErrorContains(t, err, tc.expectErr) + } + }) + t.Run(tc.name+" on seed", func(t *testing.T) { + cfg := DefaultConfig() + cfg.Mode = ModeSeed + cfg.Abci = nil + 
+ err := cfg.ValidateBasic() + assert.NoError(t, err) + }) + } +} + func TestTLSConfiguration(t *testing.T) { cfg := DefaultConfig() cfg.SetRoot("/home/user") diff --git a/config/toml.go b/config/toml.go index d3e7aa2eca..90d3163f38 100644 --- a/config/toml.go +++ b/config/toml.go @@ -86,10 +86,6 @@ const defaultConfigTemplate = `# This is a TOML config file. ### Main Base Config Options ### ####################################################################### -# TCP or UNIX socket address of the ABCI application, -# or the name of an ABCI application compiled in with the Tendermint binary -proxy-app = "{{ .BaseConfig.ProxyApp }}" - # A custom human readable name for this node moniker = "{{ .BaseConfig.Moniker }}" @@ -146,13 +142,45 @@ genesis-file = "{{ js .BaseConfig.Genesis }}" # Path to the JSON file containing the private key to use for node authentication in the p2p protocol node-key-file = "{{ js .BaseConfig.NodeKey }}" -# Mechanism to connect to the ABCI application: socket | grpc -abci = "{{ .BaseConfig.ABCI }}" - # If true, query the ABCI app on connecting to a new peer # so the app can decide if we should keep the connection or not filter-peers = {{ .BaseConfig.FilterPeers }} +####################################################### +### ABCI App Connection Options ### +####################################################### + + +[abci] + + +# TCP or UNIX socket address of the ABCI application, +# or routing rules for routed multi-app setup, +# or the name of an ABCI application compiled in with the Tendermint binary +# Example for routed multi-app setup: +# abci = "routed" +# address = "Info:socket:unix:///tmp/socket.1,Info:socket:unix:///tmp/socket.2,CheckTx:socket:unix:///tmp/socket.1,*:socket:unix:///tmp/socket.3" + +address = "{{ .Abci.Address }}" + +# Transport mechanism to connect to the ABCI application: socket | grpc | routed +transport = "{{ .Abci.Transport }}" + +# Maximum number of simultaneous connections to the ABCI application +# per method. Maps a gRPC method name, like "echo", to the number of concurrent connections. +# Special value "*" can be used to set the default limit for methods not explicitly listed. +# +# Example: +# +# grpc-concurrency = [ +# { "*" = 10 }, +# { "echo" = 2 }, +# { "info" = 2 }, +#] grpc-concurrency = [{{ range $key, $value := .Abci.GrpcConcurrency }} + { "{{ $key }}" = {{ $value }} },{{ end }} +] + ####################################################### ### Priv Validator Configuration ### @@ -268,6 +296,13 @@ event-log-max-items = {{ .RPC.EventLogMaxItems }} # See https://github.com/tendermint/tendermint/issues/3435 timeout-broadcast-tx-commit = "{{ .RPC.TimeoutBroadcastTxCommit }}" +# Timeout of transaction broadcast to mempool; 0 to disable. +# +# This setting affects the timeout of CheckTx operations used before +# adding a transaction to the mempool. If the operation takes longer, +# the transaction is rejected with an error. +timeout-broadcast-tx = "{{ .RPC.TimeoutBroadcastTx }}" + # Maximum size of request body, in bytes max-body-bytes = {{ .RPC.MaxBodyBytes }} @@ -389,6 +424,7 @@ size = {{ .Mempool.Size }} max-txs-bytes = {{ .Mempool.MaxTxsBytes }} # Size of the cache (used to filter transactions we saw earlier) in transactions +# Should be much bigger than the mempool size. cache-size = {{ .Mempool.CacheSize }} # Do not remove invalid transactions from the cache (default: false) @@ -421,6 +457,33 @@ ttl-duration = "{{ .Mempool.TTLDuration }}" # it's insertion time into the mempool is beyond ttl-duration.
ttl-num-blocks = {{ .Mempool.TTLNumBlocks }} + +# tx-send-rate-limit is the rate limit for sending transactions to peers, in transactions per second. +# This rate limit is individual for each peer. +# If zero, the rate limiter is disabled. +# +# Default: 0 +tx-send-rate-limit = {{ .Mempool.TxSendRateLimit }} + +# tx-recv-rate-limit is the rate limit for receiving transactions from peers, in transactions per second. +# This rate limit is individual for each peer. +# If zero, the rate limiter is disabled. +# +# Default: 0 +tx-recv-rate-limit = {{ .Mempool.TxRecvRateLimit }} + +# TxEnqueueTimeout defines how long a new mempool transaction (received +# from other nodes) will wait when the internal processing queue is full +# (most likely due to busy CheckTx execution). Once the timeout is reached, the transaction +# will be silently dropped. +# +# If set to 0, the timeout is disabled and transactions will wait indefinitely. +tx-enqueue-timeout = "{{ .Mempool.TxEnqueueTimeout }}" + +# Timeout of CheckTx operations received from other nodes, using the p2p protocol. +# Use 0 to disable. +timeout-check-tx = "{{ .Mempool.TimeoutCheckTx }}" + ####################################################### ### State Sync Configuration Options ### ####################################################### @@ -659,9 +722,7 @@ const testGenesisFmt = `{ "propose": "30000000", "propose_delta": "50000", "vote": "30000000", - "vote_delta": "50000", - "commit": "10000000", - "bypass_timeout_commit": true + "vote_delta": "50000" }, "evidence": { "max_age_num_blocks": "100000", diff --git a/config/toml_test.go b/config/toml_test.go index cf27c4484a..c6a4795738 100644 --- a/config/toml_test.go +++ b/config/toml_test.go @@ -63,7 +63,7 @@ func checkConfig(t *testing.T, configFile string) { var elems = []string{ "moniker", "seeds", - "proxy-app", + "address", "create-empty-blocks", "peer", "timeout", diff --git a/crypto/bls12381/bls12381_test.go b/crypto/bls12381/bls12381_test.go index 8a05069ba0..8166461e44 100644 --- a/crypto/bls12381/bls12381_test.go +++ b/crypto/bls12381/bls12381_test.go @@ -1,4 +1,4 @@ -// nolint:lll +//nolint:lll package bls12381 import ( @@ -378,7 +378,8 @@ func TestRecoverThresholdSignatureFromSharesCaseStudy(t *testing.T) { msg: "C4E3500CAEC0AEB79CFA05F10EBE77717BEEF5D51159E2A89D8FE98B696BE4A9", }, } - for i, tc := range testCases { + for i, testCase := range testCases { + tc := testCase t.Run(fmt.Sprintf("test-case #%d", i), func(t *testing.T) { t.Parallel() var err error diff --git a/crypto/merkle/proof_key_path_test.go b/crypto/merkle/proof_key_path_test.go index 13d26b3601..0442c9be1b 100644 --- a/crypto/merkle/proof_key_path_test.go +++ b/crypto/merkle/proof_key_path_test.go @@ -1,12 +1,10 @@ package merkle import ( - // it is ok to use math/rand here: we do not need a cryptographically secure random - // number generator here and we can run the tests a bit faster - "math/rand" "testing" "github.com/stretchr/testify/require" + "golang.org/x/exp/rand" ) func TestKeyPath(t *testing.T) { @@ -26,7 +24,7 @@ func TestKeyPath(t *testing.T) { keys[i][j] = alphanum[rand.Intn(len(alphanum))] } case KeyEncodingHex: - rand.Read(keys[i]) + _, _ = rand.Read(keys[i]) default: require.Fail(t, "Unexpected encoding") } diff --git a/crypto/quorum.go b/crypto/quorum.go deleted file mode 100644 index 70b0753611..0000000000 --- a/crypto/quorum.go +++ /dev/null @@ -1,26 +0,0 @@ -package crypto - -import ( - bls "github.com/dashpay/bls-signatures/go-bindings" - "github.com/dashpay/dashd-go/btcjson" -) - -// SignID
returns signing session data that will be signed to get threshold signature share. -// See DIP-0007 -func SignID(llmqType btcjson.LLMQType, quorumHash QuorumHash, requestID []byte, messageHash []byte) []byte { - var blsQuorumHash bls.Hash - copy(blsQuorumHash[:], quorumHash.Bytes()) - - var blsRequestID bls.Hash - copy(blsRequestID[:], requestID) - - var blsMessageHash bls.Hash - copy(blsMessageHash[:], messageHash) - - blsSignHash := bls.BuildSignHash(uint8(llmqType), blsQuorumHash, blsRequestID, blsMessageHash) - - signHash := make([]byte, 32) - copy(signHash, blsSignHash[:]) - - return signHash -} diff --git a/crypto/secp256k1/secp256k1.go b/crypto/secp256k1/secp256k1.go index 1827a126f8..bbb23e1fe3 100644 --- a/crypto/secp256k1/secp256k1.go +++ b/crypto/secp256k1/secp256k1.go @@ -254,15 +254,15 @@ func (privKey PrivKey) SignDigest(msg []byte) ([]byte, error) { return sigBytes, nil } -func (pubKey PubKey) AggregateSignatures(sigSharesData [][]byte, messages [][]byte) ([]byte, error) { +func (pubKey PubKey) AggregateSignatures(_sigSharesData [][]byte, _messages [][]byte) ([]byte, error) { return nil, errors.New("should not aggregate an edwards signature") } -func (pubKey PubKey) VerifyAggregateSignature(messages [][]byte, sig []byte) bool { +func (pubKey PubKey) VerifyAggregateSignature(_messages [][]byte, _sig []byte) bool { return false } -func (pubKey PubKey) VerifySignatureDigest(hash []byte, sig []byte) bool { +func (pubKey PubKey) VerifySignatureDigest(_hash []byte, _sig []byte) bool { return false } diff --git a/crypto/secp256k1/secp256k1_internal_test.go b/crypto/secp256k1/secp256k1_internal_test.go index dce8b53ca4..3556a4dd05 100644 --- a/crypto/secp256k1/secp256k1_internal_test.go +++ b/crypto/secp256k1/secp256k1_internal_test.go @@ -18,7 +18,7 @@ func Test_genPrivKey(t *testing.T) { copy(onePadded[32-len(oneB):32], oneB) t.Logf("one padded: %v, len=%v", onePadded, len(onePadded)) - validOne := append(empty, onePadded...) // nolint:gocritic + validOne := append(empty, onePadded...) 
//nolint:gocritic tests := []struct { name string notSoRand []byte diff --git a/dash/core/mock.go b/dash/core/mock.go index e396f646a7..8afd06d93f 100644 --- a/dash/core/mock.go +++ b/dash/core/mock.go @@ -118,7 +118,7 @@ func (mc *MockClient) GetNetworkInfo() (*btcjson.GetNetworkInfoResult, error) { }, nil } -func (mc *MockClient) MasternodeListJSON(filter string) (map[string]btcjson.MasternodelistResultJSON, error) { +func (mc *MockClient) MasternodeListJSON(_filter string) (map[string]btcjson.MasternodelistResultJSON, error) { proTxHash, err := mc.localPV.GetProTxHash(context.Background()) if err != nil { panic(err) @@ -152,13 +152,8 @@ func (mc *MockClient) QuorumSign( if !mc.canSign { return nil, errors.New("dash core mock client not set up for signing") } + signID := types.NewSignItemFromHash(quorumType, quorumHash, requestID, messageHash).SignHash - signID := crypto.SignID( - quorumType, - tmbytes.Reverse(quorumHash), - tmbytes.Reverse(requestID), - tmbytes.Reverse(messageHash), - ) privateKey, err := mc.localPV.GetPrivateKey(context.Background(), quorumHash) if err != nil { panic(err) @@ -190,12 +185,9 @@ func (mc *MockClient) QuorumVerify( if err := quorumType.Validate(); err != nil { return false, err } - signID := crypto.SignID( - quorumType, - tmbytes.Reverse(quorumHash), - tmbytes.Reverse(requestID), - tmbytes.Reverse(messageHash), - ) + + signID := types.NewSignItemFromHash(quorumType, quorumHash, requestID, messageHash).SignHash + thresholdPublicKey, err := mc.localPV.GetThresholdPublicKey(context.Background(), quorumHash) if err != nil { panic(err) diff --git a/dash/core/mocks/client.go b/dash/core/mocks/client.go index 8f5b8e7ba6..19511f16ac 100644 --- a/dash/core/mocks/client.go +++ b/dash/core/mocks/client.go @@ -18,6 +18,10 @@ type Client struct { func (_m *Client) Close() error { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Close") + } + var r0 error if rf, ok := ret.Get(0).(func() error); ok { r0 = rf() @@ -32,6 +36,10 @@ func (_m *Client) Close() error { func (_m *Client) GetNetworkInfo() (*btcjson.GetNetworkInfoResult, error) { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for GetNetworkInfo") + } + var r0 *btcjson.GetNetworkInfoResult var r1 error if rf, ok := ret.Get(0).(func() (*btcjson.GetNetworkInfoResult, error)); ok { @@ -58,6 +66,10 @@ func (_m *Client) GetNetworkInfo() (*btcjson.GetNetworkInfoResult, error) { func (_m *Client) MasternodeListJSON(filter string) (map[string]btcjson.MasternodelistResultJSON, error) { ret := _m.Called(filter) + if len(ret) == 0 { + panic("no return value specified for MasternodeListJSON") + } + var r0 map[string]btcjson.MasternodelistResultJSON var r1 error if rf, ok := ret.Get(0).(func(string) (map[string]btcjson.MasternodelistResultJSON, error)); ok { @@ -84,6 +96,10 @@ func (_m *Client) MasternodeListJSON(filter string) (map[string]btcjson.Masterno func (_m *Client) MasternodeStatus() (*btcjson.MasternodeStatusResult, error) { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for MasternodeStatus") + } + var r0 *btcjson.MasternodeStatusResult var r1 error if rf, ok := ret.Get(0).(func() (*btcjson.MasternodeStatusResult, error)); ok { @@ -110,6 +126,10 @@ func (_m *Client) MasternodeStatus() (*btcjson.MasternodeStatusResult, error) { func (_m *Client) Ping() error { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Ping") + } + var r0 error if rf, ok := ret.Get(0).(func() error); ok { r0 = rf() @@ -124,6 
+144,10 @@ func (_m *Client) Ping() error { func (_m *Client) QuorumInfo(quorumType btcjson.LLMQType, quorumHash bytes.HexBytes) (*btcjson.QuorumInfoResult, error) { ret := _m.Called(quorumType, quorumHash) + if len(ret) == 0 { + panic("no return value specified for QuorumInfo") + } + var r0 *btcjson.QuorumInfoResult var r1 error if rf, ok := ret.Get(0).(func(btcjson.LLMQType, bytes.HexBytes) (*btcjson.QuorumInfoResult, error)); ok { @@ -150,6 +174,10 @@ func (_m *Client) QuorumInfo(quorumType btcjson.LLMQType, quorumHash bytes.HexBy func (_m *Client) QuorumSign(quorumType btcjson.LLMQType, requestID bytes.HexBytes, messageHash bytes.HexBytes, quorumHash bytes.HexBytes) (*btcjson.QuorumSignResult, error) { ret := _m.Called(quorumType, requestID, messageHash, quorumHash) + if len(ret) == 0 { + panic("no return value specified for QuorumSign") + } + var r0 *btcjson.QuorumSignResult var r1 error if rf, ok := ret.Get(0).(func(btcjson.LLMQType, bytes.HexBytes, bytes.HexBytes, bytes.HexBytes) (*btcjson.QuorumSignResult, error)); ok { @@ -176,6 +204,10 @@ func (_m *Client) QuorumSign(quorumType btcjson.LLMQType, requestID bytes.HexByt func (_m *Client) QuorumVerify(quorumType btcjson.LLMQType, requestID bytes.HexBytes, messageHash bytes.HexBytes, signature bytes.HexBytes, quorumHash bytes.HexBytes) (bool, error) { ret := _m.Called(quorumType, requestID, messageHash, signature, quorumHash) + if len(ret) == 0 { + panic("no return value specified for QuorumVerify") + } + var r0 bool var r1 error if rf, ok := ret.Get(0).(func(btcjson.LLMQType, bytes.HexBytes, bytes.HexBytes, bytes.HexBytes, bytes.HexBytes) (bool, error)); ok { diff --git a/dash/llmq/llmq_test.go b/dash/llmq/llmq_test.go index 25827eea10..3e25b3cc5a 100644 --- a/dash/llmq/llmq_test.go +++ b/dash/llmq/llmq_test.go @@ -5,7 +5,6 @@ import ( "fmt" "math/rand" "testing" - "time" bls "github.com/dashpay/bls-signatures/go-bindings" "github.com/stretchr/testify/require" @@ -185,13 +184,11 @@ func TestLLMQ(t *testing.T) { }, } for i, tc := range testCases { + tc := tc t.Run(fmt.Sprintf("test-case #%d", i), func(t *testing.T) { t.Parallel() llmqData := tc.llmqDataGetter() - - // Validate threshold signature recoveryTestCase only works with minimum expected signatures - // and works from there up to the maximum - rand.Seed(time.Now().UnixNano()) + tc := tc for round := 0; round < tc.rounds; round++ { // For the given rounds shuffleSigsAndIDs the sigShares/ids each round to try various combinations @@ -242,7 +239,7 @@ type llmqTestCaseData struct { wantThresholdSig *bls.G2Element } -func mustGenerateLLMQData(proTxHashes []crypto.ProTxHash, m int, sigHash bls.Hash) *Data { +func mustGenerateLLMQData(proTxHashes []crypto.ProTxHash, m int, _sigHash bls.Hash) *Data { llmqData, err := Generate( proTxHashes, WithThreshold(m), diff --git a/dash/quorum/selectpeers/dip6_test.go b/dash/quorum/selectpeers/dip6_test.go index 2491eb3e0d..c56866caba 100644 --- a/dash/quorum/selectpeers/dip6_test.go +++ b/dash/quorum/selectpeers/dip6_test.go @@ -120,7 +120,7 @@ func TestDIP6(t *testing.T) { }, } - // nolint:scopelint + //nolint:scopelint for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { got, err := NewDIP6ValidatorSelector(tt.quorumHash).SelectValidators(tt.validators, tt.me) diff --git a/dash/quorum/selectpeers/sortable_validator_test.go b/dash/quorum/selectpeers/sortable_validator_test.go index 108d6134c0..c2851c7453 100644 --- a/dash/quorum/selectpeers/sortable_validator_test.go +++ b/dash/quorum/selectpeers/sortable_validator_test.go @@ -51,7 
+51,7 @@ func Test_sortableValidator_SortKey(t *testing.T) { }, } - // nolint:scopelint + //nolint:scopelint for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { v := newSortableValidator(*tt.Validator, tt.quorumHash) @@ -74,7 +74,7 @@ func Test_sortableValidator_Compare(t *testing.T) { want: 0, }, } - // nolint:scopelint + //nolint:scopelint for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { got := tt.left.compare(tt.right) diff --git a/dash/quorum/selectpeers/sorted_validator_list_test.go b/dash/quorum/selectpeers/sorted_validator_list_test.go index 972efc5577..8d65a59424 100644 --- a/dash/quorum/selectpeers/sorted_validator_list_test.go +++ b/dash/quorum/selectpeers/sorted_validator_list_test.go @@ -36,7 +36,7 @@ func Test_sortedValidatorList_Index(t *testing.T) { want: 4, }, } - // nolint:scopelint + //nolint:scopelint for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { got := tt.list.index(tt.search) diff --git a/dash/quorum/validator_conn_executor_test.go b/dash/quorum/validator_conn_executor_test.go index e5fb35705c..8ea0bc2591 100644 --- a/dash/quorum/validator_conn_executor_test.go +++ b/dash/quorum/validator_conn_executor_test.go @@ -592,7 +592,7 @@ func setup( } // cleanup frees some resources allocated for tests -func cleanup(t *testing.T, bus *eventbus.EventBus, dialer p2p.DashDialer, vc *ValidatorConnExecutor) { +func cleanup(_t *testing.T, bus *eventbus.EventBus, _dialer p2p.DashDialer, vc *ValidatorConnExecutor) { bus.Stop() vc.Stop() } @@ -620,7 +620,7 @@ func makeState(nVals int, height int64) (sm.State, dbm.DB) { return s, stateDB } -func makeBlock(ctx context.Context, t *testing.T, blockExec *sm.BlockExecutor, state sm.State, height int64, commit *types.Commit) *types.Block { +func makeBlock(ctx context.Context, t *testing.T, blockExec *sm.BlockExecutor, state sm.State, _height int64, commit *types.Commit) *types.Block { block, crs, err := blockExec.CreateProposalBlock(ctx, 1, 0, state, commit, state.Validators.Proposer.ProTxHash, 1) require.NoError(t, err) @@ -658,8 +658,9 @@ func (app *testApp) PrepareProposal(_ context.Context, req *abci.RequestPrepareP } return &abci.ResponsePrepareProposal{ - AppHash: resultsHash, - TxResults: results, + AppHash: resultsHash, + TxResults: results, + AppVersion: 1, }, nil } diff --git a/docs/introduction/install.md b/docs/introduction/install.md index 01b8464ae8..1d83811c6a 100644 --- a/docs/introduction/install.md +++ b/docs/introduction/install.md @@ -4,45 +4,62 @@ order: 3 # Install Tenderdash +## Using Dashmate (Recommended Method) + +Dashmate is a part of the Dash Platform and provides a comprehensive solution for installing the entire platform. We highly recommend using Dashmate for a seamless and straightforward installation process. + +For detailed instructions on how to set up a node using Dashmate, please refer to the official Dash documentation: [Set Up a Node](https://docs.dash.org/projects/platform/en/stable/docs/tutorials/setup-a-node.html). + ## From Binary To download pre-built binaries, see the [releases page](https://github.com/dashevo/tenderdash/releases). 
## From Source -You'll need `go` [installed](https://golang.org/doc/install) and the required -environment variables set, which can be done with the following commands: +Install the official Go release into the standard location and apply the standard PATH updates: -```sh -echo export GOPATH=\"\$HOME/go\" >> ~/.bash_profile -echo export PATH=\"\$PATH:\$GOPATH/bin\" >> ~/.bash_profile -``` + ```bash + curl https://webinstall.dev/go | sh + source ~/.config/envman/PATH.env + ``` -Get the source code: +Install git, cmake, build-essential (apt) or build-base (apk), and the other required libraries. -```sh -git clone https://github.com/dashevo/tenderdash.git -cd tenderdash -``` +For Debian-based distributions (e.g. Ubuntu): + + ```bash + sudo apt update + sudo apt install -y git build-essential cmake libgmp-dev + ``` -Then run: +For Alpine Linux: -```sh -make install -``` + ```bash + apk add --no-cache git build-base cmake gmp-dev + ``` -to put the binary in `$GOPATH/bin` or use: +Get the source code: -```sh -make build -``` + ```bash + git clone https://github.com/dashpay/tenderdash.git + pushd ./tenderdash/ + git submodule init + git submodule update + ``` + +Build: + +To put the binary in `$GOPATH/bin`: + + ```sh + make install + ``` -to put the binary in `./build`. +To put the binary in `./build`: -_DISCLAIMER_ The binary of Tenderdash is build/installed without the DWARF -symbol table. If you would like to build/install Tenderdash with the DWARF -symbol and debug information, remove `-s -w` from `BUILD_FLAGS` in the make -file. + ```sh + make build + ``` The latest Tenderdash is now installed. You can verify the installation by running: diff --git a/docs/tutorials/go-built-in.md b/docs/tutorials/go-built-in.md index 380ada0a72..3568be7736 100644 --- a/docs/tutorials/go-built-in.md +++ b/docs/tutorials/go-built-in.md @@ -43,10 +43,11 @@ Verify that you have the latest version of Go installed: ```sh $ go version -go version go1.19.x darwin/amd64 +go version go1.22.x darwin/amd64 ``` Note that the exact patch number may differ as Go releases come out. + ## 1.2 Creating a new Go project We'll start by creating a new Go project. First, initialize the project folder with `go mod init`. Running this command should create the `go.mod` file. @@ -270,6 +271,7 @@ func (app *KVStoreApplication) DeliverTx(req abcitypes.RequestDeliverTx) abcityp return abcitypes.ResponseDeliverTx{Code: 0} } ``` + Note that we check the validity of the transaction _again_ during `DeliverTx`. Transactions are not guaranteed to be valid when they are delivered to an application. This can happen if the application state is used to determine transaction @@ -300,6 +302,7 @@ func (app *KVStoreApplication) Commit() abcitypes.ResponseCommit { "github.com/dgraph-io/badger/v3" abcitypes "github.com/tendermint/tendermint/abci/types" ) + ``` You may have noticed that the application we are writing will _crash_ if it receives an @@ -493,6 +496,7 @@ Next, we create a database handle and use it to construct our ABCI application: ``` Then we construct a logger: + ```go ...
logger := tmlog.MustNewDefaultLogger(tmlog.LogFormatPlain, tmlog.LogLevelInfo, false) @@ -592,7 +596,7 @@ This will populate the `go.mod` with a release number followed by a hash for Ten ```go module github.com//kvstore -go 1.19 +go 1.22 require ( github.com/dgraph-io/badger/v3 v3.2103.2 diff --git a/docs/tutorials/go.md b/docs/tutorials/go.md index 7524a14b52..25b4deb6a4 100644 --- a/docs/tutorials/go.md +++ b/docs/tutorials/go.md @@ -454,7 +454,7 @@ This will populate the `go.mod` with a release number followed by a hash for Ten ```go module github.com//kvstore -go 1.19 +go 1.22 require ( github.com/dgraph-io/badger/v3 v3.2103.2 diff --git a/go.mod b/go.mod index 949feb743e..7afcfdc826 100644 --- a/go.mod +++ b/go.mod @@ -1,9 +1,9 @@ module github.com/dashpay/tenderdash -go 1.19 +go 1.22 require ( - github.com/BurntSushi/toml v1.2.0 + github.com/BurntSushi/toml v1.3.2 github.com/adlio/schema v1.3.3 github.com/btcsuite/btcd v0.22.1 github.com/btcsuite/btcutil v1.0.3-0.20201208143702-a53e38424cce @@ -16,152 +16,196 @@ require ( github.com/go-chi/chi v4.1.2+incompatible // indirect github.com/go-kit/kit v0.12.0 github.com/gogo/protobuf v1.3.2 - github.com/golang/protobuf v1.5.2 - github.com/golang/snappy v0.0.4 // indirect - github.com/golangci/golangci-lint v1.48.0 + github.com/golang/protobuf v1.5.4 + github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect + github.com/golangci/golangci-lint v1.55.2 github.com/google/btree v1.1.2 // indirect github.com/google/gopacket v1.1.19 github.com/google/orderedcode v0.0.1 - github.com/google/uuid v1.3.0 + github.com/google/uuid v1.6.0 github.com/gorilla/websocket v1.5.0 github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 - github.com/lib/pq v1.10.6 + github.com/lib/pq v1.10.9 github.com/libp2p/go-buffer-pool v0.1.0 github.com/mroth/weightedrand v0.4.1 github.com/oasisprotocol/curve25519-voi v0.0.0-20220708102147-0a8a51822cae github.com/ory/dockertest v3.3.5+incompatible github.com/prometheus/client_golang v1.13.0 - github.com/rs/cors v1.8.2 + github.com/rs/cors v1.10.1 github.com/rs/zerolog v1.29.0 github.com/snikch/goodman v0.0.0-20171125024755-10e37e294daa - github.com/spf13/cobra v1.6.1 + github.com/spf13/cobra v1.8.0 github.com/spf13/viper v1.15.0 github.com/stretchr/testify v1.9.0 github.com/tendermint/tm-db v0.6.6 - golang.org/x/crypto v0.1.0 - golang.org/x/net v0.8.0 - golang.org/x/sync v0.1.0 - google.golang.org/grpc v1.52.0 + golang.org/x/crypto v0.21.0 + golang.org/x/net v0.22.0 + golang.org/x/sync v0.6.0 + google.golang.org/grpc v1.63.0 pgregory.net/rapid v0.4.8 ) require ( - github.com/bufbuild/buf v1.7.0 + github.com/bufbuild/buf v1.30.1 github.com/creachadair/atomicfile v0.2.6 github.com/creachadair/taskgroup v0.3.2 github.com/go-pkgz/jrpc v0.2.0 - github.com/google/go-cmp v0.5.9 - github.com/vektra/mockery/v2 v2.33.2 - gotest.tools v2.2.0+incompatible + github.com/google/go-cmp v0.6.0 + github.com/vektra/mockery/v2 v2.41.0 ) require ( + 4d63.com/gocheckcompilerdirectives v1.2.1 // indirect + buf.build/gen/go/bufbuild/protovalidate/protocolbuffers/go v1.33.0-20240221180331-f05a6f4403ce.1 // indirect + connectrpc.com/connect v1.16.0 // indirect + connectrpc.com/otelconnect v0.7.0 // indirect + dario.cat/mergo v1.0.0 // indirect + github.com/4meepo/tagalign v1.3.3 // indirect + github.com/Abirdcfly/dupword v0.0.13 // indirect + github.com/Antonboom/testifylint v0.2.3 // indirect + github.com/GaijinEntertainment/go-exhaustruct/v3 v3.1.0 // indirect + 
github.com/OpenPeeDeeP/depguard/v2 v2.1.0 // indirect + github.com/alecthomas/go-check-sumtype v0.1.3 // indirect + github.com/alexkohler/nakedret/v2 v2.0.2 // indirect + github.com/antlr4-go/antlr/v4 v4.13.0 // indirect github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f // indirect github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd // indirect github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792 // indirect - github.com/bufbuild/connect-go v0.2.0 // indirect - github.com/chigopher/pathlib v0.15.0 // indirect - github.com/containerd/containerd v1.6.6 // indirect - github.com/containerd/typeurl v1.0.2 // indirect + github.com/bufbuild/protocompile v0.9.0 // indirect + github.com/bufbuild/protovalidate-go v0.6.0 // indirect + github.com/bufbuild/protoyaml-go v0.1.8 // indirect + github.com/butuzov/mirror v1.1.0 // indirect + github.com/catenacyber/perfsprint v0.2.0 // indirect + github.com/ccojocar/zxcvbn-go v1.0.1 // indirect + github.com/chigopher/pathlib v0.19.1 // indirect + github.com/containerd/stargz-snapshotter/estargz v0.15.1 // indirect + github.com/curioswitch/go-reassign v0.2.0 // indirect github.com/dashpay/dashd-go/btcutil v1.2.0 // indirect github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 // indirect github.com/didip/tollbooth/v6 v6.0.1 // indirect github.com/didip/tollbooth_chi v0.0.0-20200524181329-8b84cd7183d9 // indirect - github.com/docker/distribution v2.8.1+incompatible // indirect - github.com/docker/docker v20.10.17+incompatible // indirect - github.com/go-chi/chi/v5 v5.0.7 // indirect + github.com/distribution/reference v0.6.0 // indirect + github.com/docker/cli v26.0.0+incompatible // indirect + github.com/docker/distribution v2.8.3+incompatible // indirect + github.com/docker/docker v26.0.0+incompatible // indirect + github.com/docker/docker-credential-helpers v0.8.1 // indirect + github.com/felixge/fgprof v0.9.4 // indirect + github.com/felixge/httpsnoop v1.0.4 // indirect + github.com/ghostiam/protogetter v0.2.3 // indirect + github.com/go-chi/chi/v5 v5.0.12 // indirect github.com/go-chi/render v1.0.1 // indirect - github.com/go-logr/logr v1.2.3 // indirect + github.com/go-logr/logr v1.4.1 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-pkgz/expirable-cache v0.0.3 // indirect github.com/go-pkgz/rest v1.5.0 // indirect - github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b // indirect + github.com/gofrs/uuid/v5 v5.0.0 // indirect + github.com/golang/glog v1.2.0 // indirect + github.com/google/cel-go v0.20.1 // indirect + github.com/google/go-containerregistry v0.19.1 // indirect + github.com/google/gofuzz v1.2.0 // indirect + github.com/google/pprof v0.0.0-20240327155427-868f304927ed // indirect + github.com/huandu/xstrings v1.4.0 // indirect github.com/iancoleman/strcase v0.2.0 // indirect + github.com/jdx/go-netrc v1.0.0 // indirect github.com/jinzhu/copier v0.3.5 // indirect - github.com/moby/buildkit v0.10.3 // indirect - github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 // indirect + github.com/kkHAIKE/contextcheck v1.1.4 // indirect + github.com/macabu/inamedparam v0.1.2 // indirect + github.com/maratori/testableexamples v1.0.0 // indirect + github.com/moby/docker-image-spec v1.3.1 // indirect + github.com/moby/term v0.5.0 // indirect github.com/morikuni/aec v1.0.0 // indirect + github.com/nunnatsa/ginkgolinter v0.14.1 // indirect github.com/pelletier/go-toml/v2 v2.0.6 // indirect github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5 // indirect - 
github.com/sashamelentyev/usestdlibvars v1.8.0 // indirect + github.com/rivo/uniseg v0.2.0 // indirect + github.com/sashamelentyev/interfacebloat v1.1.0 // indirect + github.com/sashamelentyev/usestdlibvars v1.24.0 // indirect + github.com/stoewer/go-strcase v1.3.0 // indirect + github.com/t-yuki/gocover-cobertura v0.0.0-20180217150009-aaee18c8195c // indirect github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c // indirect github.com/tendermint/tendermint v0.34.21 // indirect - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.33.0 // indirect - go.opentelemetry.io/otel v1.8.0 // indirect - go.opentelemetry.io/otel/trace v1.8.0 // indirect - golang.org/x/time v0.1.0 // indirect - google.golang.org/genproto v0.0.0-20221227171554-f9683d7f8bef // indirect + github.com/timonwong/loggercheck v0.9.4 // indirect + github.com/vbatts/tar-split v0.11.5 // indirect + github.com/xen0n/gosmopolitan v1.2.2 // indirect + github.com/ykadowak/zerologlint v0.1.3 // indirect + go-simpler.org/sloglint v0.1.2 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 // indirect + go.opentelemetry.io/otel v1.24.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.4.1 // indirect + go.opentelemetry.io/otel/metric v1.24.0 // indirect + go.opentelemetry.io/otel/sdk v1.24.0 // indirect + go.opentelemetry.io/otel/trace v1.24.0 // indirect + go.opentelemetry.io/proto/otlp v0.12.0 // indirect + go.tmz.dev/musttag v0.7.2 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240325203815-454cdb8f5daa // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240325203815-454cdb8f5daa // indirect ) require ( - 4d63.com/gochecknoglobals v0.1.0 // indirect - github.com/Antonboom/errname v0.1.7 // indirect - github.com/Antonboom/nilnil v0.1.1 // indirect - github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect - github.com/DataDog/zstd v1.4.1 // indirect + 4d63.com/gochecknoglobals v0.2.1 // indirect + github.com/Antonboom/errname v0.1.12 // indirect + github.com/Antonboom/nilnil v0.1.7 // indirect + github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect + github.com/DataDog/zstd v1.4.5 // indirect github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24 // indirect - github.com/GaijinEntertainment/go-exhaustruct/v2 v2.2.2 // indirect github.com/Masterminds/semver v1.5.0 // indirect - github.com/Microsoft/go-winio v0.5.2 // indirect + github.com/Microsoft/go-winio v0.6.1 // indirect github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 // indirect - github.com/OpenPeeDeeP/depguard v1.1.0 // indirect github.com/alexkohler/prealloc v1.0.0 // indirect github.com/alingse/asasalint v0.0.11 // indirect - github.com/ashanbrown/forbidigo v1.3.0 // indirect + github.com/ashanbrown/forbidigo v1.6.0 // indirect github.com/ashanbrown/makezero v1.1.1 // indirect github.com/beorn7/perks v1.0.1 // indirect - github.com/bkielbasa/cyclop v1.2.0 // indirect + github.com/bkielbasa/cyclop v1.2.1 // indirect github.com/blizzy78/varnamelen v0.8.0 // indirect - github.com/bombsimon/wsl/v3 v3.3.0 // indirect - github.com/breml/bidichk v0.2.3 // indirect - github.com/breml/errchkjson v0.3.0 // indirect - github.com/butuzov/ireturn v0.1.1 // indirect + github.com/bombsimon/wsl/v3 v3.4.0 // indirect + github.com/breml/bidichk v0.2.7 // indirect + github.com/breml/errchkjson v0.3.6 // indirect + github.com/butuzov/ireturn v0.2.2 // indirect github.com/cenkalti/backoff v2.2.1+incompatible // indirect 
github.com/cespare/xxhash v1.1.0 // indirect - github.com/cespare/xxhash/v2 v2.1.2 // indirect - github.com/charithe/durationcheck v0.0.9 // indirect - github.com/chavacava/garif v0.0.0-20220316182200-5cad0b5181d4 // indirect - github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect - github.com/daixiang0/gci v0.6.2 // indirect + github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/charithe/durationcheck v0.0.10 // indirect + github.com/chavacava/garif v0.1.0 // indirect + github.com/cpuguy83/go-md2man/v2 v2.0.4 // indirect + github.com/daixiang0/gci v0.11.2 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/denis-tingaikin/go-header v0.4.3 // indirect github.com/dgraph-io/badger/v2 v2.2007.2 // indirect github.com/dgraph-io/ristretto v0.1.0 // indirect github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 // indirect - github.com/docker/go-connections v0.4.0 // indirect + github.com/docker/go-connections v0.5.0 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/dustin/go-humanize v1.0.0 // indirect github.com/esimonov/ifshort v1.0.4 // indirect github.com/ettle/strcase v0.1.1 // indirect - github.com/fatih/color v1.13.0 // indirect + github.com/fatih/color v1.15.0 // indirect github.com/fatih/structtag v1.2.0 // indirect github.com/firefart/nonamedreturns v1.0.4 // indirect github.com/fsnotify/fsnotify v1.6.0 // indirect github.com/fzipp/gocyclo v0.6.0 // indirect - github.com/go-critic/go-critic v0.6.3 // indirect - github.com/go-toolsmith/astcast v1.0.0 // indirect - github.com/go-toolsmith/astcopy v1.0.0 // indirect - github.com/go-toolsmith/astequal v1.0.1 // indirect - github.com/go-toolsmith/astfmt v1.0.0 // indirect - github.com/go-toolsmith/astp v1.0.0 // indirect - github.com/go-toolsmith/strparse v1.0.0 // indirect - github.com/go-toolsmith/typep v1.0.2 // indirect - github.com/go-xmlfmt/xmlfmt v0.0.0-20191208150333-d5b6f63a941b // indirect + github.com/go-critic/go-critic v0.9.0 // indirect + github.com/go-toolsmith/astcast v1.1.0 // indirect + github.com/go-toolsmith/astcopy v1.1.0 // indirect + github.com/go-toolsmith/astequal v1.1.0 // indirect + github.com/go-toolsmith/astfmt v1.1.0 // indirect + github.com/go-toolsmith/astp v1.1.0 // indirect + github.com/go-toolsmith/strparse v1.1.0 // indirect + github.com/go-toolsmith/typep v1.1.0 // indirect + github.com/go-xmlfmt/xmlfmt v1.1.2 // indirect github.com/gobwas/glob v0.2.3 // indirect github.com/gofrs/flock v0.8.1 // indirect - github.com/gofrs/uuid v4.2.0+incompatible // indirect - github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2 // indirect github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a // indirect github.com/golangci/go-misc v0.0.0-20220329215616-d24fe342adfe // indirect - github.com/golangci/gofmt v0.0.0-20190930125516-244bba706f1a // indirect + github.com/golangci/gofmt v0.0.0-20231018234816-f50ced29576e // indirect github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0 // indirect github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca // indirect - github.com/golangci/misspell v0.3.5 // indirect - github.com/golangci/revgrep v0.0.0-20220804021717-745bb2f7c2e6 // indirect + github.com/golangci/misspell v0.4.1 // indirect + github.com/golangci/revgrep v0.5.2 // indirect github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4 // indirect - github.com/gordonklaus/ineffassign v0.0.0-20210914165742-4cc7213b9bc8 // indirect + 
github.com/gordonklaus/ineffassign v0.0.0-20230610083614-0e73809eb601 // indirect github.com/gostaticanalysis/analysisutil v0.7.1 // indirect github.com/gostaticanalysis/comment v1.4.2 // indirect github.com/gostaticanalysis/forcetypeassert v0.1.0 // indirect @@ -172,70 +216,65 @@ require ( github.com/hashicorp/hcl v1.0.0 // indirect github.com/hexops/gotextdiff v1.0.3 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect - github.com/jdxcode/netrc v0.0.0-20210204082910-926c7f70242a // indirect - github.com/jgautheron/goconst v1.5.1 // indirect - github.com/jhump/protocompile v0.0.0-20220216033700-d705409f108f // indirect - github.com/jhump/protoreflect v1.12.1-0.20220721211354-060cc04fc18b // indirect + github.com/jgautheron/goconst v1.6.0 // indirect github.com/jingyugao/rowserrcheck v1.1.1 // indirect github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af // indirect github.com/jmhodges/levigo v1.0.0 // indirect github.com/julz/importas v0.1.0 // indirect - github.com/kisielk/errcheck v1.6.2 // indirect + github.com/kisielk/errcheck v1.6.3 // indirect github.com/kisielk/gotool v1.0.0 // indirect - github.com/klauspost/compress v1.15.10 // indirect - github.com/klauspost/pgzip v1.2.5 // indirect + github.com/klauspost/compress v1.17.7 // indirect + github.com/klauspost/pgzip v1.2.6 // indirect github.com/kulti/thelper v0.6.3 // indirect - github.com/kunwardeep/paralleltest v1.0.6 // indirect - github.com/kyoh86/exportloopref v0.1.8 // indirect + github.com/kunwardeep/paralleltest v1.0.8 // indirect + github.com/kyoh86/exportloopref v0.1.11 // indirect github.com/ldez/gomoddirectives v0.2.3 // indirect - github.com/ldez/tagliatelle v0.3.1 // indirect - github.com/leonklingele/grouper v1.1.0 // indirect + github.com/ldez/tagliatelle v0.5.0 // indirect + github.com/leonklingele/grouper v1.1.1 // indirect github.com/lufeee/execinquery v1.2.1 // indirect github.com/magiconair/properties v1.8.7 // indirect - github.com/maratori/testpackage v1.1.0 // indirect - github.com/matoous/godox v0.0.0-20210227103229-6504466cf951 // indirect + github.com/maratori/testpackage v1.1.1 // indirect + github.com/matoous/godox v0.0.0-20230222163458-006bad1f9d26 // indirect github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-isatty v0.0.17 // indirect - github.com/mattn/go-runewidth v0.0.9 // indirect + github.com/mattn/go-runewidth v0.0.13 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect github.com/mbilski/exhaustivestruct v1.2.0 // indirect - github.com/mgechev/revive v1.2.1 // indirect + github.com/mgechev/revive v1.3.4 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect - github.com/moricho/tparallel v0.2.1 // indirect + github.com/moricho/tparallel v0.3.1 // indirect github.com/nakabonne/nestif v0.3.1 // indirect - github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354 // indirect - github.com/nishanths/exhaustive v0.8.1 // indirect + github.com/nishanths/exhaustive v0.11.0 // indirect github.com/nishanths/predeclared v0.2.2 // indirect github.com/olekukonko/tablewriter v0.0.5 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect - github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799 // indirect + github.com/opencontainers/image-spec v1.1.0 // indirect github.com/opencontainers/runc v1.1.3 // indirect - github.com/phayes/checkstyle v0.0.0-20170904204023-bfd46e6a821d // indirect - 
github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // indirect + github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect github.com/pkg/errors v0.9.1 // indirect - github.com/pkg/profile v1.6.0 // indirect + github.com/pkg/profile v1.7.0 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/polyfloyd/go-errorlint v1.0.0 // indirect - github.com/prometheus/client_model v0.2.0 + github.com/polyfloyd/go-errorlint v1.4.5 // indirect + github.com/prometheus/client_model v0.2.1-0.20210607210712-147c58e9608a github.com/prometheus/common v0.37.0 github.com/prometheus/procfs v0.8.0 // indirect - github.com/quasilyte/go-ruleguard v0.3.16-0.20220213074421-6aa060fab41a // indirect - github.com/quasilyte/gogrep v0.0.0-20220120141003-628d8b3623b5 // indirect - github.com/quasilyte/regex/syntax v0.0.0-20200407221936-30656e2c4a95 // indirect + github.com/quasilyte/go-ruleguard v0.4.0 // indirect + github.com/quasilyte/gogrep v0.5.0 // indirect + github.com/quasilyte/regex/syntax v0.0.0-20210819130434-b3f0c404a727 // indirect github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect - github.com/ryancurrah/gomodguard v1.2.4 // indirect - github.com/ryanrolds/sqlclosecheck v0.3.0 // indirect - github.com/sanposhiho/wastedassign/v2 v2.0.6 // indirect - github.com/securego/gosec/v2 v2.12.0 // indirect + github.com/ryancurrah/gomodguard v1.3.0 // indirect + github.com/ryanrolds/sqlclosecheck v0.5.1 // indirect + github.com/sanposhiho/wastedassign/v2 v2.0.7 // indirect + github.com/securego/gosec/v2 v2.18.2 // indirect github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c // indirect - github.com/sirupsen/logrus v1.9.0 // indirect - github.com/sivchari/containedctx v1.0.2 // indirect + github.com/sirupsen/logrus v1.9.3 // indirect + github.com/sivchari/containedctx v1.0.3 // indirect github.com/sivchari/nosnakecase v1.7.0 // indirect - github.com/sivchari/tenv v1.7.0 // indirect - github.com/sonatard/noctx v0.0.1 // indirect - github.com/sourcegraph/go-diff v0.6.1 // indirect + github.com/sivchari/tenv v1.7.1 // indirect + github.com/sonatard/noctx v0.0.2 // indirect + github.com/sourcegraph/go-diff v0.7.0 // indirect github.com/spf13/afero v1.9.3 // indirect github.com/spf13/cast v1.5.0 // indirect github.com/spf13/jwalterweatherman v1.1.0 // indirect @@ -244,39 +283,37 @@ require ( github.com/stbenjam/no-sprintf-host-port v0.1.1 // indirect github.com/stretchr/objx v0.5.2 // indirect github.com/subosito/gotenv v1.4.2 // indirect - github.com/sylvia7788/contextcheck v1.0.4 // indirect - github.com/tdakkota/asciicheck v0.1.1 // indirect - github.com/tetafro/godot v1.4.11 // indirect - github.com/timakin/bodyclose v0.0.0-20210704033933-f49887972144 // indirect - github.com/tomarrell/wrapcheck/v2 v2.6.2 // indirect - github.com/tommy-muehle/go-mnd/v2 v2.5.0 // indirect - github.com/ultraware/funlen v0.0.3 // indirect + github.com/tdakkota/asciicheck v0.2.0 // indirect + github.com/tetafro/godot v1.4.15 // indirect + github.com/timakin/bodyclose v0.0.0-20230421092635-574207250966 // indirect + github.com/tomarrell/wrapcheck/v2 v2.8.1 // indirect + github.com/tommy-muehle/go-mnd/v2 v2.5.1 // indirect + github.com/ultraware/funlen v0.1.0 // indirect github.com/ultraware/whitespace v0.0.5 // indirect - github.com/uudashr/gocognit v1.0.6 // indirect + github.com/uudashr/gocognit v1.1.2 // indirect github.com/x448/float16 v0.8.4 // indirect github.com/yagipy/maintidx v1.0.0 // indirect 
github.com/yeya24/promlinter v0.2.0 // indirect - gitlab.com/bosi/decorder v0.2.3 // indirect + gitlab.com/bosi/decorder v0.4.1 // indirect go.etcd.io/bbolt v1.3.6 // indirect - go.opencensus.io v0.24.0 // indirect - go.uber.org/atomic v1.10.0 // indirect - go.uber.org/multierr v1.8.0 // indirect - go.uber.org/zap v1.23.0 // indirect - golang.org/x/exp/typeparams v0.0.0-20220613132600-b0d781184e0d // indirect - golang.org/x/mod v0.9.0 // indirect - golang.org/x/sys v0.6.0 // indirect - golang.org/x/term v0.6.0 - golang.org/x/text v0.8.0 // indirect - golang.org/x/tools v0.7.0 // indirect - google.golang.org/protobuf v1.28.1 // indirect + go.uber.org/atomic v1.11.0 // indirect + go.uber.org/multierr v1.11.0 // indirect + go.uber.org/zap v1.27.0 // indirect + golang.org/x/exp/typeparams v0.0.0-20230307190834-24139beb5833 // indirect + golang.org/x/mod v0.16.0 // indirect + golang.org/x/sys v0.18.0 // indirect + golang.org/x/term v0.18.0 + golang.org/x/text v0.14.0 // indirect + golang.org/x/tools v0.19.0 // indirect + google.golang.org/protobuf v1.33.0 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - honnef.co/go/tools v0.3.3 // indirect - mvdan.cc/gofumpt v0.3.1 // indirect + honnef.co/go/tools v0.4.6 // indirect + mvdan.cc/gofumpt v0.5.0 // indirect mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed // indirect mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b // indirect - mvdan.cc/unparam v0.0.0-20220706161116-678bad134442 // indirect + mvdan.cc/unparam v0.0.0-20221223090309-7455f1af531d // indirect ) require ( @@ -284,8 +321,9 @@ require ( github.com/jonboulle/clockwork v0.3.0 github.com/oasisprotocol/oasis-core/go v0.2202.5 github.com/sasha-s/go-deadlock v0.3.1 - github.com/syndtr/goleveldb v1.0.1-0.20200815110645-5c35d600f0ca + github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 github.com/tendermint/go-amino v0.16.0 github.com/tyler-smith/go-bip39 v1.1.0 - golang.org/x/exp v0.0.0-20230213192124-5e25df0256eb + golang.org/x/exp v0.0.0-20240325151524-a685a6edb6d8 + golang.org/x/time v0.5.0 ) diff --git a/go.sum b/go.sum index f8206f70cf..2139ab461e 100644 --- a/go.sum +++ b/go.sum @@ -1,6 +1,9 @@ -4d63.com/gochecknoglobals v0.1.0 h1:zeZSRqj5yCg28tCkIV/z/lWbwvNm5qnKVS15PI8nhD0= -4d63.com/gochecknoglobals v0.1.0/go.mod h1:wfdC5ZjKSPr7CybKEcgJhUOgeAQW1+7WcyK8OvUilfo= -bitbucket.org/creachadair/shell v0.0.6/go.mod h1:8Qqi/cYk7vPnsOePHroKXDJYmb5x7ENhtiFtfZq8K+M= +4d63.com/gocheckcompilerdirectives v1.2.1 h1:AHcMYuw56NPjq/2y615IGg2kYkBdTvOaojYCBcRE7MA= +4d63.com/gocheckcompilerdirectives v1.2.1/go.mod h1:yjDJSxmDTtIHHCqX0ufRYZDL6vQtMG7tJdKVeWwsqvs= +4d63.com/gochecknoglobals v0.2.1 h1:1eiorGsgHOFOuoOiJDy2psSrQbRdIHrlge0IJIkUgDc= +4d63.com/gochecknoglobals v0.2.1/go.mod h1:KRE8wtJB3CXCsb1xy421JfTHIIbmT3U5ruxw2Qu8fSU= +buf.build/gen/go/bufbuild/protovalidate/protocolbuffers/go v1.33.0-20240221180331-f05a6f4403ce.1 h1:0nWhrRcnkgw1kwJ7xibIO8bqfOA7pBzBjGCDBxIHch8= +buf.build/gen/go/bufbuild/protovalidate/protocolbuffers/go v1.33.0-20240221180331-f05a6f4403ce.1/go.mod h1:Tgn5bgL220vkFOI0KPStlcClPeOJzAv4uT+V8JXGUnw= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= @@ -15,113 +18,120 @@ cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6 cloud.google.com/go v0.54.0/go.mod 
h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= -cloud.google.com/go v0.60.0/go.mod h1:yw2G51M9IfRboUH61Us8GqCeF1PzPblB823Mn2q2eAU= cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= -cloud.google.com/go v0.105.0 h1:DNtEKRBAAzeS4KyIory52wWHuClNaXJ5x1F7xa4q+5Y= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/compute v1.14.0 h1:hfm2+FfxVmnRlh6LpB7cg1ZNU+5edAHmW679JePztk0= -cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= -cloud.google.com/go/pubsub v1.5.0/go.mod h1:ZEwJccE3z93Z2HWvstpri00jOg7oO4UZDtKhwDwqF0w= -cloud.google.com/go/spanner v1.7.0/go.mod h1:sd3K2gZ9Fd0vMPLXzeCrF6fq4i63Q7aTLW/lBIfBkIk= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= -contrib.go.opencensus.io/exporter/stackdriver v0.13.4/go.mod h1:aXENhDJ1Y4lIg4EUaVTwzvYETVNZk10Pu26tevFKLUc= +connectrpc.com/connect v1.16.0 h1:rdtfQjZ0OyFkWPTegBNcH7cwquGAN1WzyJy80oFNibg= +connectrpc.com/connect v1.16.0/go.mod h1:XpZAduBQUySsb4/KO5JffORVkDI4B6/EYPi7N8xpNZw= +connectrpc.com/otelconnect v0.7.0 h1:ZH55ZZtcJOTKWWLy3qmL4Pam4RzRWBJFOqTPyAqCXkY= +connectrpc.com/otelconnect v0.7.0/go.mod h1:Bt2ivBymHZHqxvo4HkJ0EwHuUzQN6k2l0oH+mp/8nwc= +dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk= +dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -github.com/Antonboom/errname v0.1.7 
h1:mBBDKvEYwPl4WFFNwec1CZO096G6vzK9vvDQzAwkako= -github.com/Antonboom/errname v0.1.7/go.mod h1:g0ONh16msHIPgJSGsecu1G/dcF2hlYR/0SddnIAGavU= -github.com/Antonboom/nilnil v0.1.1 h1:PHhrh5ANKFWRBh7TdYmyyq2gyT2lotnvFvvFbylF81Q= -github.com/Antonboom/nilnil v0.1.1/go.mod h1:L1jBqoWM7AOeTD+tSquifKSesRHs4ZdaxvZR+xdJEaI= -github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= -github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= +github.com/4meepo/tagalign v1.3.3 h1:ZsOxcwGD/jP4U/aw7qeWu58i7dwYemfy5Y+IF1ACoNw= +github.com/4meepo/tagalign v1.3.3/go.mod h1:Q9c1rYMZJc9dPRkbQPpcBNCLEmY2njbAsXhQOZFE2dE= +github.com/Abirdcfly/dupword v0.0.13 h1:SMS17YXypwP000fA7Lr+kfyBQyW14tTT+nRv9ASwUUo= +github.com/Abirdcfly/dupword v0.0.13/go.mod h1:Ut6Ue2KgF/kCOawpW4LnExT+xZLQviJPE4klBPMK/5Y= +github.com/Antonboom/errname v0.1.12 h1:oh9ak2zUtsLp5oaEd/erjB4GPu9w19NyoIskZClDcQY= +github.com/Antonboom/errname v0.1.12/go.mod h1:bK7todrzvlaZoQagP1orKzWXv59X/x0W0Io2XT1Ssro= +github.com/Antonboom/nilnil v0.1.7 h1:ofgL+BA7vlA1K2wNQOsHzLJ2Pw5B5DpWRLdDAVvvTow= +github.com/Antonboom/nilnil v0.1.7/go.mod h1:TP+ScQWVEq0eSIxqU8CbdT5DFWoHp0MbP+KMUO1BKYQ= +github.com/Antonboom/testifylint v0.2.3 h1:MFq9zyL+rIVpsvLX4vDPLojgN7qODzWsrnftNX2Qh60= +github.com/Antonboom/testifylint v0.2.3/go.mod h1:IYaXaOX9NbfAyO+Y04nfjGI8wDemC1rUyM/cYolz018= +github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0= +github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/toml v1.1.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= -github.com/BurntSushi/toml v1.2.0 h1:Rt8g24XnyGTyglgET/PRUNlrUeu9F5L+7FilkXfZgs0= -github.com/BurntSushi/toml v1.2.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= +github.com/BurntSushi/toml v1.3.2 h1:o7IhLm0Msx3BaB+n3Ag7L8EVlByGnpq14C4YWiu/gL8= +github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/DATA-DOG/go-sqlmock v1.5.0 h1:Shsta01QNfFxHCfpW6YH2STWB0MudeXXEWMr20OEh60= -github.com/DataDog/zstd v1.4.1 h1:3oxKN3wbHibqx897utPC2LTQU4J+IHWWJO+glkAkpFM= +github.com/DATA-DOG/go-sqlmock v1.5.0/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= github.com/DataDog/zstd v1.4.1/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= +github.com/DataDog/zstd v1.4.5 h1:EndNeuB0l9syBZhut0wns3gV1hL8zX8LIu6ZiVHWLIQ= +github.com/DataDog/zstd v1.4.5/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24 h1:sHglBQTwgx+rWPdisA5ynNEsoARbiCBOyGcJM4/OzsM= github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24/go.mod h1:4UJr5HIiMZrwgkSPdsjy2uOQExX/WEILpIrO9UPGuXs= -github.com/GaijinEntertainment/go-exhaustruct/v2 v2.2.2 h1:DGdS4FlsdM6OkluXOhgkvwx05ZjD3Idm9WqtYnOmSuY= -github.com/GaijinEntertainment/go-exhaustruct/v2 v2.2.2/go.mod h1:xj0D2jwLdp6tOKLheyZCsfL0nz8DaicmJxSwj3VcHtY= -github.com/Masterminds/goutils v1.1.0/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= -github.com/Masterminds/semver v1.4.2/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= +github.com/GaijinEntertainment/go-exhaustruct/v3 v3.1.0 
h1:3ZBs7LAezy8gh0uECsA6CGU43FF3zsx5f4eah5FxTMA= +github.com/GaijinEntertainment/go-exhaustruct/v3 v3.1.0/go.mod h1:rZLTje5A9kFBe0pzhpe2TdhRniBF++PRHQuRpR8esVc= github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww= github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= -github.com/Masterminds/sprig v2.15.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o= -github.com/Masterminds/sprig v2.22.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o= -github.com/Microsoft/go-winio v0.5.2 h1:a9IhgEQBCUEk6QCdml9CiJGhAws+YwffDHEMp1VMrpA= -github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY= +github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= +github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 h1:TngWCqHvy9oXAN6lEVMRuU21PR1EtLVZJmdB18Gu3Rw= github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk= github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/OpenPeeDeeP/depguard v1.1.0 h1:pjK9nLPS1FwQYGGpPxoMYpe7qACHOhAWQMQzV71i49o= -github.com/OpenPeeDeeP/depguard v1.1.0/go.mod h1:JtAMzWkmFEzDPyAd+W0NHl1lvpQKTvT9jnRVsohBKpc= +github.com/OpenPeeDeeP/depguard/v2 v2.1.0 h1:aQl70G173h/GZYhWf36aE5H0KaujXfVMnn/f1kSDVYY= +github.com/OpenPeeDeeP/depguard/v2 v2.1.0/go.mod h1:PUBgk35fX4i7JDmwzlJwJ+GMe6NfO1723wmJMgPThNQ= github.com/VividCortex/gohistogram v1.0.0 h1:6+hBz+qvs0JOrrNhhmR7lFxo5sINxBCGXrdtl/UvroE= +github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= github.com/adlio/schema v1.3.3 h1:oBJn8I02PyTB466pZO1UZEn1TV5XLlifBSyMrmHl/1I= github.com/adlio/schema v1.3.3/go.mod h1:1EsRssiv9/Ce2CMzq5DoL7RiMshhuigQxrR4DMV9fHg= github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII= +github.com/alecthomas/assert/v2 v2.2.2 h1:Z/iVC0xZfWTaFNE6bA3z07T86hd45Xe2eLt6WVy2bbk= +github.com/alecthomas/assert/v2 v2.2.2/go.mod h1:pXcQ2Asjp247dahGEmsZ6ru0UVwnkhktn7S0bBDLxvQ= +github.com/alecthomas/go-check-sumtype v0.1.3 h1:M+tqMxB68hcgccRXBMVCPI4UJ+QUfdSx0xdbypKCqA8= +github.com/alecthomas/go-check-sumtype v0.1.3/go.mod h1:WyYPfhfkdhyrdaligV6svFopZV8Lqdzn5pyVBaV6jhQ= +github.com/alecthomas/repr v0.2.0 h1:HAzS41CIzNW5syS8Mf9UwXhNH1J9aix/BvDRf1Ml2Yk= +github.com/alecthomas/repr v0.2.0/go.mod h1:Fr0507jx4eOXV7AlPV6AVZLYrLIuIeSOWtW57eE/O/4= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= +github.com/alexkohler/nakedret/v2 v2.0.2 h1:qnXuZNvv3/AxkAb22q/sEsEpcA99YxLFACDtEw9TPxE= +github.com/alexkohler/nakedret/v2 v2.0.2/go.mod h1:2b8Gkk0GsOrqQv/gPWjNLDSKwG8I5moSXG1K4VIBcTQ= github.com/alexkohler/prealloc v1.0.0 h1:Hbq0/3fJPQhNkN0dR95AVrr6R7tou91y0uHG5pOcUuw= 
github.com/alexkohler/prealloc v1.0.0/go.mod h1:VetnK3dIgFBBKmg0YnD9F9x6Icjd+9cvfHR56wJVlKE= github.com/alingse/asasalint v0.0.11 h1:SFwnQXJ49Kx/1GghOFz1XGqHYKp21Kq1nHad/0WQRnw= github.com/alingse/asasalint v0.0.11/go.mod h1:nCaoMhw7a9kSJObvQyVzNTPBDbNpdocqrSP7t/cW5+I= -github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= -github.com/aokoli/goutils v1.0.1/go.mod h1:SijmP0QR8LtwsmDs8Yii5Z/S4trXFGFC2oO5g9DP+DQ= +github.com/antlr4-go/antlr/v4 v4.13.0 h1:lxCg3LAv+EUK6t1i0y1V6/SLeUi0eKEKdhQAlS8TVTI= +github.com/antlr4-go/antlr/v4 v4.13.0/go.mod h1:pfChB/xh/Unjila75QW7+VU4TSnWnnk9UTnmpPaOR2g= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= -github.com/ashanbrown/forbidigo v1.3.0 h1:VkYIwb/xxdireGAdJNZoo24O4lmnEWkactplBlWTShc= -github.com/ashanbrown/forbidigo v1.3.0/go.mod h1:vVW7PEdqEFqapJe95xHkTfB1+XvZXBFg8t0sG2FIxmI= +github.com/ashanbrown/forbidigo v1.6.0 h1:D3aewfM37Yb3pxHujIPSpTf6oQk9sc9WZi8gerOIVIY= +github.com/ashanbrown/forbidigo v1.6.0/go.mod h1:Y8j9jy9ZYAEHXdu723cUlraTqbzjKF1MUyfOKL+AjcU= github.com/ashanbrown/makezero v1.1.1 h1:iCQ87C0V0vSyO+M9E/FZYbu65auqH0lnsOkf5FcB28s= github.com/ashanbrown/makezero v1.1.1/go.mod h1:i1bJLCRSCHOcOa9Y6MyF2FTfMZMFdHvxKHxgO5Z1axI= -github.com/aws/aws-sdk-go v1.23.20/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go v1.25.37/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go v1.36.30/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= -github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/bkielbasa/cyclop v1.2.0 h1:7Jmnh0yL2DjKfw28p86YTd/B4lRGcNuu12sKE35sM7A= -github.com/bkielbasa/cyclop v1.2.0/go.mod h1:qOI0yy6A7dYC4Zgsa72Ppm9kONl0RoIlPbzot9mhmeI= +github.com/bkielbasa/cyclop v1.2.1 h1:AeF71HZDob1P2/pRm1so9cd1alZnrpyc4q2uP2l0gJY= +github.com/bkielbasa/cyclop v1.2.1/go.mod h1:K/dT/M0FPAiYjBgQGau7tz+3TMh4FWAEqlMhzFWCrgM= github.com/blizzy78/varnamelen v0.8.0 h1:oqSblyuQvFsW1hbBHh1zfwrKe3kcSj0rnXkKzsQ089M= github.com/blizzy78/varnamelen v0.8.0/go.mod h1:V9TzQZ4fLJ1DSrjVDfl89H7aMnTvKkApdHeyESmyR7k= -github.com/bombsimon/wsl/v3 v3.3.0 h1:Mka/+kRLoQJq7g2rggtgQsjuI/K5Efd87WX96EWFxjM= -github.com/bombsimon/wsl/v3 v3.3.0/go.mod h1:st10JtZYLE4D5sC7b8xV4zTKZwAQjCH/Hy2Pm1FNZIc= -github.com/breml/bidichk v0.2.3 h1:qe6ggxpTfA8E75hdjWPZ581sY3a2lnl0IRxLQFelECI= -github.com/breml/bidichk v0.2.3/go.mod h1:8u2C6DnAy0g2cEq+k/A2+tr9O1s+vHGxWn0LTc70T2A= -github.com/breml/errchkjson v0.3.0 h1:YdDqhfqMT+I1vIxPSas44P+9Z9HzJwCeAzjB8PxP1xw= -github.com/breml/errchkjson v0.3.0/go.mod h1:9Cogkyv9gcT8HREpzi3TiqBxCqDzo8awa92zSDFcofU= +github.com/bombsimon/wsl/v3 v3.4.0 h1:RkSxjT3tmlptwfgEgTgU+KYKLI35p/tviNXNXiL2aNU= +github.com/bombsimon/wsl/v3 v3.4.0/go.mod h1:KkIB+TXkqy6MvK9BDZVbZxKNYsE1/oLRJbIFtf14qqo= +github.com/breml/bidichk v0.2.7 h1:dAkKQPLl/Qrk7hnP6P+E0xOodrq8Us7+U0o4UBOAlQY= 
+github.com/breml/bidichk v0.2.7/go.mod h1:YodjipAGI9fGcYM7II6wFvGhdMYsC5pHDlGzqvEW3tQ= +github.com/breml/errchkjson v0.3.6 h1:VLhVkqSBH96AvXEyclMR37rZslRrY2kcyq+31HCsVrA= +github.com/breml/errchkjson v0.3.6/go.mod h1:jhSDoFheAF2RSDOlCfhHO9KqhZgAYLyvHe7bRCX8f/U= github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ= github.com/btcsuite/btcd v0.22.1 h1:CnwP9LM/M9xuRrGSCGeMVs9iv09uMqwsVX7EeIpgV2c= github.com/btcsuite/btcd v0.22.1/go.mod h1:wqgTSL29+50LRkmOVknEdmt8ZojIzhuWvgu/iptuN7Y= github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1 h1:q0rUy8C/TYNBQS1+CGKw68tLOFYSNEs0TFnxxnS9+4U= +github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc= github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f h1:bAs4lUbRJpnnkd9VhRV3jjAVU7DJVjMaK+IsvSeZvFo= github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA= github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg= @@ -134,31 +144,50 @@ github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792 h1:R8vQdOQdZ9Y3SkEwmHoWBmX1DNXhXZqlTpq6s4tyJGc= github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY= github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs= -github.com/bufbuild/buf v1.7.0 h1:uWRjhIXcrWkzIkA5TqXGyJbF51VW54QJsQZ3nwaes5Q= -github.com/bufbuild/buf v1.7.0/go.mod h1:Go40fMAF46PnPLC7jJgTQhAI95pmC0+VtxFKVC0qLq0= -github.com/bufbuild/connect-go v0.2.0 h1:WuMI/jLiJIhysHWvLWlxRozV67mGjCOUuDSl/lkDVic= -github.com/bufbuild/connect-go v0.2.0/go.mod h1:4efZ2eXFENwd4p7tuLaL9m0qtTsCOzuBvrohvRGevDM= -github.com/butuzov/ireturn v0.1.1 h1:QvrO2QF2+/Cx1WA/vETCIYBKtRjc30vesdoPUNo1EbY= -github.com/butuzov/ireturn v0.1.1/go.mod h1:Wh6Zl3IMtTpaIKbmwzqi6olnM9ptYQxxVacMsOEFPoc= +github.com/bufbuild/buf v1.30.1 h1:QFtanwsXodoGFAwzXFXGXpzBkb7N2u8ZDyA3jWB4Pbs= +github.com/bufbuild/buf v1.30.1/go.mod h1:7W8DJnj76wQa55EA3z2CmDxS0/nsHh8FqtE00dyDAdA= +github.com/bufbuild/protocompile v0.9.0 h1:DI8qLG5PEO0Mu1Oj51YFPqtx6I3qYXUAhJVJ/IzAVl0= +github.com/bufbuild/protocompile v0.9.0/go.mod h1:s89m1O8CqSYpyE/YaSGtg1r1YFMF5nLTwh4vlj6O444= +github.com/bufbuild/protovalidate-go v0.6.0 h1:Jgs1kFuZ2LHvvdj8SpCLA1W/+pXS8QSM3F/E2l3InPY= +github.com/bufbuild/protovalidate-go v0.6.0/go.mod h1:1LamgoYHZ2NdIQH0XGczGTc6Z8YrTHjcJVmiBaar4t4= +github.com/bufbuild/protoyaml-go v0.1.8 h1:X9QDLfl9uEllh4gsXUGqPanZYCOKzd92uniRtW2OnAQ= +github.com/bufbuild/protoyaml-go v0.1.8/go.mod h1:R8vE2+l49bSiIExP4VJpxOXleHE+FDzZ6HVxr3cYunw= +github.com/butuzov/ireturn v0.2.2 h1:jWI36dxXwVrI+RnXDwux2IZOewpmfv930OuIRfaBUJ0= +github.com/butuzov/ireturn v0.2.2/go.mod h1:RfGHUvvAuFFxoHKf4Z8Yxuh6OjlCw1KvR2zM1NFHeBk= +github.com/butuzov/mirror v1.1.0 h1:ZqX54gBVMXu78QLoiqdwpl2mgmoOJTk7s4p4o+0avZI= +github.com/butuzov/mirror v1.1.0/go.mod h1:8Q0BdQU6rC6WILDiBM60DBfvV78OLJmMmixe7GF45AE= +github.com/catenacyber/perfsprint v0.2.0 h1:azOocHLscPjqXVJ7Mf14Zjlkn4uNua0+Hcg1wTR6vUo= +github.com/catenacyber/perfsprint v0.2.0/go.mod h1:/wclWYompEyjUD2FuIIDVKNkqz7IgBIWXIH3V0Zol50= +github.com/ccojocar/zxcvbn-go v1.0.1 h1:+sxrANSCj6CdadkcMnvde/GWU1vZiiXRbqYSCalV4/4= +github.com/ccojocar/zxcvbn-go v1.0.1/go.mod h1:g1qkXtUSvHP8lhHp5GrSmTz6uWALGRMQdw6Qnz/hi60= github.com/cenkalti/backoff v2.2.1+incompatible 
h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= +github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= +github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/charithe/durationcheck v0.0.9 h1:mPP4ucLrf/rKZiIG/a9IPXHGlh8p4CzgpyTy6EEutYk= -github.com/charithe/durationcheck v0.0.9/go.mod h1:SSbRIBVfMjCi/kEB6K65XEA83D6prSM8ap1UCpNKtgg= -github.com/chavacava/garif v0.0.0-20220316182200-5cad0b5181d4 h1:tFXjAxje9thrTF4h57Ckik+scJjTWdwAtZqZPtOT48M= -github.com/chavacava/garif v0.0.0-20220316182200-5cad0b5181d4/go.mod h1:W8EnPSQ8Nv4fUjc/v1/8tHFqhuOJXnRub0dTfuAQktU= +github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= +github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/charithe/durationcheck v0.0.10 h1:wgw73BiocdBDQPik+zcEoBG/ob8uyBHf2iyoHGPf5w4= +github.com/charithe/durationcheck v0.0.10/go.mod h1:bCWXb7gYRysD1CU3C+u4ceO49LoGOY1C1L6uouGNreQ= +github.com/chavacava/garif v0.1.0 h1:2JHa3hbYf5D9dsgseMKAmc/MZ109otzgNFk5s87H9Pc= +github.com/chavacava/garif v0.1.0/go.mod h1:XMyYCkEL58DF0oyW4qDjjnPWONs2HBqYKI+UIPD+Gww= github.com/checkpoint-restore/go-criu/v5 v5.3.0/go.mod h1:E/eQpaFtUKGOOSEBZgmKAcn+zUUwWxqcaKZlF54wK8E= -github.com/chigopher/pathlib v0.15.0 h1:1pg96WL3iC1/YyWV4UJSl3E0GBf4B+h5amBtsbAAieY= -github.com/chigopher/pathlib v0.15.0/go.mod h1:3+YPPV21mU9vyw8Mjp+F33CyCfE6iOzinpiqBcccv7I= +github.com/chigopher/pathlib v0.19.1 h1:RoLlUJc0CqBGwq239cilyhxPNLXTK+HXoASGyGznx5A= +github.com/chigopher/pathlib v0.19.1/go.mod h1:tzC1dZLW8o33UQpWkNkhvPwL5n4yyFRFm/jL1YGWFvY= +github.com/chromedp/cdproto v0.0.0-20230802225258-3cf4e6d46a89/go.mod h1:GKljq0VrfU4D5yc+2qA6OVr8pmO/MBbPEWqWQ/oqGEs= +github.com/chromedp/chromedp v0.9.2/go.mod h1:LkSXJKONWTCHAfQasKFUZI+mxqS4tZqhmtGzzhLsnLs= +github.com/chromedp/sysutil v1.0.0/go.mod h1:kgWmDdq8fTzXYcKIBqIYvRRTnYb9aNS9moAV0xufSww= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/logex v1.2.1/go.mod h1:JLbx6lG2kDbNRFnfkgvh4eRJRPX1QCoOIWomwysCBrQ= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/readline v1.5.1/go.mod h1:Eh+b79XXUwfKfcPLepksvw2tcLE/Ct21YObkaSkeBlk= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/chzyer/test v1.0.0/go.mod h1:2JlltgoNkt4TW/z9V/IzDdFaMTM2JPIi26O1pF38GC8= github.com/cilium/ebpf v0.7.0/go.mod h1:/oI2+1shJiTGAMgl6/RgJr36Eo1jzrRcAWbcXO2usCA= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod 
h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= @@ -168,42 +197,36 @@ github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XP github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= github.com/containerd/console v1.0.3/go.mod h1:7LqA/THxQ86k76b8c/EMSiaJ3h1eZkMkXar0TQ1gf3U= -github.com/containerd/containerd v1.6.6 h1:xJNPhbrmz8xAMDNoVjHy9YHtWwEQNS+CDkcIRh7t8Y0= -github.com/containerd/containerd v1.6.6/go.mod h1:ZoP1geJldzCVY3Tonoz7b1IXk8rIX0Nltt5QE4OMNk0= github.com/containerd/continuity v0.3.0 h1:nisirsYROK15TAMVukJOUyGJjz4BNQJBVsNvAXZJ/eg= github.com/containerd/continuity v0.3.0/go.mod h1:wJEAIwKOm/pBZuBd0JmeTvnLquTB1Ag8espWhkykbPM= -github.com/containerd/typeurl v1.0.2 h1:Chlt8zIieDbzQFzXzAeBEF92KhExuE4p9p92/QmY7aY= -github.com/containerd/typeurl v1.0.2/go.mod h1:9trJWW2sRlGub4wZJRTW83VtbOLS6hwcDZXTn6oPz9s= +github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= +github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= +github.com/containerd/stargz-snapshotter/estargz v0.15.1 h1:eXJjw9RbkLFgioVaTG+G/ZW/0kEe2oEKCdS/ZxIyoCU= +github.com/containerd/stargz-snapshotter/estargz v0.15.1/go.mod h1:gr2RNwukQ/S9Nv33Lt6UC7xEx58C+LHRdoqbEKjz1Kk= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd v0.0.0-20190620071333-e64a0ec8b42a/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/coreos/go-systemd/v22 v22.3.3-0.20220203105225-a9a7ef127534/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= -github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= -github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= -github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w= -github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/cpuguy83/go-md2man/v2 v2.0.4 h1:wfIWP927BUkWJb2NmU/kNDYIBTh/ziUX91+lVfRxZq4= +github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creachadair/atomicfile v0.2.6 
h1:FgYxYvGcqREApTY8Nxg8msM6P/KVKK3ob5h9FaRUTNg= github.com/creachadair/atomicfile v0.2.6/go.mod h1:BRq8Une6ckFneYXZQ+kO7p1ZZP3I2fzVzf28JxrIkBc= github.com/creachadair/taskgroup v0.3.2 h1:zlfutDS+5XG40AOxcHDSThxKzns8Tnr9jnr6VqkYlkM= github.com/creachadair/taskgroup v0.3.2/go.mod h1:wieWwecHVzsidg2CsUnFinW1faVN4+kq+TDlRJQ0Wbk= github.com/creachadair/tomledit v0.0.23 h1:ohYJjMsxwzj4dDzKaBWFbWH5J+3LO/8CYnlVY+baBWA= github.com/creachadair/tomledit v0.0.23/go.mod h1:cIu/4x5L855oSRejIqr+WRFh+mv9g4fWLiUFaApYn/Y= -github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= -github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/creack/pty v1.1.11 h1:07n33Z8lZxZ2qwegKbObQohDhXDQxiMMz1NOUGYlesw= -github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= +github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= +github.com/curioswitch/go-reassign v0.2.0 h1:G9UZyOcpk/d7Gd6mqYgd8XYWFMw/znxwGDUstnC9DIo= +github.com/curioswitch/go-reassign v0.2.0/go.mod h1:x6OpXuWvgfQaMGks2BZybTngWjT84hqJfKoO8Tt/Roc= github.com/cyphar/filepath-securejoin v0.2.3/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= -github.com/daixiang0/gci v0.6.2 h1:TXCP5RqjE/UupXO+p33MEhqdv7QxjKGw5MVkt9ATiMs= -github.com/daixiang0/gci v0.6.2/go.mod h1:EpVfrztufwVgQRXjnX4zuNinEpLj5OmMjtu/+MB0V0c= +github.com/daixiang0/gci v0.11.2 h1:Oji+oPsp3bQ6bNNgX30NBAVT18P4uBH4sRZnlOlTj7Y= +github.com/daixiang0/gci v0.11.2/go.mod h1:xtHP9N7AHdNvtRNfcx9gwTDfw7FRJx4bZUsiEfiNNAI= github.com/dashpay/bls-signatures/go-bindings v0.0.0-20230207105415-06df92693ac8 h1:v4K3CiDoFY1gjcWL/scRcwzyjBwh8TVG3ek8cWolK1g= github.com/dashpay/bls-signatures/go-bindings v0.0.0-20230207105415-06df92693ac8/go.mod h1:auvGS60NBZ+a21aCCQh366PdsjDvHinsCvl28VrYPu4= github.com/dashpay/dashd-go v0.24.1 h1:w+F5pDt+fqud4QQM/O9sAJihbQ3oswO8DKOmDS/pcNw= @@ -212,40 +235,44 @@ github.com/dashpay/dashd-go/btcec/v2 v2.1.0 h1:fXwlLf5H+TtgHxjGMU74NesKzk6NisjKM github.com/dashpay/dashd-go/btcec/v2 v2.1.0/go.mod h1:1i8XtxdOmvK6mYEUCneVXTzFbrCUw3wq1u91j8gvsns= github.com/dashpay/dashd-go/btcutil v1.2.0 h1:YMq7L0V0au5bbphIhpsBBc+nfOZqU+gJ4pkgRZB7Eiw= github.com/dashpay/dashd-go/btcutil v1.2.0/go.mod h1:7UHoqUh3LY3OI4mEcogx0CnL3rtzDQyoqvsOCZZtvzE= -github.com/davecgh/go-spew v0.0.0-20161028175848-04cdfd42973b/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/decred/dcrd/crypto/blake256 v1.0.0 h1:/8DMNYp9SGi5f0w7uCm6d6M4OU2rGFK09Y2A4Xv7EE0= +github.com/decred/dcrd/crypto/blake256 v1.0.0/go.mod h1:sQl2p6Y26YV+ZOcSTP6thNdn47hh8kt6rqSlvmrXFAc= github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 h1:HbphB4TFFXpv7MNrT52FGrrgVXF1owhMVTHFZIlnvd4= github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0/go.mod h1:DZGJHZMqrU4JJqFAWUS2UO1+lbSKsdiOoYi9Zzey7Fc= github.com/denis-tingaikin/go-header v0.4.3 h1:tEaZKAlqql6SKCY++utLmkPLd6K8IBM20Ha7UVm+mtU= github.com/denis-tingaikin/go-header v0.4.3/go.mod h1:0wOCWuN71D5qIgE2nz9KrKmuYBAC2Mra5RassOIQ2/c= github.com/denisenkom/go-mssqldb v0.12.0 h1:VtrkII767ttSPNRfFekePK3sctr+joXgO58stqQbtUA= 
+github.com/denisenkom/go-mssqldb v0.12.0/go.mod h1:iiK0YP1ZeepvmBQk/QpLEhhTNJgfzrpArPY/aFvc9yU= github.com/dgraph-io/badger/v2 v2.2007.2 h1:EjjK0KqwaFMlPin1ajhP943VPENHJdEz1KLIegjaI3k= github.com/dgraph-io/badger/v2 v2.2007.2/go.mod h1:26P/7fbL4kUZVEVKLAKXkBXKOydDmM2p1e+NhhnBCAE= github.com/dgraph-io/ristretto v0.0.3-0.20200630154024-f66de99634de/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= github.com/dgraph-io/ristretto v0.1.0 h1:Jv3CGQHp9OjuMBSne1485aDpUkTKEcUqF+jm/LuerPI= github.com/dgraph-io/ristretto v0.1.0/go.mod h1:fux0lOrBhrVCJd3lcTHsIJhq1T2rokOu6v9Vcb3Q9ug= -github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 h1:tdlZCpZ/P9DhczCTSixgIKmwPv6+wP5DGjqLYw5SUiA= github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= github.com/didip/tollbooth/v6 v6.0.1 h1:QvLvRpB1G2bzKvkRze0muMUBlGN9H1z7tJ4DH4ypWOU= github.com/didip/tollbooth/v6 v6.0.1/go.mod h1:j2pKs+JQ5PvU/K4jFnrnwntrmfUbYLJE5oSdxR37FD0= github.com/didip/tollbooth_chi v0.0.0-20200524181329-8b84cd7183d9 h1:gTh8fKuI/yLqQtZEPlDX3ZGsiTPZIe0ADHsxXSbwO1I= github.com/didip/tollbooth_chi v0.0.0-20200524181329-8b84cd7183d9/go.mod h1:YWyIfq3y4ArRfWZ9XksmuusP+7Mad+T0iFZ0kv0XG/M= -github.com/docker/cli v20.10.17+incompatible h1:eO2KS7ZFeov5UJeaDmIs1NFEDRf32PaqRpvoEkKBy5M= -github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68= -github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/docker v20.10.17+incompatible h1:JYCuMrWaVNophQTOrMMoSwudOVEfcegoZZrleKc1xwE= -github.com/docker/docker v20.10.17+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= -github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= +github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= +github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= +github.com/docker/cli v26.0.0+incompatible h1:90BKrx1a1HKYpSnnBFR6AgDq/FqkHxwlUyzJVPxD30I= +github.com/docker/cli v26.0.0+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk= +github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/docker v26.0.0+incompatible h1:Ng2qi+gdKADUa/VM+6b6YaY2nlZhk/lVJiKR/2bMudU= +github.com/docker/docker v26.0.0+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker-credential-helpers v0.8.1 h1:j/eKUktUltBtMzKqmfLB0PAgqYyMHOp5vfsD1807oKo= +github.com/docker/docker-credential-helpers v0.8.1/go.mod h1:P3ci7E3lwkZg6XiHdRKft1KckHiO9a2rNtyFbZ/ry9M= +github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= +github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= -github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod 
h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= @@ -253,48 +280,56 @@ github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.m github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= -github.com/envoyproxy/protoc-gen-validate v0.0.14/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/envoyproxy/protoc-gen-validate v1.0.4 h1:gVPz/FMfvh57HdSJQyvBtF00j8JU4zdyUgIUNhlgg0A= +github.com/envoyproxy/protoc-gen-validate v1.0.4/go.mod h1:qys6tmnRsYrQqIhm2bvKZH4Blx/1gTIZ2UKVY1M+Yew= github.com/esimonov/ifshort v1.0.4 h1:6SID4yGWfRae/M7hkVDVVyppy8q/v9OuxNdmjLQStBA= github.com/esimonov/ifshort v1.0.4/go.mod h1:Pe8zjlRrJ80+q2CxHLfEOfTwxCZ4O+MuhcHcfgNWTk0= github.com/ettle/strcase v0.1.1 h1:htFueZyVeE1XNnMEfbqp5r67qAN/4r6ya1ysq8Q+Zcw= github.com/ettle/strcase v0.1.1/go.mod h1:hzDLsPC7/lwKyBOywSHEP89nt2pDgdy+No1NBA9o9VY= github.com/facebookgo/ensure v0.0.0-20200202191622-63f1cf65ac4c h1:8ISkoahWXwZR41ois5lSJBSVw4D0OV19Ht/JSTzvSv0= +github.com/facebookgo/ensure v0.0.0-20200202191622-63f1cf65ac4c/go.mod h1:Yg+htXGokKKdzcwhuNDwVvN+uBxDGXJ7G/VN1d8fa64= github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 h1:JWuenKqqX8nojtoVVWjGfOF9635RETekkoH6Cc9SX0A= +github.com/facebookgo/stack v0.0.0-20160209184415-751773369052/go.mod h1:UbMTZqLaRiH3MsBH8va0n7s1pQYcu3uTb8G4tygF4Zg= github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4 h1:7HZCaLC5+BZpmbhCOZJ293Lz68O7PYrF2EzeiFMwCLk= -github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/fatih/color v1.10.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM= -github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= -github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= +github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4/go.mod h1:5tD+neXqOorC30/tWg0LCSkrqj/AR6gu8yY8/fpw1q0= +github.com/fatih/color v1.15.0 h1:kOqh6YHBtK8aywxGerMG2Eq3H6Qgoqeo13Bk2Mv/nBs= +github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw= github.com/fatih/structtag v1.2.0 h1:/OdNE99OxoI/PqaW/SuSK9uxxT3f/tcSZgon/ssNSx4= github.com/fatih/structtag v1.2.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94= +github.com/felixge/fgprof v0.9.3/go.mod h1:RdbpDgzqYVh/T9fPELJyV7EYJuHB55UTEULNun8eiPw= +github.com/felixge/fgprof v0.9.4 h1:ocDNwMFlnA0NU0zSB3I52xkO4sFXk80VK9lXjLClu88= +github.com/felixge/fgprof v0.9.4/go.mod h1:yKl+ERSa++RYOs32d8K6WEXCB4uXdLls4ZaZPpayhMM= +github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= +github.com/felixge/httpsnoop v1.0.4/go.mod 
h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/firefart/nonamedreturns v1.0.4 h1:abzI1p7mAEPYuR4A+VLKn4eNDOycjYo2phmY9sfv40Y= github.com/firefart/nonamedreturns v1.0.4/go.mod h1:TDhe/tjI1BXo48CmYbUduTV7BdIga8MAO/xbKdcVsGI= github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= github.com/frankban/quicktest v1.14.4 h1:g2rn0vABPOOXmZUj+vbmUp0lPoXEMuhTpIluN0XL9UY= +github.com/frankban/quicktest v1.14.4/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= -github.com/fullstorydev/grpcurl v1.6.0/go.mod h1:ZQ+ayqbKMJNhzLmbpCiurTVlaK2M/3nqZCxaQ2Ze/sM= github.com/fxamacker/cbor/v2 v2.4.0 h1:ri0ArlOR+5XunOP8CRUowT0pSJOwhW098ZCUyskZD88= github.com/fxamacker/cbor/v2 v2.4.0/go.mod h1:TA1xS00nchWmaBnEIxPSE5oHLuJBAVvqrtAnWBwBCVo= github.com/fzipp/gocyclo v0.6.0 h1:lsblElZG7d3ALtGMx9fmxeTKZaLLpU8mET09yN4BBLo= github.com/fzipp/gocyclo v0.6.0/go.mod h1:rXPyn8fnlpa0R2csP/31uerbiVBugk5whMdlyaLkLoA= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/ghostiam/protogetter v0.2.3 h1:qdv2pzo3BpLqezwqfGDLZ+nHEYmc5bUpIdsMbBVwMjw= +github.com/ghostiam/protogetter v0.2.3/go.mod h1:KmNLOsy1v04hKbvZs8EfGI1fk39AgTdRDxWNYPfXVc4= github.com/go-chi/chi v4.1.1+incompatible/go.mod h1:eB3wogJHnLi3x/kFX2A+IbTBlXxmMeXJVKy9tTv1XzQ= github.com/go-chi/chi v4.1.2+incompatible h1:fGFk2Gmi/YKXk0OmGfBh0WgmN3XB8lVnEyNz34tQRec= github.com/go-chi/chi v4.1.2+incompatible/go.mod h1:eB3wogJHnLi3x/kFX2A+IbTBlXxmMeXJVKy9tTv1XzQ= -github.com/go-chi/chi/v5 v5.0.7 h1:rDTPXLDHGATaeHvVlLcR4Qe0zftYethFucbjVQ1PxU8= -github.com/go-chi/chi/v5 v5.0.7/go.mod h1:DslCQbL2OYiznFReuXYUmQ2hGd1aDpCnlMNITLSKoi8= +github.com/go-chi/chi/v5 v5.0.12 h1:9euLV5sTrTNTRUU9POmDUvfxyj6LAABLUcEWO+JJb4s= +github.com/go-chi/chi/v5 v5.0.12/go.mod h1:DslCQbL2OYiznFReuXYUmQ2hGd1aDpCnlMNITLSKoi8= github.com/go-chi/render v1.0.1 h1:4/5tis2cKaNdnv9zFLfXzcquC9HbeZgCnxGnKrltBS8= github.com/go-chi/render v1.0.1/go.mod h1:pq4Rr7HbnsdaeHagklXub+p6Wd16Af5l9koip1OvJns= -github.com/go-critic/go-critic v0.6.3 h1:abibh5XYBTASawfTQ0rA7dVtQT+6KzpGqb/J+DxRDaw= -github.com/go-critic/go-critic v0.6.3/go.mod h1:c6b3ZP1MQ7o6lPR7Rv3lEf7pYQUmAcx8ABHgdZCQt/k= +github.com/go-critic/go-critic v0.9.0 h1:Pmys9qvU3pSML/3GEQ2Xd9RZ/ip+aXHKILuxczKGV/U= +github.com/go-critic/go-critic v0.9.0/go.mod h1:5P8tdXL7m/6qnyG6oRAlYLORvoXH0WDypYgAEmagT40= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= @@ -309,8 +344,8 @@ github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= 
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= -github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= +github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-pkgz/expirable-cache v0.0.3 h1:rTh6qNPp78z0bQE6HDhXBHUwqnV9i09Vm6dksJLXQDc= @@ -319,55 +354,55 @@ github.com/go-pkgz/jrpc v0.2.0 h1:CLy/eZyekjraVrxZV18N2R1mYLMJ/nWrgdfyIOGPY/E= github.com/go-pkgz/jrpc v0.2.0/go.mod h1:wd8vtQ4CgtCnuqua6x2b1SKIgv0VSOh5Dn0uUITbiUE= github.com/go-pkgz/rest v1.5.0 h1:C8SxXcXza4GiUUAn/95iCkvoIrGbS30qpwK19iqlrWQ= github.com/go-pkgz/rest v1.5.0/go.mod h1:nQaM3RhSTUAmbBZWY4hfe4buyeC9VckvhoCktiQXJxI= -github.com/go-redis/redis v6.15.8+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA= -github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= -github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= -github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE= +github.com/go-sql-driver/mysql v1.7.1 h1:lUIinVbN1DY0xBg0eMOzmmtGoHwWBbvnWubQUrtU8EI= +github.com/go-sql-driver/mysql v1.7.1/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= -github.com/go-toolsmith/astcast v1.0.0 h1:JojxlmI6STnFVG9yOImLeGREv8W2ocNUM+iOhR6jE7g= -github.com/go-toolsmith/astcast v1.0.0/go.mod h1:mt2OdQTeAQcY4DQgPSArJjHCcOwlX+Wl/kwN+LbLGQ4= -github.com/go-toolsmith/astcopy v1.0.0 h1:OMgl1b1MEpjFQ1m5ztEO06rz5CUd3oBv9RF7+DyvdG8= -github.com/go-toolsmith/astcopy v1.0.0/go.mod h1:vrgyG+5Bxrnz4MZWPF+pI4R8h3qKRjjyvV/DSez4WVQ= -github.com/go-toolsmith/astequal v1.0.0/go.mod h1:H+xSiq0+LtiDC11+h1G32h7Of5O3CYFJ99GVbS5lDKY= -github.com/go-toolsmith/astequal v1.0.1 h1:JbSszi42Jiqu36Gnf363HWS9MTEAz67vTQLponh3Moc= -github.com/go-toolsmith/astequal v1.0.1/go.mod h1:4oGA3EZXTVItV/ipGiOx7NWkY5veFfcsOJVS2YxltLw= -github.com/go-toolsmith/astfmt v1.0.0 h1:A0vDDXt+vsvLEdbMFJAUBI/uTbRw1ffOPnxsILnFL6k= -github.com/go-toolsmith/astfmt v1.0.0/go.mod h1:cnWmsOAuq4jJY6Ct5YWlVLmcmLMn1JUPuQIHCY7CJDw= -github.com/go-toolsmith/astp v1.0.0 h1:alXE75TXgcmupDsMK1fRAy0YUzLzqPVvBKoyWV+KPXg= -github.com/go-toolsmith/astp v1.0.0/go.mod h1:RSyrtpVlfTFGDYRbrjyWP1pYu//tSFcvdYrA8meBmLI= -github.com/go-toolsmith/pkgload v1.0.2-0.20220101231613-e814995d17c5 h1:eD9POs68PHkwrx7hAB78z1cb6PfGq/jyWn3wJywsH1o= -github.com/go-toolsmith/pkgload v1.0.2-0.20220101231613-e814995d17c5/go.mod h1:3NAwwmD4uY/yggRxoEjk/S00MIV3A+H7rrE3i87eYxM= -github.com/go-toolsmith/strparse v1.0.0 h1:Vcw78DnpCAKlM20kSbAyO4mPfJn/lyYA4BJUDxe2Jb4= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= +github.com/go-toolsmith/astcast v1.1.0 h1:+JN9xZV1A+Re+95pgnMgDboWNVnIMMQXwfBwLRPgSC8= +github.com/go-toolsmith/astcast v1.1.0/go.mod h1:qdcuFWeGGS2xX5bLM/c3U9lewg7+Zu4mr+xPwZIB4ZU= 
+github.com/go-toolsmith/astcopy v1.1.0 h1:YGwBN0WM+ekI/6SS6+52zLDEf8Yvp3n2seZITCUBt5s= +github.com/go-toolsmith/astcopy v1.1.0/go.mod h1:hXM6gan18VA1T/daUEHCFcYiW8Ai1tIwIzHY6srfEAw= +github.com/go-toolsmith/astequal v1.0.3/go.mod h1:9Ai4UglvtR+4up+bAD4+hCj7iTo4m/OXVTSLnCyTAx4= +github.com/go-toolsmith/astequal v1.1.0 h1:kHKm1AWqClYn15R0K1KKE4RG614D46n+nqUQ06E1dTw= +github.com/go-toolsmith/astequal v1.1.0/go.mod h1:sedf7VIdCL22LD8qIvv7Nn9MuWJruQA/ysswh64lffQ= +github.com/go-toolsmith/astfmt v1.1.0 h1:iJVPDPp6/7AaeLJEruMsBUlOYCmvg0MoCfJprsOmcco= +github.com/go-toolsmith/astfmt v1.1.0/go.mod h1:OrcLlRwu0CuiIBp/8b5PYF9ktGVZUjlNMV634mhwuQ4= +github.com/go-toolsmith/astp v1.1.0 h1:dXPuCl6u2llURjdPLLDxJeZInAeZ0/eZwFJmqZMnpQA= +github.com/go-toolsmith/astp v1.1.0/go.mod h1:0T1xFGz9hicKs8Z5MfAqSUitoUYS30pDMsRVIDHs8CA= +github.com/go-toolsmith/pkgload v1.2.2 h1:0CtmHq/02QhxcF7E9N5LIFcYFsMR5rdovfqTtRKkgIk= +github.com/go-toolsmith/pkgload v1.2.2/go.mod h1:R2hxLNRKuAsiXCo2i5J6ZQPhnPMOVtU+f0arbFPWCus= github.com/go-toolsmith/strparse v1.0.0/go.mod h1:YI2nUKP9YGZnL/L1/DLFBfixrcjslWct4wyljWhSRy8= -github.com/go-toolsmith/typep v1.0.2 h1:8xdsa1+FSIH/RhEkgnD1j2CJOy5mNllW1Q9tRiYwvlk= -github.com/go-toolsmith/typep v1.0.2/go.mod h1:JSQCQMUPdRlMZFswiq3TGpNp1GMktqkR2Ns5AIQkATU= -github.com/go-xmlfmt/xmlfmt v0.0.0-20191208150333-d5b6f63a941b h1:khEcpUM4yFcxg4/FHQWkvVRmgijNXRfzkIDHh23ggEo= -github.com/go-xmlfmt/xmlfmt v0.0.0-20191208150333-d5b6f63a941b/go.mod h1:aUCEOzzezBEjDBbFBoSiya/gduyIiWYRP6CnSFIV8AM= +github.com/go-toolsmith/strparse v1.1.0 h1:GAioeZUK9TGxnLS+qfdqNbA4z0SSm5zVNtCQiyP2Bvw= +github.com/go-toolsmith/strparse v1.1.0/go.mod h1:7ksGy58fsaQkGQlY8WVoBFNyEPMGuJin1rfoPS4lBSQ= +github.com/go-toolsmith/typep v1.1.0 h1:fIRYDyF+JywLfqzyhdiHzRop/GQDxxNhLGQ6gFUNHus= +github.com/go-toolsmith/typep v1.1.0/go.mod h1:fVIw+7zjdsMxDA3ITWnH1yOiw1rnTQKCsF/sk2H/qig= +github.com/go-xmlfmt/xmlfmt v1.1.2 h1:Nea7b4icn8s57fTx1M5AI4qQT5HEM3rVUO8MuE6g80U= +github.com/go-xmlfmt/xmlfmt v1.1.2/go.mod h1:aUCEOzzezBEjDBbFBoSiya/gduyIiWYRP6CnSFIV8AM= github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= +github.com/gobwas/httphead v0.1.0/go.mod h1:O/RXo79gxV8G+RqlR/otEwx4Q36zl9rqC5u12GKvMCM= +github.com/gobwas/pool v0.2.1/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw= +github.com/gobwas/ws v1.2.1/go.mod h1:hRKAFb8wOxFROYNsT1bqfWnhX+b5MFeJM9r2ZSwg/KY= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godbus/dbus/v5 v5.0.6/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw= github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= -github.com/gofrs/uuid v4.2.0+incompatible h1:yyYWMnhkhrKwwr8gAOcOCYxOOscHgDS9yZgBrnJfGa0= -github.com/gofrs/uuid v4.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= +github.com/gofrs/uuid/v5 v5.0.0 h1:p544++a97kEL+svbcFbCQVM9KFu0Yo25UoISXGNNH9M= +github.com/gofrs/uuid/v5 v5.0.0/go.mod h1:CDOjlDMVAtN56jqyRUZh58JT31Tiw7/oQyEXZV+9bD8= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= -github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= -github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.2 
h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe h1:lXe2qZdvpiX5WZkZR4hgp4KJVfY3nMkvmwbVkpv1rVY= +github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= github.com/golang-sql/sqlexp v0.0.0-20170517235910-f1bb20e5a188 h1:+eHOFJl1BaXrQxKX+T06f78590z4qA2ZzBTqahsKSE4= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= +github.com/golang-sql/sqlexp v0.0.0-20170517235910-f1bb20e5a188/go.mod h1:vXjM/+wXQnTPR4KqTKDgJukSZ6amVRtWMPEjE6sQoK8= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/glog v1.2.0 h1:uCdmnmatrKCgMBlM4rMuJZWOkPDqdbZPnrMXDY4gI68= +github.com/golang/glog v1.2.0/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= @@ -375,7 +410,6 @@ github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= -github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.0/go.mod h1:Qd/q+1AKNOZr9uGQzbzCmRO6sUih6GTPZv6a1/R87v0= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -392,37 +426,39 @@ github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QD github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= 
github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb h1:PBC98N2aIaM3XXiurYmW7fx4GZkL8feAMVq7nEjURHk= +github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2 h1:23T5iq8rbUYlhpt5DB4XJkc6BU31uODLD1o1gKvZmD0= github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2/go.mod h1:k9Qvh+8juN+UKMCS/3jFtGICgW8O96FVaZsaxdzDkR4= github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a h1:w8hkcTqaFpzKqonE9uMCefW1WDie15eSP/4MssdenaM= github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a/go.mod h1:ryS0uhF+x9jgbj/N71xsEqODy9BN81/GonCZiOzirOk= github.com/golangci/go-misc v0.0.0-20220329215616-d24fe342adfe h1:6RGUuS7EGotKx6J5HIP8ZtyMdiDscjMLfRBSPuzVVeo= github.com/golangci/go-misc v0.0.0-20220329215616-d24fe342adfe/go.mod h1:gjqyPShc/m8pEMpk0a3SeagVb0kaqvhscv+i9jI5ZhQ= -github.com/golangci/gofmt v0.0.0-20190930125516-244bba706f1a h1:iR3fYXUjHCR97qWS8ch1y9zPNsgXThGwjKPrYfqMPks= -github.com/golangci/gofmt v0.0.0-20190930125516-244bba706f1a/go.mod h1:9qCChq59u/eW8im404Q2WWTrnBUQKjpNYKMbU4M7EFU= -github.com/golangci/golangci-lint v1.48.0 h1:hRiBNk9iRqdAKMa06ntfEiLyza1/3IE9rHLNJaek4a8= -github.com/golangci/golangci-lint v1.48.0/go.mod h1:5N+oxduCho+7yuccW69upg/O7cxjfR/d+IQeiNxGmKM= +github.com/golangci/gofmt v0.0.0-20231018234816-f50ced29576e h1:ULcKCDV1LOZPFxGZaA6TlQbiM3J2GCPnkx/bGF6sX/g= +github.com/golangci/gofmt v0.0.0-20231018234816-f50ced29576e/go.mod h1:Pm5KhLPA8gSnQwrQ6ukebRcapGb/BG9iUkdaiCcGHJM= +github.com/golangci/golangci-lint v1.55.2 h1:yllEIsSJ7MtlDBwDJ9IMBkyEUz2fYE0b5B8IUgO1oP8= +github.com/golangci/golangci-lint v1.55.2/go.mod h1:H60CZ0fuqoTwlTvnbyjhpZPWp7KmsjwV2yupIMiMXbM= github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0 h1:MfyDlzVjl1hoaPzPD4Gpb/QgoRfSBR0jdhwGyAWwMSA= github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0/go.mod h1:66R6K6P6VWk9I95jvqGxkqJxVWGFy9XlDwLwVz1RCFg= github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca h1:kNY3/svz5T29MYHubXix4aDDuE3RWHkPvopM/EDv/MA= github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca/go.mod h1:tvlJhZqDe4LMs4ZHD0oMUlt9G2LWuDGoisJTBzLMV9o= -github.com/golangci/misspell v0.3.5 h1:pLzmVdl3VxTOncgzHcvLOKirdvcx/TydsClUQXTehjo= -github.com/golangci/misspell v0.3.5/go.mod h1:dEbvlSfYbMQDtrpRMQU675gSDLDNa8sCPPChZ7PhiVA= -github.com/golangci/revgrep v0.0.0-20220804021717-745bb2f7c2e6 h1:DIPQnGy2Gv2FSA4B/hh8Q7xx3B7AIDk3DAMeHclH1vQ= -github.com/golangci/revgrep v0.0.0-20220804021717-745bb2f7c2e6/go.mod h1:0AKcRCkMoKvUvlf89F6O7H2LYdhr1zBh736mBItOdRs= +github.com/golangci/misspell v0.4.1 h1:+y73iSicVy2PqyX7kmUefHusENlrP9YwuHZHPLGQj/g= +github.com/golangci/misspell v0.4.1/go.mod h1:9mAN1quEo3DlpbaIKKyEvRxK1pwqR9s/Sea1bJCtlNI= +github.com/golangci/revgrep v0.5.2 h1:EndcWoRhcnfj2NHQ+28hyuXpLMF+dQmCN+YaeeIl4FU= +github.com/golangci/revgrep v0.5.2/go.mod h1:bjAMA+Sh/QUfTDcHzxfyHxr4xKvllVr/0sCv2e7jJHA= github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4 h1:zwtduBRr5SSWhqsYNgcuWO2kFlpdOZbP0+yRjmvPGys= github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4/go.mod h1:Izgrg8RkN3rCIMLGE9CyYmU9pY2Jer6DgANEnZ/L/cQ= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU= 
github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= -github.com/google/certificate-transparency-go v1.0.21/go.mod h1:QeJfpSbVSfYc7RgB3gJFj9cbuQMMchQxrWXz8Ruopmg= -github.com/google/certificate-transparency-go v1.1.1/go.mod h1:FDKqPvSXawb2ecErVRrD+nfy23RCzyl7eqVCEmlT1Zs= +github.com/google/cel-go v0.20.1 h1:nDx9r8S3L4pE61eDdt8igGj8rf5kjYR3ILxWIpWNi84= +github.com/google/cel-go v0.20.1/go.mod h1:kWcIzTsPX0zmQ+H3TirHstLLf9ep5QTsZBN9u4dOYLg= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -431,15 +467,19 @@ github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= -github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= +github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-containerregistry v0.19.1 h1:yMQ62Al6/V0Z7CqIrrS1iYoA5/oQCm88DeNujc7C1KY= +github.com/google/go-containerregistry v0.19.1/go.mod h1:YCMFNQeeXeLF+dnhhWkqDItx/JSkH01j1Kis4PsjzFI= github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8= github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= @@ -453,40 +493,29 @@ github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200507031123-427632fa3b1c/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod 
h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20211214055906-6f57359322fd/go.mod h1:KgnwoLYCZ8IQu3XUZ8Nc/bM9CCZFOyjUNOSygVozoDg= +github.com/google/pprof v0.0.0-20240227163752-401108e1b7e7/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik= +github.com/google/pprof v0.0.0-20240327155427-868f304927ed h1:n8QtJTrwsv3P7dNxPaMeNkMcxvUpqocsHLr8iDLGlQI= +github.com/google/pprof v0.0.0-20240327155427-868f304927ed/go.mod h1:kf6iHlnVGwgKolg33glAes7Yg/8iWP8ukqeldJSO7jw= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= -github.com/google/trillian v1.3.11/go.mod h1:0tPraVHrSDkA3BO6vKX67zgLXs6SsOAbHEivX+9mPgw= -github.com/google/uuid v0.0.0-20161128191214-064e2069ce9c/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= -github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= -github.com/gookit/color v1.5.1/go.mod h1:wZFzea4X8qN6vHOSP2apMb4/+w/orMznEzYsIHPaqKM= -github.com/gordonklaus/ineffassign v0.0.0-20200309095847-7953dde2c7bf/go.mod h1:cuNKsD1zp2v6XfE/orVX2QE1LC+i254ceGcVeDT3pTU= -github.com/gordonklaus/ineffassign v0.0.0-20210914165742-4cc7213b9bc8 h1:PVRE9d4AQKmbelZ7emNig1+NT27DUmKZn5qXxfio54U= -github.com/gordonklaus/ineffassign v0.0.0-20210914165742-4cc7213b9bc8/go.mod h1:Qcp2HIAYhR7mNUVSIxZww3Guk4it82ghYcEXIAk+QT0= -github.com/gorhill/cronexpr v0.0.0-20180427100037-88b0669f7d75/go.mod h1:g2644b03hfBX9Ov0ZBDgXXens4rxSxmqFBbhvKv2yVA= -github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= -github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= -github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gordonklaus/ineffassign v0.0.0-20230610083614-0e73809eb601 h1:mrEEilTAUmaAORhssPPkxj84TsHrPMLBGW2Z4SoTxm8= +github.com/gordonklaus/ineffassign v0.0.0-20230610083614-0e73809eb601/go.mod h1:Qcp2HIAYhR7mNUVSIxZww3Guk4it82ghYcEXIAk+QT0= github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= github.com/gorilla/websocket v1.5.0/go.mod 
h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/gostaticanalysis/analysisutil v0.0.0-20190318220348-4088753ea4d3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE= -github.com/gostaticanalysis/analysisutil v0.0.3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE= -github.com/gostaticanalysis/analysisutil v0.1.0/go.mod h1:dMhHRU9KTiDcuLGdy87/2gTR8WruwYZrKdRq9m1O6uw= github.com/gostaticanalysis/analysisutil v0.7.1 h1:ZMCjoue3DtDWQ5WyU16YbjbQEQ3VuzwxALrpYd+HeKk= github.com/gostaticanalysis/analysisutil v0.7.1/go.mod h1:v21E3hY37WKMGSnbsw2S/ojApNWb6C1//mXO48CXbVc= -github.com/gostaticanalysis/comment v1.3.0/go.mod h1:xMicKDx7XRXYdVwY9f9wQpDJVnqWxw9wCauCMKp+IBI= github.com/gostaticanalysis/comment v1.4.1/go.mod h1:ih6ZxzTHLdadaiSnF5WY3dxUoXfXAlTaRzuaNDlSado= github.com/gostaticanalysis/comment v1.4.2 h1:hlnx5+S2fY9Zo9ePo4AhgYsYHbM2+eAv8m/s1JiCd6Q= github.com/gostaticanalysis/comment v1.4.2/go.mod h1:KLUTGDv6HOCotCH8h2erHKmpci2ZoR8VPu34YA2uzdM= @@ -496,16 +525,14 @@ github.com/gostaticanalysis/nilerr v0.1.1 h1:ThE+hJP0fEp4zWLkWHWcRyI2Od0p7DlgYG3 github.com/gostaticanalysis/nilerr v0.1.1/go.mod h1:wZYb6YI5YAxxq0i1+VJbY0s2YONW0HU0GPE3+5PWN4A= github.com/gostaticanalysis/testutil v0.3.1-0.20210208050101-bfb5c8eec0e4/go.mod h1:D+FIZ+7OahH3ePw/izIEeH5I06eKs1IKI4Xr64/Am3M= github.com/gostaticanalysis/testutil v0.4.0 h1:nhdCmubdmDF6VEatUNjgUZBJKWRqugoISdUv3PPQgHY= +github.com/gostaticanalysis/testutil v0.4.0/go.mod h1:bLIoPefWXrRi/ssLFWX1dx7Repi5x3CuviD3dgAZaBU= github.com/gotestyourself/gotestyourself v2.2.0+incompatible h1:AQwinXlbQR2HvPjQZOmDhRqsv5mZf+Jb1RnSLxcqZcI= -github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= -github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= -github.com/grpc-ecosystem/go-grpc-middleware v1.2.2/go.mod h1:EaizFBKfUKtMIF5iaDEhniwNedqGo9FuLFzppDr3uwI= +github.com/gotestyourself/gotestyourself v2.2.0+incompatible/go.mod h1:zZKM6oeNM8k+FRljX1mnzVYeS8wiGgQyvST1/GafPbY= github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw= github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= -github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/grpc-ecosystem/grpc-gateway v1.12.1/go.mod h1:8XEsbTttt/W+VvjtQhLACqCisSPWTxCZ7sBRjU6iH9c= +github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= @@ -517,202 +544,160 @@ github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mO github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.4/go.mod 
h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM= github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/huandu/xstrings v1.0.0/go.mod h1:4qWG/gcEcfX4z/mBDHJ++3ReCw9ibxbsNJbcucJdbSo= -github.com/huandu/xstrings v1.2.0/go.mod h1:DvyZB1rfVYsBIigL8HwpZgxHwXozlTgGqn63UyNX5k4= +github.com/huandu/xstrings v1.4.0 h1:D17IlohoQq4UcpqD7fDk80P7l+lwAmlFaBHgOipl2FU= +github.com/huandu/xstrings v1.4.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= github.com/iancoleman/strcase v0.2.0 h1:05I4QRnGpI0m37iZQRuskXh+w77mr6Z41lwQzuHLwW0= github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/imdario/mergo v0.3.4/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk= +github.com/ianlancetaylor/demangle v0.0.0-20210905161508-09a460cdf81d/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w= +github.com/ianlancetaylor/demangle v0.0.0-20230524184225-eabc099b10ab/go.mod h1:gx7rwoVhcfuVKG5uya9Hs3Sxj7EIvldVofAWIUtGouw= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= -github.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= -github.com/jdxcode/netrc v0.0.0-20210204082910-926c7f70242a h1:d4+I1YEKVmWZrgkt6jpXBnLgV2ZjO0YxEtLDdfIZfH4= -github.com/jdxcode/netrc v0.0.0-20210204082910-926c7f70242a/go.mod h1:Zi/ZFkEqFHTm7qkjyNJjaWH4LQA9LQhGJyF0lTYGpxw= +github.com/jdx/go-netrc v1.0.0 h1:QbLMLyCZGj0NA8glAhxUpf1zDg6cxnWgMBbjq40W0gQ= +github.com/jdx/go-netrc v1.0.0/go.mod h1:Gh9eFQJnoTNIRHXl2j5bJXA1u84hQWJWgGh569zF3v8= github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= -github.com/jgautheron/goconst v1.5.1 h1:HxVbL1MhydKs8R8n/HE5NPvzfaYmQJA3o879lE4+WcM= -github.com/jgautheron/goconst v1.5.1/go.mod h1:aAosetZ5zaeC/2EfMeRswtxUFBpe2Hr7HzkgX4fanO4= -github.com/jhump/gopoet v0.0.0-20190322174617-17282ff210b3/go.mod h1:me9yfT6IJSlOL3FCfrg+L6yzUEZ+5jW6WHt4Sk+UPUI= -github.com/jhump/gopoet v0.1.0/go.mod h1:me9yfT6IJSlOL3FCfrg+L6yzUEZ+5jW6WHt4Sk+UPUI= -github.com/jhump/goprotoc v0.5.0/go.mod h1:VrbvcYrQOrTi3i0Vf+m+oqQWk9l72mjkJCYo7UvLHRQ= -github.com/jhump/protocompile v0.0.0-20220216033700-d705409f108f h1:BNuUg9k2EiJmlMwjoef3e8vZLHplbVw6DrjGFjLL+Yo= -github.com/jhump/protocompile v0.0.0-20220216033700-d705409f108f/go.mod h1:qr2b5kx4HbFS7/g4uYO5qv9ei8303JMsC7ESbYiqr2Q= -github.com/jhump/protoreflect v1.6.1/go.mod h1:RZQ/lnuN+zqeRVpQigTwO6o0AJUkxbnSnpuG7toUTG4= -github.com/jhump/protoreflect v1.11.0/go.mod 
h1:U7aMIjN0NWq9swDP7xDdoMfRHb35uiuTd3Z9nFXJf5E= -github.com/jhump/protoreflect v1.12.1-0.20220721211354-060cc04fc18b h1:izTof8BKh/nE1wrKOrloNA5q4odOarjf+Xpe+4qow98= -github.com/jhump/protoreflect v1.12.1-0.20220721211354-060cc04fc18b/go.mod h1:JytZfP5d0r8pVNLZvai7U/MCuTWITgrI4tTg7puQFKI= +github.com/jgautheron/goconst v1.6.0 h1:gbMLWKRMkzAc6kYsQL6/TxaoBUg3Jm9LSF/Ih1ADWGA= +github.com/jgautheron/goconst v1.6.0/go.mod h1:aAosetZ5zaeC/2EfMeRswtxUFBpe2Hr7HzkgX4fanO4= +github.com/jhump/protoreflect v1.15.6 h1:WMYJbw2Wo+KOWwZFvgY0jMoVHM6i4XIvRs2RcBj5VmI= +github.com/jhump/protoreflect v1.15.6/go.mod h1:jCHoyYQIJnaabEYnbGwyo9hUqfyUMTbJw/tAut5t97E= github.com/jingyugao/rowserrcheck v1.1.1 h1:zibz55j/MJtLsjP1OF4bSdgXxwL1b+Vn7Tjzq7gFzUs= github.com/jingyugao/rowserrcheck v1.1.1/go.mod h1:4yvlZSDb3IyDTUZJUmpZfm2Hwok+Dtp+nu2qOq+er9c= github.com/jinzhu/copier v0.3.5 h1:GlvfUwHk62RokgqVNvYsku0TATCF7bAHVwEXoBh3iJg= github.com/jinzhu/copier v0.3.5/go.mod h1:DfbEm0FYsaqBcKcFuvmOZb218JkPGtvSHsKg8S8hyyg= github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af h1:KA9BjwUk7KlCh6S9EAGWBt1oExIUv9WyNCiRz5amv48= github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af/go.mod h1:HEWGJkRDzjJY2sqdDwxccsGicWEf9BQOZsq2tV+xzM0= -github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= -github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= -github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= github.com/jmhodges/levigo v1.0.0 h1:q5EC36kV79HWeTBWsod3mG11EgStG3qArTKcvlksN1U= github.com/jmhodges/levigo v1.0.0/go.mod h1:Q6Qx+uH3RAqyK4rFQroq9RL7mdkABMcfhEI+nNuzMJQ= -github.com/jmoiron/sqlx v1.2.0/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks= -github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= -github.com/jonboulle/clockwork v0.2.0/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8= github.com/jonboulle/clockwork v0.3.0 h1:9BSCMi8C+0qdApAp4auwX0RkLGUjs956h0EkuQymUhg= github.com/jonboulle/clockwork v0.3.0/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= -github.com/juju/ratelimit v1.0.1/go.mod h1:qapgC/Gy+xNh9UxzV13HGGl/6UXNN+ct+vwSgWNm/qk= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/julz/importas v0.1.0 h1:F78HnrsjY3cR7j0etXy5+TU1Zuy7Xt08X/1aJnH5xXY= github.com/julz/importas 
v0.1.0/go.mod h1:oSFU2R4XK/P7kNBrnL/FEQlDGN1/6WoxXEjSSXO0DV0= -github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88/go.mod h1:3w7q1U84EfirKl04SVQ/s7nPm1ZPhiXd34z40TNz36k= -github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= -github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= -github.com/kisielk/errcheck v1.6.2 h1:uGQ9xI8/pgc9iOoCe7kWQgRE6SBTrCGmTSf0LrEtY7c= -github.com/kisielk/errcheck v1.6.2/go.mod h1:nXw/i/MfnvRHqXa7XXmQMUB0oNFGuBrNI8d8NLy0LPw= +github.com/kisielk/errcheck v1.6.3 h1:dEKh+GLHcWm2oN34nMvDzn1sqI0i0WxPvrgiJA5JuM8= +github.com/kisielk/errcheck v1.6.3/go.mod h1:nXw/i/MfnvRHqXa7XXmQMUB0oNFGuBrNI8d8NLy0LPw= github.com/kisielk/gotool v1.0.0 h1:AV2c/EiW3KqPNT9ZKl07ehoAGi4C5/01Cfbblndcapg= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kkHAIKE/contextcheck v1.1.4 h1:B6zAaLhOEEcjvUgIYEqystmnFk1Oemn8bvJhbt0GMb8= +github.com/kkHAIKE/contextcheck v1.1.4/go.mod h1:1+i/gWqokIa+dm31mqGLZhZJ7Uh44DJGZVmr6QRBNJg= github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4= -github.com/klauspost/compress v1.15.10 h1:Ai8UzuomSCDw90e1qNMtb15msBXsNpH6gzkkENQNcJo= -github.com/klauspost/compress v1.15.10/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM= -github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE= -github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= +github.com/klauspost/compress v1.17.7 h1:ehO88t2UGzQK66LMdE8tibEd1ErmzZjNEqWkjLAKQQg= +github.com/klauspost/compress v1.17.7/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= +github.com/klauspost/pgzip v1.2.6 h1:8RXeL5crjEUFnR2/Sn6GJNWtSQ3Dk8pq4CL3jvdDyjU= +github.com/klauspost/pgzip v1.2.6/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kulti/thelper v0.6.3 h1:ElhKf+AlItIu+xGnI990no4cE2+XaSu1ULymV2Yulxs= github.com/kulti/thelper v0.6.3/go.mod h1:DsqKShOvP40epevkFrvIwkCMNYxMeTNjdWL4dqWHZ6I= -github.com/kunwardeep/paralleltest v1.0.6 h1:FCKYMF1OF2+RveWlABsdnmsvJrei5aoyZoaGS+Ugg8g= -github.com/kunwardeep/paralleltest v1.0.6/go.mod 
h1:Y0Y0XISdZM5IKm3TREQMZ6iteqn1YuwCsJO/0kL9Zes= -github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= -github.com/kyoh86/exportloopref v0.1.8 h1:5Ry/at+eFdkX9Vsdw3qU4YkvGtzuVfzT4X7S77LoN/M= -github.com/kyoh86/exportloopref v0.1.8/go.mod h1:1tUcJeiioIs7VWe5gcOObrux3lb66+sBqGZrRkMwPgg= +github.com/kunwardeep/paralleltest v1.0.8 h1:Ul2KsqtzFxTlSU7IP0JusWlLiNqQaloB9vguyjbE558= +github.com/kunwardeep/paralleltest v1.0.8/go.mod h1:2C7s65hONVqY7Q5Efj5aLzRCNLjw2h4eMc9EcypGjcY= +github.com/kyoh86/exportloopref v0.1.11 h1:1Z0bcmTypkL3Q4k+IDHMWTcnCliEZcaPiIe0/ymEyhQ= +github.com/kyoh86/exportloopref v0.1.11/go.mod h1:qkV4UF1zGl6EkF1ox8L5t9SwyeBAZ3qLMd6up458uqA= github.com/ldez/gomoddirectives v0.2.3 h1:y7MBaisZVDYmKvt9/l1mjNCiSA1BVn34U0ObUcJwlhA= github.com/ldez/gomoddirectives v0.2.3/go.mod h1:cpgBogWITnCfRq2qGoDkKMEVSaarhdBr6g8G04uz6d0= -github.com/ldez/tagliatelle v0.3.1 h1:3BqVVlReVUZwafJUwQ+oxbx2BEX2vUG4Yu/NOfMiKiM= -github.com/ldez/tagliatelle v0.3.1/go.mod h1:8s6WJQwEYHbKZDsp/LjArytKOG8qaMrKQQ3mFukHs88= -github.com/leonklingele/grouper v1.1.0 h1:tC2y/ygPbMFSBOs3DcyaEMKnnwH7eYKzohOtRrf0SAg= -github.com/leonklingele/grouper v1.1.0/go.mod h1:uk3I3uDfi9B6PeUjsCKi6ndcf63Uy7snXgR4yDYQVDY= -github.com/letsencrypt/pkcs11key/v4 v4.0.0/go.mod h1:EFUvBDay26dErnNb70Nd0/VW3tJiIbETBPTl9ATXQag= -github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= -github.com/lib/pq v1.8.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= -github.com/lib/pq v1.9.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= -github.com/lib/pq v1.10.6 h1:jbk+ZieJ0D7EVGJYpL9QTz7/YW6UHbmdnZWYyK5cdBs= -github.com/lib/pq v1.10.6/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/ldez/tagliatelle v0.5.0 h1:epgfuYt9v0CG3fms0pEgIMNPuFf/LpPIfjk4kyqSioo= +github.com/ldez/tagliatelle v0.5.0/go.mod h1:rj1HmWiL1MiKQuOONhd09iySTEkUuE/8+5jtPYz9xa4= +github.com/ledongthuc/pdf v0.0.0-20220302134840-0c2507a12d80/go.mod h1:imJHygn/1yfhB7XSJJKlFZKl/J+dCPAknuiaGOshXAs= +github.com/leonklingele/grouper v1.1.1 h1:suWXRU57D4/Enn6pXR0QVqqWWrnJ9Osrz+5rjt8ivzU= +github.com/leonklingele/grouper v1.1.1/go.mod h1:uk3I3uDfi9B6PeUjsCKi6ndcf63Uy7snXgR4yDYQVDY= +github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= +github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6cdF0Y8= github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg= github.com/lufeee/execinquery v1.2.1 h1:hf0Ems4SHcUGBxpGN7Jz78z1ppVkP/837ZlETPCEtOM= github.com/lufeee/execinquery v1.2.1/go.mod h1:EC7DrEKView09ocscGHC+apXMIaorh4xqSxS/dy8SbM= +github.com/macabu/inamedparam v0.1.2 h1:RR5cnayM6Q7cDhQol32DE2BGAPGMnffJ31LFE+UklaU= +github.com/macabu/inamedparam v0.1.2/go.mod h1:Xg25QvY7IBRl1KLPV9Rbml8JOMZtF/iAkNkmV7eQgjw= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= -github.com/maratori/testpackage v1.1.0 h1:GJY4wlzQhuBusMF1oahQCBtUV/AQ/k69IZ68vxaac2Q= -github.com/maratori/testpackage v1.1.0/go.mod h1:PeAhzU8qkCwdGEMTEupsHJNlQu2gZopMC6RjbhmHeDc= -github.com/matoous/godox v0.0.0-20210227103229-6504466cf951 h1:pWxk9e//NbPwfxat7RXkts09K+dEBJWakUWwICVqYbA= -github.com/matoous/godox v0.0.0-20210227103229-6504466cf951/go.mod 
h1:1BELzlh859Sh1c6+90blK8lbYy0kwQf1bYlBhBysy1s= +github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/maratori/testableexamples v1.0.0 h1:dU5alXRrD8WKSjOUnmJZuzdxWOEQ57+7s93SLMxb2vI= +github.com/maratori/testableexamples v1.0.0/go.mod h1:4rhjL1n20TUTT4vdh3RDqSizKLyXp7K2u6HgraZCGzE= +github.com/maratori/testpackage v1.1.1 h1:S58XVV5AD7HADMmD0fNnziNHqKvSdDuEKdPD1rNTU04= +github.com/maratori/testpackage v1.1.1/go.mod h1:s4gRK/ym6AMrqpOa/kEbQTV4Q4jb7WeLZzVhVVVOQMc= +github.com/matoous/godox v0.0.0-20230222163458-006bad1f9d26 h1:gWg6ZQ4JhDfJPqlo2srm/LN17lpybq15AryXIRcWYLE= +github.com/matoous/godox v0.0.0-20230222163458-006bad1f9d26/go.mod h1:1BELzlh859Sh1c6+90blK8lbYy0kwQf1bYlBhBysy1s= github.com/matryer/is v1.4.0 h1:sosSmIWwkYITGrxZ25ULNDeKiMNzFSr4V/eqBQP0PeE= github.com/matryer/is v1.4.0/go.mod h1:8I/i5uYgLzgsgEloJE1U6xx5HkBQpAZvepWuujKwMRU= -github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= -github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= -github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= -github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= -github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= -github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-isatty v0.0.17 h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPng= github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= -github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= -github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= -github.com/mattn/go-runewidth v0.0.6/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= -github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0= github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= -github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= +github.com/mattn/go-runewidth v0.0.13 h1:lTGmDsbAYt5DmK6OnoV7EuIF1wEIFAcxld6ypU4OSgU= +github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/mattn/go-sqlite3 v1.14.9 h1:10HX2Td0ocZpYEjhilsuo6WWtUqttj2Kb0KtD86/KYA= +github.com/mattn/go-sqlite3 v1.14.9/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI= github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= 
github.com/mbilski/exhaustivestruct v1.2.0 h1:wCBmUnSYufAHO6J4AVWY6ff+oxWxsVFrwgOdMUQePUo= github.com/mbilski/exhaustivestruct v1.2.0/go.mod h1:OeTBVxQWoEmB2J2JCHmXWPJ0aksxSUOUy+nvtVEfzXc= -github.com/mgechev/dots v0.0.0-20210922191527-e955255bf517/go.mod h1:KQ7+USdGKfpPjXk4Ga+5XxQM4Lm4e3gAogrreFAYpOg= -github.com/mgechev/revive v1.2.1 h1:GjFml7ZsoR0IrQ2E2YIvWFNS5GPDV7xNwvA5GM1HZC4= -github.com/mgechev/revive v1.2.1/go.mod h1:+Ro3wqY4vakcYNtkBWdZC7dBg1xSB6sp054wWwmeFm0= -github.com/miekg/dns v1.1.35/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= -github.com/miekg/pkcs11 v1.0.2/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= -github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= -github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= +github.com/mgechev/revive v1.3.4 h1:k/tO3XTaWY4DEHal9tWBkkUMJYO/dLDVyMmAQxmIMDc= +github.com/mgechev/revive v1.3.4/go.mod h1:W+pZCMu9qj8Uhfs1iJMQsEFLRozUfvwFwqVvRbSNLVw= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/mitchellh/reflectwalk v1.0.1/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/moby/buildkit v0.10.3 h1:/dGykD8FW+H4p++q5+KqKEo6gAkYKyBQHdawdjVwVAU= -github.com/moby/buildkit v0.10.3/go.mod h1:jxeOuly98l9gWHai0Ojrbnczrk/rf+o9/JqNhY+UCSo= +github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= +github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/sys/mountinfo v0.5.0/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU= -github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 h1:dcztxKSvZ4Id8iPpHERQBbIJfabdt4wUm5qy3wOL2Zc= -github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6/go.mod h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw= +github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= +github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= -github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8= -github.com/moricho/tparallel v0.2.1 h1:95FytivzT6rYzdJLdtfn6m1bfFJylOJK41+lgv/EHf4= -github.com/moricho/tparallel v0.2.1/go.mod h1:fXEIZxG2vdfl0ZF8b42f5a78EhjjD5mX8qUplsoSU4k= +github.com/moricho/tparallel v0.3.1 h1:fQKD4U1wRMAYNngDonW5XupoB/ZGJHdpzrWqgyg9krA= +github.com/moricho/tparallel v0.3.1/go.mod h1:leENX2cUv7Sv2qDgdi0D0fCftN8fRC67Bcn8pqzeYNI= github.com/morikuni/aec v1.0.0 
h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= -github.com/mozilla/scribe v0.0.0-20180711195314-fb71baf557c1/go.mod h1:FIczTrinKo8VaLxe6PWTPEXRXDIHz2QAwiaBaP5/4a8= -github.com/mozilla/tls-observatory v0.0.0-20210609171429-7bc42856d2e5/go.mod h1:FUqVoUPHSEdDR0MnFM3Dh8AU0pZHLXUD127SAJGER/s= github.com/mroth/weightedrand v0.4.1 h1:rHcbUBopmi/3x4nnrvwGJBhX9d0vk+KgoLUZeDP6YyI= github.com/mroth/weightedrand v0.4.1/go.mod h1:3p2SIcC8al1YMzGhAIoXD+r9olo/g/cdJgAD905gyNE= github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/mwitkow/go-proto-validators v0.0.0-20180403085117-0950a7990007/go.mod h1:m2XC9Qq0AlmmVksL6FktJCdTYyLk7V3fKyp0sl1yWQo= -github.com/mwitkow/go-proto-validators v0.2.0/go.mod h1:ZfA1hW+UH/2ZHOWvQ3HnQaU0DtnpXu850MZiy+YUgcc= github.com/nakabonne/nestif v0.3.1 h1:wm28nZjhQY5HyYPx+weN3Q65k6ilSBxDb8v5S81B81U= github.com/nakabonne/nestif v0.3.1/go.mod h1:9EtoZochLn5iUprVDmDjqGKPofoUEBL8U4Ngq6aY7OE= -github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354 h1:4kuARK6Y6FxaNu/BnU2OAaLF86eTVhP2hjTB6iMvItA= -github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354/go.mod h1:KSVJerMDfblTH7p5MZaTt+8zaT2iEk3AkVb9PQdZuE8= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= -github.com/nishanths/exhaustive v0.8.1 h1:0QKNascWv9qIHY7zRoZSxeRr6kuk5aAT3YXLTiDmjTo= -github.com/nishanths/exhaustive v0.8.1/go.mod h1:qj+zJJUgJ76tR92+25+03oYUhzF4R7/2Wk7fGTfCHmg= -github.com/nishanths/predeclared v0.0.0-20190419143655-18a43bb90ffc/go.mod h1:62PewwiQTlm/7Rj+cxVYqZvDIUc+JjZq6GHAC1fsObQ= +github.com/nishanths/exhaustive v0.11.0 h1:T3I8nUGhl/Cwu5Z2hfc92l0e04D2GEW6e0l8pzda2l0= +github.com/nishanths/exhaustive v0.11.0/go.mod h1:RqwDsZ1xY0dNdqHho2z6X+bgzizwbLYOWnZbbl2wLB4= github.com/nishanths/predeclared v0.2.2 h1:V2EPdZPliZymNAn79T8RkNApBjMmVKh5XRpLm/w98Vk= github.com/nishanths/predeclared v0.2.2/go.mod h1:RROzoN6TnGQupbC+lqggsOlcgysk3LMK/HI84Mp280c= +github.com/nunnatsa/ginkgolinter v0.14.1 h1:khx0CqR5U4ghsscjJ+lZVthp3zjIFytRXPTaQ/TMiyA= +github.com/nunnatsa/ginkgolinter v0.14.1/go.mod h1:nY0pafUSst7v7F637e7fymaMlQqI9c0Wka2fGsDkzWg= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= @@ -720,41 +705,38 @@ github.com/oasisprotocol/curve25519-voi v0.0.0-20220708102147-0a8a51822cae h1:Fa github.com/oasisprotocol/curve25519-voi v0.0.0-20220708102147-0a8a51822cae/go.mod h1:hVoHR2EVESiICEMbg137etN/Lx+lSrHPTD39Z/uE+2s= github.com/oasisprotocol/oasis-core/go v0.2202.5 h1:SwT3XIKz4W5gYZd9I2fe+4qGPYaVvqG0kF8jsancd4E= github.com/oasisprotocol/oasis-core/go v0.2202.5/go.mod h1:hKUgtuPPq371HokUQL5ashT5MZLZxK/VkWNKRLb9m+w= -github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= -github.com/olekukonko/tablewriter v0.0.1/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= 
-github.com/olekukonko/tablewriter v0.0.2/go.mod h1:rSAaSIOAGT9odnlyGlUfAJaoc5w2fSBUmeGDbRWPxyQ= github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= -github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= -github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= -github.com/onsi/ginkgo/v2 v2.1.4 h1:GNapqRSid3zijZ9H77KrgVG4/8KqiyRsxcSxe+7ApXY= -github.com/onsi/ginkgo/v2 v2.1.4/go.mod h1:um6tUpWM/cxCK3/FK8BXqEiUMUwRgSM4JXG47RKZmLU= +github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= +github.com/onsi/ginkgo/v2 v2.13.0 h1:0jY9lJquiL8fcf3M4LAXN5aMlS/b2BV86HFFPCPMgE4= +github.com/onsi/ginkgo/v2 v2.13.0/go.mod h1:TE309ZR8s5FsKKpuB1YAQYBzCaAfUgatB/xlT/ETL/o= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= -github.com/onsi/gomega v1.19.0 h1:4ieX6qQjPP/BfC3mpsAtIGGlxTWPeA3Inl/7DtXw1tw= -github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro= +github.com/onsi/gomega v1.28.1 h1:MijcGUbfYuznzK/5R4CPNoUP/9Xvuo20sXfEm6XxoTA= +github.com/onsi/gomega v1.28.1/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= -github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799 h1:rc3tiVYb5z54aKaDfakKn0dDjIyPpTtszkjuMzyt7ec= -github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug= +github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM= github.com/opencontainers/runc v1.1.3 h1:vIXrkId+0/J2Ymu2m7VjGvbSlAId9XNRPhn2p4b+d8w= github.com/opencontainers/runc v1.1.3/go.mod h1:1J5XiS+vdZ3wCyZybsuxXZWGrgSr8fFJHLXuG2PsnNg= github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/selinux v1.10.0/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/orisano/pixelmatch v0.0.0-20220722002657-fb0b55479cde/go.mod h1:nZgzbfBr3hhjoZnS66nKrHmduYNpc34ny7RK4z5/HM0= github.com/ory/dockertest v3.3.5+incompatible h1:iLLK6SQwIhcbrG783Dghaaa3WPzGc+4Emza6EbVUUGA= github.com/ory/dockertest v3.3.5+incompatible/go.mod h1:1vX4m9wsvi00u5bseYwXaSnhNrne+V0E6LAcBILJdPs= 
github.com/ory/dockertest/v3 v3.9.1 h1:v4dkG+dlu76goxMiTT2j8zV7s4oPPEppKT8K8p2f1kY= -github.com/otiai10/copy v1.2.0 h1:HvG945u96iNadPoG2/Ja2+AUJeW5YuFQMixq9yirC+k= +github.com/ory/dockertest/v3 v3.9.1/go.mod h1:42Ir9hmvaAPm0Mgibk6mBPi7SFvTXxEcnztDYOJ//uM= github.com/otiai10/copy v1.2.0/go.mod h1:rrF5dJ5F0t/EWSYODDu4j9/vEeYHMkc8jt0zJChqQWw= +github.com/otiai10/copy v1.11.0 h1:OKBD80J/mLBrwnzXqGtFCzprFSGioo30JcmR4APsNwc= +github.com/otiai10/copy v1.11.0/go.mod h1:rSaLseMUsZFFbsFGc7wCJnnkTAvdc5L6VWxPE4308Ww= github.com/otiai10/curr v0.0.0-20150429015615-9b4961190c95/go.mod h1:9qAhocn7zKJG+0mI8eUu6xqkFDYS2kb2saOteoSB3cE= github.com/otiai10/curr v1.0.0/go.mod h1:LskTG5wDwr8Rs+nNQ+1LlxRjAtTZZjtJW4rMXl6j4vs= github.com/otiai10/mint v1.3.0/go.mod h1:F5AjcsTsWUqX+Na9fpHb52P8pcRX2CI6A3ctIT91xUo= @@ -762,25 +744,21 @@ github.com/otiai10/mint v1.3.1/go.mod h1:/yxELlJQ0ufhjUwhshSj+wFjZ78CnZ48/1wtmBH github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/pelletier/go-toml/v2 v2.0.6 h1:nrzqCb7j9cDFj2coyLNLaZuJTLjWjlaz6nvTvIwycIU= github.com/pelletier/go-toml/v2 v2.0.6/go.mod h1:eumQOmlWiOPt5WriQQqoM5y18pDHwha2N+QD+EUNTek= -github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5 h1:q2e307iGHPdTGp0hoxKjt1H5pDo6utceo3dQVK3I5XQ= github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5/go.mod h1:jvVRKCrJTQWu0XVbaOlby/2lO20uSCHEMzzplHXte1o= -github.com/phayes/checkstyle v0.0.0-20170904204023-bfd46e6a821d h1:CdDQnGF8Nq9ocOS/xlSptM1N3BbrA6/kmaep5ggwaIA= -github.com/phayes/checkstyle v0.0.0-20170904204023-bfd46e6a821d/go.mod h1:3OzsM7FXDQlpCiw2j81fOmAwQLnZnLGXVKUzeKQXIAw= -github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU= -github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI= +github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ= +github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/profile v1.6.0 h1:hUDfIISABYI59DyeB3OTay/HxSRwTQ8rB/H83k6r5dM= -github.com/pkg/profile v1.6.0/go.mod h1:qBsxPvzyUincmltOk6iyRVxHYg4adc0OFOv72ZdLa18= +github.com/pkg/profile v1.7.0 h1:hnbDkaNWPCLMO9wGLdBFTIZvzDrDfBM2072E1S9gJkA= +github.com/pkg/profile v1.7.0/go.mod h1:8Uer0jas47ZQMJ7VD+OHknK4YDY07LPUC6dEvqDjvNo= github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= -github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/polyfloyd/go-errorlint v1.0.0 h1:pDrQG0lrh68e602Wfp68BlUTRFoHn8PZYAjLgt2LFsM= -github.com/polyfloyd/go-errorlint v1.0.0/go.mod h1:KZy4xxPJyy88/gldCe5OdW6OQRtNO3EZE7hXzmnebgA= +github.com/polyfloyd/go-errorlint v1.4.5 h1:70YWmMy4FgRHehGNOUask3HtSFSOLKgmDn7ryNe7LqI= +github.com/polyfloyd/go-errorlint v1.4.5/go.mod 
h1:sIZEbFoDOCnTYYZoVkjc4hTnM459tuWA9H/EkdXwsKk= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= @@ -791,8 +769,9 @@ github.com/prometheus/client_golang v1.13.0/go.mod h1:vTeo+zgvILHsnnj/39Ou/1fPN5 github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.1-0.20210607210712-147c58e9608a h1:CmF68hwI0XsOQ5UwlBopMi2Ow4Pbg32akc4KIVCOm+Y= +github.com/prometheus/client_model v0.2.1-0.20210607210712-147c58e9608a/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= @@ -806,28 +785,22 @@ github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1 github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo= github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= -github.com/pseudomuto/protoc-gen-doc v1.3.2/go.mod h1:y5+P6n3iGrbKG+9O04V5ld71in3v/bX88wUwgt+U8EA= -github.com/pseudomuto/protokit v0.2.0/go.mod h1:2PdH30hxVHsup8KpBTOXTBeMVhJZVio3Q8ViKSAXT0Q= -github.com/quasilyte/go-ruleguard v0.3.1-0.20210203134552-1b5a410e1cc8/go.mod h1:KsAh3x0e7Fkpgs+Q9pNLS5XpFSvYCEVl5gP9Pp1xp30= -github.com/quasilyte/go-ruleguard v0.3.16-0.20220213074421-6aa060fab41a h1:sWFavxtIctGrVs5SYZ5Ml1CvrDAs8Kf5kx2PI3C41dA= -github.com/quasilyte/go-ruleguard v0.3.16-0.20220213074421-6aa060fab41a/go.mod h1:VMX+OnnSw4LicdiEGtRSD/1X8kW7GuEscjYNr4cOIT4= -github.com/quasilyte/go-ruleguard/dsl v0.3.0/go.mod h1:KeCP03KrjuSO0H1kTuZQCWlQPulDV6YMIXmpQss17rU= -github.com/quasilyte/go-ruleguard/dsl v0.3.16/go.mod h1:KeCP03KrjuSO0H1kTuZQCWlQPulDV6YMIXmpQss17rU= -github.com/quasilyte/go-ruleguard/rules v0.0.0-20201231183845-9e62ed36efe1/go.mod h1:7JTjp89EGyU1d6XfBiXihJNG37wB2VRkd125Q1u7Plc= -github.com/quasilyte/go-ruleguard/rules v0.0.0-20211022131956-028d6511ab71/go.mod h1:4cgAphtvu7Ftv7vOT2ZOYhC6CvBxZixcasr8qIOTA50= -github.com/quasilyte/gogrep v0.0.0-20220120141003-628d8b3623b5 h1:PDWGei+Rf2bBiuZIbZmM20J2ftEy9IeUCHA8HbQqed8= -github.com/quasilyte/gogrep v0.0.0-20220120141003-628d8b3623b5/go.mod h1:wSEyW6O61xRV6zb6My3HxrQ5/8ke7NE2OayqCHa3xRM= -github.com/quasilyte/regex/syntax v0.0.0-20200407221936-30656e2c4a95 h1:L8QM9bvf68pVdQ3bCFZMDmnt9yqcMBro1pC7F+IPYMY= -github.com/quasilyte/regex/syntax v0.0.0-20200407221936-30656e2c4a95/go.mod h1:rlzQ04UMyJXu/aOvhd8qT+hvDrFpiwqp8MRXDY9szc0= +github.com/quasilyte/go-ruleguard v0.4.0 h1:DyM6r+TKL+xbKB4Nm7Afd1IQh9kEUKQs2pboWGKtvQo= +github.com/quasilyte/go-ruleguard v0.4.0/go.mod 
h1:Eu76Z/R8IXtViWUIHkE3p8gdH3/PKk1eh3YGfaEof10= +github.com/quasilyte/gogrep v0.5.0 h1:eTKODPXbI8ffJMN+W2aE0+oL0z/nh8/5eNdiO34SOAo= +github.com/quasilyte/gogrep v0.5.0/go.mod h1:Cm9lpz9NZjEoL1tgZ2OgeUKPIxL1meE7eo60Z6Sk+Ng= +github.com/quasilyte/regex/syntax v0.0.0-20210819130434-b3f0c404a727 h1:TCg2WBOl980XxGFEZSS6KlBGIV0diGdySzxATTWoqaU= +github.com/quasilyte/regex/syntax v0.0.0-20210819130434-b3f0c404a727/go.mod h1:rlzQ04UMyJXu/aOvhd8qT+hvDrFpiwqp8MRXDY9szc0= github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567 h1:M8mH9eK4OUR4lu7Gd+PU1fV2/qnDNfzT635KRSObncs= github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567/go.mod h1:DWNGW8A4Y+GyBgPuaQJuWiy0XYftx4Xm/y5Jqk9I6VQ= -github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY= +github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.8.1 h1:geMPLpDpQOgVyCg5z5GoRwLHepNdb71NXb67XFkP+Eg= -github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= -github.com/rs/cors v1.8.2 h1:KCooALfAYGs415Cwu5ABvv9n9509fSiG5SQJn/AQo4U= -github.com/rs/cors v1.8.2/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= +github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= +github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= +github.com/rs/cors v1.10.1 h1:L0uuZVXIKlI1SShY2nhFfo44TYvDPQ1w4oFkUJNfhyo= +github.com/rs/cors v1.10.1/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= github.com/rs/xid v1.4.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= github.com/rs/zerolog v1.29.0 h1:Zes4hju04hjbvkVkOhdl2HpZa+0PmVwigmo8XoORE5w= github.com/rs/zerolog v1.29.0/go.mod h1:NILgTygv/Uej1ra5XxGf82ZFSLk58MFGAUS2o6usyD0= @@ -835,20 +808,21 @@ github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/ryancurrah/gomodguard v1.2.4 h1:CpMSDKan0LtNGGhPrvupAoLeObRFjND8/tU1rEOtBp4= -github.com/ryancurrah/gomodguard v1.2.4/go.mod h1:+Kem4VjWwvFpUJRJSwa16s1tBJe+vbv02+naTow2f6M= -github.com/ryanrolds/sqlclosecheck v0.3.0 h1:AZx+Bixh8zdUBxUA1NxbxVAS78vTPq4rCb8OUZI9xFw= -github.com/ryanrolds/sqlclosecheck v0.3.0/go.mod h1:1gREqxyTGR3lVtpngyFo3hZAgk0KCtEdgEkHwDbigdA= -github.com/sanposhiho/wastedassign/v2 v2.0.6 h1:+6/hQIHKNJAUixEj6EmOngGIisyeI+T3335lYTyxRoA= -github.com/sanposhiho/wastedassign/v2 v2.0.6/go.mod h1:KyZ0MWTwxxBmfwn33zh3k1dmsbF2ud9pAAGfoLfjhtI= +github.com/ryancurrah/gomodguard v1.3.0 h1:q15RT/pd6UggBXVBuLps8BXRvl5GPBcwVA7BJHMLuTw= +github.com/ryancurrah/gomodguard v1.3.0/go.mod h1:ggBxb3luypPEzqVtq33ee7YSN35V28XeGnid8dnni50= +github.com/ryanrolds/sqlclosecheck v0.5.1 h1:dibWW826u0P8jNLsLN+En7+RqWWTYrjCB9fJfSfdyCU= +github.com/ryanrolds/sqlclosecheck v0.5.1/go.mod h1:2g3dUjoS6AL4huFdv6wn55WpLIDjY7ZgUR4J8HOO/XQ= +github.com/sanposhiho/wastedassign/v2 v2.0.7 h1:J+6nrY4VW+gC9xFzUc+XjPD3g3wF3je/NsJFwFK7Uxc= +github.com/sanposhiho/wastedassign/v2 
v2.0.7/go.mod h1:KyZ0MWTwxxBmfwn33zh3k1dmsbF2ud9pAAGfoLfjhtI= github.com/sasha-s/go-deadlock v0.3.1 h1:sqv7fDNShgjcaxkO0JNcOAlr8B9+cV5Ey/OB71efZx0= github.com/sasha-s/go-deadlock v0.3.1/go.mod h1:F73l+cr82YSh10GxyRI6qZiCgK64VaZjwesgfQ1/iLM= -github.com/sashamelentyev/usestdlibvars v1.8.0 h1:QnWP9IOEuRyYKH+IG0LlQIjuJlc0rfdo4K3/Zh3WRMw= -github.com/sashamelentyev/usestdlibvars v1.8.0/go.mod h1:BFt7b5mSVHaaa26ZupiNRV2ODViQBxZZVhtAxAJRrjs= +github.com/sashamelentyev/interfacebloat v1.1.0 h1:xdRdJp0irL086OyW1H/RTZTr1h/tMEOsumirXcOJqAw= +github.com/sashamelentyev/interfacebloat v1.1.0/go.mod h1:+Y9yU5YdTkrNvoX0xHc84dxiN1iBi9+G8zZIhPVoNjQ= +github.com/sashamelentyev/usestdlibvars v1.24.0 h1:MKNzmXtGh5N0y74Z/CIaJh4GlB364l0K1RUT08WSWAc= +github.com/sashamelentyev/usestdlibvars v1.24.0/go.mod h1:9cYkq+gYJ+a5W2RPdhfaSCnTVUC1OQP/bSiiBhq3OZE= github.com/seccomp/libseccomp-golang v0.9.2-0.20220502022130-f33da4d89646/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg= -github.com/securego/gosec/v2 v2.12.0 h1:CQWdW7ATFpvLSohMVsajscfyHJ5rsGmEXmsNcsDNmAg= -github.com/securego/gosec/v2 v2.12.0/go.mod h1:iTpT+eKTw59bSgklBHlSnH5O2tNygHMDxfvMubA4i7I= -github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= +github.com/securego/gosec/v2 v2.18.2 h1:DkDt3wCiOtAHf1XkiXZBhQ6m6mK/b9T/wD257R3/c+I= +github.com/securego/gosec/v2 v2.18.2/go.mod h1:xUuqSF6i0So56Y2wwohWAmB07EdBkUN6crbLlHwbyJs= github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c h1:W65qqJCIOVP4jpqPQ0YvHYKwcMEMVWIzWC5iNQQfBTU= github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c/go.mod h1:/PevMnwAxekIXwN8qQyfc5gl2NlkB3CQlkizAbOkeBs= github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= @@ -857,23 +831,21 @@ github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeV github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= -github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= -github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0= -github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= -github.com/sivchari/containedctx v1.0.2 h1:0hLQKpgC53OVF1VT7CeoFHk9YKstur1XOgfYIc1yrHI= -github.com/sivchari/containedctx v1.0.2/go.mod h1:PwZOeqm4/DLoJOqMSIJs3aKqXRX4YO+uXww087KZ7Bw= +github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= +github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/sivchari/containedctx v1.0.3 h1:x+etemjbsh2fB5ewm5FeLNi5bUjK0V8n0RB+Wwfd0XE= +github.com/sivchari/containedctx v1.0.3/go.mod h1:c1RDvCbnJLtH4lLcYD/GqwiBSSf4F5Qk0xld2rBqzJ4= github.com/sivchari/nosnakecase v1.7.0 h1:7QkpWIRMe8x25gckkFd2A5Pi6Ymo0qgr4JrhGt95do8= github.com/sivchari/nosnakecase v1.7.0/go.mod h1:CwDzrzPea40/GB6uynrNLiorAlgFRvRbFSgJx2Gs+QY= -github.com/sivchari/tenv v1.7.0 h1:d4laZMBK6jpe5PWepxlV9S+LC0yXqvYHiq8E6ceoVVE= -github.com/sivchari/tenv v1.7.0/go.mod h1:64yStXKSOxDfX47NlhVwND4dHwfZDdbp2Lyl018Icvg= +github.com/sivchari/tenv v1.7.1 h1:PSpuD4bu6fSmtWMxSGWcvqUUgIn7k3yOJhOIzVWn8Ak= +github.com/sivchari/tenv v1.7.1/go.mod h1:64yStXKSOxDfX47NlhVwND4dHwfZDdbp2Lyl018Icvg= 
github.com/snikch/goodman v0.0.0-20171125024755-10e37e294daa h1:YJfZp12Z3AFhSBeXOlv4BO55RMwPn2NoQeDsrdWnBtY= github.com/snikch/goodman v0.0.0-20171125024755-10e37e294daa/go.mod h1:oJyF+mSPHbB5mVY2iO9KV3pTt/QbIkGaO8gQ2WrDbP4= -github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= -github.com/sonatard/noctx v0.0.1 h1:VC1Qhl6Oxx9vvWo3UDgrGXYCeKCe3Wbw7qAWL6FrmTY= -github.com/sonatard/noctx v0.0.1/go.mod h1:9D2D/EoULe8Yy2joDHJj7bv3sZoq9AaSb8B4lqBjiZI= -github.com/sourcegraph/go-diff v0.6.1 h1:hmA1LzxW0n1c3Q4YbrFgg4P99GSnebYa3x8gr0HZqLQ= -github.com/sourcegraph/go-diff v0.6.1/go.mod h1:iBszgVvyxdc8SFZ7gm69go2KDdt3ag071iBaWPF6cjs= +github.com/sonatard/noctx v0.0.2 h1:L7Dz4De2zDQhW8S0t+KUjY0MAQJd6SgVwhzNIc4ok00= +github.com/sonatard/noctx v0.0.2/go.mod h1:kzFz+CzWSjQ2OzIm46uJZoXuBpa2+0y3T36U18dWqIo= +github.com/sourcegraph/go-diff v0.7.0 h1:9uLlrd5T46OXs5qpp8L/MTltk0zikUGi0sNNyCpA8G0= +github.com/sourcegraph/go-diff v0.7.0/go.mod h1:iBszgVvyxdc8SFZ7gm69go2KDdt3ag071iBaWPF6cjs= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= @@ -883,14 +855,12 @@ github.com/spf13/afero v1.9.3/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcD github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w= github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU= -github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= -github.com/spf13/cobra v1.6.1 h1:o94oiPyS4KD1mPy2fmcYYHHfCxLqYjJOhGsCHFZtEzA= -github.com/spf13/cobra v1.6.1/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY= +github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0= +github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= -github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= @@ -901,35 +871,35 @@ github.com/ssgreg/nlreturn/v2 v2.2.1 h1:X4XDI7jstt3ySqGU86YGAURbxw3oTDPK9sPEi6YE github.com/ssgreg/nlreturn/v2 v2.2.1/go.mod h1:E/iiPB78hV7Szg2YfRgyIrk1AD6JVMTRkkxBiELzh2I= github.com/stbenjam/no-sprintf-host-port v0.1.1 h1:tYugd/yrm1O0dV+ThCbaKZh195Dfm07ysF0U6JQXczc= github.com/stbenjam/no-sprintf-host-port v0.1.1/go.mod h1:TLhvtIvONRzdmkFiio4O8LHsN9N74I+PhRquPsxpL0I= +github.com/stoewer/go-strcase v1.3.0 h1:g0eASXYtp+yvN9fK8sH94oCIk0fau9uV1/ZdJ0AVEzs= +github.com/stoewer/go-strcase v1.3.0/go.mod h1:fAH5hQ5pehh+j3nZfvwdk2RgEgQjAoM8wodgtPmh1xo= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx 
v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= -github.com/stretchr/testify v0.0.0-20170130113145-4d4bfba8f1d1/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.1.4/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/subosito/gotenv v1.4.2 h1:X1TuBLAMDFbaTAChgCBLu3DU3UPyELpnF2jjJ2cz/S8= github.com/subosito/gotenv v1.4.2/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0= -github.com/sylvia7788/contextcheck v1.0.4 h1:MsiVqROAdr0efZc/fOCt0c235qm9XJqHtWwM+2h2B04= -github.com/sylvia7788/contextcheck v1.0.4/go.mod h1:vuPKJMQ7MQ91ZTqfdyreNKwZjyUg6KO+IebVyQDedZQ= github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= -github.com/syndtr/goleveldb v1.0.1-0.20200815110645-5c35d600f0ca h1:Ld/zXl5t4+D69SiV4JoN7kkfvJdOWlPpfxrzxpLMoUk= github.com/syndtr/goleveldb v1.0.1-0.20200815110645-5c35d600f0ca/go.mod h1:u2MKkTVTVJWe5D1rCvame8WqhBd88EuIwODJZ1VHCPM= -github.com/tdakkota/asciicheck v0.1.1 h1:PKzG7JUTUmVspQTDqtkX9eSiLGossXTybutHwTXuO0A= -github.com/tdakkota/asciicheck v0.1.1/go.mod h1:yHp0ai0Z9gUljN3o0xMhYJnH/IcvkdTBOX2fmJ93JEM= +github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY= +github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc= +github.com/t-yuki/gocover-cobertura v0.0.0-20180217150009-aaee18c8195c h1:+aPplBwWcHBo6q9xrfWdMrT9o4kltkmmvpemgIjep/8= +github.com/t-yuki/gocover-cobertura v0.0.0-20180217150009-aaee18c8195c/go.mod h1:SbErYREK7xXdsRiigaQiQkI9McGRzYMvlKYaP3Nimdk= +github.com/tdakkota/asciicheck v0.2.0 h1:o8jvnUANo0qXtnslk2d3nMKTFNlOnJjRrNcj0j9qkHM= +github.com/tdakkota/asciicheck v0.2.0/go.mod h1:Qb7Y9EgjCLJGup51gDHFzbI08/gbGhL/UVhYIPWG2rg= github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c h1:g+WoO5jjkqGAzHWCjJB1zZfXPIAaDpzXIEJ0eS6B5Ok= github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c/go.mod h1:ahpPrc7HpcfEWDQRZEmnXMzHY03mLDYMCxeDzy46i+8= github.com/tendermint/go-amino v0.16.0 
h1:GyhmgQKvqF82e2oZeuMSp9JTN0N09emoSZlb2lyGa2E= @@ -942,97 +912,107 @@ github.com/tenntenn/modver v1.0.1 h1:2klLppGhDgzJrScMpkj9Ujy3rXPUspSjAcev9tSEBgA github.com/tenntenn/modver v1.0.1/go.mod h1:bePIyQPb7UeioSRkw3Q0XeMhYZSMx9B8ePqg6SAMGH0= github.com/tenntenn/text/transform v0.0.0-20200319021203-7eef512accb3 h1:f+jULpRQGxTSkNYKJ51yaw6ChIqO+Je8UqsTKN/cDag= github.com/tenntenn/text/transform v0.0.0-20200319021203-7eef512accb3/go.mod h1:ON8b8w4BN/kE1EOhwT0o+d62W65a6aPw1nouo9LMgyY= -github.com/tetafro/godot v1.4.11 h1:BVoBIqAf/2QdbFmSwAWnaIqDivZdOV0ZRwEm6jivLKw= -github.com/tetafro/godot v1.4.11/go.mod h1:LR3CJpxDVGlYOWn3ZZg1PgNZdTUvzsZWu8xaEohUpn8= -github.com/timakin/bodyclose v0.0.0-20210704033933-f49887972144 h1:kl4KhGNsJIbDHS9/4U9yQo1UcPQM0kOMJHn29EoH/Ro= -github.com/timakin/bodyclose v0.0.0-20210704033933-f49887972144/go.mod h1:Qimiffbc6q9tBWlVV6x0P9sat/ao1xEkREYPPj9hphk= -github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/tmc/grpc-websocket-proxy v0.0.0-20200427203606-3cfed13b9966/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/tomarrell/wrapcheck/v2 v2.6.2 h1:3dI6YNcrJTQ/CJQ6M/DUkc0gnqYSIk6o0rChn9E/D0M= -github.com/tomarrell/wrapcheck/v2 v2.6.2/go.mod h1:ao7l5p0aOlUNJKI0qVwB4Yjlqutd0IvAB9Rdwyilxvg= -github.com/tomasen/realip v0.0.0-20180522021738-f0c99a92ddce/go.mod h1:o8v6yHRoik09Xen7gje4m9ERNah1d1PPsVq1VEx9vE4= -github.com/tommy-muehle/go-mnd/v2 v2.5.0 h1:iAj0a8e6+dXSL7Liq0aXPox36FiN1dBbjA6lt9fl65s= -github.com/tommy-muehle/go-mnd/v2 v2.5.0/go.mod h1:WsUAkMJMYww6l/ufffCD3m+P7LEvr8TnZn9lwVDlgzw= +github.com/tetafro/godot v1.4.15 h1:QzdIs+XB8q+U1WmQEWKHQbKmCw06QuQM7gLx/dky2RM= +github.com/tetafro/godot v1.4.15/go.mod h1:2oVxTBSftRTh4+MVfUaUXR6bn2GDXCaMcOG4Dk3rfio= +github.com/timakin/bodyclose v0.0.0-20230421092635-574207250966 h1:quvGphlmUVU+nhpFa4gg4yJyTRJ13reZMDHrKwYw53M= +github.com/timakin/bodyclose v0.0.0-20230421092635-574207250966/go.mod h1:27bSVNWSBOHm+qRp1T9qzaIpsWEP6TbUnei/43HK+PQ= +github.com/timonwong/loggercheck v0.9.4 h1:HKKhqrjcVj8sxL7K77beXh0adEm6DLjV/QOGeMXEVi4= +github.com/timonwong/loggercheck v0.9.4/go.mod h1:caz4zlPcgvpEkXgVnAJGowHAMW2NwHaNlpS8xDbVhTg= +github.com/tomarrell/wrapcheck/v2 v2.8.1 h1:HxSqDSN0sAt0yJYsrcYVoEeyM4aI9yAm3KQpIXDJRhQ= +github.com/tomarrell/wrapcheck/v2 v2.8.1/go.mod h1:/n2Q3NZ4XFT50ho6Hbxg+RV1uyo2Uow/Vdm9NQcl5SE= +github.com/tommy-muehle/go-mnd/v2 v2.5.1 h1:NowYhSdyE/1zwK9QCLeRb6USWdoif80Ie+v+yU8u1Zw= +github.com/tommy-muehle/go-mnd/v2 v2.5.1/go.mod h1:WsUAkMJMYww6l/ufffCD3m+P7LEvr8TnZn9lwVDlgzw= github.com/tyler-smith/go-bip39 v1.1.0 h1:5eUemwrMargf3BSLRRCalXT93Ns6pQJIjYQN2nyfOP8= github.com/tyler-smith/go-bip39 v1.1.0/go.mod h1:gUYDtqQw1JS3ZJ8UWVcGTGqqr6YIN3CWg+kkNaLt55U= github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= -github.com/ultraware/funlen v0.0.3 h1:5ylVWm8wsNwH5aWo9438pwvsK0QiqVuUrt9bn7S/iLA= -github.com/ultraware/funlen v0.0.3/go.mod h1:Dp4UiAus7Wdb9KUZsYWZEWiRzGuM2kXM1lPbfaF6xhA= +github.com/ultraware/funlen v0.1.0 h1:BuqclbkY6pO+cvxoq7OsktIXZpgBSkYTQtmwhAK81vI= +github.com/ultraware/funlen v0.1.0/go.mod h1:XJqmOQja6DpxarLj6Jj1U7JuoS8PvL4nEqDaQhy22p4= github.com/ultraware/whitespace v0.0.5 h1:hh+/cpIcopyMYbZNVov9iSxvJU3OYQg78Sfaqzi/CzI= github.com/ultraware/whitespace v0.0.5/go.mod h1:aVMh/gQve5Maj9hQ/hg+F75lr/X5A89uZnzAmWSineA= 
-github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= -github.com/uudashr/gocognit v1.0.6 h1:2Cgi6MweCsdB6kpcVQp7EW4U23iBFQWfTXiWlyp842Y= -github.com/uudashr/gocognit v1.0.6/go.mod h1:nAIUuVBnYU7pcninia3BHOvQkpQCeO76Uscky5BOwcY= -github.com/vektra/mockery/v2 v2.33.2 h1:znIUwQ3FxnA5jvPy8irYBoiIqMZhuOJhoPOJYNoTJqU= -github.com/vektra/mockery/v2 v2.33.2/go.mod h1:9lREs4VEeQiUS3rizYQx1saxHu2JiIhThP0q9+fDegM= -github.com/viki-org/dnscache v0.0.0-20130720023526-c70c1f23c5d8/go.mod h1:dniwbG03GafCjFohMDmz6Zc6oCuiqgH6tGNyXTkHzXE= +github.com/uudashr/gocognit v1.1.2 h1:l6BAEKJqQH2UpKAPKdMfZf5kE4W/2xk8pfU1OVLvniI= +github.com/uudashr/gocognit v1.1.2/go.mod h1:aAVdLURqcanke8h3vg35BC++eseDm66Z7KmchI5et4k= +github.com/vbatts/tar-split v0.11.5 h1:3bHCTIheBm1qFTcgh9oPu+nNBtX+XJIupG/vacinCts= +github.com/vbatts/tar-split v0.11.5/go.mod h1:yZbwRsSeGjusneWgA781EKej9HF8vme8okylkAeNKLk= +github.com/vektra/mockery/v2 v2.41.0 h1:miv6vazLja/sknB/Rv1ZyKzxOG24QJgIPNN1renwkrs= +github.com/vektra/mockery/v2 v2.41.0/go.mod h1:XNTE9RIu3deGAGQRVjP1VZxGpQNm0YedZx4oDs3prr8= github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU= github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo= +github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74= -github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= -github.com/xo/terminfo v0.0.0-20210125001918-ca9a967f8778/go.mod h1:2MuV+tbUrU1zIOPMxZ5EncGwgmMJsa+9ucAQZXxsObs= +github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= +github.com/xen0n/gosmopolitan v1.2.2 h1:/p2KTnMzwRexIW8GlKawsTWOxn7UHA+jCMF/V8HHtvU= +github.com/xen0n/gosmopolitan v1.2.2/go.mod h1:7XX7Mj61uLYrj0qmeN0zi7XDon9JRAEhYQqAPLVNTeg= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= github.com/yagipy/maintidx v1.0.0 h1:h5NvIsCz+nRDapQ0exNv4aJ0yXSI0420omVANTv3GJM= github.com/yagipy/maintidx v1.0.0/go.mod h1:0qNf/I/CCZXSMhsRsrEPDZ+DkekpKLXAJfsTACwgXLk= github.com/yeya24/promlinter v0.2.0 h1:xFKDQ82orCU5jQujdaD8stOHiv8UN68BSdn2a8u8Y3o= github.com/yeya24/promlinter v0.2.0/go.mod h1:u54lkmBOZrpEbQQ6gox2zWKKLKu2SGe+2KOiextY+IA= -github.com/yudai/gojsondiff v1.0.0/go.mod h1:AY32+k2cwILAkW1fbgxQ5mUmMiZFgLIV+FBNExI05xg= -github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82/go.mod h1:lgjkn3NuSvDfVJdfcVVdX+jpBxNmX4rDAzaS45IcYoM= -github.com/yudai/pp v2.0.1+incompatible/go.mod h1:PuxR/8QJ7cyCkFp/aUDS+JY727OFEZkTdatxwunjIkc= +github.com/ykadowak/zerologlint v0.1.3 h1:TLy1dTW3Nuc+YE3bYRPToG1Q9Ej78b5UUN6bjbGdxPE= +github.com/ykadowak/zerologlint v0.1.3/go.mod 
h1:KaUskqF3e/v59oPmdq1U1DnKcuHokl2/K1U4pmIELKg= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -gitlab.com/bosi/decorder v0.2.3 h1:gX4/RgK16ijY8V+BRQHAySfQAb354T7/xQpDB2n10P0= -gitlab.com/bosi/decorder v0.2.3/go.mod h1:9K1RB5+VPNQYtXtTDAzd2OEftsZb1oV0IrJrzChSdGE= -go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.etcd.io/bbolt v1.3.4/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +gitlab.com/bosi/decorder v0.4.1 h1:VdsdfxhstabyhZovHafFw+9eJ6eU0d2CkFNJcZz/NU4= +gitlab.com/bosi/decorder v0.4.1/go.mod h1:jecSqWUew6Yle1pCr2eLWTensJMmsxHsBwt+PVbkAqA= +go-simpler.org/assert v0.6.0 h1:QxSrXa4oRuo/1eHMXSBFHKvJIpWABayzKldqZyugG7E= +go-simpler.org/assert v0.6.0/go.mod h1:74Eqh5eI6vCK6Y5l3PI8ZYFXG4Sa+tkr70OIPJAUr28= +go-simpler.org/sloglint v0.1.2 h1:IjdhF8NPxyn0Ckn2+fuIof7ntSnVUAqBFcQRrnG9AiM= +go-simpler.org/sloglint v0.1.2/go.mod h1:2LL+QImPfTslD5muNPydAEYmpXIj6o/WYcqnJjLi4o4= go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU= go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4= -go.etcd.io/etcd v0.0.0-20200513171258-e048e166ab9c/go.mod h1:xCI7ZzBfRuGgBXyXO6yfWfDmlWd35khcWpUa4L0xI/k= -go.mozilla.org/mozlog v0.0.0-20170222151521-4bb13139d403/go.mod h1:jHoPAGnDrCy6kaI2tAze5Prf0Nr0w/oNkROt2lw3n3o= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= -go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= -go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.33.0 h1:z6rnla1Asjzn0FrhohzIbDi4bxbtc6EMmQ7f5ZPn+pA= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.33.0/go.mod h1:y/SlJpJQPd2UzfBCj0E9Flk9FDCtTyqUmaCB41qFrWI= -go.opentelemetry.io/otel v1.8.0 h1:zcvBFizPbpa1q7FehvFiHbQwGzmPILebO0tyqIR5Djg= -go.opentelemetry.io/otel v1.8.0/go.mod h1:2pkj+iMj0o03Y+cW6/m8Y4WkRdYN3AvCXCnzRMp9yvM= -go.opentelemetry.io/otel/trace v1.8.0 h1:cSy0DF9eGI5WIfNwZ1q2iUyGj00tGzP24dE1lOlHrfY= -go.opentelemetry.io/otel/trace v1.8.0/go.mod h1:0Bt3PXY8w+3pheS3hQUt+wow8b1ojPaTBoTCh2zIFI4= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 h1:jq9TW8u3so/bN+JPT166wjOI6/vQPF6Xe7nMNIltagk= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0/go.mod h1:p8pYQP+m5XfbZm9fxtSKAbM6oIllS7s2AfxrChvc7iw= +go.opentelemetry.io/otel v1.4.1/go.mod h1:StM6F/0fSwpd8dKWDCdRr7uRvEPYdW0hBSlbdTiUde4= +go.opentelemetry.io/otel v1.24.0 h1:0LAOdjNmQeSTzGBzduGe/rU4tZhMwL5rWgtp9Ku5Jfo= 
+go.opentelemetry.io/otel v1.24.0/go.mod h1:W7b9Ozg4nkF5tWI5zsXkaKKDjdVjpD4oAt9Qi/MArHo= +go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.4.1/go.mod h1:VpP4/RMn8bv8gNo9uK7/IMY4mtWLELsS+JIP0inH0h4= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.4.1 h1:WPpPsAAs8I2rA47v5u0558meKmmwm1Dj99ZbqCV8sZ8= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.4.1/go.mod h1:o5RW5o2pKpJLD5dNTCmjF1DorYwMeFJmb/rKr5sLaa8= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.24.0 h1:Xw8U6u2f8DK2XAkGRFV7BBLENgnTGX9i4rQRxJf+/vs= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.24.0/go.mod h1:6KW1Fm6R/s6Z3PGXwSJN2K4eT6wQB3vXX6CVnYX9NmM= +go.opentelemetry.io/otel/metric v1.24.0 h1:6EhoGWWK28x1fbpA4tYTOWBkPefTDQnb8WSGXlc88kI= +go.opentelemetry.io/otel/metric v1.24.0/go.mod h1:VYhLe1rFfxuTXLgj4CBiyz+9WYBA8pNGJgDcSFRKBco= +go.opentelemetry.io/otel/sdk v1.4.1/go.mod h1:NBwHDgDIBYjwK2WNu1OPgsIc2IJzmBXNnvIJxJc8BpE= +go.opentelemetry.io/otel/sdk v1.24.0 h1:YMPPDNymmQN3ZgczicBY3B6sf9n62Dlj9pWD3ucgoDw= +go.opentelemetry.io/otel/sdk v1.24.0/go.mod h1:KVrIYw6tEubO9E96HQpcmpTKDVn9gdv35HoYiQWGDFg= +go.opentelemetry.io/otel/sdk/metric v1.19.0 h1:EJoTO5qysMsYCa+w4UghwFV/ptQgqSL/8Ni+hx+8i1k= +go.opentelemetry.io/otel/sdk/metric v1.19.0/go.mod h1:XjG0jQyFJrv2PbMvwND7LwCEhsJzCzV5210euduKcKY= +go.opentelemetry.io/otel/trace v1.4.1/go.mod h1:iYEVbroFCNut9QkwEczV9vMRPHNKSSwYZjulEtsmhFc= +go.opentelemetry.io/otel/trace v1.24.0 h1:CsKnnL4dUAr/0llH9FKuc698G04IrpWV0MQA/Y1YELI= +go.opentelemetry.io/otel/trace v1.24.0/go.mod h1:HPc3Xr/cOApsBI154IU0OI0HJexz+aw5uPdbs3UCjNU= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= -go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.opentelemetry.io/proto/otlp v0.12.0 h1:CMJ/3Wp7iOWES+CYLfnBv+DVmPbB+kmy9PJ92XvlR6c= +go.opentelemetry.io/proto/otlp v0.12.0/go.mod h1:TsIjwGWIx5VFYv9KGVlOpxoBl5Dy+63SUguV7GGvlSQ= +go.tmz.dev/musttag v0.7.2 h1:1J6S9ipDbalBSODNT5jCep8dhZyMr4ttnjQagmGYR5s= +go.tmz.dev/musttag v0.7.2/go.mod h1:m6q5NiiSKMnQYokefa2xGoyoXnrswCbJ0AWYzf4Zs28= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= -go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= -go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= -go.uber.org/goleak v1.1.11 h1:wy28qYRKZgnJTxGxvye5/wgWr1EKjmUDGYox5mGlRlI= +go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= +go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= -go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= -go.uber.org/multierr v1.4.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= -go.uber.org/multierr v1.8.0 h1:dg6GjLku4EH+249NNmoIciG9N/jURbDG+pFlTkhzIC8= -go.uber.org/multierr v1.8.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak= -go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod 
h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= -go.uber.org/zap v1.23.0 h1:OjGQ5KQDEUawVHxNwQgPpiypGHOxo2mNZsOqTak4fFY= -go.uber.org/zap v1.23.0/go.mod h1:D+nX8jyLsMHMYrln8A0rJjFt/T/9/bGgIhAqxv5URuY= +go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= +go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20180501155221-613d6eafa307/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= @@ -1041,13 +1021,12 @@ golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200115085410-6d4e4cb37c7d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.1.0 h1:MDRAIl0xIo9Io2xV565hzXHw3zVseKrJKodhohM5CjU= golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= +golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA= +golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -1058,11 +1037,12 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20200331195152-e8c3332aa8e5/go.mod h1:4M0jN8W1tt0AVLNr8HDosyJCDCDuyL9N9+3m7wDWgKw= -golang.org/x/exp v0.0.0-20230213192124-5e25df0256eb h1:PaBZQdo+iSDyHT053FjUCgZQ/9uqVwPOcl7KSWhKn6w= -golang.org/x/exp v0.0.0-20230213192124-5e25df0256eb/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= -golang.org/x/exp/typeparams v0.0.0-20220613132600-b0d781184e0d h1:+W8Qf4iJtMGKkyAygcKohjxTk4JPsL9DpzApJ22m5Ic= 
-golang.org/x/exp/typeparams v0.0.0-20220613132600-b0d781184e0d/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= +golang.org/x/exp v0.0.0-20240325151524-a685a6edb6d8 h1:aAcj0Da7eBAtrTp03QXWvm88pSyOt+UgdZw2BFZ+lEw= +golang.org/x/exp v0.0.0-20240325151524-a685a6edb6d8/go.mod h1:CQ1k9gNrJ50XIzaKCRR2hssIjF07kZFEiieALBM/ARQ= +golang.org/x/exp/typeparams v0.0.0-20220428152302-39d4317da171/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= +golang.org/x/exp/typeparams v0.0.0-20230203172020-98cc5a0785f9/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= +golang.org/x/exp/typeparams v0.0.0-20230307190834-24139beb5833 h1:jWGQJV4niP+CCmFW9ekjA9Zx8vYORzOUH2/Nl5WPuLQ= +golang.org/x/exp/typeparams v0.0.0-20230307190834-24139beb5833/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -1076,7 +1056,6 @@ golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRu golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= @@ -1091,13 +1070,15 @@ golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.9.0 h1:KENHtAZL2y3NLMYZeHY9DW8HW8V+kQyJsY/V9JlKvCs= -golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI= +golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.16.0 h1:QX4fJ0Rr5cPQCF7O9lh9Se4pmwfwskqZfq5moyldzic= +golang.org/x/mod v0.16.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -1109,9 +1090,6 @@ golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -1119,7 +1097,6 @@ golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200421231249-e086a090c8fd/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= @@ -1131,19 +1108,21 @@ golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81R golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod 
h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ= -golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= +golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= +golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.22.0 h1:9sGLhx7iRIHEiX0oAJ3MRZMUCElJgy7Br1nO+AMN3Tc= +golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1155,12 +1134,10 @@ golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= -golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783 h1:nt+Q6cXKz4MosCSpnbMtqiQ8Oz0pxTef2B4Vca2lvfk= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190412183630-56d357773e84/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1168,16 +1145,16 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= +golang.org/x/sync v0.6.0/go.mod 
h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1188,12 +1165,9 @@ golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1201,7 +1175,6 @@ golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1209,7 +1182,6 @@ golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod 
h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1225,7 +1197,6 @@ golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1235,30 +1206,39 @@ golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211105183446-c75c47738b0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211116061358-0a5406a5449c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 
+golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220702020025-31831981b65f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ= +golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= +golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= +golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.6.0 h1:clScbb1cHjoCkyRbWwBEUZ5H/tIFu5TAXIqaZD0Gcjw= -golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= +golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= +golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8= +golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1267,54 +1247,43 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68= -golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= -golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.14.0 
h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.1.0 h1:xYY+Bajn2a7VBmTM5GikTmnK8ZuX8YgnQCqZpbBNtmA= -golang.org/x/time v0.1.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= +golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190307163923-6a08e3108db3/go.mod h1:25r3+/G6/xytQM8iWZKq3Hn0kr0rgFKPUNVEL/dr3z4= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190311215038-5c2858a9cfe5/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190321232350-e250d351ecad/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190322203728-c1a832b0ad89/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190910044552-dd2b5c81c578/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190916130336-e45ffcd953cc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools 
v0.0.0-20191010075000-0337d82405ff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191216052735-49a3e744a425/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200117220505-0cba7a3a9ee9/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= @@ -1327,35 +1296,22 @@ golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjs golang.org/x/tools v0.0.0-20200324003944-a576cf524670/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= golang.org/x/tools v0.0.0-20200329025819-fd4102a86c65/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= -golang.org/x/tools v0.0.0-20200414032229-332987a829c3/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200426102838-f3a5411a4c3b/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200622203043-20e05c1c8ffa/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200624225443-88f3c62a19ff/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200625211823-6506e20df31f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200626171337-aa94e735be7f/go.mod 
h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200630154851-b2d8b0336632/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200706234117-b22de6825cf7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200724022722-7017fd6b1305/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200812195022-5ae4c3c160a0/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200820010801-b793a1359eac/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200831203904-5a2aa26beb65/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= golang.org/x/tools v0.0.0-20201001104356-43ebab892c4c/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= -golang.org/x/tools v0.0.0-20201002184944-ecd9fd270d5d/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= golang.org/x/tools v0.0.0-20201023174141-c8cfbd0f21e6/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20201230224404-63754364767c/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= @@ -1364,12 +1320,16 @@ golang.org/x/tools v0.1.1-0.20210205202024-ef80cdb6ec6d/go.mod h1:9bzcO0MWcOuT0t golang.org/x/tools v0.1.1-0.20210302220138-2ac05c832e1a/go.mod h1:9bzcO0MWcOuT0tm1iBGzDVPshzfwoVvREIui8C+MHqU= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.9-0.20211228192929-ee1ca4ffc4da/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= golang.org/x/tools v0.1.11/go.mod h1:SgwaegtQh8clINPpECJMqnxLv9I09HLqnW3RMqW0CA4= -golang.org/x/tools v0.7.0 h1:W4OVu8VVOaIO0yzWMNdepAulS7YfoS3Zabrm8DOXXU4= -golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA= +golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k= +golang.org/x/tools v0.5.0/go.mod 
h1:N+Kgy78s5I24c24dU8OfWNEotWjutIs8SnJvn5IDq+k= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= +golang.org/x/tools v0.19.0 h1:tfGCXNR1OsFG+sVdLAitlpjAvD/I6dHDKnYrpEZUHkw= +golang.org/x/tools v0.19.0/go.mod h1:qoJWxmGSIBmAeriMx19ogtrEPrGtDbPK634QFIcLAhc= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1378,7 +1338,6 @@ google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEt google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.10.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= @@ -1398,15 +1357,11 @@ google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9Ywl google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= -google.golang.org/appengine v1.6.2/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/genproto v0.0.0-20170818010345-ee236bd376b0/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20181107211654-5fc9ac540362/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= @@ -1414,7 +1369,6 @@ google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRn google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= -google.golang.org/genproto v0.0.0-20190927181202-20e1ac93f88c/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= 
google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= @@ -1435,8 +1389,6 @@ google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfG google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= -google.golang.org/genproto v0.0.0-20200626011028-ee7919e894b5/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200707001353-8e8330bf89df/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= @@ -1447,21 +1399,21 @@ google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20221227171554-f9683d7f8bef h1:uQ2vjV/sHTsWSqdKeLqmwitzgvjMl7o4IdtHwUDXSJY= -google.golang.org/genproto v0.0.0-20221227171554-f9683d7f8bef/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= -google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de h1:F6qOa9AZTYJXOUEr4jDysRDLrm4PHePlge4v4TGAlxY= +google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:VUhTRKeHn9wwcdrk73nvdC9gF178Tzhmt/qyaFcPLSo= +google.golang.org/genproto/googleapis/api v0.0.0-20240325203815-454cdb8f5daa h1:Jt1XW5PaLXF1/ePZrznsh/aAUvI7Adfc3LY1dAKlzRs= +google.golang.org/genproto/googleapis/api v0.0.0-20240325203815-454cdb8f5daa/go.mod h1:K4kfzHtI0kqWA79gecJarFtDn/Mls+GxQcg3Zox91Ac= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240325203815-454cdb8f5daa h1:RBgMaUMP+6soRkik4VoN8ojR2nex2TqZwjSSogic+eo= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240325203815-454cdb8f5daa/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.26.0/go.mod 
h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= -google.golang.org/grpc v1.29.0/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= @@ -1471,10 +1423,11 @@ google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= -google.golang.org/grpc v1.52.0 h1:kd48UiU7EHsV4rnLyOJRuP/Il/UHE7gdDAQ+SZI7nZk= -google.golang.org/grpc v1.52.0/go.mod h1:pu6fVzoFb+NBYNAvQL08ic+lvB2IojljRYuun5vorUY= +google.golang.org/grpc v1.43.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= +google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= +google.golang.org/grpc v1.63.0 h1:WjKe+dnvABXyPJMD7KDNLxtoGk5tgk+YFWN6cBWjZE8= +google.golang.org/grpc v1.63.0/go.mod h1:WAX/8DgncnokcFUldAxq7GeB5DXHDbMF+lLvDomNkRA= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -1487,47 +1440,36 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= -google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= +google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b 
h1:QRR6H1YWRnHb4Y/HeNFCTJLFVxaq6wH4YuVdsUOr75U= -gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= -gopkg.in/cheggaaa/pb.v1 v1.0.28/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= -gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.6/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= -gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0= +gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= @@ -1535,19 +1477,18 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod 
h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.3.3 h1:oDx7VAwstgpYpb3wv0oxiZlxY+foCpRAwY7Vk6XpAgA= -honnef.co/go/tools v0.3.3/go.mod h1:jzwdWgg7Jdq75wlfblQxO4neNaFFSvgc1tD5Wv8U0Yw= -mvdan.cc/gofumpt v0.3.1 h1:avhhrOmv0IuvQVK7fvwV91oFSGAk5/6Po8GXTzICeu8= -mvdan.cc/gofumpt v0.3.1/go.mod h1:w3ymliuxvzVx8DAutBnVyDqYb1Niy/yCJt/lk821YCE= +honnef.co/go/tools v0.4.6 h1:oFEHCKeID7to/3autwsWfnuv69j3NsfcXbvJKuIcep8= +honnef.co/go/tools v0.4.6/go.mod h1:+rnGS1THNh8zMwnd2oVOTL9QF6vmfyG6ZXBULae2uc0= +mvdan.cc/gofumpt v0.5.0 h1:0EQ+Z56k8tXjj/6TQD25BFNKQXpCvT0rnansIc7Ug5E= +mvdan.cc/gofumpt v0.5.0/go.mod h1:HBeVDtMKRZpXyxFciAirzdKklDlGu8aAy1wEbH5Y9js= mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed h1:WX1yoOaKQfddO/mLzdV4wptyWgoH/6hwLs7QHTixo0I= mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed/go.mod h1:Xkxe497xwlCKkIaQYRfC7CSLworTXY9RMqwhhCm+8Nc= mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b h1:DxJ5nJdkhDlLok9K6qO+5290kphDJbHOQO1DFFFTeBo= mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b/go.mod h1:2odslEg/xrtNQqCYg2/jCoyKnw3vv5biOc3JnIcYfL4= -mvdan.cc/unparam v0.0.0-20220706161116-678bad134442 h1:seuXWbRB1qPrS3NQnHmFKLJLtskWyueeIzmLXghMGgk= -mvdan.cc/unparam v0.0.0-20220706161116-678bad134442/go.mod h1:F/Cxw/6mVrNKqrR2YjFf5CaW0Bw4RL8RfbEf4GRggJk= +mvdan.cc/unparam v0.0.0-20221223090309-7455f1af531d h1:3rvTIIM22r9pvXk+q3swxUQAQOxksVMGK7sml4nG57w= +mvdan.cc/unparam v0.0.0-20221223090309-7455f1af531d/go.mod h1:IeHQjmn6TOD+e4Z3RFiZMMsLVL+A96Nvptar8Fj71is= pgregory.net/rapid v0.4.8 h1:d+5SGZWUbJPbl3ss6tmPFqnNeQR6VDOFly+eTjwPiEw= pgregory.net/rapid v0.4.8/go.mod h1:Z5PbWqjvWR1I3UGjvboUuan4fe4ZYEYNLNQLExzCoUs= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= diff --git a/internal/blocksync/reactor_test.go b/internal/blocksync/reactor_test.go index a882b2e2a4..21aa67df43 100644 --- a/internal/blocksync/reactor_test.go +++ b/internal/blocksync/reactor_test.go @@ -63,7 +63,7 @@ func setup( rts := &reactorTestSuite{ config: conf, logger: log.NewTestingLogger(t).With("module", "block_sync", "testCase", t.Name()), - network: p2ptest.MakeNetwork(ctx, t, p2ptest.NetworkOptions{Config: conf, NumNodes: numNodes}), + network: p2ptest.MakeNetwork(ctx, t, p2ptest.NetworkOptions{Config: conf, NumNodes: numNodes}, log.NewNopLogger()), nodes: make([]types.NodeID, 0, numNodes), reactors: make(map[types.NodeID]*Reactor, numNodes), app: make(map[types.NodeID]abciclient.Client, numNodes), @@ -102,9 +102,9 @@ func makeReactor( t *testing.T, conf *config.Config, proTxHash types.ProTxHash, - nodeID types.NodeID, + _nodeID types.NodeID, genDoc *types.GenesisDoc, - privVal types.PrivValidator, + _privVal types.PrivValidator, channelCreator p2p.ChannelCreator, peerEvents p2p.PeerEventSubscriber) *Reactor { @@ -189,7 +189,7 @@ func (rts *reactorTestSuite) addNode( peerEvents := func(ctx context.Context, _ string) *p2p.PeerUpdates { return rts.peerUpdates[nodeID] } reactor := makeReactor(ctx, t, rts.config, proTxHash, nodeID, genDoc, privVal, chCreator, peerEvents) - commit := types.NewCommit(0, 0, types.BlockID{}, nil) + commit := types.NewCommit(0, 0, types.BlockID{}, nil, nil) state, err := reactor.stateStore.Load() require.NoError(t, err) @@ 
-237,10 +237,11 @@ func makeNextBlock(ctx context.Context, vote.Height, vote.Round, blockID, + vote.VoteExtensions, &types.CommitSigns{ QuorumSigns: types.QuorumSigns{ - BlockSign: vote.BlockSignature, - ExtensionSigns: types.MakeThresholdExtensionSigns(vote.VoteExtensions), + BlockSign: vote.BlockSignature, + VoteExtensionSignatures: vote.VoteExtensions.GetSignatures(), }, QuorumHash: state.Validators.QuorumHash, }, diff --git a/internal/blocksync/synchronizer.go b/internal/blocksync/synchronizer.go index f035fd56b4..d4812731cf 100644 --- a/internal/blocksync/synchronizer.go +++ b/internal/blocksync/synchronizer.go @@ -38,7 +38,7 @@ const ( // Minimum recv rate to ensure we're receiving blocks from a peer fast // enough. If a peer is not sending us data at at least that rate, we - // consider them to have timedout and we disconnect. + // consider them to have timed out and we disconnect. // // Assuming a DSL connection (not a good choice) 128 Kbps (upload) ~ 15 KB/s, // sending data across atlantic ~ 7.5 KB/s. @@ -56,7 +56,6 @@ const ( are not at peer limits, we can probably switch to consensus reactor */ -// Synchronizer keeps track of the block sync peers, block requests and block responses. type ( PeerAdder interface { AddPeer(peer PeerData) @@ -64,6 +63,8 @@ type ( PeerRemover interface { RemovePeer(peerID types.NodeID) } + + // Synchronizer keeps track of the block sync peers, block requests and block responses. Synchronizer struct { service.BaseService logger log.Logger diff --git a/internal/consensus/block_executor.go b/internal/consensus/block_executor.go index c2beffcb46..4cd6108f74 100644 --- a/internal/consensus/block_executor.go +++ b/internal/consensus/block_executor.go @@ -40,9 +40,8 @@ func (c *blockExecutor) create(ctx context.Context, rs *cstypes.RoundState, roun case rs.Height == c.committedState.InitialHeight: // We're creating a proposal for the first block. // The commit is empty, but not nil. - commit = types.NewCommit(0, 0, types.BlockID{}, nil) + commit = types.NewCommit(0, 0, types.BlockID{}, nil, nil) case rs.LastCommit != nil: - // Make the commit from LastPrecommits commit = rs.LastCommit default: // This shouldn't happen. 
@@ -65,7 +64,7 @@ func (c *blockExecutor) ensureProcess(ctx context.Context, rs *cstypes.RoundStat block := rs.ProposalBlock crs := rs.CurrentRoundState if crs.Params.Source != sm.ProcessProposalSource || !crs.MatchesBlock(block.Header, round) { - c.logger.Debug("CurrentRoundState is outdated, executing ProcessProposal", "crs", crs) + c.logger.Trace("CurrentRoundState is outdated, executing ProcessProposal", "crs", crs) uncommittedState, err := c.blockExec.ProcessProposal(ctx, block, round, c.committedState, true) if err != nil { return fmt.Errorf("ProcessProposal abci method: %w", err) diff --git a/internal/consensus/block_executor_test.go b/internal/consensus/block_executor_test.go index 842d0495cf..9fa00938ee 100644 --- a/internal/consensus/block_executor_test.go +++ b/internal/consensus/block_executor_test.go @@ -20,7 +20,7 @@ import ( "github.com/dashpay/tenderdash/types/mocks" ) -type BockExecutorTestSuite struct { +type BlockExecutorTestSuite struct { suite.Suite blockExec *blockExecutor @@ -28,11 +28,11 @@ type BockExecutorTestSuite struct { mockBlockExec *smmocks.Executor } -func TestBockExecutor(t *testing.T) { - suite.Run(t, new(BockExecutorTestSuite)) +func TestBlockExecutor(t *testing.T) { + suite.Run(t, new(BlockExecutorTestSuite)) } -func (suite *BockExecutorTestSuite) SetupTest() { +func (suite *BlockExecutorTestSuite) SetupTest() { logger := log.NewTestingLogger(suite.T()) suite.mockPrivVal = mocks.NewPrivValidator(suite.T()) suite.mockBlockExec = smmocks.NewExecutor(suite.T()) @@ -47,13 +47,13 @@ func (suite *BockExecutorTestSuite) SetupTest() { } } -func (suite *BockExecutorTestSuite) TestCreate() { +func (suite *BlockExecutorTestSuite) TestCreate() { ctx := context.Background() commitH99R0 := &types.Commit{ Height: 99, Round: 0, } - emptyCommit := types.NewCommit(0, 0, types.BlockID{}, nil) + emptyCommit := types.NewCommit(0, 0, types.BlockID{}, nil, nil) testCases := []struct { round int32 initialHeight int64 @@ -118,7 +118,7 @@ func (suite *BockExecutorTestSuite) TestCreate() { suite.mockBlockExec. On( "CreateProposalBlock", - mock.AnythingOfType("*context.emptyCtx"), + mock.Anything, tc.height, tc.round, stateData.state, @@ -137,7 +137,7 @@ func (suite *BockExecutorTestSuite) TestCreate() { } } -func (suite *BockExecutorTestSuite) TestProcess() { +func (suite *BlockExecutorTestSuite) TestProcess() { ctx := context.Background() const round = int32(0) wantDefaultCRS := sm.CurrentRoundState{ @@ -224,7 +224,7 @@ func (suite *BockExecutorTestSuite) TestProcess() { suite.mockBlockExec. 
On( "ProcessProposal", - mock.AnythingOfType("*context.emptyCtx"), + mock.Anything, stateData.ProposalBlock, round, stateData.state, diff --git a/internal/consensus/common_test.go b/internal/consensus/common_test.go index 807dbea5d0..a3de1f11cd 100644 --- a/internal/consensus/common_test.go +++ b/internal/consensus/common_test.go @@ -157,10 +157,7 @@ func signVote( blockID types.BlockID, quorumType btcjson.LLMQType, quorumHash crypto.QuorumHash) *types.Vote { - exts := make(types.VoteExtensions) - if voteType == tmproto.PrecommitType && !blockID.IsNil() { - exts.Add(tmproto.VoteExtensionType_DEFAULT, []byte("extension")) - } + exts := make(types.VoteExtensions, 0) v, err := vs.signVote(ctx, voteType, chainID, blockID, quorumType, quorumHash, exts) require.NoError(t, err, "failed to sign vote") @@ -175,7 +172,7 @@ func signVotes( voteType tmproto.SignedMsgType, chainID string, blockID types.BlockID, - appHash []byte, + _appHash []byte, quorumType btcjson.LLMQType, quorumHash crypto.QuorumHash, vss ...*validatorStub, @@ -249,7 +246,7 @@ func startConsensusState(ctx context.Context, cs *State, maxSteps int) { steps++ } }() - go cs.receiveRoutine(ctx, func(state *State) bool { + go cs.receiveRoutine(ctx, func(_state *State) bool { return maxSteps > 0 && steps >= maxSteps }) } @@ -362,7 +359,7 @@ func validatePrevote( } } -func validateLastCommit(ctx context.Context, t *testing.T, cs *State, privVal *validatorStub, blockHash []byte) { +func validateLastCommit(_ctx context.Context, t *testing.T, cs *State, _privVal *validatorStub, blockHash []byte) { t.Helper() stateData := cs.GetStateData() @@ -477,13 +474,14 @@ func newState( state sm.State, pv types.PrivValidator, app abci.Application, + opts ...StateOption, ) *State { t.Helper() cfg, err := config.ResetTestRoot(t.TempDir(), "consensus_state_test") require.NoError(t, err) - return newStateWithConfig(ctx, t, logger, cfg, state, pv, app) + return newStateWithConfig(ctx, t, logger, cfg, state, pv, app, opts...) 
} func newStateWithConfig( @@ -852,7 +850,7 @@ func consensusLogger(t *testing.T) log.Logger { func makeConsensusState( ctx context.Context, t *testing.T, - cfg *config.Config, + _cfg *config.Config, nValidators int, testName string, tickerFunc func() TimeoutTicker, @@ -878,7 +876,7 @@ func makeConsensusState( walDir := filepath.Dir(thisConfig.Consensus.WalFile()) ensureDir(t, walDir, 0700) - app, err := kvstore.NewMemoryApp() + app, err := kvstore.NewMemoryApp(kvstore.WithLogger(logger)) require.NoError(t, err) t.Cleanup(func() { _ = app.Close() }) @@ -922,9 +920,7 @@ func genFilePV(dir string) (types.PrivValidator, error) { return nil, err } privVal := privval.GenFilePV(tempKeyFile.Name(), tempStateFile.Name()) - if err != nil { - return nil, err - } + return privVal, nil } @@ -1031,7 +1027,7 @@ type genesisStateArgs struct { Time time.Time } -func makeGenesisState(ctx context.Context, t *testing.T, cfg *config.Config, args genesisStateArgs) (sm.State, []types.PrivValidator) { +func makeGenesisState(_ctx context.Context, t *testing.T, _cfg *config.Config, args genesisStateArgs) (sm.State, []types.PrivValidator) { t.Helper() if args.Power == 0 { args.Power = 1 @@ -1109,7 +1105,7 @@ func signDataIsEqual(v1 *types.Vote, v2 *tmproto.Vote) bool { if v1 == nil || v2 == nil { return false } - if v1.VoteExtensions.IsSameWithProto(v2.VoteExtensionsToMap()) { + if v1.VoteExtensions.IsSameWithProto(v2.VoteExtensions) { return false } return v1.Type == v2.Type && diff --git a/internal/consensus/core_chainlock_test.go b/internal/consensus/core_chainlock_test.go index 227d40bf56..b64d9edc0b 100644 --- a/internal/consensus/core_chainlock_test.go +++ b/internal/consensus/core_chainlock_test.go @@ -97,7 +97,7 @@ func TestReactorInvalidBlockChainLock(t *testing.T) { } func newCounterWithCoreChainLocks(initCoreChainHeight uint32, step int32) func(logger log.Logger, _ string) abci.Application { - return func(logger log.Logger, _ string) abci.Application { + return func(_logger log.Logger, _ string) abci.Application { counterApp := counter.NewApplication(true) counterApp.InitCoreChainLock(initCoreChainHeight, step) return counterApp diff --git a/internal/consensus/gossip_handlers.go b/internal/consensus/gossip_handlers.go index d9bddb8c8c..deaf9060ca 100644 --- a/internal/consensus/gossip_handlers.go +++ b/internal/consensus/gossip_handlers.go @@ -117,9 +117,14 @@ func shouldProposalBeGossiped(rs cstypes.RoundState, prs *cstypes.PeerRoundState } func shouldBlockPartsBeGossiped(rs cstypes.RoundState, prs *cstypes.PeerRoundState, isValidator bool) bool { + if rs.Height != prs.Height || rs.Round < prs.Round { + return false + } + if isValidator && rs.ProposalBlockParts.HasHeader(prs.ProposalBlockPartSetHeader) { return true } + return prs.HasCommit && rs.ProposalBlockParts != nil } diff --git a/internal/consensus/gossip_peer_worker_test.go b/internal/consensus/gossip_peer_worker_test.go index 23edd6b80c..dc9a16031a 100644 --- a/internal/consensus/gossip_peer_worker_test.go +++ b/internal/consensus/gossip_peer_worker_test.go @@ -26,10 +26,10 @@ func TestPeerGossipWorker(t *testing.T) { clock: fakeClock, logger: logger, handlers: []gossipHandler{ - newGossipHandler(func(ctx context.Context, appState StateData) { + newGossipHandler(func(_ctx context.Context, _appState StateData) { handlerCalledCh <- struct{}{} }, 1*time.Second), - newGossipHandler(func(ctx context.Context, appState StateData) { + newGossipHandler(func(_ctx context.Context, _appState StateData) { handlerCalledCh <- struct{}{} }, 1*time.Second), }, 
diff --git a/internal/consensus/gossiper.go b/internal/consensus/gossiper.go index 4a3dc5991c..c1ec94adb0 100644 --- a/internal/consensus/gossiper.go +++ b/internal/consensus/gossiper.go @@ -311,10 +311,6 @@ func (g *msgGossiper) ensurePeerPartSetHeader(blockPartSetHeader types.PartSetHe // there is a vote to send and (nil,false) otherwise. func (g *msgGossiper) pickVoteForGossip(rs cstypes.RoundState, prs *cstypes.PeerRoundState) (*types.Vote, bool) { var voteSets []*types.VoteSet - // if there are lastPrecommits to send - if prs.Step == cstypes.RoundStepNewHeight { - voteSets = append(voteSets, rs.LastPrecommits) - } if prs.Round != -1 && prs.Round <= rs.Round { // if there are POL prevotes to send if prs.Step <= cstypes.RoundStepPropose && prs.ProposalPOLRound != -1 { diff --git a/internal/consensus/gossiper_test.go b/internal/consensus/gossiper_test.go index 09d0cd9347..fdfb30e440 100644 --- a/internal/consensus/gossiper_test.go +++ b/internal/consensus/gossiper_test.go @@ -470,10 +470,6 @@ func (suite *GossiperSuiteTest) TestGossipGossipVote() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - precommitH99 := suite.makeSignedVote(99, 0, tmproto.PrecommitType) - lastPercommits := types.NewVoteSet(factory.DefaultTestChainID, 99, 0, tmproto.PrecommitType, suite.valSet) - _, _ = lastPercommits.AddVote(precommitH99) - prevoteH100R0 := suite.makeSignedVote(100, 0, tmproto.PrevoteType) prevoteH100R1 := suite.makeSignedVote(100, 1, tmproto.PrevoteType) prevoteH100R2 := suite.makeSignedVote(100, 2, tmproto.PrevoteType) @@ -489,16 +485,6 @@ func (suite *GossiperSuiteTest) TestGossipGossipVote() { prs cstypes.PeerRoundState wantMsg *tmproto.Vote }{ - { - rs: cstypes.RoundState{LastPrecommits: lastPercommits}, - prs: cstypes.PeerRoundState{ - Height: 100, - Round: -1, - ProposalPOLRound: -1, - Step: cstypes.RoundStepNewHeight, - }, - wantMsg: precommitH99.ToProto(), - }, { rs: cstypes.RoundState{Votes: votesH100}, prs: cstypes.PeerRoundState{ @@ -603,6 +589,6 @@ func (suite *GossiperSuiteTest) signVote(vote *types.Vote) { err := privVal.SignVote(ctx, factory.DefaultTestChainID, suite.valSet.QuorumType, suite.valSet.QuorumHash, protoVote, nil) suite.Require().NoError(err) vote.BlockSignature = protoVote.BlockSignature - err = vote.VoteExtensions.CopySignsFromProto(protoVote.VoteExtensionsToMap()) + err = vote.VoteExtensions.CopySignsFromProto(protoVote.VoteExtensions) suite.Require().NoError(err) } diff --git a/internal/consensus/mempool_test.go b/internal/consensus/mempool_test.go index a3f75e729e..1e71ec3bff 100644 --- a/internal/consensus/mempool_test.go +++ b/internal/consensus/mempool_test.go @@ -285,7 +285,7 @@ func NewCounterApplication() *CounterApplication { return &CounterApplication{} } -func (app *CounterApplication) Info(_ context.Context, req *abci.RequestInfo) (*abci.ResponseInfo, error) { +func (app *CounterApplication) Info(_ context.Context, _req *abci.RequestInfo) (*abci.ResponseInfo, error) { app.mu.Lock() defer app.mu.Unlock() @@ -310,7 +310,7 @@ func (app *CounterApplication) txResults(txs [][]byte) []*abci.ExecTxResult { return respTxs } -func (app *CounterApplication) FinalizeBlock(_ context.Context, req *abci.RequestFinalizeBlock) (*abci.ResponseFinalizeBlock, error) { +func (app *CounterApplication) FinalizeBlock(_ context.Context, _req *abci.RequestFinalizeBlock) (*abci.ResponseFinalizeBlock, error) { return &abci.ResponseFinalizeBlock{}, nil } @@ -349,9 +349,10 @@ func (app *CounterApplication) PrepareProposal(_ context.Context, req *abci.Requ }) 
} return &abci.ResponsePrepareProposal{ - AppHash: make([]byte, crypto.DefaultAppHashSize), - TxRecords: trs, - TxResults: app.txResults(req.Txs), + AppHash: make([]byte, crypto.DefaultAppHashSize), + TxRecords: trs, + TxResults: app.txResults(req.Txs), + AppVersion: 1, }, nil } diff --git a/internal/consensus/msg_handlers.go b/internal/consensus/msg_handlers.go index 8477e812b1..073417917c 100644 --- a/internal/consensus/msg_handlers.go +++ b/internal/consensus/msg_handlers.go @@ -49,17 +49,23 @@ func (c *msgInfoDispatcher) dispatch(ctx context.Context, stateData *StateData, return handler(ctx, stateData, envelope) } +// msgInfoDispatcher creates a new dispatcher for messages that are received from peers. +// It is used to dispatch messages to the appropriate handler. func newMsgInfoDispatcher( ctrl *Controller, proposaler cstypes.Proposaler, wal WALWriteFlusher, logger log.Logger, + middleware ...msgMiddlewareFunc, ) *msgInfoDispatcher { + mws := []msgMiddlewareFunc{ msgInfoWithCtxMiddleware(), loggingMiddleware(logger), walMiddleware(wal, logger), } + mws = append(mws, middleware...) + proposalHandler := withMiddleware(proposalMessageHandler(proposaler), mws...) blockPartHandler := withMiddleware(blockPartMessageHandler(ctrl), mws...) voteHandler := withMiddleware(voteMessageHandler(ctrl), mws...) @@ -73,7 +79,7 @@ func newMsgInfoDispatcher( } func proposalMessageHandler(propSetter cstypes.ProposalSetter) msgHandlerFunc { - return func(ctx context.Context, stateData *StateData, envelope msgEnvelope) error { + return func(_ctx context.Context, stateData *StateData, envelope msgEnvelope) error { msg := envelope.Msg.(*ProposalMessage) return propSetter.Set(msg.Proposal, envelope.ReceiveTime, &stateData.RoundState) } @@ -185,13 +191,6 @@ func msgInfoWithCtxMiddleware() msgMiddlewareFunc { } } -func logKeyValsWithError(keyVals []any, err error) []any { - if err == nil { - return keyVals - } - return append(keyVals, "error", err) -} - func makeLogArgsFromMessage(msg Message) []any { switch m := msg.(type) { case *ProposalMessage: diff --git a/internal/consensus/msgs_test.go b/internal/consensus/msgs_test.go index 73b5a1dede..419102e828 100644 --- a/internal/consensus/msgs_test.go +++ b/internal/consensus/msgs_test.go @@ -384,9 +384,10 @@ func TestConsMsgsVectors(t *testing.T) { Round: 0, Type: tmproto.PrecommitType, BlockID: bi, - VoteExtensions: types.VoteExtensions{ - tmproto.VoteExtensionType_DEFAULT: []types.VoteExtension{{Extension: []byte("extension")}}, - }, + VoteExtensions: types.VoteExtensionsFromProto(&tmproto.VoteExtension{ + Type: tmproto.VoteExtensionType_DEFAULT, + Extension: []byte("extension"), + }), } vpb := v.ToProto() @@ -501,7 +502,7 @@ func TestVoteSetBitsMessageValidateBasic(t *testing.T) { malleateFn func(*VoteSetBitsMessage) expErr string }{ - {func(msg *VoteSetBitsMessage) {}, ""}, + {func(_msg *VoteSetBitsMessage) {}, ""}, {func(msg *VoteSetBitsMessage) { msg.Height = -1 }, "negative Height"}, {func(msg *VoteSetBitsMessage) { msg.Type = 0x03 }, "invalid Type"}, {func(msg *VoteSetBitsMessage) { @@ -615,7 +616,7 @@ func TestNewValidBlockMessageValidateBasic(t *testing.T) { malleateFn func(*NewValidBlockMessage) expErr string }{ - {func(msg *NewValidBlockMessage) {}, ""}, + {func(_msg *NewValidBlockMessage) {}, ""}, {func(msg *NewValidBlockMessage) { msg.Height = -1 }, "negative Height"}, {func(msg *NewValidBlockMessage) { msg.Round = -1 }, "negative Round"}, { @@ -661,7 +662,7 @@ func TestProposalPOLMessageValidateBasic(t *testing.T) { malleateFn 
func(*ProposalPOLMessage) expErr string }{ - {func(msg *ProposalPOLMessage) {}, ""}, + {func(_msg *ProposalPOLMessage) {}, ""}, {func(msg *ProposalPOLMessage) { msg.Height = -1 }, "negative Height"}, {func(msg *ProposalPOLMessage) { msg.ProposalPOLRound = -1 }, "negative ProposalPOLRound"}, {func(msg *ProposalPOLMessage) { msg.ProposalPOL = bits.NewBitArray(0) }, "empty ProposalPOL bit array"}, diff --git a/internal/consensus/pbts_test.go b/internal/consensus/pbts_test.go index 203fc32f46..2811f296e4 100644 --- a/internal/consensus/pbts_test.go +++ b/internal/consensus/pbts_test.go @@ -106,7 +106,22 @@ func newPBTSTestHarness(ctx context.Context, t *testing.T, tc pbtsTestConfigurat kvApp, err := kvstore.NewMemoryApp(kvstore.WithLogger(logger)) require.NoError(t, err) - cs := newState(ctx, t, logger.With("module", "consensus"), state, privVals[0], kvApp) + msgMw := func(cs *State) { + cs.msgMiddlewares = append(cs.msgMiddlewares, + func(hd msgHandlerFunc) msgHandlerFunc { + return func(ctx context.Context, stateData *StateData, msg msgEnvelope) error { + if proposal, ok := msg.Msg.(*ProposalMessage); ok { + if cfg, ok := tc.heights[stateData.Height]; ok { + msg.ReceiveTime = proposal.Proposal.Timestamp.Add(cfg.deliveryDelay) + } + + } + return hd(ctx, stateData, msg) + } + }) + } + + cs := newState(ctx, t, logger.With("module", "consensus"), state, privVals[0], kvApp, msgMw) vss := make([]*validatorStub, validators) for i := 0; i < validators; i++ { vss[i] = newValidatorStub(privVals[i], int32(i), 0) @@ -171,7 +186,6 @@ func (p *pbtsTestHarness) nextHeight( t *testing.T, currentHeightConfig pbtsTestHeightConfiguration, ) heightResult { - deliveryDelay := currentHeightConfig.deliveryDelay proposalDelay := currentHeightConfig.proposalDelay bid := types.BlockID{} @@ -186,7 +200,7 @@ func (p *pbtsTestHarness) nextHeight( time.Sleep(proposalDelay) prop, _, ps := p.newProposal(ctx, t) - time.Sleep(deliveryDelay) + // time.Sleep(deliveryDelay) -- handled by the middleware if err := p.observedState.SetProposalAndBlock(ctx, &prop, ps, "peerID"); err != nil { t.Fatal(err) } @@ -239,7 +253,7 @@ func timestampedCollector(ctx context.Context, t *testing.T, eb *eventbus.EventB return eventCh } -func collectHeightResults(ctx context.Context, t *testing.T, eventCh <-chan timestampedEvent, height int64, proTxHash crypto.ProTxHash) heightResult { +func collectHeightResults(_ctx context.Context, t *testing.T, eventCh <-chan timestampedEvent, height int64, proTxHash crypto.ProTxHash) heightResult { t.Helper() var res heightResult for event := range eventCh { @@ -475,20 +489,20 @@ func TestTooFarInThePastProposal(t *testing.T) { cfg := pbtsTestConfiguration{ synchronyParams: types.SynchronyParams{ Precision: 1 * time.Millisecond, - MessageDelay: 10 * time.Millisecond, + MessageDelay: 30 * time.Millisecond, }, timeoutPropose: 50 * time.Millisecond, heights: map[int64]pbtsTestHeightConfiguration{ 2: { proposalDelay: 15 * time.Millisecond, - deliveryDelay: 13 * time.Millisecond, + deliveryDelay: 33 * time.Millisecond, }, }, maxHeight: 2, } pbtsTest := newPBTSTestHarness(ctx, t, cfg) - pbtsTest.logger.AssertMatch(regexp.MustCompile(`"proposal is not timely","height":2`)) + pbtsTest.logger.AssertContains("proposal is not timely: received too late: height 2") results := pbtsTest.run(ctx, t) require.Nil(t, results[2].prevote.BlockID.Hash) @@ -503,23 +517,20 @@ func TestTooFarInTheFutureProposal(t *testing.T) { cfg := pbtsTestConfiguration{ synchronyParams: types.SynchronyParams{ Precision: 1 * time.Millisecond, - 
MessageDelay: 10 * time.Millisecond, + MessageDelay: 30 * time.Millisecond, }, timeoutPropose: 500 * time.Millisecond, heights: map[int64]pbtsTestHeightConfiguration{ 2: { proposalDelay: 100 * time.Millisecond, - deliveryDelay: 10 * time.Millisecond, - }, - 4: { - proposalDelay: 50 * time.Millisecond, + deliveryDelay: -40 * time.Millisecond, // Recv time will be 40 ms before proposal time }, }, maxHeight: 2, } pbtsTest := newPBTSTestHarness(ctx, t, cfg) - pbtsTest.logger.AssertMatch(regexp.MustCompile(`"proposal is not timely","height":2`)) + pbtsTest.logger.AssertMatch(regexp.MustCompile("proposal is not timely: received too early: height 2,")) results := pbtsTest.run(ctx, t) require.Nil(t, results[2].prevote.BlockID.Hash) diff --git a/internal/consensus/peer_state.go b/internal/consensus/peer_state.go index 607fc3be86..8ab8f90be6 100644 --- a/internal/consensus/peer_state.go +++ b/internal/consensus/peer_state.go @@ -59,7 +59,7 @@ type PeerState struct { func NewPeerState(logger log.Logger, peerID types.NodeID) *PeerState { return &PeerState{ peerID: peerID, - logger: logger, + logger: logger.With("peer", peerID), PRS: cstypes.PeerRoundState{ Round: -1, ProposalPOLRound: -1, @@ -268,7 +268,7 @@ func (ps *PeerState) getVoteBitArray(height int64, round int32, votesType tmprot return nil case tmproto.PrecommitType: - return ps.PRS.LastPrecommits + return ps.PRS.Precommits } } @@ -335,10 +335,6 @@ func (ps *PeerState) ensureVoteBitArrays(height int64, numValidators int) { if ps.PRS.ProposalPOL == nil { ps.PRS.ProposalPOL = bits.NewBitArray(numValidators) } - } else if ps.PRS.Height == height+1 { - if ps.PRS.LastPrecommits == nil { - ps.PRS.LastPrecommits = bits.NewBitArray(numValidators) - } } } @@ -487,6 +483,8 @@ func (ps *PeerState) ApplyNewRoundStepMessage(msg *NewRoundStepMessage) { ps.mtx.Lock() defer ps.mtx.Unlock() + ps.logger.Trace("apply new round step message", "peer", ps.peerID, "msg", msg.String()) + // ignore duplicates or decreases if CompareHRS(msg.Height, msg.Round, msg.Step, ps.PRS.Height, ps.PRS.Round, ps.PRS.Step) <= 0 { return @@ -530,10 +528,8 @@ func (ps *PeerState) ApplyNewRoundStepMessage(msg *NewRoundStepMessage) { // Shift Precommits to LastPrecommits. if psHeight+1 == msg.Height && psRound == msg.LastCommitRound { ps.PRS.LastCommitRound = msg.LastCommitRound - ps.PRS.LastPrecommits = ps.PRS.Precommits.Copy() } else { ps.PRS.LastCommitRound = msg.LastCommitRound - ps.PRS.LastPrecommits = nil } // we'll update the BitArray capacity later diff --git a/internal/consensus/reactor.go b/internal/consensus/reactor.go index bd789bfeec..222f92e978 100644 --- a/internal/consensus/reactor.go +++ b/internal/consensus/reactor.go @@ -30,54 +30,7 @@ var ( _ p2p.Wrapper = (*tmcons.Message)(nil) ) -// GetChannelDescriptor produces an instance of a descriptor for this -// package's required channels. -func getChannelDescriptors() map[p2p.ChannelID]*p2p.ChannelDescriptor { - return map[p2p.ChannelID]*p2p.ChannelDescriptor{ - StateChannel: { - ID: StateChannel, - Priority: 8, - SendQueueCapacity: 64, - RecvMessageCapacity: maxMsgSize, - RecvBufferCapacity: 128, - Name: "state", - }, - DataChannel: { - // TODO: Consider a split between gossiping current block and catchup - // stuff. Once we gossip the whole block there is nothing left to send - // until next height or round. 
- ID: DataChannel, - Priority: 12, - SendQueueCapacity: 64, - RecvBufferCapacity: 512, - RecvMessageCapacity: maxMsgSize, - Name: "data", - }, - VoteChannel: { - ID: VoteChannel, - Priority: 10, - SendQueueCapacity: 64, - RecvBufferCapacity: 4096, - RecvMessageCapacity: maxMsgSize, - Name: "vote", - }, - VoteSetBitsChannel: { - ID: VoteSetBitsChannel, - Priority: 5, - SendQueueCapacity: 8, - RecvBufferCapacity: 128, - RecvMessageCapacity: maxMsgSize, - Name: "voteSet", - }, - } -} - const ( - StateChannel = p2p.ChannelID(0x20) - DataChannel = p2p.ChannelID(0x21) - VoteChannel = p2p.ChannelID(0x22) - VoteSetBitsChannel = p2p.ChannelID(0x23) - maxMsgSize = 1048576 // 1MB; NOTE: keep in sync with types.PartSet sizes. blocksToContributeToBecomeGoodPeer = 10000 @@ -173,23 +126,23 @@ func (r *Reactor) OnStart(ctx context.Context) error { var chBundle channelBundle var err error - chans := getChannelDescriptors() - chBundle.state, err = r.chCreator(ctx, chans[StateChannel]) + chans := p2p.ConsensusChannelDescriptors() + chBundle.state, err = r.chCreator(ctx, chans[p2p.ConsensusStateChannel]) if err != nil { return err } - chBundle.data, err = r.chCreator(ctx, chans[DataChannel]) + chBundle.data, err = r.chCreator(ctx, chans[p2p.ConsensusDataChannel]) if err != nil { return err } - chBundle.vote, err = r.chCreator(ctx, chans[VoteChannel]) + chBundle.vote, err = r.chCreator(ctx, chans[p2p.ConsensusVoteChannel]) if err != nil { return err } - chBundle.voteSet, err = r.chCreator(ctx, chans[VoteSetBitsChannel]) + chBundle.voteSet, err = r.chCreator(ctx, chans[p2p.VoteSetBitsChannel]) if err != nil { return err } @@ -497,7 +450,7 @@ func (r *Reactor) peerUp(ctx context.Context, peerUpdate p2p.PeerUpdate, retries } } -func (r *Reactor) peerDown(ctx context.Context, peerUpdate p2p.PeerUpdate, chans channelBundle) { +func (r *Reactor) peerDown(_ context.Context, peerUpdate p2p.PeerUpdate, _chans channelBundle) { r.mtx.RLock() ps, ok := r.peers[peerUpdate.NodeID] r.mtx.RUnlock() @@ -682,13 +635,18 @@ func (r *Reactor) handleVoteMessage(ctx context.Context, envelope *p2p.Envelope, case *tmcons.Vote: stateData := r.state.stateDataStore.Get() isValidator := stateData.isValidator(r.state.privValidator.ProTxHash) - height, valSize, lastCommitSize := stateData.Height, stateData.Validators.Size(), stateData.LastPrecommits.Size() + height, valSize := stateData.Height, stateData.Validators.Size() + lastValSize := len(stateData.LastValidators.Validators) if isValidator { // ignore votes on non-validator nodes; TODO don't even send it vMsg := msgI.(*VoteMessage) + if err := vMsg.Vote.ValidateBasic(); err != nil { + return fmt.Errorf("invalid vote received from %s: %w", envelope.From, err) + } + ps.EnsureVoteBitArrays(height, valSize) - ps.EnsureVoteBitArrays(height-1, lastCommitSize) + ps.EnsureVoteBitArrays(height-1, lastValSize) if err := ps.SetHasVote(vMsg.Vote); err != nil { return err } @@ -705,7 +663,7 @@ func (r *Reactor) handleVoteMessage(ctx context.Context, envelope *p2p.Envelope, // VoteSetBitsChannel. If we fail to find the peer state for the envelope sender, // we perform a no-op and return. This can happen when we process the envelope // after the peer is removed. 
-func (r *Reactor) handleVoteSetBitsMessage(ctx context.Context, envelope *p2p.Envelope, msgI Message) error { +func (r *Reactor) handleVoteSetBitsMessage(_ context.Context, envelope *p2p.Envelope, msgI Message) error { logger := r.logger.With("peer", envelope.From, "ch_id", "VoteSetBitsChannel") ps, ok := r.GetPeerState(envelope.From) @@ -780,13 +738,13 @@ func (r *Reactor) handleMessage(ctx context.Context, envelope *p2p.Envelope, cha } switch envelope.ChannelID { - case StateChannel: + case p2p.ConsensusStateChannel: err = r.handleStateMessage(ctx, envelope, msg, chans.voteSet) - case DataChannel: + case p2p.ConsensusDataChannel: err = r.handleDataMessage(ctx, envelope, msg) - case VoteChannel: + case p2p.ConsensusVoteChannel: err = r.handleVoteMessage(ctx, envelope, msg) - case VoteSetBitsChannel: + case p2p.VoteSetBitsChannel: err = r.handleVoteSetBitsMessage(ctx, envelope, msg) default: err = fmt.Errorf("unknown channel ID (%d) for envelope (%v)", envelope.ChannelID, envelope) diff --git a/internal/consensus/reactor_test.go b/internal/consensus/reactor_test.go index fde234c87a..269ff7a578 100644 --- a/internal/consensus/reactor_test.go +++ b/internal/consensus/reactor_test.go @@ -81,31 +81,31 @@ func setup( privProTxHashes[i] = state.privValidator.ProTxHash } rts := &reactorTestSuite{ - network: p2ptest.MakeNetwork(ctx, t, p2ptest.NetworkOptions{NumNodes: numNodes, ProTxHashes: privProTxHashes}), + network: p2ptest.MakeNetwork(ctx, t, p2ptest.NetworkOptions{NumNodes: numNodes, ProTxHashes: privProTxHashes}, log.NewNopLogger()), states: make(map[types.NodeID]*State), reactors: make(map[types.NodeID]*Reactor, numNodes), subs: make(map[types.NodeID]eventbus.Subscription, numNodes), blocksyncSubs: make(map[types.NodeID]eventbus.Subscription, numNodes), } - rts.stateChannels = rts.network.MakeChannelsNoCleanup(ctx, t, chDesc(StateChannel, size)) - rts.dataChannels = rts.network.MakeChannelsNoCleanup(ctx, t, chDesc(DataChannel, size)) - rts.voteChannels = rts.network.MakeChannelsNoCleanup(ctx, t, chDesc(VoteChannel, size)) - rts.voteSetBitsChannels = rts.network.MakeChannelsNoCleanup(ctx, t, chDesc(VoteSetBitsChannel, size)) + rts.stateChannels = rts.network.MakeChannelsNoCleanup(ctx, t, chDesc(p2p.ConsensusStateChannel, size)) + rts.dataChannels = rts.network.MakeChannelsNoCleanup(ctx, t, chDesc(p2p.ConsensusDataChannel, size)) + rts.voteChannels = rts.network.MakeChannelsNoCleanup(ctx, t, chDesc(p2p.ConsensusVoteChannel, size)) + rts.voteSetBitsChannels = rts.network.MakeChannelsNoCleanup(ctx, t, chDesc(p2p.VoteSetBitsChannel, size)) ctx, cancel := context.WithCancel(ctx) t.Cleanup(cancel) chCreator := func(nodeID types.NodeID) p2p.ChannelCreator { - return func(ctx context.Context, desc *p2p.ChannelDescriptor) (p2p.Channel, error) { + return func(_ctx context.Context, desc *p2p.ChannelDescriptor) (p2p.Channel, error) { switch desc.ID { - case StateChannel: + case p2p.ConsensusStateChannel: return rts.stateChannels[nodeID], nil - case DataChannel: + case p2p.ConsensusDataChannel: return rts.dataChannels[nodeID], nil - case VoteChannel: + case p2p.ConsensusVoteChannel: return rts.voteChannels[nodeID], nil - case VoteSetBitsChannel: + case p2p.VoteSetBitsChannel: return rts.voteSetBitsChannels[nodeID], nil default: return nil, fmt.Errorf("invalid channel; %v", desc.ID) @@ -556,7 +556,6 @@ func TestReactorValidatorSetChanges(t *testing.T) { validatorUpdates: updates, consensusParams: factory.ConsensusParams(func(cp *types.ConsensusParams) { cp.Timeout.Propose = 2 * time.Second - 
cp.Timeout.Commit = 1 * time.Second cp.Timeout.Vote = 1 * time.Second }), } diff --git a/internal/consensus/replay_file.go b/internal/consensus/replay_file.go index 0c13b4dd27..210dbc7568 100644 --- a/internal/consensus/replay_file.go +++ b/internal/consensus/replay_file.go @@ -34,7 +34,7 @@ const ( func RunReplayFile( ctx context.Context, logger log.Logger, - cfg config.BaseConfig, + cfg config.Config, csConfig *config.ConsensusConfig, console bool, ) error { @@ -298,7 +298,7 @@ func (pb *playback) replayConsoleLoop(ctx context.Context) (int, error) { // convenience for replay mode func newConsensusStateForReplay( ctx context.Context, - cfg config.BaseConfig, + cfg config.Config, logger log.Logger, csConfig *config.ConsensusConfig, ) (*State, error) { @@ -327,7 +327,7 @@ func newConsensusStateForReplay( return nil, err } - client, _, err := proxy.ClientFactory(logger, cfg.ProxyApp, cfg.ABCI, cfg.DBDir()) + client, _, err := proxy.ClientFactory(logger, *cfg.Abci, cfg.DBDir()) if err != nil { return nil, err } diff --git a/internal/consensus/replay_stubs.go b/internal/consensus/replay_stubs.go index e02fc9e728..9fce5090a2 100644 --- a/internal/consensus/replay_stubs.go +++ b/internal/consensus/replay_stubs.go @@ -25,9 +25,9 @@ func (emptyMempool) Size() int { return 0 } func (emptyMempool) CheckTx(context.Context, types.Tx, func(*abci.ResponseCheckTx), mempool.TxInfo) error { return nil } -func (emptyMempool) RemoveTxByKey(txKey types.TxKey) error { return nil } +func (emptyMempool) RemoveTxByKey(_txKey types.TxKey) error { return nil } func (emptyMempool) ReapMaxBytesMaxGas(_, _ int64) types.Txs { return types.Txs{} } -func (emptyMempool) ReapMaxTxs(n int) types.Txs { return types.Txs{} } +func (emptyMempool) ReapMaxTxs(_n int) types.Txs { return types.Txs{} } func (emptyMempool) Update( _ context.Context, _ int64, @@ -39,11 +39,11 @@ func (emptyMempool) Update( ) error { return nil } -func (emptyMempool) Flush() {} -func (emptyMempool) FlushAppConn(ctx context.Context) error { return nil } -func (emptyMempool) TxsAvailable() <-chan struct{} { return make(chan struct{}) } -func (emptyMempool) EnableTxsAvailable() {} -func (emptyMempool) SizeBytes() int64 { return 0 } +func (emptyMempool) Flush() {} +func (emptyMempool) FlushAppConn(_ctx context.Context) error { return nil } +func (emptyMempool) TxsAvailable() <-chan struct{} { return make(chan struct{}) } +func (emptyMempool) EnableTxsAvailable() {} +func (emptyMempool) SizeBytes() int64 { return 0 } func (emptyMempool) TxsFront() *clist.CElement { return nil } func (emptyMempool) TxsWaitChan() <-chan struct{} { return nil } @@ -104,7 +104,7 @@ type mockProxyApp struct { abciResponses *tmstate.ABCIResponses } -func (mock *mockProxyApp) ProcessProposal(_ context.Context, req *abci.RequestProcessProposal) (*abci.ResponseProcessProposal, error) { +func (mock *mockProxyApp) ProcessProposal(_ context.Context, _req *abci.RequestProcessProposal) (*abci.ResponseProcessProposal, error) { r := mock.abciResponses.ProcessProposal if r == nil { return &abci.ResponseProcessProposal{}, nil @@ -112,11 +112,8 @@ func (mock *mockProxyApp) ProcessProposal(_ context.Context, req *abci.RequestPr return r, nil } -func (mock *mockProxyApp) FinalizeBlock(_ context.Context, req *abci.RequestFinalizeBlock) (*abci.ResponseFinalizeBlock, error) { - r := mock.abciResponses.FinalizeBlock +func (mock *mockProxyApp) FinalizeBlock(_ context.Context, _req *abci.RequestFinalizeBlock) (*abci.ResponseFinalizeBlock, error) { mock.txCount++ - if r == nil { - return 
&abci.ResponseFinalizeBlock{}, nil - } - return r, nil + + return &abci.ResponseFinalizeBlock{}, nil } diff --git a/internal/consensus/replay_test.go b/internal/consensus/replay_test.go index a82fe66731..ff6efde35f 100644 --- a/internal/consensus/replay_test.go +++ b/internal/consensus/replay_test.go @@ -129,10 +129,10 @@ func TestWALCrash(t *testing.T) { heightToStop int64 }{ {"empty block", - func(stateDB dbm.DB, cs *State, ctx context.Context) {}, + func(_stateDB dbm.DB, _cs *State, _ctx context.Context) {}, 1}, {"many non-empty blocks", - func(stateDB dbm.DB, cs *State, ctx context.Context) { + func(_stateDB dbm.DB, cs *State, ctx context.Context) { go sendTxs(ctx, t, cs) }, 3}, @@ -1121,10 +1121,11 @@ func makeBlockchainFromWAL(t *testing.T, wal WAL) ([]*types.Block, []*types.Comm if p.Type == tmproto.PrecommitType { thisBlockCommit = types.NewCommit(p.Height, p.Round, p.BlockID, + p.VoteExtensions, &types.CommitSigns{ QuorumSigns: types.QuorumSigns{ - BlockSign: p.BlockSignature, - ExtensionSigns: types.MakeThresholdExtensionSigns(p.VoteExtensions), + BlockSign: p.BlockSignature, + VoteExtensionSignatures: p.VoteExtensions.GetSignatures(), }, QuorumHash: crypto.RandQuorumHash(), }, @@ -1212,10 +1213,10 @@ func (bs *mockBlockStore) Base() int64 { return bs.base } func (bs *mockBlockStore) Size() int64 { return bs.Height() - bs.Base() + 1 } func (bs *mockBlockStore) LoadBaseMeta() *types.BlockMeta { return bs.LoadBlockMeta(bs.base) } func (bs *mockBlockStore) LoadBlock(height int64) *types.Block { return bs.chain[height-1] } -func (bs *mockBlockStore) LoadBlockByHash(hash []byte) *types.Block { +func (bs *mockBlockStore) LoadBlockByHash(_hash []byte) *types.Block { return bs.chain[int64(len(bs.chain))-1] } -func (bs *mockBlockStore) LoadBlockMetaByHash(hash []byte) *types.BlockMeta { return nil } +func (bs *mockBlockStore) LoadBlockMetaByHash(_hash []byte) *types.BlockMeta { return nil } func (bs *mockBlockStore) LoadBlockMeta(height int64) *types.BlockMeta { block := bs.chain[height-1] bps, err := block.MakePartSet(types.BlockPartSizeBytes) @@ -1227,10 +1228,10 @@ func (bs *mockBlockStore) LoadBlockMeta(height int64) *types.BlockMeta { Header: block.Header, } } -func (bs *mockBlockStore) LoadBlockPart(height int64, index int) *types.Part { return nil } +func (bs *mockBlockStore) LoadBlockPart(_height int64, _index int) *types.Part { return nil } func (bs *mockBlockStore) SaveBlock( block *types.Block, - blockParts *types.PartSet, + _blockParts *types.PartSet, seenCommit *types.Commit, ) { bs.chain = append(bs.chain, block) @@ -1456,7 +1457,7 @@ type initChainApp struct { initialCoreHeight uint32 } -func (ica *initChainApp) InitChain(_ context.Context, req *abci.RequestInitChain) (*abci.ResponseInitChain, error) { +func (ica *initChainApp) InitChain(_ context.Context, _req *abci.RequestInitChain) (*abci.ResponseInitChain, error) { resp := abci.ResponseInitChain{ InitialCoreHeight: ica.initialCoreHeight, } diff --git a/internal/consensus/replayer.go b/internal/consensus/replayer.go index 6cb6e3c646..416c5a24b0 100644 --- a/internal/consensus/replayer.go +++ b/internal/consensus/replayer.go @@ -170,7 +170,8 @@ func (r *BlockReplayer) syncStateIfItIsOneAheadOfStore(ctx context.Context, rs r return r.replayBlocks(ctx, rs, state, true) } if rs.appHeight == rs.stateHeight { - // We haven't run Commit (both the state and app are one block behind), + // Store is ahead of both App and State + // We haven't run FinalizeBlock (both the state and app are one block behind), // so replay with the 
real app. // NOTE: We could instead use the cs.WAL on cs.Start, // but we'd have to allow the WAL to block a block that wrote its #ENDHEIGHT @@ -182,6 +183,7 @@ func (r *BlockReplayer) syncStateIfItIsOneAheadOfStore(ctx context.Context, rs r return state.LastAppHash, nil } if rs.appHeight == rs.storeHeight { + // Store and App are ahead of State // We ran Commit, but didn't save the state, so replay with mock app. abciResponses, err := r.stateStore.LoadABCIResponses(rs.storeHeight) if err != nil { @@ -232,19 +234,18 @@ func (r *BlockReplayer) replayBlocks( var ( block *types.Block commit *types.Commit - fbResp *abci.ResponseFinalizeBlock ucState sm.CurrentRoundState ) for i := firstBlock; i <= finalBlock; i++ { block = r.store.LoadBlock(i) commit = r.store.LoadSeenCommitAt(i) - ucState, fbResp, err = r.replayBlock(ctx, block, commit, state, i) + ucState, err = r.replayBlock(ctx, block, commit, state, i) if err != nil { return nil, err } } if !mutateState { - err = r.publishEvents(block, ucState, fbResp) + err = r.publishEvents(block, ucState) if err != nil { return nil, err } @@ -271,27 +272,26 @@ func (r *BlockReplayer) replayBlock( commit *types.Commit, state sm.State, height int64, -) (sm.CurrentRoundState, *abci.ResponseFinalizeBlock, error) { +) (sm.CurrentRoundState, error) { r.logger.Info("Replay: applying block", "height", height) // Extra check to ensure the app was not changed in a way it shouldn't have. ucState, err := r.blockExec.ProcessProposal(ctx, block, commit.Round, state, false) if err != nil { - return sm.CurrentRoundState{}, nil, fmt.Errorf("blockReplayer process proposal: %w", err) + return sm.CurrentRoundState{}, fmt.Errorf("blockReplayer process proposal: %w", err) } - // We emit events for the index services at the final block due to the sync issue when // the node shutdown during the block committing status. // For all other cases, we disable emitting events by providing blockExec=nil in ExecReplayedCommitBlock - fbResp, err := sm.ExecReplayedCommitBlock(ctx, r.appClient, block, commit, r.logger) + _, err = sm.ExecReplayedCommitBlock(ctx, r.appClient, block, commit, r.logger) if err != nil { - return sm.CurrentRoundState{}, nil, err + return sm.CurrentRoundState{}, err } // Extra check to ensure the app was not changed in a way it shouldn't have. if err := checkAppHashEqualsOneFromBlock(ucState.AppHash, block); err != nil { - return sm.CurrentRoundState{}, nil, err + return sm.CurrentRoundState{}, err } r.nBlocks++ - return ucState, fbResp, nil + return ucState, nil } // syncStateAt loads block's data for a height H to sync it with the application. 
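The branches above decide how to catch the state store and the application up when the block store ended one block ahead after a restart: equal app and state heights mean FinalizeBlock never ran, so the last block is replayed against the real application; an app height equal to the store height means the app committed but the state was never saved, so the replay uses a mock app fed from stored ABCI responses. A self-contained sketch of that case analysis, reduced to plain heights (the real code works on replayState and its store/state/app height fields):

package main

import "fmt"

// replayPlan mirrors the branching in syncStateIfItIsOneAheadOfStore: the
// block store may be one block ahead of the state store and/or the app.
func replayPlan(storeHeight, stateHeight, appHeight int64) string {
	switch {
	case appHeight == stateHeight && stateHeight == storeHeight-1:
		// Neither FinalizeBlock nor the state save ran:
		// replay the last block against the real application.
		return "replay last block with real app"
	case appHeight == storeHeight && stateHeight == storeHeight-1:
		// The app committed but the state was not saved:
		// replay with a mock app built from the stored ABCI responses.
		return "replay last block with mock app"
	default:
		return "no replay needed"
	}
}

func main() {
	fmt.Println(replayPlan(10, 9, 9))  // real app
	fmt.Println(replayPlan(10, 9, 10)) // mock app
}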
@@ -320,7 +320,7 @@ func (r *BlockReplayer) execInitChain(ctx context.Context, rs *replayState, stat return nil } stateBlockHeight := state.LastBlockHeight - nextVals, err := validatorSetUpdateFromGenesis(r.genDoc, r.nodeProTxHash) + nextVals, err := validatorSetUpdateFromGenesis(r.genDoc) if err != nil { return err } @@ -372,10 +372,9 @@ func (r *BlockReplayer) execInitChain(ctx context.Context, rs *replayState, stat func (r *BlockReplayer) publishEvents( block *types.Block, ucState sm.CurrentRoundState, - fbResp *abci.ResponseFinalizeBlock, ) error { blockID := block.BlockID(nil) - es := sm.NewFullEventSet(block, blockID, ucState, fbResp, ucState.NextValidators) + es := sm.NewFullEventSet(block, blockID, ucState, ucState.NextValidators) err := es.Publish(r.publisher) if err != nil { r.logger.Error("failed publishing event", "err", err) @@ -383,7 +382,7 @@ func (r *BlockReplayer) publishEvents( return nil } -func validatorSetUpdateFromGenesis(genDoc *types.GenesisDoc, nodeProTxHash types.ProTxHash) (*abci.ValidatorSetUpdate, error) { +func validatorSetUpdateFromGenesis(genDoc *types.GenesisDoc) (*abci.ValidatorSetUpdate, error) { if len(genDoc.QuorumHash) != crypto.DefaultHashSize { return nil, nil } @@ -395,12 +394,11 @@ func validatorSetUpdateFromGenesis(genDoc *types.GenesisDoc, nodeProTxHash types return nil, fmt.Errorf("blockReplayer blocks error when validating validator: %s", err) } } - validatorSet := types.NewValidatorSetWithLocalNodeProTxHash( + validatorSet := types.NewValidatorSetCheckPublicKeys( validators, genDoc.ThresholdPublicKey, genDoc.QuorumType, genDoc.QuorumHash, - nodeProTxHash, ) err := validatorSet.ValidateBasic() if err != nil { diff --git a/internal/consensus/state.go b/internal/consensus/state.go index 91af6fac30..d3faa21491 100644 --- a/internal/consensus/state.go +++ b/internal/consensus/state.go @@ -180,6 +180,7 @@ type State struct { voteSigner *voteSigner ctrl *Controller roundScheduler *roundScheduler + msgMiddlewares []msgMiddlewareFunc stopFn func(cs *State) bool } @@ -282,7 +283,7 @@ func NewState( for _, sub := range subs { sub.Subscribe(cs.emitter) } - cs.msgDispatcher = newMsgInfoDispatcher(cs.ctrl, propler, wal, cs.logger) + cs.msgDispatcher = newMsgInfoDispatcher(cs.ctrl, propler, wal, cs.logger, cs.msgMiddlewares...) 
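State now carries a msgMiddlewares slice that is passed variadically to newMsgInfoDispatcher. The msgMiddlewareFunc type itself is not shown in this diff, so the sketch below only assumes the conventional wrap-the-handler shape in order to show how such a variadic middleware list composes around a message handler:

package main

import "fmt"

// Assumed shapes, for illustration only: a message handler plus a middleware
// that wraps it. chain applies middlewares right-to-left, so the first
// middleware in the list becomes the outermost wrapper and runs first.
type msgHandler func(msg string) error
type msgMiddleware func(next msgHandler) msgHandler

func chain(h msgHandler, mws ...msgMiddleware) msgHandler {
	for i := len(mws) - 1; i >= 0; i-- {
		h = mws[i](h)
	}
	return h
}

func main() {
	logging := func(next msgHandler) msgHandler {
		return func(msg string) error {
			fmt.Println("dispatching:", msg)
			return next(msg)
		}
	}
	handler := chain(func(_ string) error { return nil }, logging)
	_ = handler("proposal")
}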
// this is not ideal, but it lets the consensus tests start // node-fragments gracefully while letting the nodes @@ -512,8 +513,9 @@ func (cs *State) OnStop() { if cs.GetRoundState().Step == cstypes.RoundStepApplyCommit { select { case <-cs.getOnStopCh(): - case <-time.After(stateData.state.ConsensusParams.Timeout.Commit): - cs.logger.Error("OnStop: timeout waiting for commit to finish", "time", stateData.state.ConsensusParams.Timeout.Commit) + case <-time.After(stateData.state.ConsensusParams.Timeout.Vote): + // we wait vote timeout, just in case + cs.logger.Error("OnStop: timeout waiting for commit to finish", "time", stateData.state.ConsensusParams.Timeout.Vote) } } @@ -722,18 +724,18 @@ func (cs *State) handleTimeout( ti timeoutInfo, stateData *StateData, ) { - cs.logger.Trace("received tock", "timeout", ti.Duration, "height", ti.Height, "round", ti.Round, "step", ti.Step) // timeouts must be for current height, round, step if ti.Height != stateData.Height || ti.Round < stateData.Round || (ti.Round == stateData.Round && ti.Step < stateData.Step) { - cs.logger.Debug("ignoring tock because we are ahead", - "height", stateData.Height, - "round", stateData.Round, - "step", stateData.Step.String(), + cs.logger.Trace("ignoring tock because we are ahead", + "timeout", ti.Duration, "tock_height", ti.Height, "tock_round", ti.Round, "tock_step", ti.Step, + "height", stateData.Height, "round", stateData.Round, "step", stateData.Step.String(), ) return } + cs.logger.Trace("received tock", "timeout", ti.Duration, "height", ti.Height, "round", ti.Round, "step", ti.Step) + // the timeout will now cause a state transition cs.mtx.Lock() defer cs.mtx.Unlock() @@ -767,6 +769,8 @@ func (cs *State) handleTimeout( } func (cs *State) handleTxsAvailable(ctx context.Context, stateData *StateData) { + // TODO: Change to trace + cs.logger.Debug("new transactions are available", "height", stateData.Height, "round", stateData.Round, "step", stateData.Step) // We only need to do this for round 0. 
if stateData.Round != 0 { return diff --git a/internal/consensus/state_add_commit.go b/internal/consensus/state_add_commit.go index 17981f909f..3cfcfe70d4 100644 --- a/internal/consensus/state_add_commit.go +++ b/internal/consensus/state_add_commit.go @@ -47,9 +47,10 @@ func (c *AddCommitAction) Execute(ctx context.Context, stateEvent StateEvent) er if err != nil { return fmt.Errorf("error adding commit: %w", err) } - if stateData.bypassCommitTimeout() { - _ = stateEvent.Ctrl.Dispatch(ctx, &EnterNewRoundEvent{Height: stateData.Height}, stateData) - } + + // We go to next round, as in Tenderdash we don't need to wait for new commits + _ = stateEvent.Ctrl.Dispatch(ctx, &EnterNewRoundEvent{Height: stateData.Height}, stateData) + _ = c.statsQueue.send(ctx, msgInfoFromCtx(ctx)) return nil } diff --git a/internal/consensus/state_add_prop_block.go b/internal/consensus/state_add_prop_block.go index bf2b5d4376..f75303ad78 100644 --- a/internal/consensus/state_add_prop_block.go +++ b/internal/consensus/state_add_prop_block.go @@ -179,6 +179,7 @@ func (c *AddProposalBlockPartAction) addProposalBlockPart( "proposal_height", stateData.ProposalBlock.Height, "hash", stateData.ProposalBlock.Hash(), "round_height", stateData.RoundState.GetHeight(), + "num_txs", len(stateData.ProposalBlock.Txs), ) c.eventPublisher.PublishCompleteProposalEvent(stateData.CompleteProposalEvent()) diff --git a/internal/consensus/state_add_vote.go b/internal/consensus/state_add_vote.go index 0644beb518..284964cbbd 100644 --- a/internal/consensus/state_add_vote.go +++ b/internal/consensus/state_add_vote.go @@ -45,7 +45,6 @@ func newAddVoteAction(cs *State, ctrl *Controller, statsQueue *chanQueue[msgInfo statsMw := addVoteStatsMw(statsQueue) dispatchPrecommitMw := addVoteDispatchPrecommitMw(ctrl) verifyVoteExtensionMw := addVoteVerifyVoteExtensionMw(cs.privValidator, cs.blockExec, cs.metrics, cs.emitter) - addToLastPrecommitMw := addVoteToLastPrecommitMw(cs.eventPublisher, ctrl) return &AddVoteAction{ prevote: withVoterMws( addToVoteSet, @@ -62,7 +61,6 @@ func newAddVoteAction(cs *State, ctrl *Controller, statsQueue *chanQueue[msgInfo dispatchPrecommitMw, verifyVoteExtensionMw, validateVoteMw, - addToLastPrecommitMw, errorMw, statsMw, ), @@ -86,7 +84,7 @@ func (c *AddVoteAction) Execute(ctx context.Context, stateEvent StateEvent) erro // addVoteToVoteSetFunc adds a vote to the vote-set func addVoteToVoteSetFunc(metrics *Metrics, ep *EventPublisher) AddVoteFunc { - return func(ctx context.Context, stateData *StateData, vote *types.Vote) (bool, error) { + return func(_ctx context.Context, stateData *StateData, vote *types.Vote) (bool, error) { added, err := stateData.Votes.AddVote(vote) if !added || err != nil { return added, err @@ -101,45 +99,6 @@ func addVoteToVoteSetFunc(metrics *Metrics, ep *EventPublisher) AddVoteFunc { } } -func addVoteToLastPrecommitMw(ep *EventPublisher, ctrl *Controller) AddVoteMiddlewareFunc { - return func(next AddVoteFunc) AddVoteFunc { - return func(ctx context.Context, stateData *StateData, vote *types.Vote) (bool, error) { - if vote.Height+1 != stateData.Height || vote.Type != tmproto.PrecommitType { - return next(ctx, stateData, vote) - } - logger := log.FromCtxOrNop(ctx) - if stateData.Step != cstypes.RoundStepNewHeight { - // Late precommit at prior height is ignored - logger.Trace("precommit vote came in after commit timeout and has been ignored") - return false, nil - } - if stateData.LastPrecommits == nil { - logger.Debug("no last round precommits on node", "vote", vote) - return false, nil - } 
- added, err := stateData.LastPrecommits.AddVote(vote) - if !added { - logger.Debug("vote not added to last precommits", logKeyValsWithError(nil, err)...) - return false, nil - } - logger.Trace("added vote to last precommits", "last_precommits", stateData.LastPrecommits) - - err = ep.PublishVoteEvent(vote) - if err != nil { - return added, err - } - - // if we can skip timeoutCommit and have all the votes now, - if stateData.bypassCommitTimeout() && stateData.LastPrecommits.HasAll() { - // go straight to new round (skip timeout commit) - // c.scheduleTimeout(time.Duration(0), c.Height, 0, cstypes.RoundStepNewHeight) - _ = ctrl.Dispatch(ctx, &EnterNewRoundEvent{Height: stateData.Height}, stateData) - } - return added, err - } - } -} - func addVoteUpdateValidBlockMw(ep *EventPublisher) AddVoteMiddlewareFunc { return func(next AddVoteFunc) AddVoteFunc { return func(ctx context.Context, stateData *StateData, vote *types.Vote) (bool, error) { @@ -250,7 +209,7 @@ func addVoteDispatchPrecommitMw(ctrl *Controller) AddVoteMiddlewareFunc { return added, err } _ = ctrl.Dispatch(ctx, &EnterCommitEvent{Height: height, CommitRound: vote.Round}, stateData) - if stateData.bypassCommitTimeout() && precommits.HasAll() { + if precommits.HasTwoThirdsMajority() { _ = ctrl.Dispatch(ctx, &EnterNewRoundEvent{Height: stateData.Height}, stateData) } return added, err @@ -395,7 +354,7 @@ func addVoteLoggingMw() AddVoteMiddlewareFunc { return added, err } votes := stateData.Votes.GetVoteSet(vote.Round, vote.Type) - logger.Trace("vote added", "data", votes) + logger.Debug("vote added", "data", votes, "nil", vote.BlockID.IsNil()) return added, err } } diff --git a/internal/consensus/state_add_vote_test.go b/internal/consensus/state_add_vote_test.go index c1caff2734..2667f1eeba 100644 --- a/internal/consensus/state_add_vote_test.go +++ b/internal/consensus/state_add_vote_test.go @@ -60,11 +60,11 @@ func (suite *AddVoteTestSuite) TestAddVoteAction() { prevoteCalled := false precommitCalled := false cmd := AddVoteAction{ - prevote: func(ctx context.Context, stateData *StateData, vote *types.Vote) (bool, error) { + prevote: func(_ctx context.Context, _stateData *StateData, _vote *types.Vote) (bool, error) { prevoteCalled = true return true, nil }, - precommit: func(ctx context.Context, stateData *StateData, vote *types.Vote) (bool, error) { + precommit: func(_ctx context.Context, _stateData *StateData, _vote *types.Vote) (bool, error) { precommitCalled = true return true, nil }, @@ -105,7 +105,7 @@ func (suite *AddVoteTestSuite) TestAddVoteToVoteSet() { defer cancel() const H100 = int64(100) eventFired := false - suite.emitter.AddListener(types.EventVoteValue, func(data eventemitter.EventData) error { + suite.emitter.AddListener(types.EventVoteValue, func(_data eventemitter.EventData) error { eventFired = true return nil }) @@ -171,7 +171,7 @@ func (suite *AddVoteTestSuite) TestAddVoteUpdateValidBlockMw() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() eventFired := false - suite.emitter.AddListener(types.EventValidBlockValue, func(data eventemitter.EventData) error { + suite.emitter.AddListener(types.EventValidBlockValue, func(_data eventemitter.EventData) error { eventFired = true return nil }) @@ -197,7 +197,7 @@ func (suite *AddVoteTestSuite) TestAddVoteUpdateValidBlockMw() { suite.NoError(err) returnAdded := true var returnError error - mockFn := func(ctx context.Context, stateData *StateData, vote *types.Vote) (bool, error) { + mockFn := func(_ctx context.Context, _stateData *StateData, _vote 
*types.Vote) (bool, error) { return returnAdded, returnError } fn := addVoteUpdateValidBlockMw(suite.publisher)(mockFn) diff --git a/internal/consensus/state_data.go b/internal/consensus/state_data.go index 519d60175a..13739ad48f 100644 --- a/internal/consensus/state_data.go +++ b/internal/consensus/state_data.go @@ -226,7 +226,6 @@ func (s *StateData) updateToState(state sm.State, commit *types.Commit) { switch { case state.LastBlockHeight == 0: // Very first commit should be empty. s.LastCommit = (*types.Commit)(nil) - s.LastPrecommits = (*types.VoteSet)(nil) case s.CommitRound > -1 && s.Votes != nil && commit == nil: // Otherwise, use cs.Votes if !s.Votes.Precommits(s.CommitRound).HasTwoThirdsMajority() { panic(fmt.Sprintf( @@ -234,14 +233,9 @@ func (s *StateData) updateToState(state sm.State, commit *types.Commit) { state.LastBlockHeight, s.CommitRound, s.Votes.Precommits(s.CommitRound), )) } - s.LastPrecommits = s.Votes.Precommits(s.CommitRound) - s.LastCommit = s.LastPrecommits.MakeCommit() + precommits := s.Votes.Precommits(s.CommitRound) + s.LastCommit = precommits.MakeCommit() case commit != nil: - // We either got the commit from a remote node - // In which Last precommits will be nil - // Or we got the commit from finalize commit - // In which Last precommits will not be nil - s.LastPrecommits = s.Votes.Precommits(s.CommitRound) s.LastCommit = commit case s.LastCommit == nil: // NOTE: when Tendermint starts, it has no votes. reconstructLastCommit @@ -266,13 +260,9 @@ func (s *StateData) updateToState(state sm.State, commit *types.Commit) { if s.CommitTime.IsZero() { // "Now" makes it easier to sync up dev nodes. - // We add timeoutCommit to allow transactions - // to be gathered for the first block. - // And alternative solution that relies on clocks: - // cs.StartTime = state.LastBlockTime.Add(timeoutCommit) - s.StartTime = s.commitTime(tmtime.Now()) + s.StartTime = tmtime.Now() } else { - s.StartTime = s.commitTime(s.CommitTime) + s.StartTime = s.CommitTime } if s.Validators == nil || !bytes.Equal(s.Validators.QuorumHash, validators.QuorumHash) { @@ -314,28 +304,70 @@ func (s *StateData) HeightVoteSet() (int64, *cstypes.HeightVoteSet) { return s.Height, s.Votes } -func (s *StateData) commitTime(t time.Time) time.Time { - c := s.state.ConsensusParams.Timeout.Commit - if s.config.UnsafeCommitTimeoutOverride != 0 { - c = s.config.UnsafeProposeTimeoutOverride - } - return t.Add(c) -} - -func (s *StateData) proposalIsTimely() bool { +// proposalIsTimely returns an error if the proposal is not timely +func (s *StateData) proposalIsTimely() error { if s.Height == s.state.InitialHeight { // by definition, initial block must have genesis time - return s.Proposal.Timestamp.Equal(s.state.LastBlockTime) + if !s.Proposal.Timestamp.Equal(s.state.LastBlockTime) { + return fmt.Errorf( + "%w: initial block must have genesis time: height %d, round %d, proposal time %v, genesis time %v", + errPrevoteProposalNotTimely, s.Height, s.Round, s.Proposal.Timestamp, s.state.LastBlockTime, + ) + } + + return nil } + sp := s.state.ConsensusParams.Synchrony.SynchronyParamsOrDefaults() - return s.Proposal.IsTimely(s.ProposalReceiveTime, sp, s.Round) + switch s.Proposal.CheckTimely(s.ProposalReceiveTime, sp, s.Round) { + case 0: + return nil + case -1: // too early + return fmt.Errorf( + "%w: received too early: height %d, round %d, delay %s", + errPrevoteProposalNotTimely, s.Height, s.Round, + s.ProposalReceiveTime.Sub(s.Proposal.Timestamp).String(), + ) + case 1: // too late + return fmt.Errorf( + "%w: 
received too late: height %d, round %d, delay %s", + errPrevoteProposalNotTimely, s.Height, s.Round, + s.ProposalReceiveTime.Sub(s.Proposal.Timestamp).String(), + ) + default: + panic("unexpected return value from isTimely") + } } -func (s *StateData) updateValidBlock() { +// Updates ValidBlock to current proposal. +// Returns true if the block was updated. +func (s *StateData) updateValidBlock() bool { s.ValidRound = s.Round - s.ValidBlock = s.ProposalBlock - s.ValidBlockRecvTime = s.ProposalReceiveTime - s.ValidBlockParts = s.ProposalBlockParts + // we only update valid block if it's not set already; otherwise we might overwrite the recv time + if !s.ValidBlock.HashesTo(s.ProposalBlock.Hash()) { + s.ValidBlock = s.ProposalBlock + s.ValidBlockRecvTime = s.ProposalReceiveTime + s.ValidBlockParts = s.ProposalBlockParts + + return true + } + + s.logger.Debug("valid block is already up to date, not updating", + "proposal_block", s.ProposalBlock.Hash(), + "proposal_round", s.Round, + "valid_block", s.ValidBlock.Hash(), + "valid_block_round", s.ValidRound, + ) + + return false +} + +// Locks the proposed block. +// You might also need to call updateValidBlock(). +func (s *StateData) updateLockedBlock() { + s.LockedRound = s.Round + s.LockedBlock = s.ProposalBlock + s.LockedBlockParts = s.ProposalBlockParts } func (s *StateData) verifyCommit(commit *types.Commit, peerID types.NodeID, ignoreProposalBlock bool) (verified bool, err error) { @@ -459,13 +491,6 @@ func (s *StateData) voteTimeout(round int32) time.Duration { ) * time.Nanosecond } -func (s *StateData) bypassCommitTimeout() bool { - if s.config.UnsafeBypassCommitTimeoutOverride != nil { - return *s.config.UnsafeBypassCommitTimeoutOverride - } - return s.state.ConsensusParams.Timeout.BypassCommitTimeout -} - func (s *StateData) isValidForPrevote() error { // Check that a proposed block was not received within this round (and thus executing this from a timeout). if s.ProposalBlock == nil { @@ -477,13 +502,16 @@ func (s *StateData) isValidForPrevote() error { if !s.Proposal.Timestamp.Equal(s.ProposalBlock.Header.Time) { return errPrevoteTimestampNotEqual } - //TODO: Remove this temporary fix when the complete solution is ready. See #8739 - if !s.replayMode && s.Proposal.POLRound == -1 && s.LockedRound == -1 && !s.proposalIsTimely() { - return errPrevoteProposalNotTimely + + // if this block was not validated yet, we check if it's timely + if !s.replayMode && !s.ProposalBlock.HashesTo(s.ValidBlock.Hash()) { + if err := s.proposalIsTimely(); err != nil { + return err + } } + // Validate proposal core chain lock - err := sm.ValidateBlockChainLock(s.state, s.ProposalBlock) - if err != nil { + if err := sm.ValidateBlockChainLock(s.state, s.ProposalBlock); err != nil { return errPrevoteInvalidChainLock } return nil diff --git a/internal/consensus/state_enter_precommit.go b/internal/consensus/state_enter_precommit.go index 41e53b1075..ce0981af58 100644 --- a/internal/consensus/state_enter_precommit.go +++ b/internal/consensus/state_enter_precommit.go @@ -134,12 +134,15 @@ func (c *EnterPrecommitAction) Execute(ctx context.Context, stateEvent StateEven // Validate the block. 
c.blockExec.mustValidate(ctx, stateData) - stateData.LockedRound = round - stateData.LockedBlock = stateData.ProposalBlock - stateData.LockedBlockParts = stateData.ProposalBlockParts + stateData.updateLockedBlock() c.eventPublisher.PublishLockEvent(stateData.RoundState) c.voteSigner.signAddVote(ctx, stateData, tmproto.PrecommitType, blockID) + + if stateData.updateValidBlock() { + c.eventPublisher.PublishValidBlockEvent(stateData.RoundState) + } + return nil } diff --git a/internal/consensus/state_enter_wait.go b/internal/consensus/state_enter_wait.go index 875d0bb928..6d79fcb464 100644 --- a/internal/consensus/state_enter_wait.go +++ b/internal/consensus/state_enter_wait.go @@ -28,7 +28,7 @@ type EnterPrecommitWaitAction struct { // Execute ... // Enter: any +2/3 precommits for next round. -func (c *EnterPrecommitWaitAction) Execute(ctx context.Context, stateEvent StateEvent) error { +func (c *EnterPrecommitWaitAction) Execute(_ context.Context, stateEvent StateEvent) error { stateData := stateEvent.StateData event := stateEvent.Data.(*EnterPrecommitWaitEvent) height, round := event.Height, event.Round @@ -86,7 +86,7 @@ type EnterPrevoteWaitAction struct { // Execute ... // Enter: any +2/3 prevotes at next round. -func (c *EnterPrevoteWaitAction) Execute(ctx context.Context, stateEvent StateEvent) error { +func (c *EnterPrevoteWaitAction) Execute(_ context.Context, stateEvent StateEvent) error { stateData := stateEvent.StateData event := stateEvent.Data.(*EnterPrevoteWaitEvent) height, round := event.Height, event.Round diff --git a/internal/consensus/state_prevoter.go b/internal/consensus/state_prevoter.go index fdc7913603..c562b3a3d8 100644 --- a/internal/consensus/state_prevoter.go +++ b/internal/consensus/state_prevoter.go @@ -39,7 +39,11 @@ func (p *prevoter) Do(ctx context.Context, stateData *StateData) error { err := stateData.isValidForPrevote() if err != nil { keyVals := append(prevoteKeyVals(stateData), "error", err) - p.logger.Error("prevote is invalid", keyVals...) + + if !errors.Is(err, errPrevoteProposalBlockNil) { + p.logger.Error("prevote is invalid", keyVals...) + } + p.logger.Debug("we don't have a valid block for this round, prevoting nil", keyVals...) p.signAndAddNilVote(ctx, stateData) return nil } @@ -102,6 +106,7 @@ func (p *prevoter) checkProposalBlock(rs cstypes.RoundState) bool { or the proposal matches our locked block, we prevote the proposal. 
*/ if rs.Proposal.POLRound != -1 { + p.logger.Trace("prevote step: proposal has POLRound; no decision", "POLRound", rs.Proposal.POLRound) return false } if rs.LockedRound == -1 { @@ -112,6 +117,11 @@ func (p *prevoter) checkProposalBlock(rs cstypes.RoundState) bool { p.logger.Debug("prevote step: ProposalBlock is valid and matches our locked block; prevoting the proposal") return true } + + p.logger.Debug("prevote step: this block is not locked", + "locked_block_hash", rs.LockedBlock.Hash(), + "proposal_block_hash", rs.ProposalBlock.Hash()) + return false } @@ -136,9 +146,13 @@ func (p *prevoter) checkPrevoteMaj23(rs cstypes.RoundState) bool { */ blockID, ok := rs.Votes.Prevotes(rs.Proposal.POLRound).TwoThirdsMajority() if !ok { + p.logger.Trace("prevote step: no 2/3 majority for proposal block", "POLRound", rs.Proposal.POLRound) return false } if !rs.ProposalBlock.HashesTo(blockID.Hash) { + p.logger.Trace("prevote step: proposal block does not match 2/3 majority", "POLRound", rs.Proposal.POLRound, + "proposal_block_hash", rs.ProposalBlock.Hash(), + "majority_block_hash", blockID.Hash) return false } if rs.Proposal.POLRound < 0 { @@ -155,9 +169,14 @@ func (p *prevoter) checkPrevoteMaj23(rs cstypes.RoundState) bool { return true } if rs.ProposalBlock.HashesTo(rs.LockedBlock.Hash()) { - p.logger.Debug("prevote step: ProposalBlock is valid and matches our locked block; prevoting the proposal") + p.logger.Debug("prevote step: ProposalBlock is valid and matches our locked block", + "outcome", "prevoting the proposal") return true } + p.logger.Debug("prevote step: ProposalBlock does not match our locked block", + "proposal_block_hash", rs.ProposalBlock.Hash(), + "majority_block_hash", blockID.Hash) + return false } diff --git a/internal/consensus/state_proposaler.go b/internal/consensus/state_proposaler.go index 48848927b7..75c0b6942e 100644 --- a/internal/consensus/state_proposaler.go +++ b/internal/consensus/state_proposaler.go @@ -44,8 +44,15 @@ func NewProposaler( // Set updates Proposal, ProposalReceiveTime and ProposalBlockParts in RoundState if the passed proposal met conditions func (p *Proposaler) Set(proposal *types.Proposal, receivedAt time.Time, rs *cstypes.RoundState) error { - // Does not apply - if rs.Proposal != nil || proposal.Height != rs.Height || proposal.Round != rs.Round { + + if rs.Proposal != nil { + // We already have a proposal + return nil + } + + if proposal.Height != rs.Height || proposal.Round != rs.Round { + p.logger.Debug("received proposal for invalid height/round, ignoring", "proposal", proposal, + "height", rs.Height, "round", rs.Round, "received", receivedAt) return nil } @@ -65,6 +72,7 @@ func (p *Proposaler) Set(proposal *types.Proposal, receivedAt time.Time, rs *cst } rs.Proposal = proposal rs.ProposalReceiveTime = receivedAt + p.proposalTimestampDifferenceMetric(*rs) // We don't update cs.ProposalBlockParts if it is already set. // This happens if we're already in cstypes.RoundStepApplyCommit or if there is a valid block in the current round. 
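Both proposalIsTimely above and the metric hunk just below rely on Proposal.CheckTimely, which replaces the boolean IsTimely with a tri-state result: 0 for timely, -1 for received too early, +1 for received too late. A stand-alone sketch of that contract is given here; the bounds follow the usual PBTS rule, the precision/msgDelay values are made up for illustration, and the real method also takes SynchronyParams plus the round (presumably to relax msgDelay in later rounds):

package main

import (
	"fmt"
	"time"
)

// checkTimely sketches the tri-state contract: 0 = timely, -1 = received too
// early, +1 = received too late. A proposal is considered timely when the
// receive time falls within [proposalTime-precision, proposalTime+msgDelay+precision].
func checkTimely(proposalTime, recvTime time.Time, precision, msgDelay time.Duration) int {
	switch {
	case recvTime.Before(proposalTime.Add(-precision)):
		return -1
	case recvTime.After(proposalTime.Add(msgDelay + precision)):
		return 1
	default:
		return 0
	}
}

func main() {
	now := time.Now()
	// Mirrors the PBTS test at the top of this diff: a receive time 40 ms
	// before the proposal time trips the "received too early" branch.
	fmt.Println(checkTimely(now, now.Add(-40*time.Millisecond), 5*time.Millisecond, 30*time.Millisecond)) // -1
	fmt.Println(checkTimely(now, now.Add(10*time.Millisecond), 5*time.Millisecond, 30*time.Millisecond))  // 0
}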
@@ -74,19 +82,23 @@ func (p *Proposaler) Set(proposal *types.Proposal, receivedAt time.Time, rs *cst rs.ProposalBlockParts = types.NewPartSetFromHeader(proposal.BlockID.PartSetHeader) } - p.logger.Info("received proposal", "proposal", proposal) + p.logger.Info("received proposal", "proposal", proposal, "received", receivedAt) return nil } // Create creates, sings and sends a created proposal to the queue +// // To create a proposal is used RoundState.ValidBlock if it isn't nil and valid, otherwise create a new one func (p *Proposaler) Create(ctx context.Context, height int64, round int32, rs *cstypes.RoundState) error { - // If there is valid block, choose that. + // Create a block. + // Note that we only create a block if we don't have a valid block already. block, blockParts := rs.ValidBlock, rs.ValidBlockParts - // Create on block if !p.checkValidBlock(rs) { var err error + start := time.Now() block, blockParts, err = p.createProposalBlock(ctx, round, rs) + p.logger.Trace("createProposalBlock executed", "took", time.Since(start).String()) + if err != nil { return err } @@ -94,8 +106,10 @@ func (p *Proposaler) Create(ctx context.Context, height int64, round int32, rs * logger := p.logger.With( "height", height, "round", round) + // Make proposal proposal := makeProposal(height, round, rs.ValidRound, block, blockParts) + // Sign proposal err := p.signProposal(ctx, height, proposal) if err != nil { @@ -161,11 +175,11 @@ func (p *Proposaler) checkValidBlock(rs *cstypes.RoundState) bool { } if !rs.ValidBlock.IsTimely(rs.ValidBlockRecvTime, sp, rs.ValidRound) { p.logger.Error( - "proposal block is outdated", + "proposal block is not timely", "height", rs.Height, "round", rs.ValidRound, "received", rs.ValidBlockRecvTime, - "block", rs.ValidBlock) + "block", rs.ValidBlock.Hash()) return false } return true @@ -178,15 +192,19 @@ func (p *Proposaler) proposalTimestampDifferenceMetric(rs cstypes.RoundState) { if rs.Height == p.committedState.InitialHeight { recvTime = p.committedState.LastBlockTime // genesis time } - isTimely := rs.Proposal.IsTimely(recvTime, sp, rs.Round) - p.metrics.ProposalTimestampDifference.With("is_timely", fmt.Sprintf("%t", isTimely)). + timely := rs.Proposal.CheckTimely(recvTime, sp, rs.Round) + p.metrics.ProposalTimestampDifference.With("is_timely", fmt.Sprintf("%t", timely == 0)). 
Observe(rs.ProposalReceiveTime.Sub(rs.Proposal.Timestamp).Seconds()) } } func (p *Proposaler) sendMessages(ctx context.Context, msgs ...Message) { for _, msg := range msgs { - _ = p.msgInfoQueue.send(ctx, msg, "") + err := p.msgInfoQueue.send(ctx, msg, "") + if err != nil { + // just warning, we don't want to stop the proposaler + p.logger.Error("proposaler failed to send message to msgInfoQueue", "error", err) + } } } diff --git a/internal/consensus/state_test.go b/internal/consensus/state_test.go index 2c3587b114..c50c146ce9 100644 --- a/internal/consensus/state_test.go +++ b/internal/consensus/state_test.go @@ -303,7 +303,7 @@ func TestStateProposalTime(t *testing.T) { expectNewBlock: false, }, { // TEST 1: BLOCK TIME IS IN FUTURE - blockTimeFunc: func(s *State) time.Time { return tmtime.Now().Add(delay + precision + 24*time.Hour) }, + blockTimeFunc: func(_s *State) time.Time { return tmtime.Now().Add(delay + precision + 24*time.Hour) }, expectNewBlock: true, }, { // TEST 2: BLOCK TIME IS OLDER THAN PREVIOUS BLOCK TIME @@ -2025,7 +2025,7 @@ func TestProcessProposalAccept(t *testing.T) { Status: status, }, nil) m.On("PrepareProposal", mock.Anything, mock.Anything).Return(&abci.ResponsePrepareProposal{ - AppHash: make([]byte, crypto.DefaultAppHashSize), + AppHash: make([]byte, crypto.DefaultAppHashSize), AppVersion: 1, }, nil).Maybe() cs1, _ := makeState(ctx, t, makeStateArgs{config: config, application: m}) @@ -2080,7 +2080,7 @@ func TestFinalizeBlockCalled(t *testing.T) { Status: abci.ResponseProcessProposal_ACCEPT, }, nil) m.On("PrepareProposal", mock.Anything, mock.Anything).Return(&abci.ResponsePrepareProposal{ - AppHash: make([]byte, crypto.DefaultAppHashSize), + AppHash: make([]byte, crypto.DefaultAppHashSize), AppVersion: 1, }, nil) // We only expect VerifyVoteExtension to be called on non-nil precommits. 
// https://github.com/tendermint/tendermint/issues/8487 @@ -2141,12 +2141,10 @@ func TestExtendVote(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - voteExtensions := []*abci.ExtendVoteExtension{ - { - Type: tmproto.VoteExtensionType_DEFAULT, - Extension: []byte("extension"), - }, - } + voteExtensions := types.VoteExtensionsFromProto(&tmproto.VoteExtension{ + Type: tmproto.VoteExtensionType_THRESHOLD_RECOVER, + Extension: []byte("extension"), + }) m := abcimocks.NewApplication(t) m.On("ProcessProposal", mock.Anything, mock.Anything).Return(&abci.ResponseProcessProposal{ @@ -2154,12 +2152,12 @@ func TestExtendVote(t *testing.T) { Status: abci.ResponseProcessProposal_ACCEPT, }, nil) m.On("PrepareProposal", mock.Anything, mock.Anything).Return(&abci.ResponsePrepareProposal{ - AppHash: make([]byte, crypto.DefaultAppHashSize), + AppHash: make([]byte, crypto.DefaultAppHashSize), AppVersion: 1, }, nil) m.On("ExtendVote", mock.Anything, mock.Anything).Return(&abci.ResponseExtendVote{ VoteExtensions: []*abci.ExtendVoteExtension{ { - Type: tmproto.VoteExtensionType_DEFAULT, + Type: tmproto.VoteExtensionType_THRESHOLD_RECOVER, Extension: []byte("extension"), }, }, @@ -2200,14 +2198,14 @@ func TestExtendVote(t *testing.T) { assert.Equal(t, req.Round, round) }) m.On("ExtendVote", mock.Anything, reqExtendVoteFunc).Return(&abci.ResponseExtendVote{ - VoteExtensions: voteExtensions, + VoteExtensions: voteExtensions.ToExtendProto(), }, nil) reqVerifyVoteExtFunc := mock.MatchedBy(func(req *abci.RequestVerifyVoteExtension) bool { _, ok := proTxHashMap[types.ProTxHash(req.ValidatorProTxHash).String()] - return assert.Equal(t, req.Hash, blockID.Hash.Bytes()) && - assert.Equal(t, req.Height, height) && - assert.Equal(t, req.Round, round) && - assert.Equal(t, req.VoteExtensions, voteExtensions) && + return assert.Equal(t, blockID.Hash.Bytes(), req.Hash) && + assert.Equal(t, height, req.Height) && + assert.Equal(t, round, req.Round) && + assert.Equal(t, voteExtensions.ToExtendProto(), req.VoteExtensions) && assert.True(t, ok) }) m.On("VerifyVoteExtension", mock.Anything, reqVerifyVoteExtFunc). @@ -2218,7 +2216,7 @@ func TestExtendVote(t *testing.T) { ensurePrevoteMatch(t, voteCh, height, round, blockID.Hash) ensurePrecommit(t, voteCh, height, round) - signAddVotes(ctx, t, cs1, tmproto.PrecommitType, config.ChainID(), blockID, vss[1:]...) + signAddPrecommitsWithExtension(ctx, t, cs1, config.ChainID(), blockID, voteExtensions, vss[1:]...) 
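The tests in this file now build extensions with types.VoteExtensionsFromProto and convert back with ToExtendProto / GetExtensions when matching ABCI requests. A small stand-alone sketch of that round-trip, using local stand-in types rather than the real tmproto or types packages, just to show the shape of the conversion:

package main

import "fmt"

// Stand-ins for tmproto.VoteExtensionType / tmproto.VoteExtension.
type VoteExtensionType int32

const ThresholdRecover VoteExtensionType = 1

type ProtoVoteExtension struct {
	Type      VoteExtensionType
	Extension []byte
}

// VoteExtensions mimics the wrapper used in the tests: built from proto
// values, and able to hand the raw extension payloads back for assertions.
type VoteExtensions []ProtoVoteExtension

func VoteExtensionsFromProto(exts ...ProtoVoteExtension) VoteExtensions {
	return VoteExtensions(exts)
}

func (v VoteExtensions) GetExtensions() [][]byte {
	out := make([][]byte, 0, len(v))
	for _, e := range v {
		out = append(out, e.Extension)
	}
	return out
}

func main() {
	exts := VoteExtensionsFromProto(ProtoVoteExtension{Type: ThresholdRecover, Extension: []byte("extension")})
	fmt.Printf("%d extension(s): %q\n", len(exts), exts.GetExtensions())
}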
ensureNewRound(t, newRoundCh, height+1, 0) m.AssertExpectations(t) mock.AssertExpectationsForObjects(t, m) @@ -2302,22 +2300,21 @@ func TestVerifyVoteExtensionNotCalledOnAbsentPrecommit(t *testing.T) { config := configSetup(t) ctx, cancel := context.WithCancel(context.Background()) defer cancel() - voteExtensions := []*abci.ExtendVoteExtension{ - { - Type: tmproto.VoteExtensionType_DEFAULT, - Extension: []byte("extension"), - }, - } + voteExtensions := types.VoteExtensionsFromProto(&tmproto.VoteExtension{ + Type: tmproto.VoteExtensionType_THRESHOLD_RECOVER, + Extension: []byte("extension"), + }) + m := abcimocks.NewApplication(t) m.On("ProcessProposal", mock.Anything, mock.Anything).Return(&abci.ResponseProcessProposal{ AppHash: make([]byte, crypto.DefaultAppHashSize), Status: abci.ResponseProcessProposal_ACCEPT, }, nil) m.On("PrepareProposal", mock.Anything, mock.Anything).Return(&abci.ResponsePrepareProposal{ - AppHash: make([]byte, crypto.DefaultAppHashSize), + AppHash: make([]byte, crypto.DefaultAppHashSize), AppVersion: 1, }, nil) m.On("ExtendVote", mock.Anything, mock.Anything).Return(&abci.ResponseExtendVote{ - VoteExtensions: voteExtensions, + VoteExtensions: voteExtensions.ToExtendProto(), }, nil) m.On("FinalizeBlock", mock.Anything, mock.Anything).Return(&abci.ResponseFinalizeBlock{}, nil).Maybe() cs1, vss := makeState(ctx, t, makeStateArgs{config: config, application: m}) @@ -2355,10 +2352,10 @@ func TestVerifyVoteExtensionNotCalledOnAbsentPrecommit(t *testing.T) { }) reqVerifyVoteExtFunc := mock.MatchedBy(func(req *abci.RequestVerifyVoteExtension) bool { _, ok := proTxHashMap[types.ProTxHash(req.ValidatorProTxHash).String()] - return assert.Equal(t, req.Hash, blockID.Hash.Bytes()) && - assert.Equal(t, req.Height, height) && - assert.Equal(t, req.Round, round) && - assert.Equal(t, req.VoteExtensions, voteExtensions) && + return assert.Equal(t, blockID.Hash.Bytes(), req.Hash) && + assert.Equal(t, height, req.Height) && + assert.Equal(t, round, req.Round) && + assert.Equal(t, voteExtensions.ToExtendProto(), req.VoteExtensions) && assert.True(t, ok) }) m.On("VerifyVoteExtension", mock.Anything, reqVerifyVoteExtFunc). @@ -2366,7 +2363,7 @@ func TestVerifyVoteExtensionNotCalledOnAbsentPrecommit(t *testing.T) { Status: abci.ResponseVerifyVoteExtension_ACCEPT, }, nil) - signAddVotes(ctx, t, cs1, tmproto.PrecommitType, config.ChainID(), blockID, vss[2:]...) + signAddPrecommitsWithExtension(ctx, t, cs1, config.ChainID(), blockID, voteExtensions, vss[2:]...) ensureNewRound(t, newRoundCh, height+1, 0) m.AssertExpectations(t) @@ -2381,7 +2378,7 @@ func TestVerifyVoteExtensionNotCalledOnAbsentPrecommit(t *testing.T) { Round: round, VoteExtensions: []*abci.ExtendVoteExtension{ { - Type: tmproto.VoteExtensionType_DEFAULT, + Type: tmproto.VoteExtensionType_THRESHOLD_RECOVER, Extension: []byte("extension"), }, }, @@ -2392,7 +2389,7 @@ func TestVerifyVoteExtensionNotCalledOnAbsentPrecommit(t *testing.T) { // TestPrepareProposalReceivesVoteExtensions tests that the PrepareProposal method // is called with the vote extensions from the previous height. The test functions // be completing a consensus height with a mock application as the proposer. The -// test then proceeds to fail sever rounds of consensus until the mock application +// test then proceeds to fail several rounds of consensus until the mock application // is the proposer again and ensures that the mock application receives the set of // vote extensions from the previous consensus instance. 
func TestPrepareProposalReceivesVoteExtensions(t *testing.T) { @@ -2401,36 +2398,56 @@ func TestPrepareProposalReceivesVoteExtensions(t *testing.T) { config := configSetup(t) + voteExtensions := types.VoteExtensionsFromProto( + &tmproto.VoteExtension{ + Type: tmproto.VoteExtensionType_THRESHOLD_RECOVER_RAW, + Extension: crypto.Checksum([]byte("extension-raw")), + }, &tmproto.VoteExtension{ + Type: tmproto.VoteExtensionType_THRESHOLD_RECOVER, + Extension: []byte("deterministic"), + }, + ) + m := &abcimocks.Application{} m.On("ExtendVote", mock.Anything, mock.Anything).Return(&abci.ResponseExtendVote{ - VoteExtensions: []*abci.ExtendVoteExtension{ - { - Type: tmproto.VoteExtensionType_DEFAULT, - Extension: []byte("extension"), - }, - { - Type: tmproto.VoteExtensionType_THRESHOLD_RECOVER, - Extension: []byte("deterministic"), - }, - }, + VoteExtensions: voteExtensions.ToExtendProto(), }, nil) m.On("ProcessProposal", mock.Anything, mock.Anything).Return(&abci.ResponseProcessProposal{ AppHash: make([]byte, crypto.DefaultAppHashSize), Status: abci.ResponseProcessProposal_ACCEPT, }, nil) - // capture the prepare proposal request. - rpp := &abci.RequestPrepareProposal{} + // matcher for prepare proposal request m.On("PrepareProposal", mock.Anything, mock.MatchedBy(func(r *abci.RequestPrepareProposal) bool { - rpp = r - return true + if r.Height == 1 { + return assert.Empty(t, r.GetLocalLastCommit().ThresholdVoteExtensions, "no vote extensions should be present on the first height") + } + + // at height 2, we expect the vote extensions from the previous height to be present. + extensions := make([][]byte, 0) + for _, ext := range r.GetLocalLastCommit().ThresholdVoteExtensions { + extensions = append(extensions, ext.Extension) + } + return assert.EqualValues(t, 2, r.Height) && + assert.EqualValues(t, 3, r.Round) && + assert.Len(t, r.GetLocalLastCommit().ThresholdVoteExtensions, 2, "expected 2 vote extensions at height %d", r.Height) && + assert.EqualValues(t, voteExtensions.GetExtensions(), extensions) + })).Return(&abci.ResponsePrepareProposal{ - AppHash: make([]byte, crypto.DefaultAppHashSize), + AppHash: make([]byte, crypto.DefaultAppHashSize), AppVersion: 1, }, nil) - m.On("PrepareProposal", mock.Anything, mock.Anything).Return(&abci.ResponsePrepareProposal{AppHash: make([]byte, crypto.DefaultAppHashSize)}, nil).Once() m.On("VerifyVoteExtension", mock.Anything, mock.Anything).Return(&abci.ResponseVerifyVoteExtension{Status: abci.ResponseVerifyVoteExtension_ACCEPT}, nil) - m.On("FinalizeBlock", mock.Anything, mock.Anything).Return(&abci.ResponseFinalizeBlock{}, nil) + + // We expect 2 threshold-recovered vote extensions in current Commit + m.On("FinalizeBlock", mock.Anything, mock.MatchedBy(func(r *abci.RequestFinalizeBlock) bool { + assert.Len(t, r.Commit.ThresholdVoteExtensions, 2) + vexts := r.Commit.ThresholdVoteExtensions + + return bytes.Equal(vexts[0].Extension, voteExtensions[0].GetExtension()) && + bytes.Equal(vexts[1].Extension, voteExtensions[1].GetExtension()) + + })).Return(&abci.ResponseFinalizeBlock{}, nil) cs1, vss := makeState(ctx, t, makeStateArgs{config: config, application: m}) stateData := cs1.GetStateData() @@ -2452,17 +2469,7 @@ func TestPrepareProposalReceivesVoteExtensions(t *testing.T) { signAddVotes(ctx, t, cs1, tmproto.PrevoteType, config.ChainID(), blockID, vss[1:]...) // create a precommit for each validator with the associated vote extension. 
- for _, vs := range vss[1:] { - voteExtensions := types.VoteExtensions{ - tmproto.VoteExtensionType_DEFAULT: []types.VoteExtension{ - {Extension: []byte("extension")}, - }, - tmproto.VoteExtensionType_THRESHOLD_RECOVER: []types.VoteExtension{ - {Extension: []byte("deterministic")}, - }, - } - signAddPrecommitWithExtension(ctx, t, cs1, config.ChainID(), blockID, voteExtensions, vs) - } + signAddPrecommitsWithExtension(ctx, t, cs1, config.ChainID(), blockID, voteExtensions, vss[1:]...) ensurePrevote(t, voteCh, height, round) @@ -2482,9 +2489,7 @@ func TestPrepareProposalReceivesVoteExtensions(t *testing.T) { ensureNewRound(t, newRoundCh, height, round) ensureNewProposal(t, proposalCh, height, round) - // ensure that the proposer received the list of vote extensions from the - // previous height. - require.Len(t, rpp.LocalLastCommit.ThresholdVoteExtensions, 1) + m.AssertExpectations(t) } // 4 vals, 3 Nil Precommits at P0 @@ -2653,8 +2658,10 @@ func TestEmitNewValidBlockEventOnCommitWithoutBlock(t *testing.T) { ensureNewValidBlock(t, validBlockCh, height, round) rs := cs1.GetRoundState() - assert.True(t, rs.Step == cstypes.RoundStepPrecommit) - assert.True(t, rs.ProposalBlock == nil) + // due to some delays, we might be on Precommit or ApplyCommit step, + // as these steps just follow one another with no delay + assert.Contains(t, []cstypes.RoundStepType{cstypes.RoundStepPrecommit, cstypes.RoundStepApplyCommit}, rs.Step) + assert.Nil(t, rs.ProposalBlock) assert.True(t, rs.ProposalBlockParts.Header().Equals(blockID.PartSetHeader)) } @@ -2728,7 +2735,6 @@ func TestStartNextHeightCorrectlyAfterTimeout(t *testing.T) { cs1, vss := makeState(ctx, t, makeStateArgs{config: config}) stateData := cs1.GetStateData() - stateData.state.ConsensusParams.Timeout.BypassCommitTimeout = false err := stateData.Save() require.NoError(t, err) cs1.txNotifier = &fakeTxNotifier{ch: make(chan struct{})} @@ -2793,7 +2799,6 @@ func TestResetTimeoutPrecommitUponNewHeight(t *testing.T) { cs1, vss := makeState(ctx, t, makeStateArgs{config: config}) stateData := cs1.GetStateData() - stateData.state.ConsensusParams.Timeout.BypassCommitTimeout = false err := stateData.Save() require.NoError(t, err) @@ -3154,6 +3159,7 @@ func TestStateTryAddCommitCallsProcessProposal(t *testing.T) { block, err := sf.MakeBlock(css0StateData.state, 1, &types.Commit{}, kvstore.ProtocolVersion) require.NoError(t, err) + require.NotZero(t, block.Version.App) block.CoreChainLockedHeight = 1 commit, err := factory.MakeCommit( @@ -3164,7 +3170,6 @@ func TestStateTryAddCommitCallsProcessProposal(t *testing.T) { css0StateData.Votes.Precommits(0), css0StateData.Validators, privvals, - block.StateID(), ) require.NoError(t, err) @@ -3282,18 +3287,28 @@ func subscribe( return ch } -func signAddPrecommitWithExtension(ctx context.Context, +func signAddPrecommitsWithExtension(ctx context.Context, t *testing.T, cs *State, chainID string, blockID types.BlockID, extensions types.VoteExtensions, - stub *validatorStub) { + vss ...*validatorStub) { _, valSet := cs.GetValidatorSet() - v, err := stub.signVote(ctx, tmproto.PrecommitType, chainID, blockID, valSet.QuorumType, - valSet.QuorumHash, extensions) - require.NoError(t, err, "failed to sign vote") - addVotes(cs, v) + votes := make([]*types.Vote, 0, len(vss)) + + for _, vs := range vss { + v, err := vs.signVote(ctx, tmproto.PrecommitType, chainID, blockID, valSet.QuorumType, + valSet.QuorumHash, extensions.Copy()) + require.NoError(t, err, "failed to sign vote") + vs.lastVote = v + votes = append(votes, v) + + 
protx, _ := vs.GetProTxHash(ctx) + q, _ := vs.GetPubKey(ctx, valSet.QuorumHash) + t.Logf("signAddPrecommitsWithExtension: pubkey: %X, sigs %X, val protxhash(%d): %X\n", q.Bytes(), v.VoteExtensions.GetSignatures(), vs.Index, protx) + } + addVotes(cs, votes...) } // mockProposerApplicationCalls configures mock Application `m` to support calls executed for each round on the proposer. @@ -3310,7 +3325,7 @@ func mockProposerApplicationCalls(t *testing.T, m *abcimocks.Application, round }) m.On("PrepareProposal", mock.Anything, roundMatcher).Return(&abci.ResponsePrepareProposal{ - AppHash: make([]byte, crypto.DefaultAppHashSize), + AppHash: make([]byte, crypto.DefaultAppHashSize), AppVersion: 1, }, nil).Once() m.On("ProcessProposal", mock.Anything, roundMatcher).Return(&abci.ResponseProcessProposal{ @@ -3320,12 +3335,7 @@ func mockProposerApplicationCalls(t *testing.T, m *abcimocks.Application, round if final { m.On("ExtendVote", mock.Anything, roundMatcher). - Return(&abci.ResponseExtendVote{ - VoteExtensions: []*abci.ExtendVoteExtension{{ - Type: tmproto.VoteExtensionType_DEFAULT, - Extension: []byte("extension"), - }}, - }, nil).Once() + Return(&abci.ResponseExtendVote{}, nil).Once() m.On("VerifyVoteExtension", mock.Anything, roundMatcher). Return(&abci.ResponseVerifyVoteExtension{ diff --git a/internal/consensus/types/height_vote_set_test.go b/internal/consensus/types/height_vote_set_test.go index 19223279b3..b65ab9ed6a 100644 --- a/internal/consensus/types/height_vote_set_test.go +++ b/internal/consensus/types/height_vote_set_test.go @@ -87,7 +87,7 @@ func makeVoteHR( require.NoError(t, err, "Error signing vote") vote.BlockSignature = v.BlockSignature - err = vote.VoteExtensions.CopySignsFromProto(v.VoteExtensionsToMap()) + err = vote.VoteExtensions.CopySignsFromProto(v.VoteExtensions) require.NoError(t, err) return vote diff --git a/internal/consensus/types/peer_round_state.go b/internal/consensus/types/peer_round_state.go index e5e2e3e6fb..4649e8f9af 100644 --- a/internal/consensus/types/peer_round_state.go +++ b/internal/consensus/types/peer_round_state.go @@ -34,7 +34,6 @@ type PeerRoundState struct { Prevotes *bits.BitArray `json:"prevotes"` // All votes peer has for this round Precommits *bits.BitArray `json:"precommits"` // All precommits peer has for this round LastCommitRound int32 `json:"last_commit_round"` // Round of commit for last height. -1 if none. - LastPrecommits *bits.BitArray `json:"last_commit"` // All commit precommits of commit for last height. 
HasCommit bool `json:"has_commit"` @@ -69,7 +68,6 @@ func (prs PeerRoundState) Copy() PeerRoundState { prs.ProposalPOL = prs.ProposalPOL.Copy() prs.Prevotes = prs.Prevotes.Copy() prs.Precommits = prs.Precommits.Copy() - prs.LastPrecommits = prs.LastPrecommits.Copy() prs.CatchupCommit = prs.CatchupCommit.Copy() return prs @@ -83,7 +81,7 @@ func (prs PeerRoundState) StringIndented(indent string) string { %s POL %v (round %v) %s Prevotes %v %s Precommits %v -%s LastPrecommits %v (round %v) +%s Last commit round %v %s Catchup %v (round %v) %s}`, indent, prs.Height, prs.Round, prs.Step, prs.StartTime, @@ -91,7 +89,7 @@ func (prs PeerRoundState) StringIndented(indent string) string { indent, prs.ProposalPOL, prs.ProposalPOLRound, indent, prs.Prevotes, indent, prs.Precommits, - indent, prs.LastPrecommits, prs.LastCommitRound, + indent, prs.LastCommitRound, indent, prs.CatchupCommit, prs.CatchupCommitRound, indent) } diff --git a/internal/consensus/types/round_state.go b/internal/consensus/types/round_state.go index 8c9181b9a0..1e93d8d8c7 100644 --- a/internal/consensus/types/round_state.go +++ b/internal/consensus/types/round_state.go @@ -103,7 +103,6 @@ type RoundState struct { ValidBlockParts *types.PartSet `json:"valid_block_parts"` Votes *HeightVoteSet `json:"votes"` CommitRound int32 `json:"commit_round"` - LastPrecommits *types.VoteSet `json:"last_precommits"` LastCommit *types.Commit `json:"last_commit"` LastValidators *types.ValidatorSet `json:"last_validators"` TriggeredTimeoutPrecommit bool `json:"triggered_timeout_precommit"` diff --git a/internal/consensus/vote_signer.go b/internal/consensus/vote_signer.go index 62ef6cee30..e649261147 100644 --- a/internal/consensus/vote_signer.go +++ b/internal/consensus/vote_signer.go @@ -34,7 +34,9 @@ func (s *voteSigner) signAddVote( } // If the node not in the validator set, do nothing. 
if !stateData.Validators.HasProTxHash(s.privValidator.ProTxHash) { - s.logger.Error("do nothing, node is not a part of validator set") + s.logger.Error("do nothing, node is not a part of validator set", + "protxhash", s.privValidator.ProTxHash.ShortString(), + "validators", stateData.Validators) return nil } keyVals := []any{"height", stateData.Height, "round", stateData.Round, "quorum_hash", stateData.Validators.QuorumHash} diff --git a/internal/consensus/vote_signer_test.go b/internal/consensus/vote_signer_test.go index c9b2369691..614352da39 100644 --- a/internal/consensus/vote_signer_test.go +++ b/internal/consensus/vote_signer_test.go @@ -39,11 +39,11 @@ func TestVoteSigner_signAddVote(t *testing.T) { PrivValidator: priVals[0], ProTxHash: proTxHash, } - voteExtensions := types.VoteExtensions{ - tmproto.VoteExtensionType_THRESHOLD_RECOVER: []types.VoteExtension{ - {Extension: tmbytes.MustHexDecode("524F1D03D1D81E94A099042736D40BD9681B867321443FF58A4568E274DBD83B")}, - }, - } + voteExtensions := tmproto.VoteExtensions{{ + Type: tmproto.VoteExtensionType_THRESHOLD_RECOVER, + Extension: tmbytes.MustHexDecode("524F1D03D1D81E94A099042736D40BD9681B867321443FF58A4568E274DBD83B"), + }} + conf := configSetup(t) stateData := StateData{ config: conf.Consensus, @@ -103,7 +103,7 @@ func TestVoteSigner_signAddVote(t *testing.T) { { msgType: tmproto.PrecommitType, blockID: blockID, - voteExtensions: voteExtensions, + voteExtensions: types.VoteExtensionsFromProto(voteExtensions...), mockFn: mockFn, wantBlockSign: "9755FA9803D98C344CB16A43B782D2A93ED9A7E7E1C8437482F42781D5EF802EC82442C14C44429737A7355B1F9D87CB139EB2CF193A1CF7C812E38B99221ADF4DAA60CE16550ED6509A9C467A3D4492D77038505235796968465337A1E14B3E", }, @@ -138,11 +138,14 @@ func TestVoteSigner_signAddVote(t *testing.T) { key, err := privVal.GetPubKey(ctx, valSet.QuorumHash) assert.NoError(t, err) + for _, ext := range vote.VoteExtensions { + assert.NotEmpty(t, ext.GetSignature()) + } + key1, err := bls.G1ElementFromBytes(key.Bytes()) assert.NoError(t, err) t.Logf("key: %x", key1.Serialize()) - t.Logf("%+v", vote.VoteExtensions[tmproto.VoteExtensionType_THRESHOLD_RECOVER]) }) } } diff --git a/internal/consensus/wal.go b/internal/consensus/wal.go index b615058bb1..6f5fe608ad 100644 --- a/internal/consensus/wal.go +++ b/internal/consensus/wal.go @@ -440,12 +440,12 @@ type nilWAL struct{} var _ WAL = nilWAL{} -func (nilWAL) Write(m WALMessage) error { return nil } -func (nilWAL) WriteSync(m WALMessage) error { return nil } -func (nilWAL) FlushAndSync() error { return nil } -func (nilWAL) SearchForEndHeight(height int64, options *WALSearchOptions) (rd io.ReadCloser, found bool, err error) { +func (nilWAL) Write(_m WALMessage) error { return nil } +func (nilWAL) WriteSync(_m WALMessage) error { return nil } +func (nilWAL) FlushAndSync() error { return nil } +func (nilWAL) SearchForEndHeight(_height int64, _options *WALSearchOptions) (rd io.ReadCloser, found bool, err error) { return nil, false, nil } -func (nilWAL) Start(context.Context) error { return nil } -func (nilWAL) Stop() {} -func (nilWAL) Wait() {} +func (nilWAL) Start(_ctx context.Context) error { return nil } +func (nilWAL) Stop() {} +func (nilWAL) Wait() {} diff --git a/internal/consensus/wal_generator_test.go b/internal/consensus/wal_generator_test.go index 15a1a9d9c6..ab5ec26667 100644 --- a/internal/consensus/wal_generator_test.go +++ b/internal/consensus/wal_generator_test.go @@ -11,9 +11,10 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + 
"github.com/dashpay/tenderdash/config" "github.com/dashpay/tenderdash/libs/log" - "github.com/stretchr/testify/require" ) // WALGenerateNBlocks generates a consensus WAL. It does this by @@ -157,8 +158,8 @@ func (w *byteBufferWAL) WriteSync(m WALMessage) error { func (w *byteBufferWAL) FlushAndSync() error { return nil } func (w *byteBufferWAL) SearchForEndHeight( - height int64, - options *WALSearchOptions) (rd io.ReadCloser, found bool, err error) { + _height int64, + _options *WALSearchOptions) (rd io.ReadCloser, found bool, err error) { return nil, false, nil } diff --git a/internal/eventbus/event_bus.go b/internal/eventbus/event_bus.go index 05dbdcbc82..9cffd1475a 100644 --- a/internal/eventbus/event_bus.go +++ b/internal/eventbus/event_bus.go @@ -82,7 +82,7 @@ func (b *EventBus) Publish(eventValue string, eventData types.EventData) error { } func (b *EventBus) PublishEventNewBlock(data types.EventDataNewBlock) error { - events := data.ResultFinalizeBlock.Events + events := data.ResultProcessProposal.Events // add Tendermint-reserved new block event events = append(events, types.EventNewBlock) @@ -93,7 +93,7 @@ func (b *EventBus) PublishEventNewBlock(data types.EventDataNewBlock) error { func (b *EventBus) PublishEventNewBlockHeader(data types.EventDataNewBlockHeader) error { // no explicit deadline for publishing events - events := data.ResultFinalizeBlock.Events + events := data.ResultProcessProposal.Events // add Tendermint-reserved new block header event events = append(events, types.EventNewBlockHeader) diff --git a/internal/eventbus/event_bus_test.go b/internal/eventbus/event_bus_test.go index c0d5b3bb79..d993a25a0e 100644 --- a/internal/eventbus/event_bus_test.go +++ b/internal/eventbus/event_bus_test.go @@ -86,7 +86,7 @@ func TestEventBusPublishEventNewBlock(t *testing.T) { bps, err := block.MakePartSet(types.BlockPartSizeBytes) require.NoError(t, err) blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: bps.Header()} - resultFinalizeBlock := abci.ResponseFinalizeBlock{ + respProcessProposal := abci.ResponseProcessProposal{ Events: []abci.Event{ {Type: "testType", Attributes: []abci.EventAttribute{ {Key: "baz", Value: "1"}, @@ -112,13 +112,13 @@ func TestEventBusPublishEventNewBlock(t *testing.T) { edt := msg.Data().(types.EventDataNewBlock) assert.Equal(t, block, edt.Block) assert.Equal(t, blockID, edt.BlockID) - assert.Equal(t, resultFinalizeBlock, edt.ResultFinalizeBlock) + assert.Equal(t, respProcessProposal, edt.ResultProcessProposal) }() err = eventBus.PublishEventNewBlock(types.EventDataNewBlock{ - Block: block, - BlockID: blockID, - ResultFinalizeBlock: resultFinalizeBlock, + Block: block, + BlockID: blockID, + ResultProcessProposal: respProcessProposal, }) assert.NoError(t, err) @@ -256,14 +256,7 @@ func TestEventBusPublishEventNewBlockHeader(t *testing.T) { block := types.MakeBlock(0, []types.Tx{}, nil, []types.Evidence{}) block.SetDashParams(0, nil, 1, nil) - resultFinalizeBlock := abci.ResponseFinalizeBlock{ - Events: []abci.Event{ - {Type: "testType", Attributes: []abci.EventAttribute{ - {Key: "baz", Value: "1"}, - {Key: "foz", Value: "2"}, - }}, - }, - } + resultProcessProposal := abci.ResponseProcessProposal{ Status: abci.ResponseProcessProposal_ACCEPT, AppHash: make([]byte, crypto.DefaultAppHashSize), @@ -271,6 +264,12 @@ func TestEventBusPublishEventNewBlockHeader(t *testing.T) { {Code: abci.CodeTypeOK, Data: []byte("baz=1")}, {Code: abci.CodeTypeOK, Data: []byte("foz=2")}, }, + Events: []abci.Event{ + {Type: "testType", Attributes: 
[]abci.EventAttribute{ + {Key: "baz", Value: "1"}, + {Key: "foz", Value: "2"}, + }}, + }, } // PublishEventNewBlockHeader adds the tm.event compositeKey, so the query below should work @@ -289,13 +288,11 @@ func TestEventBusPublishEventNewBlockHeader(t *testing.T) { edt := msg.Data().(types.EventDataNewBlockHeader) assert.Equal(t, block.Header, edt.Header) - assert.Equal(t, resultFinalizeBlock, edt.ResultFinalizeBlock) }() err = eventBus.PublishEventNewBlockHeader(types.EventDataNewBlockHeader{ Header: block.Header, ResultProcessProposal: resultProcessProposal, - ResultFinalizeBlock: resultFinalizeBlock, }) assert.NoError(t, err) @@ -478,9 +475,6 @@ func BenchmarkEventBus(b *testing.B) { } func benchmarkEventBus(numClients int, randQueries bool, randEvents bool, b *testing.B) { - // for random* functions - mrand.Seed(time.Now().Unix()) - ctx, cancel := context.WithCancel(context.Background()) defer cancel() diff --git a/internal/evidence/mocks/block_store.go b/internal/evidence/mocks/block_store.go index 344c2f7992..41fff57cde 100644 --- a/internal/evidence/mocks/block_store.go +++ b/internal/evidence/mocks/block_store.go @@ -16,6 +16,10 @@ type BlockStore struct { func (_m *BlockStore) Height() int64 { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Height") + } + var r0 int64 if rf, ok := ret.Get(0).(func() int64); ok { r0 = rf() @@ -30,6 +34,10 @@ func (_m *BlockStore) Height() int64 { func (_m *BlockStore) LoadBlockCommit(height int64) *types.Commit { ret := _m.Called(height) + if len(ret) == 0 { + panic("no return value specified for LoadBlockCommit") + } + var r0 *types.Commit if rf, ok := ret.Get(0).(func(int64) *types.Commit); ok { r0 = rf(height) @@ -46,6 +54,10 @@ func (_m *BlockStore) LoadBlockCommit(height int64) *types.Commit { func (_m *BlockStore) LoadBlockMeta(height int64) *types.BlockMeta { ret := _m.Called(height) + if len(ret) == 0 { + panic("no return value specified for LoadBlockMeta") + } + var r0 *types.BlockMeta if rf, ok := ret.Get(0).(func(int64) *types.BlockMeta); ok { r0 = rf(height) diff --git a/internal/evidence/pool.go b/internal/evidence/pool.go index bf8f607ffc..52dab70139 100644 --- a/internal/evidence/pool.go +++ b/internal/evidence/pool.go @@ -320,7 +320,7 @@ func (evpool *Pool) isPending(evidence types.Evidence) bool { return ok } -func (evpool *Pool) addPendingEvidence(ctx context.Context, ev types.Evidence) error { +func (evpool *Pool) addPendingEvidence(_ctx context.Context, ev types.Evidence) error { evpb, err := types.EvidenceToProto(ev) if err != nil { return fmt.Errorf("failed to convert to proto: %w", err) diff --git a/internal/evidence/pool_test.go b/internal/evidence/pool_test.go index 1669a47146..d243ef8aff 100644 --- a/internal/evidence/pool_test.go +++ b/internal/evidence/pool_test.go @@ -259,7 +259,7 @@ func TestEvidencePoolUpdate(t *testing.T) { state.Validators.QuorumHash, ) require.NoError(t, err) - lastCommit := makeCommit(height, state.Validators.QuorumHash, val.ProTxHash) + lastCommit := makeCommit(height, state.Validators.QuorumHash) coreChainLockHeight := state.LastCoreChainLockedBlockHeight block := types.MakeBlock(height+1, []types.Tx{}, lastCommit, []types.Evidence{ev}) @@ -402,14 +402,13 @@ func TestRecoverPendingEvidence(t *testing.T) { height := int64(10) quorumHash := crypto.RandQuorumHash() val := types.NewMockPVForQuorum(quorumHash) - proTxHash := val.ProTxHash evidenceDB := dbm.NewMemDB() stateStore := initializeValidatorState(ctx, t, val, height, btcjson.LLMQType_5_60, quorumHash) state, 
err := stateStore.Load() require.NoError(t, err) - blockStore, err := initializeBlockStore(dbm.NewMemDB(), state, proTxHash) + blockStore, err := initializeBlockStore(dbm.NewMemDB(), state) require.NoError(t, err) logger := log.NewNopLogger() @@ -536,11 +535,11 @@ func initializeValidatorState( // initializeBlockStore creates a block storage and populates it w/ a dummy // block at +height+. -func initializeBlockStore(db dbm.DB, state sm.State, valProTxHash []byte) (*store.BlockStore, error) { +func initializeBlockStore(db dbm.DB, state sm.State) (*store.BlockStore, error) { blockStore := store.NewBlockStore(db) for i := int64(1); i <= state.LastBlockHeight; i++ { - lastCommit := makeCommit(i-1, state.Validators.QuorumHash, valProTxHash) + lastCommit := makeCommit(i-1, state.Validators.QuorumHash) block := state.MakeBlock(i, []types.Tx{}, lastCommit, nil, state.Validators.GetProposer().ProTxHash, 0) block.Header.Time = defaultEvidenceTime.Add(time.Duration(i) * time.Minute) @@ -551,18 +550,19 @@ func initializeBlockStore(db dbm.DB, state sm.State, valProTxHash []byte) (*stor return nil, err } - seenCommit := makeCommit(i, state.Validators.QuorumHash, valProTxHash) + seenCommit := makeCommit(i, state.Validators.QuorumHash) blockStore.SaveBlock(block, partSet, seenCommit) } return blockStore, nil } -func makeCommit(height int64, quorumHash []byte, valProTxHash []byte) *types.Commit { +func makeCommit(height int64, quorumHash []byte) *types.Commit { return types.NewCommit( height, 0, types.BlockID{}, + nil, &types.CommitSigns{ QuorumSigns: types.QuorumSigns{ BlockSign: crypto.CRandBytes(types.SignatureSize), @@ -581,7 +581,7 @@ func defaultTestPool(ctx context.Context, t *testing.T, height int64) (*evidence stateStore := initializeValidatorState(ctx, t, val, height, btcjson.LLMQType_5_60, quorumHash) state, err := stateStore.Load() require.NoError(t, err) - blockStore, err := initializeBlockStore(dbm.NewMemDB(), state, val.ProTxHash) + blockStore, err := initializeBlockStore(dbm.NewMemDB(), state) require.NoError(t, err) logger := log.NewNopLogger() diff --git a/internal/evidence/reactor.go b/internal/evidence/reactor.go index ff8f6b6309..54dd75a810 100644 --- a/internal/evidence/reactor.go +++ b/internal/evidence/reactor.go @@ -27,7 +27,7 @@ const ( func GetChannelDescriptor() *p2p.ChannelDescriptor { return &p2p.ChannelDescriptor{ ID: EvidenceChannel, - Priority: 6, + Priority: 3, RecvMessageCapacity: maxMsgSize, RecvBufferCapacity: 32, Name: "evidence", diff --git a/internal/evidence/reactor_test.go b/internal/evidence/reactor_test.go index 5fb858511c..845273d389 100644 --- a/internal/evidence/reactor_test.go +++ b/internal/evidence/reactor_test.go @@ -58,7 +58,7 @@ func setup(ctx context.Context, t *testing.T, stateStores []sm.Store) *reactorTe rts := &reactorTestSuite{ numStateStores: numStateStores, logger: log.NewNopLogger().With("testCase", t.Name()), - network: p2ptest.MakeNetwork(ctx, t, p2ptest.NetworkOptions{NumNodes: numStateStores}), + network: p2ptest.MakeNetwork(ctx, t, p2ptest.NetworkOptions{NumNodes: numStateStores}, log.NewNopLogger()), reactors: make(map[types.NodeID]*evidence.Reactor, numStateStores), pools: make(map[types.NodeID]*evidence.Pool, numStateStores), peerUpdates: make(map[types.NodeID]*p2p.PeerUpdates, numStateStores), diff --git a/internal/inspect/inspect_test.go b/internal/inspect/inspect_test.go index f5e7fcdb1b..bbbb4f9107 100644 --- a/internal/inspect/inspect_test.go +++ b/internal/inspect/inspect_test.go @@ -264,7 +264,6 @@ func TestBlockResults(t 
*testing.T) { testGasUsed := int64(100) stateStoreMock := &statemocks.Store{} stateStoreMock.On("LoadABCIResponses", testHeight).Return(&state.ABCIResponses{ - FinalizeBlock: &abcitypes.ResponseFinalizeBlock{}, ProcessProposal: &abcitypes.ResponseProcessProposal{ TxResults: []*abcitypes.ExecTxResult{ { diff --git a/internal/libs/clist/clist.go b/internal/libs/clist/clist.go index cb01b3f0d8..26ca2e124a 100644 --- a/internal/libs/clist/clist.go +++ b/internal/libs/clist/clist.go @@ -14,7 +14,8 @@ to ensure garbage collection of removed elements. import ( "fmt" - sync "github.com/sasha-s/go-deadlock" + // This is performance-critical code, so we don't use go-deadlock + "sync" ) // MaxLength is the max allowed number of elements a linked list is diff --git a/internal/libs/clist/clist_property_test.go b/internal/libs/clist/clist_property_test.go index b33ab21aa3..c8c332c255 100644 --- a/internal/libs/clist/clist_property_test.go +++ b/internal/libs/clist/clist_property_test.go @@ -25,7 +25,7 @@ type clistModel struct { // Init is a method used by the rapid state machine testing library. // Init is called when the test starts to initialize the data that will be used // in the state machine test. -func (m *clistModel) Init(t *rapid.T) { +func (m *clistModel) Init(_t *rapid.T) { m.clist = clist.New() m.model = []*clist.CElement{} } diff --git a/internal/libs/confix/plan.go b/internal/libs/confix/plan.go index ac6f7b5a6d..55fb767e51 100644 --- a/internal/libs/confix/plan.go +++ b/internal/libs/confix/plan.go @@ -234,4 +234,58 @@ var plan = transform.Plan{ T: transform.Remove(parser.Key{"p2p", "seeds"}), ErrorOK: true, }, + + { + // Since https://github.com/dashpay/tenderdash/pull/775/ + Desc: "Move ABCI related methods to [abci] section", + T: transform.Func(func(_ context.Context, doc *tomledit.Document) error { + var found []*tomledit.Entry + doc.Global.Scan(func(key parser.Key, e *tomledit.Entry) bool { + if len(key) == 1 && (key[0] == "abci" || key[0] == "proxy-app") { + found = append(found, e) + } + return true + }) + if len(found) == 0 { + return nil // nothing to do + } + + // Now that we know we have work to do, find the target table. + var sec *tomledit.Section + if dst := transform.FindTable(doc, "abci"); dst == nil { + // If the table doesn't exist, create it. Old config files + // probably will not have it, so plug in the comment too. + sec = &tomledit.Section{ + Heading: &parser.Heading{ + Block: parser.Comments{ + "#######################################################", + "### ABCI App Connection Options ###", + "#######################################################", + }, + Name: parser.Key{"abci"}, + }, + } + doc.Sections = append(doc.Sections, sec) + } else { + sec = dst.Section + } + + for _, e := range found { + e.Remove() + e.Name = parser.Key{e.Name[0]} + + switch e.Name[0] { + case "abci": + e.Name = parser.Key{"transport"} + case "proxy-app": + e.Name = parser.Key{"address"} + default: + e.Name = parser.Key{e.Name[0]} + } + + sec.Items = append(sec.Items, e.KeyValue) + } + return nil + }), + }, } diff --git a/internal/libs/sync/concurrent_slice.go b/internal/libs/sync/concurrent_slice.go new file mode 100644 index 0000000000..743a75112f --- /dev/null +++ b/internal/libs/sync/concurrent_slice.go @@ -0,0 +1,124 @@ +package sync + +import ( + "encoding/json" + "fmt" + "sync" +) + +// ConcurrentSlice is a thread-safe slice. +// +// It is safe to use from multiple goroutines without additional locking. +// It should be referenced by pointer. 
+// +// Initialize using NewConcurrentSlice(). +type ConcurrentSlice[T any] struct { + mtx sync.RWMutex + items []T +} + +// NewConcurrentSlice creates a new thread-safe slice. +func NewConcurrentSlice[T any](initial ...T) *ConcurrentSlice[T] { + return &ConcurrentSlice[T]{ + items: initial, + } +} + +// Append adds an element to the slice +func (s *ConcurrentSlice[T]) Append(val ...T) { + s.mtx.Lock() + defer s.mtx.Unlock() + + s.items = append(s.items, val...) +} + +// Reset removes all elements from the slice +func (s *ConcurrentSlice[T]) Reset() { + s.mtx.Lock() + defer s.mtx.Unlock() + + s.items = []T{} +} + +// Get returns the value at the given index +func (s *ConcurrentSlice[T]) Get(index int) T { + s.mtx.RLock() + defer s.mtx.RUnlock() + + return s.items[index] +} + +// Set updates the value at the given index. +// If the index is greater than the length of the slice, it panics. +// If the index is equal to the length of the slice, the value is appended. +// Otherwise, the value at the index is updated. +func (s *ConcurrentSlice[T]) Set(index int, val T) { + s.mtx.Lock() + defer s.mtx.Unlock() + + if index > len(s.items) { + panic("index out of range") + } else if index == len(s.items) { + s.items = append(s.items, val) + return + } + + s.items[index] = val +} + +// ToSlice returns a copy of the underlying slice +func (s *ConcurrentSlice[T]) ToSlice() []T { + s.mtx.RLock() + defer s.mtx.RUnlock() + + slice := make([]T, len(s.items)) + copy(slice, s.items) + return slice +} + +// Len returns the length of the slice +func (s *ConcurrentSlice[T]) Len() int { + s.mtx.RLock() + defer s.mtx.RUnlock() + + return len(s.items) +} + +// Copy returns a new deep copy of concurrentSlice with the same elements +func (s *ConcurrentSlice[T]) Copy() ConcurrentSlice[T] { + s.mtx.RLock() + defer s.mtx.RUnlock() + + return ConcurrentSlice[T]{ + items: s.ToSlice(), + } +} + +// MarshalJSON implements the json.Marshaler interface. +func (cs *ConcurrentSlice[T]) MarshalJSON() ([]byte, error) { + cs.mtx.RLock() + defer cs.mtx.RUnlock() + + return json.Marshal(cs.items) +} + +// UnmarshalJSON implements the json.Unmarshaler interface. 
+func (cs *ConcurrentSlice[T]) UnmarshalJSON(data []byte) error { + var items []T + if err := json.Unmarshal(data, &items); err != nil { + return err + } + + cs.mtx.Lock() + defer cs.mtx.Unlock() + + cs.items = items + return nil +} + +func (cs *ConcurrentSlice[T]) String() string { + cs.mtx.RLock() + defer cs.mtx.RUnlock() + + return fmt.Sprintf("%v", cs.items) +} diff --git a/internal/libs/sync/concurrent_slice_test.go b/internal/libs/sync/concurrent_slice_test.go new file mode 100644 index 0000000000..122f3a1a28 --- /dev/null +++ b/internal/libs/sync/concurrent_slice_test.go @@ -0,0 +1,96 @@ +package sync + +import ( + "encoding/json" + "sync" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestConcurrentSlice(t *testing.T) { + s := NewConcurrentSlice[int](1, 2, 3) + + // Test Append + s.Append(4) + if s.Len() != 4 { + t.Errorf("Expected length of slice to be 4, got %d", s.Len()) + } + + // Test Get + if s.Get(3) != 4 { + t.Errorf("Expected element at index 3 to be 4, got %d", s.Get(3)) + } + + // Test Set + s.Set(1, 5) + + // Test ToSlice + slice := s.ToSlice() + if len(slice) != 4 || slice[3] != 4 || slice[1] != 5 { + t.Errorf("Expected ToSlice to return [1 5 3 4], got %v", slice) + } + + // Test Reset + s.Reset() + if s.Len() != 0 { + t.Errorf("Expected length of slice to be 0 after Reset, got %d", s.Len()) + } + + // Test Copy + s.Append(5) + copy := s.Copy() + if copy.Len() != 1 || copy.Get(0) != 5 { + t.Errorf("Expected Copy to return a new slice with [5], got %v", copy.ToSlice()) + } +} + +func TestConcurrentSlice_Concurrency(t *testing.T) { + s := NewConcurrentSlice[int]() + + var wg sync.WaitGroup + for i := 0; i < 100; i++ { + wg.Add(1) + go func(val int) { + defer wg.Done() + s.Append(val) + }(i) + } + + wg.Wait() + + assert.Equal(t, 100, s.Len()) + + if s.Len() != 100 { + t.Errorf("Expected length of slice to be 100, got %d", s.Len()) + } + + for i := 0; i < 100; i++ { + assert.Contains(t, s.ToSlice(), i) + } +} + +func TestConcurrentSlice_MarshalUnmarshalJSON(t *testing.T) { + type node struct { + Channels *ConcurrentSlice[uint16] + } + cs := NewConcurrentSlice[uint16](1, 2, 3) + + node1 := node{ + Channels: cs, + } + + // Marshal to JSON + data, err := json.Marshal(node1) + assert.NoError(t, err, "Failed to marshal concurrentSlice") + + // Unmarshal from JSON + node2 := node{ + // Channels: NewConcurrentSlice[uint16](), + } + + err = json.Unmarshal(data, &node2) + assert.NoError(t, err, "Failed to unmarshal concurrentSlice") + + assert.EqualValues(t, node1.Channels.ToSlice(), node2.Channels.ToSlice()) +} diff --git a/internal/libs/test/mutate.go b/internal/libs/test/mutate.go index 94920cad5a..f70bb31a82 100644 --- a/internal/libs/test/mutate.go +++ b/internal/libs/test/mutate.go @@ -1,4 +1,4 @@ -// nolint:gosec // G404: Use of weak random number generator +//nolint:gosec // G404: Use of weak random number generator package test import ( diff --git a/internal/mempool/mempool.go b/internal/mempool/mempool.go index e75707e306..c268dff0fd 100644 --- a/internal/mempool/mempool.go +++ b/internal/mempool/mempool.go @@ -16,6 +16,7 @@ import ( "github.com/dashpay/tenderdash/config" "github.com/dashpay/tenderdash/internal/libs/clist" tmstrings "github.com/dashpay/tenderdash/internal/libs/strings" + tmsync "github.com/dashpay/tenderdash/internal/libs/sync" "github.com/dashpay/tenderdash/libs/log" "github.com/dashpay/tenderdash/types" ) @@ -48,14 +49,19 @@ type TxMempool struct { // Synchronized fields, protected by mtx. 
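A minimal usage sketch for the ConcurrentSlice type added above. The package alias, the main wrapper, and the example values are illustrative only, and the internal/libs/sync import path is usable only from within the tenderdash module itself:

package main

import (
	"encoding/json"
	"fmt"
	"sync"

	tmsync "github.com/dashpay/tenderdash/internal/libs/sync"
)

func main() {
	// Safe for concurrent writers; no external locking is needed.
	channels := tmsync.NewConcurrentSlice[uint16](1, 2)

	var wg sync.WaitGroup
	for i := uint16(3); i <= 6; i++ {
		wg.Add(1)
		go func(v uint16) {
			defer wg.Done()
			channels.Append(v)
		}(i)
	}
	wg.Wait()

	fmt.Println(channels.Len()) // 6

	// MarshalJSON/UnmarshalJSON let the slice be embedded in JSON-serialized structs,
	// as exercised by TestConcurrentSlice_MarshalUnmarshalJSON above.
	out, _ := json.Marshal(channels)
	fmt.Println(string(out)) // first two elements stay 1,2; appended values follow in arbitrary order
}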
mtx *sync.RWMutex notifiedTxsAvailable bool - txsAvailable chan struct{} // one value sent per height when mempool is not empty - preCheck PreCheckFunc - postCheck PostCheckFunc - height int64 // the latest height passed to Update + // txsAvailable is a waker that triggers when transactions are available in the mempool. + // Can be nil if not enabled with EnableTxsAvailable. + txsAvailable *tmsync.Waker + preCheck PreCheckFunc + postCheck PostCheckFunc + height int64 // the latest height passed to Update txs *clist.CList // valid transactions (passed CheckTx) txByKey map[types.TxKey]*clist.CElement txBySender map[string]*clist.CElement // for sender != "" + + // cancellation function for recheck txs tasks + recheckCancel context.CancelFunc } // NewTxMempool constructs a new, empty priority mempool at the specified @@ -78,6 +84,7 @@ func NewTxMempool( txByKey: make(map[types.TxKey]*clist.CElement), txBySender: make(map[string]*clist.CElement), } + if cfg.CacheSize > 0 { txmp.cache = NewLRUTxCache(cfg.CacheSize) } @@ -146,12 +153,26 @@ func (txmp *TxMempool) EnableTxsAvailable() { txmp.mtx.Lock() defer txmp.mtx.Unlock() - txmp.txsAvailable = make(chan struct{}, 1) + if txmp.txsAvailable != nil { + if err := txmp.txsAvailable.Close(); err != nil { + txmp.logger.Error("failed to close txsAvailable", "err", err) + } + } + txmp.txsAvailable = tmsync.NewWaker() } // TxsAvailable returns a channel which fires once for every height, and only // when transactions are available in the mempool. It is thread-safe. -func (txmp *TxMempool) TxsAvailable() <-chan struct{} { return txmp.txsAvailable } +// +// Note: the returned channel might never fire if EnableTxsAvailable() was not called before +// calling this function. +func (txmp *TxMempool) TxsAvailable() <-chan struct{} { + if txmp.txsAvailable == nil { + return make(<-chan struct{}) + } + + return txmp.txsAvailable.Sleep() +} // CheckTx adds the given transaction to the mempool if it fits and passes the // application's ABCI CheckTx method. @@ -245,7 +266,11 @@ func (txmp *TxMempool) CheckTx( func (txmp *TxMempool) RemoveTxByKey(txKey types.TxKey) error { txmp.mtx.Lock() defer txmp.mtx.Unlock() - return txmp.removeTxByKey(txKey) + if err := txmp.removeTxByKey(txKey); err != nil { + return err + } + txmp.metrics.Size.Add(-1) + return nil } // removeTxByKey removes the specified transaction key from the mempool. @@ -394,8 +419,10 @@ func (txmp *TxMempool) Update( len(blockTxs), len(deliverTxResponses))) } - txmp.height = blockHeight - txmp.notifiedTxsAvailable = false + if txmp.height != blockHeight { + txmp.height = blockHeight + txmp.notifiedTxsAvailable = false + } if newPreFn != nil { txmp.preCheck = newPreFn @@ -448,11 +475,29 @@ func (txmp *TxMempool) Update( // transactions are evicted. // // Finally, the new transaction is added and size stats updated. +// +// Note: due to the locking approach we take, it is possible that another thread evicted the same items in the meantime. +// This means we can put slightly more items into the mempool, but it gives a significant performance benefit. func (txmp *TxMempool) addNewTransaction(wtx *WrappedTx, checkTxRes *abci.ResponseCheckTx) error { - txmp.mtx.Lock() - defer txmp.mtx.Unlock() - var err error + // RLock here. + + // When the mempool is full, we don't need a writable lock. RLocking here should give a significant + // performance boost in this case, as threads will not need to wait to obtain the writable lock.
+ // + // A disadvantage is that we need to relock RW when we need to evict transactions, which introduces a race condition + // when two threads want to evict the same transactions. We choose to manage that race condition to gain some + // performance. + txmp.mtx.RLock() + rlocked := true + defer func() { + if rlocked { + txmp.mtx.RUnlock() + } else { + txmp.mtx.Unlock() + } + }() + if txmp.postCheck != nil { err = txmp.postCheck(wtx.tx, checkTxRes) } @@ -461,7 +506,7 @@ func (txmp *TxMempool) addNewTransaction(wtx *WrappedTx, checkTxRes *abci.Respon txmp.logger.Info( "rejected bad transaction", "priority", wtx.Priority(), - "tx", fmt.Sprintf("%X", wtx.tx.Hash()), + "tx", fmt.Sprintf("%X", wtx.hash), "peer_id", wtx.peers, "code", checkTxRes.Code, "post_check_err", err, @@ -496,13 +541,13 @@ func (txmp *TxMempool) addNewTransaction(wtx *WrappedTx, checkTxRes *abci.Respon w := elt.Value.(*WrappedTx) txmp.logger.Debug( "rejected valid incoming transaction; tx already exists for sender", - "tx", fmt.Sprintf("%X", w.tx.Hash()), + "tx", fmt.Sprintf("%X", w.hash), "sender", sender, ) txmp.metrics.RejectedTxs.Add(1) // TODO(creachadair): Report an error for a duplicate sender. // This is an API change, unfortunately, but should be made safe if it isn't. - // fmt.Errorf("transaction rejected: tx already exists for sender %q (%X)", sender, w.tx.Hash()) + // fmt.Errorf("transaction rejected: tx already exists for sender %q (%X)", sender, w.hash) return nil } } @@ -513,7 +558,10 @@ func (txmp *TxMempool) addNewTransaction(wtx *WrappedTx, checkTxRes *abci.Respon // of them as necessary to make room for tx. If no such items exist, we // discard tx. + txmp.logger.Debug("addNewTransaction canAddTx") if err := txmp.canAddTx(wtx); err != nil { + txmp.logger.Debug("addNewTransaction findVictims", "err", err) + var victims []*clist.CElement // eligible transactions for eviction var victimBytes int64 // total size of victims for cur := txmp.txs.Front(); cur != nil; cur = cur.Next() { @@ -524,65 +572,68 @@ func (txmp *TxMempool) addNewTransaction(wtx *WrappedTx, checkTxRes *abci.Respon } } + haveSpace := victimBytes >= wtx.Size() + if haveSpace { + // Sort lowest priority items first so they will be evicted first. Break + // ties in favor of newer items (to maintain FIFO semantics in a group). + sort.Slice(victims, func(i, j int) bool { + iw := victims[i].Value.(*WrappedTx) + jw := victims[j].Value.(*WrappedTx) + if iw.Priority() == jw.Priority() { + return iw.timestamp.After(jw.timestamp) + } + return iw.Priority() < jw.Priority() + }) + + txmp.logger.Debug("evicting lower-priority transactions", + "new_tx", tmstrings.LazySprintf("%X", wtx.hash), + "new_priority", priority) + + // Evict as many of the victims as necessary to make room. + // We need to drop RLock and Lock here, as from now on, we will be modifying the mempool. + // This introduces a race condition which we handle inside evict() + if rlocked { + txmp.mtx.RUnlock() + txmp.mtx.Lock() + rlocked = false + } + + haveSpace = txmp.evict(wtx.Size(), victims) + + if !haveSpace { + txmp.logger.Debug("unexpected mempool eviction failure - possibly concurrent eviction happened") + } + } + // If there are no suitable eviction candidates, or the total size of // those candidates is not enough to make room for the new transaction, // drop the new one.
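The comments above describe an optimistic locking scheme: take the read lock for the common path and upgrade to the write lock only when the mempool actually has to be mutated, re-validating state after the upgrade because another writer may have run in the gap. A minimal, self-contained sketch of that pattern (not tenderdash code; boundedSet and its fields are made up for illustration):

package lockingexample

import "sync"

// boundedSet is a toy structure used only to illustrate the RLock-then-upgrade pattern.
type boundedSet struct {
	mu    sync.RWMutex
	items map[string]struct{}
	limit int
}

// add returns false when the key already exists or the set is full.
func (s *boundedSet) add(key string) bool {
	// Cheap checks under the shared lock; many goroutines can run this part concurrently.
	s.mu.RLock()
	_, exists := s.items[key]
	full := len(s.items) >= s.limit
	s.mu.RUnlock()
	if exists || full {
		return false
	}

	// Upgrade to the exclusive lock. Another goroutine may have inserted or filled
	// the set between RUnlock and Lock, so the conditions are re-checked here, just
	// as evict() re-checks vic.Removed() in the mempool code above.
	s.mu.Lock()
	defer s.mu.Unlock()
	if _, ok := s.items[key]; ok || len(s.items) >= s.limit {
		return false
	}
	s.items[key] = struct{}{}
	return true
}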
- if len(victims) == 0 || victimBytes < wtx.Size() { + if !haveSpace { txmp.cache.Remove(wtx.tx) txmp.logger.Error( "rejected valid incoming transaction; mempool is full", - "tx", fmt.Sprintf("%X", wtx.tx.Hash()), + "tx", fmt.Sprintf("%X", wtx.hash), "err", err.Error(), ) txmp.metrics.RejectedTxs.Add(1) // TODO(creachadair): Report an error for a full mempool. // This is an API change, unfortunately, but should be made safe if it isn't. - // fmt.Errorf("transaction rejected: mempool is full (%X)", wtx.tx.Hash()) + // fmt.Errorf("transaction rejected: mempool is full (%X)", wtx.hash) return nil } - - txmp.logger.Debug("evicting lower-priority transactions", - "new_tx", tmstrings.LazySprintf("%X", wtx.tx.Hash()), - "new_priority", priority, - ) - - // Sort lowest priority items first so they will be evicted first. Break - // ties in favor of newer items (to maintain FIFO semantics in a group). - sort.Slice(victims, func(i, j int) bool { - iw := victims[i].Value.(*WrappedTx) - jw := victims[j].Value.(*WrappedTx) - if iw.Priority() == jw.Priority() { - return iw.timestamp.After(jw.timestamp) - } - return iw.Priority() < jw.Priority() - }) - - // Evict as many of the victims as necessary to make room. - var evictedBytes int64 - for _, vic := range victims { - w := vic.Value.(*WrappedTx) - - txmp.logger.Debug( - "evicted valid existing transaction; mempool full", - "old_tx", tmstrings.LazySprintf("%X", w.tx.Hash()), - "old_priority", w.priority, - ) - txmp.removeTxByElement(vic) - txmp.cache.Remove(w.tx) - txmp.metrics.EvictedTxs.Add(1) - - // We may not need to evict all the eligible transactions. Bail out - // early if we have made enough room. - evictedBytes += w.Size() - if evictedBytes >= wtx.Size() { - break - } - } } wtx.SetGasWanted(checkTxRes.GasWanted) wtx.SetPriority(priority) wtx.SetSender(sender) + + // Ensure we have the writable lock + if rlocked { + txmp.mtx.RUnlock() + txmp.mtx.Lock() + rlocked = false + } + txmp.insertTx(wtx) txmp.metrics.TxSizeBytes.Observe(float64(wtx.Size())) @@ -590,14 +641,51 @@ func (txmp *TxMempool) addNewTransaction(wtx *WrappedTx, checkTxRes *abci.Respon txmp.logger.Debug( "inserted new valid transaction", "priority", wtx.Priority(), - "tx", tmstrings.LazySprintf("%X", wtx.tx.Hash()), + "tx", tmstrings.LazySprintf("%X", wtx.hash), "height", txmp.height, "num_txs", txmp.Size(), ) + txmp.notifyTxsAvailable() + return nil } +// Remove victims from the mempool until we free up at least `size` bytes. +// Returns true when enough victims were removed. +// +// Caller should hold the writable lock +func (txmp *TxMempool) evict(size int64, victims []*clist.CElement) bool { + var evictedBytes int64 + for _, vic := range victims { + w := vic.Value.(*WrappedTx) + + if vic.Removed() { + // Race condition - some other thread already removed this item + // We handle it by just skipping this tx + continue + } + + txmp.logger.Debug( + "evicted valid existing transaction; mempool full", + "old_tx", tmstrings.LazySprintf("%X", w.hash), + "old_priority", w.priority, + ) + txmp.removeTxByElement(vic) + txmp.cache.Remove(w.tx) + txmp.metrics.EvictedTxs.Add(1) + + // We may not need to evict all the eligible transactions. Bail out + // early if we have made enough room.
+ evictedBytes += w.Size() + if evictedBytes >= size { + return true + } + } + + return false +} + func (txmp *TxMempool) insertTx(wtx *WrappedTx) { elt := txmp.txs.PushBack(wtx) txmp.txByKey[wtx.tx.Key()] = elt @@ -642,7 +730,7 @@ func (txmp *TxMempool) handleRecheckResult(tx types.Tx, checkTxRes *abci.Respons txmp.logger.Debug( "existing transaction no longer valid; failed re-CheckTx callback", "priority", wtx.Priority(), - "tx", fmt.Sprintf("%X", wtx.tx.Hash()), + "tx", fmt.Sprintf("%X", wtx.hash), "err", err, "code", checkTxRes.Code, ) @@ -661,6 +749,12 @@ func (txmp *TxMempool) handleRecheckResult(tx types.Tx, checkTxRes *abci.Respons // Precondition: The mempool is not empty. // The caller must hold txmp.mtx exclusively. func (txmp *TxMempool) recheckTransactions(ctx context.Context) { + // cancel previous recheck if it is still running + if txmp.recheckCancel != nil { + txmp.recheckCancel() + } + ctx, txmp.recheckCancel = context.WithCancel(ctx) + if txmp.Size() == 0 { panic("mempool: cannot run recheck on an empty mempool") } @@ -684,13 +778,17 @@ func (txmp *TxMempool) recheckTransactions(ctx context.Context) { for _, wtx := range wtxs { wtx := wtx start(func() error { + if err := ctx.Err(); err != nil { + txmp.logger.Trace("recheck txs task canceled", "err", err, "tx", wtx.hash.String()) + return err + } rsp, err := txmp.proxyAppConn.CheckTx(ctx, &abci.RequestCheckTx{ Tx: wtx.tx, Type: abci.CheckTxType_Recheck, }) if err != nil { txmp.logger.Error("failed to execute CheckTx during recheck", - "err", err, "hash", fmt.Sprintf("%x", wtx.tx.Hash())) + "err", err, "hash", fmt.Sprintf("%x", wtx.hash)) } else { txmp.handleRecheckResult(wtx.tx, rsp) } @@ -703,8 +801,10 @@ func (txmp *TxMempool) recheckTransactions(ctx context.Context) { // When recheck is complete, trigger a notification for more transactions. _ = g.Wait() + txmp.mtx.Lock() defer txmp.mtx.Unlock() + txmp.notifyTxsAvailable() }() } @@ -759,6 +859,11 @@ func (txmp *TxMempool) purgeExpiredTxs(blockHeight int64) { } } +// notifyTxsAvailable triggers a notification that transactions are available in +// the mempool. It is a no-op if the mempool is empty or if a notification has +// already been sent. +// +// No locking is required to call this method. 
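For context on the waker-backed TxsAvailable() introduced earlier in this file: a consumer still selects on the returned channel once per height. A hypothetical consumer loop, sketched as if it lived in the mempool package; the reap limits and the hand-off are placeholders, not part of this PR:

package mempool

import "context"

// waitForTxs is a hypothetical consumer of TxsAvailable(); it is not part of this change.
func waitForTxs(ctx context.Context, mp *TxMempool) {
	mp.EnableTxsAvailable() // without this, the channel below never fires
	for {
		select {
		case <-ctx.Done():
			return
		case <-mp.TxsAvailable():
			// One notification per height; reap what fits into a block (placeholder limits).
			txs := mp.ReapMaxBytesMaxGas(1024*1024, -1)
			_ = txs // hand off to the proposer / consensus layer
		}
	}
}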
func (txmp *TxMempool) notifyTxsAvailable() { if txmp.Size() == 0 { return // nothing to do @@ -768,9 +873,6 @@ func (txmp *TxMempool) notifyTxsAvailable() { // channel cap is 1, so this will send once txmp.notifiedTxsAvailable = true - select { - case txmp.txsAvailable <- struct{}{}: - default: - } + txmp.txsAvailable.Wake() } } diff --git a/internal/mempool/mempool_test.go b/internal/mempool/mempool_test.go index 61796affa6..85fe4576fd 100644 --- a/internal/mempool/mempool_test.go +++ b/internal/mempool/mempool_test.go @@ -7,23 +7,29 @@ import ( "fmt" "math/rand" "os" + "runtime" "sort" "strconv" "strings" + "sync/atomic" "testing" "time" + "github.com/fortytw2/leaktest" sync "github.com/sasha-s/go-deadlock" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" abciclient "github.com/dashpay/tenderdash/abci/client" "github.com/dashpay/tenderdash/abci/example/code" "github.com/dashpay/tenderdash/abci/example/kvstore" abci "github.com/dashpay/tenderdash/abci/types" + "github.com/dashpay/tenderdash/abci/types/mocks" "github.com/dashpay/tenderdash/config" "github.com/dashpay/tenderdash/libs/log" + tmrand "github.com/dashpay/tenderdash/libs/rand" "github.com/dashpay/tenderdash/types" ) @@ -739,6 +745,89 @@ func TestTxMempool_CheckTxPostCheckError(t *testing.T) { } } +// TestTxMempool_OneRecheckTxAtTime checks if previous recheckTransactions task is canceled when another one is started. +// +// Given mempool with some transactions AND app that processes CheckTX very slowly, +// when we call recheckTransactions() twice, +// then first recheckTransactions task is canceled and second one starts from the beginning. +func TestTxMempool_OneRecheckTxAtTime(t *testing.T) { + // SETUP + t.Cleanup(leaktest.Check(t)) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + logger := log.NewTestingLogger(t) + + // num of parallel tasks started in recheckTransactions; this is how many + // txs will be processed as a minimum + numRecheckTasks := 2 * runtime.NumCPU() + numTxs := 3 * numRecheckTasks + + app := mocks.NewApplication(t) + var ( + checkTxCounter atomic.Uint32 + recheckTxBlocker sync.Mutex + ) + // app will wait on recheckTxBlocker until we unblock it + app.On("CheckTx", mock.Anything, mock.Anything).Return(&abci.ResponseCheckTx{ + Priority: 1, + Code: abci.CodeTypeOK}, nil). 
+ Run(func(_ mock.Arguments) { + // increase counter before locking, so we can check if it was called + checkTxCounter.Add(1) + recheckTxBlocker.Lock() + defer recheckTxBlocker.Unlock() + }) + + client := abciclient.NewLocalClient(log.NewNopLogger(), app) + cfg := config.TestConfig() + mp := NewTxMempool(logger, cfg.Mempool, client) + // add some txs to mempool + for i := 0; i < numTxs; i++ { + err := mp.addNewTransaction(randomTx(), &abci.ResponseCheckTx{Code: abci.CodeTypeOK, GasWanted: 1, Priority: int64(i + 1)}) + require.NoError(t, err) + } + + // TEST + + // block checkTx until we unblock it + recheckTxBlocker.Lock() + // start recheckTransactions in the background; it should process exactly one tx per recheck task + mp.recheckTransactions(ctx) + assert.Eventually(t, + func() bool { return checkTxCounter.Load() == uint32(numRecheckTasks) }, + 200*time.Millisecond, 10*time.Millisecond, + "1st run: processed %d txs, expected %d", checkTxCounter.Load(), numRecheckTasks) + + // another recheck should cancel the first run and start from the beginning , but pending checkTx ops should finish + mp.recheckTransactions(ctx) + // unlock the app; this should finish all started rechecks, but not continue with rechecks from 1st run + recheckTxBlocker.Unlock() + // Ensure that all goroutines/tasks have finished + assert.Eventually(t, func() bool { return uint32(numRecheckTasks+numTxs) == checkTxCounter.Load() }, + 200*time.Millisecond, 10*time.Millisecond, + "num of txs mismatch: got %d, expected %d", checkTxCounter.Load(), numRecheckTasks+numTxs) + + // let's give it some more time and ensure we don't process any further txs + if !testing.Short() { + time.Sleep(100 * time.Millisecond) + assert.Equal(t, uint32(numRecheckTasks+numTxs), checkTxCounter.Load()) + } +} + +func randomTx() *WrappedTx { + tx := tmrand.Bytes(10) + return &WrappedTx{ + tx: tx, + height: 1, + timestamp: time.Now(), + gasWanted: 1, + priority: 1, + peers: map[uint16]bool{}, + } +} + func mustKvStore(t *testing.T, opts ...kvstore.OptFunc) *kvstore.Application { opts = append(opts, kvstore.WithLogger(log.NewTestingLogger(t).With("module", "kvstore"))) app, err := kvstore.NewMemoryApp(opts...) 
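The test above exercises the recheckCancel logic added to recheckTransactions(): starting a new recheck cancels the run still in flight. The same cancel-the-previous-run idiom, reduced to a standalone sketch (restarter is a made-up name, not part of this PR):

package example

import (
	"context"
	"sync"
)

// restarter runs at most one background task at a time; starting a new one
// cancels the previous run, mirroring how recheckCancel is used above.
type restarter struct {
	mu     sync.Mutex
	cancel context.CancelFunc
}

func (r *restarter) start(parent context.Context, task func(ctx context.Context)) {
	r.mu.Lock()
	if r.cancel != nil {
		r.cancel() // stop the previous run; its workers should observe ctx.Err()
	}
	ctx, cancel := context.WithCancel(parent)
	r.cancel = cancel
	r.mu.Unlock()

	go task(ctx)
}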
diff --git a/internal/mempool/mocks/mempool.go b/internal/mempool/mocks/mempool.go index 890e3d7b41..6f4996f400 100644 --- a/internal/mempool/mocks/mempool.go +++ b/internal/mempool/mocks/mempool.go @@ -23,6 +23,10 @@ type Mempool struct { func (_m *Mempool) CheckTx(ctx context.Context, tx types.Tx, cb func(*abcitypes.ResponseCheckTx), txInfo mempool.TxInfo) error { ret := _m.Called(ctx, tx, cb, txInfo) + if len(ret) == 0 { + panic("no return value specified for CheckTx") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, types.Tx, func(*abcitypes.ResponseCheckTx), mempool.TxInfo) error); ok { r0 = rf(ctx, tx, cb, txInfo) @@ -47,6 +51,10 @@ func (_m *Mempool) Flush() { func (_m *Mempool) FlushAppConn(_a0 context.Context) error { ret := _m.Called(_a0) + if len(ret) == 0 { + panic("no return value specified for FlushAppConn") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context) error); ok { r0 = rf(_a0) @@ -66,6 +74,10 @@ func (_m *Mempool) Lock() { func (_m *Mempool) ReapMaxBytesMaxGas(maxBytes int64, maxGas int64) types.Txs { ret := _m.Called(maxBytes, maxGas) + if len(ret) == 0 { + panic("no return value specified for ReapMaxBytesMaxGas") + } + var r0 types.Txs if rf, ok := ret.Get(0).(func(int64, int64) types.Txs); ok { r0 = rf(maxBytes, maxGas) @@ -82,6 +94,10 @@ func (_m *Mempool) ReapMaxBytesMaxGas(maxBytes int64, maxGas int64) types.Txs { func (_m *Mempool) ReapMaxTxs(max int) types.Txs { ret := _m.Called(max) + if len(ret) == 0 { + panic("no return value specified for ReapMaxTxs") + } + var r0 types.Txs if rf, ok := ret.Get(0).(func(int) types.Txs); ok { r0 = rf(max) @@ -98,6 +114,10 @@ func (_m *Mempool) ReapMaxTxs(max int) types.Txs { func (_m *Mempool) RemoveTxByKey(txKey types.TxKey) error { ret := _m.Called(txKey) + if len(ret) == 0 { + panic("no return value specified for RemoveTxByKey") + } + var r0 error if rf, ok := ret.Get(0).(func(types.TxKey) error); ok { r0 = rf(txKey) @@ -112,6 +132,10 @@ func (_m *Mempool) RemoveTxByKey(txKey types.TxKey) error { func (_m *Mempool) Size() int { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Size") + } + var r0 int if rf, ok := ret.Get(0).(func() int); ok { r0 = rf() @@ -126,6 +150,10 @@ func (_m *Mempool) Size() int { func (_m *Mempool) SizeBytes() int64 { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for SizeBytes") + } + var r0 int64 if rf, ok := ret.Get(0).(func() int64); ok { r0 = rf() @@ -140,6 +168,10 @@ func (_m *Mempool) SizeBytes() int64 { func (_m *Mempool) TxsAvailable() <-chan struct{} { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for TxsAvailable") + } + var r0 <-chan struct{} if rf, ok := ret.Get(0).(func() <-chan struct{}); ok { r0 = rf() @@ -161,6 +193,10 @@ func (_m *Mempool) Unlock() { func (_m *Mempool) Update(ctx context.Context, blockHeight int64, blockTxs types.Txs, txResults []*abcitypes.ExecTxResult, newPreFn mempool.PreCheckFunc, newPostFn mempool.PostCheckFunc, recheck bool) error { ret := _m.Called(ctx, blockHeight, blockTxs, txResults, newPreFn, newPostFn, recheck) + if len(ret) == 0 { + panic("no return value specified for Update") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, int64, types.Txs, []*abcitypes.ExecTxResult, mempool.PreCheckFunc, mempool.PostCheckFunc, bool) error); ok { r0 = rf(ctx, blockHeight, blockTxs, txResults, newPreFn, newPostFn, recheck) diff --git a/internal/mempool/p2p_msg_handler.go b/internal/mempool/p2p_msg_handler.go index 
c93fc6177d..73f21e4e54 100644 --- a/internal/mempool/p2p_msg_handler.go +++ b/internal/mempool/p2p_msg_handler.go @@ -4,7 +4,9 @@ import ( "context" "errors" "fmt" + "time" + "github.com/dashpay/tenderdash/config" "github.com/dashpay/tenderdash/internal/p2p" "github.com/dashpay/tenderdash/internal/p2p/client" "github.com/dashpay/tenderdash/libs/log" @@ -15,21 +17,36 @@ import ( type ( mempoolP2PMessageHandler struct { logger log.Logger + config *config.MempoolConfig checker TxChecker ids *IDs } ) -func consumerHandler(logger log.Logger, checker TxChecker, ids *IDs) client.ConsumerParams { +func consumerHandler(ctx context.Context, logger log.Logger, config *config.MempoolConfig, checker TxChecker, ids *IDs) client.ConsumerParams { chanIDs := []p2p.ChannelID{p2p.MempoolChannel} + + nTokensFunc := func(e *p2p.Envelope) uint { + if m, ok := e.Message.(*protomem.Txs); ok { + return uint(len(m.Txs)) + } + + // unknown message type; this should not happen, we expect only Txs messages + // But we don't panic, as this is not a critical error + logger.Error("received unknown message type, expected Txs; assuming weight 1", "type", fmt.Sprintf("%T", e.Message), "from", e.From) + return 1 + } + return client.ConsumerParams{ ReadChannels: chanIDs, Handler: client.HandlerWithMiddlewares( &mempoolP2PMessageHandler{ logger: logger, + config: config, checker: checker, ids: ids, }, + client.WithRecvRateLimitPerPeerHandler(ctx, config.TxRecvRateLimit, nTokensFunc, true, logger), client.WithValidateMessageHandler(chanIDs), client.WithErrorLoggerMiddleware(logger), client.WithRecoveryMiddleware(logger), @@ -52,8 +69,24 @@ func (h *mempoolP2PMessageHandler) Handle(ctx context.Context, _ *client.Client, SenderID: h.ids.GetForPeer(envelope.From), SenderNodeID: envelope.From, } + // some stats for logging + start := time.Now() + known := 0 + failed := 0 for _, tx := range protoTxs { - if err := h.checker.CheckTx(ctx, tx, nil, txInfo); err != nil { + var ( + subCtx context.Context + subCtxCancel context.CancelFunc + ) + if h.config.TimeoutCheckTx > 0 { + subCtx, subCtxCancel = context.WithTimeout(ctx, h.config.TimeoutCheckTx) + } else { + subCtx, subCtxCancel = context.WithCancel(ctx) + } + + defer subCtxCancel() + + if err := h.checker.CheckTx(subCtx, tx, nil, txInfo); err != nil { if errors.Is(err, types.ErrTxInCache) { // if the tx is in the cache, // then we've been gossiped a @@ -61,20 +94,25 @@ func (h *mempoolP2PMessageHandler) Handle(ctx context.Context, _ *client.Client, // got. Gossip should be // smarter, but it's not a // problem. + known++ continue } - if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { - // Do not propagate context - // cancellation errors, but do - // not continue to check - // transactions from this - // message if we are shutting down. - return err + + // In case of ctx cancelation, we return error as we are most likely shutting down. + // Otherwise we just reject the tx. 
+ if errCtx := ctx.Err(); errCtx != nil { + return errCtx } + failed++ logger.Error("checktx failed for tx", "tx", fmt.Sprintf("%X", types.Tx(tx).Hash()), "error", err) } } + logger.Debug("processed txs from peer", "took", time.Since(start).String(), + "num_txs", len(protoTxs), + "already_known", known, + "failed", failed, + ) return nil } diff --git a/internal/mempool/p2p_msg_handler_test.go b/internal/mempool/p2p_msg_handler_test.go index b9d39793bf..7edee40b10 100644 --- a/internal/mempool/p2p_msg_handler_test.go +++ b/internal/mempool/p2p_msg_handler_test.go @@ -8,6 +8,7 @@ import ( "github.com/stretchr/testify/mock" abcitypes "github.com/dashpay/tenderdash/abci/types" + "github.com/dashpay/tenderdash/config" "github.com/dashpay/tenderdash/internal/p2p" tmrequire "github.com/dashpay/tenderdash/internal/test/require" "github.com/dashpay/tenderdash/libs/log" @@ -18,6 +19,7 @@ import ( func TestMempoolP2PMessageHandler(t *testing.T) { ctx := context.Background() logger := log.NewTestingLogger(t) + cfg := config.DefaultMempoolConfig() peerID1 := types.NodeID("peer1") ids := NewMempoolIDs() ids.ReserveForPeer(peerID1) @@ -69,6 +71,7 @@ func TestMempoolP2PMessageHandler(t *testing.T) { logger: logger, checker: mockTxChecker, ids: ids, + config: cfg, } err := hd.Handle(ctx, nil, &tc.envelope) tmrequire.Error(t, tc.wantErr, err) diff --git a/internal/mempool/reactor.go b/internal/mempool/reactor.go index 6794e8a62a..415fef8bec 100644 --- a/internal/mempool/reactor.go +++ b/internal/mempool/reactor.go @@ -29,7 +29,7 @@ type Reactor struct { cfg *config.MempoolConfig mempool *TxMempool - ids *IDs + ids *IDs // Peer IDs assigned for peers peerEvents p2p.PeerEventSubscriber p2pClient *client.Client @@ -58,7 +58,7 @@ func NewReactor( p2pClient: p2pClient, peerEvents: peerEvents, peerRoutines: make(map[types.NodeID]context.CancelFunc), - observePanic: func(i interface{}) {}, + observePanic: func(_i interface{}) {}, } r.BaseService = *service.NewBaseService(logger, "Mempool", r) @@ -74,7 +74,7 @@ func (r *Reactor) OnStart(ctx context.Context) error { r.logger.Info("tx broadcasting is disabled") } go func() { - err := r.p2pClient.Consume(ctx, consumerHandler(r.logger, r.mempool, r.ids)) + err := r.p2pClient.Consume(ctx, consumerHandler(ctx, r.logger, r.mempool.config, r.mempool, r.ids)) if err != nil { r.logger.Error("failed to consume p2p checker messages", "error", err) } @@ -153,6 +153,17 @@ func (r *Reactor) processPeerUpdates(ctx context.Context, peerUpdates *p2p.PeerU } } +// sendTxs sends the given txs to the given peer. +// +// Sending txs to a peer is rate limited to prevent spamming the network. +// Each peer has its own rate limiter. +// +// As we will wait for confirmation of the txs being delivered, it is generally safe to +// drop the txs if the send fails. +func (r *Reactor) sendTxs(ctx context.Context, peerID types.NodeID, txs ...types.Tx) error { + return r.p2pClient.SendTxs(ctx, peerID, txs...) +} + func (r *Reactor) broadcastTxRoutine(ctx context.Context, peerID types.NodeID) { peerMempoolID := r.ids.GetForPeer(peerID) var nextGossipTx *clist.CElement @@ -194,12 +205,14 @@ func (r *Reactor) broadcastTxRoutine(ctx context.Context, peerID types.NodeID) { memTx := nextGossipTx.Value.(*WrappedTx) + // We expect the peer to send tx back once it gets it, and that's + // when we will mark it as seen. // NOTE: Transaction batching was disabled due to: // https://github.com/tendermint/tendermint/issues/5796 if !memTx.HasPeer(peerMempoolID) { // Send the mempool tx to the corresponding peer. 
Note, the peer may be // behind and thus would not be able to process the mempool tx correctly. - err := r.p2pClient.SendTxs(ctx, peerID, memTx.tx) + err := r.sendTxs(ctx, peerID, memTx.tx) if err != nil { r.logger.Error("failed to gossip transaction", "peerID", peerID, "error", err) return diff --git a/internal/mempool/reactor_test.go b/internal/mempool/reactor_test.go index 6ebbdae2ca..691324e721 100644 --- a/internal/mempool/reactor_test.go +++ b/internal/mempool/reactor_test.go @@ -47,7 +47,7 @@ func setupReactors(ctx context.Context, t *testing.T, logger log.Logger, numNode rts := &reactorTestSuite{ logger: log.NewNopLogger().With("testCase", t.Name()), - network: p2ptest.MakeNetwork(ctx, t, p2ptest.NetworkOptions{NumNodes: numNodes}), + network: p2ptest.MakeNetwork(ctx, t, p2ptest.NetworkOptions{NumNodes: numNodes}, log.NewNopLogger()), reactors: make(map[types.NodeID]*Reactor, numNodes), mempools: make(map[types.NodeID]*TxMempool, numNodes), kvstores: make(map[types.NodeID]*kvstore.Application, numNodes), @@ -74,7 +74,7 @@ func setupReactors(ctx context.Context, t *testing.T, logger log.Logger, numNode cfg.Mempool, mempool, rts.network.Nodes[nodeID].Client, - func(ctx context.Context, n string) *p2p.PeerUpdates { return rts.peerUpdates[nodeID] }, + func(_ctx context.Context, _n string) *p2p.PeerUpdates { return rts.peerUpdates[nodeID] }, ) rts.nodes = append(rts.nodes, nodeID) @@ -146,7 +146,7 @@ func TestReactorBroadcastDoesNotPanic(t *testing.T) { logger := log.NewNopLogger() rts := setupReactors(ctx, t, logger, numNodes, 0) - observePanic := func(r interface{}) { + observePanic := func(_r interface{}) { t.Fatal("panic detected in reactor") } diff --git a/internal/mempool/types.go b/internal/mempool/types.go index 28b3dc7f34..48dc52041d 100644 --- a/internal/mempool/types.go +++ b/internal/mempool/types.go @@ -131,7 +131,7 @@ func PreCheckMaxBytes(maxBytes int64) PreCheckFunc { // PostCheckMaxGas checks that the wanted gas is smaller or equal to the passed // maxGas. Returns nil if maxGas is -1. func PostCheckMaxGas(maxGas int64) PostCheckFunc { - return func(tx types.Tx, res *abci.ResponseCheckTx) error { + return func(_tx types.Tx, res *abci.ResponseCheckTx) error { if maxGas == -1 { return nil } diff --git a/internal/p2p/channel.go b/internal/p2p/channel.go index 9e8cb3283d..c77b4f39f9 100644 --- a/internal/p2p/channel.go +++ b/internal/p2p/channel.go @@ -12,11 +12,17 @@ import ( "github.com/gogo/protobuf/proto" "github.com/rs/zerolog" sync "github.com/sasha-s/go-deadlock" + "golang.org/x/time/rate" + log "github.com/dashpay/tenderdash/libs/log" "github.com/dashpay/tenderdash/proto/tendermint/p2p" "github.com/dashpay/tenderdash/types" ) +var ( + ErrRecvRateLimitExceeded = errors.New("receive rate limit exceeded") +) + // Envelope contains a message with sender/receiver routing info. type Envelope struct { From types.NodeID // sender (empty if outbound) @@ -117,7 +123,7 @@ type Channel interface { Send(context.Context, Envelope) error SendError(context.Context, PeerError) error - Receive(context.Context) *ChannelIterator + Receive(context.Context) ChannelIterator } // PeerError is a peer error reported via Channel.Error. @@ -194,8 +200,8 @@ func (ch *legacyChannel) String() string { return fmt.Sprintf("p2p.Channel<%d:%s // Receive returns a new unbuffered iterator to receive messages from ch. // The iterator runs until ctx ends. 
-func (ch *legacyChannel) Receive(ctx context.Context) *ChannelIterator { - iter := &ChannelIterator{ +func (ch *legacyChannel) Receive(ctx context.Context) ChannelIterator { + iter := &channelIterator{ pipe: make(chan Envelope), // unbuffered } go func(pipe chan<- Envelope) { @@ -216,32 +222,38 @@ func (ch *legacyChannel) Receive(ctx context.Context) *ChannelIterator { return iter } -// ChannelIterator provides a context-aware path for callers +// ChannelIterator is an iterator for receiving messages from a Channel. +type ChannelIterator interface { + // Next returns true when the Envelope value has advanced, and false + // when the context is canceled or iteration should stop. If an iterator has returned false, + // it will never return true again. + // in general, use Next, as in: + // + // for iter.Next(ctx) { + // envelope := iter.Envelope() + // // ... do things ... + // } + Next(ctx context.Context) bool + Envelope() *Envelope +} + +// channelIterator provides a context-aware path for callers // (reactors) to process messages from the P2P layer without relying // on the implementation details of the P2P layer. Channel provides // access to it's Outbound stream as an iterator, and the // MergedChannelIterator makes it possible to combine multiple // channels into a single iterator. -type ChannelIterator struct { +type channelIterator struct { pipe chan Envelope current *Envelope } -// NewChannelIterator returns a new instance of ChannelIterator -func NewChannelIterator(pipe chan Envelope) *ChannelIterator { - return &ChannelIterator{pipe: pipe} +// NewChannelIterator returns a new instance of channelIterator +func NewChannelIterator(pipe chan Envelope) ChannelIterator { + return &channelIterator{pipe: pipe} } -// Next returns true when the Envelope value has advanced, and false -// when the context is canceled or iteration should stop. If an iterator has returned false, -// it will never return true again. -// in general, use Next, as in: -// -// for iter.Next(ctx) { -// envelope := iter.Envelope() -// // ... do things ... -// } -func (iter *ChannelIterator) Next(ctx context.Context) bool { +func (iter *channelIterator) Next(ctx context.Context) bool { select { case <-ctx.Done(): iter.current = nil @@ -262,15 +274,15 @@ func (iter *ChannelIterator) Next(ctx context.Context) bool { // iterator. When the last call to Next returned true, Envelope will // return a non-nil object. If Next returned false then Envelope is // always nil. -func (iter *ChannelIterator) Envelope() *Envelope { return iter.current } +func (iter *channelIterator) Envelope() *Envelope { return iter.current } // MergedChannelIterator produces an iterator that merges the // messages from the given channels in arbitrary order. // // This allows the caller to consume messages from multiple channels // without needing to manage the concurrency separately. -func MergedChannelIterator(ctx context.Context, chs ...Channel) *ChannelIterator { - iter := &ChannelIterator{ +func MergedChannelIterator(ctx context.Context, chs ...Channel) ChannelIterator { + iter := &channelIterator{ pipe: make(chan Envelope), // unbuffered } wg := new(sync.WaitGroup) @@ -304,3 +316,88 @@ func MergedChannelIterator(ctx context.Context, chs ...Channel) *ChannelIterator return iter } + +type throttledChannelIterator struct { + innerChan Channel + innerIter ChannelIterator + limiter *rate.Limiter + reportErr bool + logger log.Logger +} + +// ThrottledChannelIterator wraps an existing channel iterator with a rate limiter. 
+// +// ## Arguments +// - ctx: the context in which the iterator will run +// - limiter: the rate limiter to use +// - innerIterator: the underlying iterator to use +// - reportError: if true, errors will be sent to the channel whenever the rate limit is exceeded; otherwise +// the messages will be dropped without error +// - innerChannel: the related channel; rate-limit errors will be sent to this channel, and it is also used for logging +// - logger: the logger to use +func ThrottledChannelIterator(_ context.Context, limiter *rate.Limiter, innerIterator ChannelIterator, + reportError bool, innerChannel Channel, logger log.Logger) (ChannelIterator, error) { + if innerChannel == nil { + if reportError { + return nil, fmt.Errorf("inner channel is required to report errors") + } + } else { + logger = logger.With("channel", innerChannel) + } + + throttledChannelIterator := &throttledChannelIterator{ + innerChan: innerChannel, + innerIter: innerIterator, + limiter: limiter, + reportErr: reportError, + logger: logger, + } + + return throttledChannelIterator, nil +} + +func (tci *throttledChannelIterator) Next(ctx context.Context) bool { + if tci.innerIter == nil { + tci.logger.Error("inner channel iterator is nil", "channel", tci.innerChan) + return false + } + + for { + if ctx.Err() != nil { + return false + } + + if !tci.innerIter.Next(ctx) { + return false + } + + // If the limiter allows the message to be sent, we break the loop + if tci.limiter.Allow() { + break + } + e := tci.innerIter.Envelope() + if tci.reportErr && e != nil { + msg := PeerError{ + NodeID: e.From, + Err: ErrRecvRateLimitExceeded, + Fatal: true, + } + if err := tci.innerChan.SendError(ctx, msg); err != nil { + tci.logger.Error("error sending error message", "err", err, "msg", msg) + } + } else { + tci.logger.Trace("dropping message due to rate limit", "channel", tci.innerChan, "rate", tci.limiter.Limit()) + } + } + + return true +} + +func (tci *throttledChannelIterator) Envelope() *Envelope { + if tci.innerIter == nil { + tci.logger.Error("inner channel iterator is nil", "channel", tci.innerChan) + return nil + } + + return tci.innerIter.Envelope() +} diff --git a/internal/p2p/channel_params.go b/internal/p2p/channel_params.go index 8a0aa8418b..d9580e2479 100644 --- a/internal/p2p/channel_params.go +++ b/internal/p2p/channel_params.go @@ -16,6 +16,14 @@ import ( ) const ( + // + // Consensus channels + // + ConsensusStateChannel = ChannelID(0x20) + ConsensusDataChannel = ChannelID(0x21) + ConsensusVoteChannel = ChannelID(0x22) + VoteSetBitsChannel = ChannelID(0x23) + ErrorChannel = ChannelID(0x10) // BlockSyncChannel is a channelStore for blocks and status updates BlockSyncChannel = ChannelID(0x40) @@ -40,21 +48,24 @@ const ( lightBlockMsgSize = int(1e7) // ~1MB // paramMsgSize is the maximum size of a paramsResponseMessage paramMsgSize = int(1e5) // ~100kb + // maxMsgSize is the maximum size of a consensus message + maxMsgSize = 1048576 // 1MB; NOTE: keep in sync with types.PartSet sizes.
+ ) // ChannelDescriptors returns a map of all supported descriptors func ChannelDescriptors(cfg *config.Config) map[ChannelID]*ChannelDescriptor { - return map[ChannelID]*ChannelDescriptor{ + channels := map[ChannelID]*ChannelDescriptor{ ErrorChannel: { ID: ErrorChannel, - Priority: 6, + Priority: 7, RecvMessageCapacity: blockMaxMsgSize, RecvBufferCapacity: 32, Name: "error", }, BlockSyncChannel: { ID: BlockSyncChannel, - Priority: 5, + Priority: 6, SendQueueCapacity: 1000, RecvBufferCapacity: 1024, RecvMessageCapacity: types.MaxBlockSizeBytes + @@ -64,11 +75,27 @@ func ChannelDescriptors(cfg *config.Config) map[ChannelID]*ChannelDescriptor { }, MempoolChannel: { ID: MempoolChannel, - Priority: 5, + Priority: 2, // 5 RecvMessageCapacity: mempoolBatchSize(cfg.Mempool.MaxTxBytes), - RecvBufferCapacity: 128, + RecvBufferCapacity: 1000, Name: "mempool", + EnqueueTimeout: cfg.Mempool.TxEnqueueTimeout, }, + } + + for k, v := range StatesyncChannelDescriptors() { + channels[k] = v + } + for k, v := range ConsensusChannelDescriptors() { + channels[k] = v + } + + return channels +} + +// StatesyncChannelDescriptors returns a map of channel descriptors used by state sync +func StatesyncChannelDescriptors() map[ChannelID]*ChannelDescriptor { + return map[ChannelID]*ChannelDescriptor{ SnapshotChannel: { ID: SnapshotChannel, Priority: 6, @@ -79,7 +106,7 @@ func ChannelDescriptors(cfg *config.Config) map[ChannelID]*ChannelDescriptor { }, ChunkChannel: { ID: ChunkChannel, - Priority: 3, + Priority: 4, SendQueueCapacity: 4, RecvMessageCapacity: chunkMsgSize, RecvBufferCapacity: 128, @@ -87,7 +114,7 @@ func ChannelDescriptors(cfg *config.Config) map[ChannelID]*ChannelDescriptor { }, LightBlockChannel: { ID: LightBlockChannel, - Priority: 5, + Priority: 6, SendQueueCapacity: 10, RecvMessageCapacity: lightBlockMsgSize, RecvBufferCapacity: 128, @@ -95,7 +122,7 @@ func ChannelDescriptors(cfg *config.Config) map[ChannelID]*ChannelDescriptor { }, ParamsChannel: { ID: ParamsChannel, - Priority: 2, + Priority: 3, SendQueueCapacity: 10, RecvMessageCapacity: paramMsgSize, RecvBufferCapacity: 128, @@ -104,6 +131,48 @@ func ChannelDescriptors(cfg *config.Config) map[ChannelID]*ChannelDescriptor { } } +// ConsensusChannelDescriptors returns a map of channel descriptors used by +// the consensus reactor. +func ConsensusChannelDescriptors() map[ChannelID]*ChannelDescriptor { + return map[ChannelID]*ChannelDescriptor{ + ConsensusStateChannel: { + ID: ConsensusStateChannel, + Priority: 18, + SendQueueCapacity: 64, + RecvMessageCapacity: maxMsgSize, + RecvBufferCapacity: 128, + Name: "state", + }, + ConsensusDataChannel: { + // TODO: Consider a split between gossiping current block and catchup + // stuff. Once we gossip the whole block there is nothing left to send + // until next height or round.
+ ID: ConsensusDataChannel, + Priority: 22, + SendQueueCapacity: 64, + RecvBufferCapacity: 512, + RecvMessageCapacity: maxMsgSize, + Name: "data", + }, + ConsensusVoteChannel: { + ID: ConsensusVoteChannel, + Priority: 20, + SendQueueCapacity: 64, + RecvBufferCapacity: 4096, + RecvMessageCapacity: maxMsgSize, + Name: "vote", + }, + VoteSetBitsChannel: { + ID: VoteSetBitsChannel, + Priority: 15, + SendQueueCapacity: 8, + RecvBufferCapacity: 128, + RecvMessageCapacity: maxMsgSize, + Name: "voteSet", + }, + } +} + // ResolveChannelID returns channel ID according to message type // currently only is supported blocksync channelID, the remaining channelIDs should be added as it will be necessary func ResolveChannelID(msg proto.Message) ChannelID { @@ -114,6 +183,7 @@ func ResolveChannelID(msg proto.Message) ChannelID { *blocksync.StatusRequest, *blocksync.StatusResponse: return BlockSyncChannel + // State sync case *statesync.ChunkRequest, *statesync.ChunkResponse: return ChunkChannel @@ -126,30 +196,27 @@ func ResolveChannelID(msg proto.Message) ChannelID { case *statesync.LightBlockRequest, *statesync.LightBlockResponse: return LightBlockChannel - case *consensus.NewRoundStep, - *consensus.NewValidBlock, + // Consensus messages + case *consensus.VoteSetBits: + return VoteSetBitsChannel + case *consensus.Vote, *consensus.Commit: + return ConsensusVoteChannel + case *consensus.ProposalPOL, *consensus.Proposal, - *consensus.ProposalPOL, - *consensus.BlockPart, - *consensus.Vote, + *consensus.BlockPart: + return ConsensusDataChannel + case *consensus.NewRoundStep, *consensus.NewValidBlock, + *consensus.HasCommit, *consensus.HasVote, - *consensus.VoteSetMaj23, - *consensus.VoteSetBits, - *consensus.Commit, - *consensus.HasCommit: - // TODO: enable these channels when they are implemented - //*statesync.SnapshotsRequest, - //*statesync.SnapshotsResponse, - //*statesync.ChunkRequest, - //*statesync.ChunkResponse, - //*statesync.LightBlockRequest, - //*statesync.LightBlockResponse, - //*statesync.ParamsRequest, - //*statesync.ParamsResponse: + *consensus.VoteSetMaj23: + return ConsensusStateChannel + // pex case *p2pproto.PexRequest, *p2pproto.PexResponse, *p2pproto.Echo: + // evidence case *prototypes.Evidence: + // mempool case *mempool.Txs: return MempoolChannel } diff --git a/internal/p2p/client/chanstore.go b/internal/p2p/client/chanstore.go index e49c3994f4..e25858f6a5 100644 --- a/internal/p2p/client/chanstore.go +++ b/internal/p2p/client/chanstore.go @@ -31,7 +31,7 @@ func newChanStore(descriptors map[p2p.ChannelID]*p2p.ChannelDescriptor, creator return store } -func (c *chanStore) iter(ctx context.Context, chanIDs ...p2p.ChannelID) (*p2p.ChannelIterator, error) { +func (c *chanStore) iter(ctx context.Context, chanIDs ...p2p.ChannelID) (p2p.ChannelIterator, error) { chans := make([]p2p.Channel, 0, len(chanIDs)) for _, chanID := range chanIDs { ch, err := c.get(ctx, chanID) diff --git a/internal/p2p/client/client.go b/internal/p2p/client/client.go index fffc4a36df..ea6d4a08b8 100644 --- a/internal/p2p/client/client.go +++ b/internal/p2p/client/client.go @@ -98,6 +98,8 @@ type ( pending sync.Map reqTimeout time.Duration chanIDResolver func(msg proto.Message) p2p.ChannelID + // rateLimit represents a rate limiter for the channel; can be nil + rateLimit map[p2p.ChannelID]*RateLimit } // OptionFunc is a client optional function, it is used to override the default parameters in a Client OptionFunc func(c *Client) @@ -128,6 +130,18 @@ func WithChanIDResolver(resolver func(msg proto.Message) 
p2p.ChannelID) OptionFu } } +// WithSendRateLimits defines a rate limiter for the provided channels. +// +// Provided rate limiter will be shared between provided channels. +// Use this function multiple times to set different rate limiters for different channels. +func WithSendRateLimits(rateLimit *RateLimit, channels ...p2p.ChannelID) OptionFunc { + return func(c *Client) { + for _, ch := range channels { + c.rateLimit[ch] = rateLimit + } + } +} + // New creates and returns Client with optional functions func New(descriptors map[p2p.ChannelID]*p2p.ChannelDescriptor, creator p2p.ChannelCreator, opts ...OptionFunc) *Client { client := &Client{ @@ -136,6 +150,7 @@ func New(descriptors map[p2p.ChannelID]*p2p.ChannelDescriptor, creator p2p.Chann logger: log.NewNopLogger(), reqTimeout: peerTimeout, chanIDResolver: p2p.ResolveChannelID, + rateLimit: make(map[p2p.ChannelID]*RateLimit), } for _, opt := range opts { opt(client) @@ -224,15 +239,27 @@ func (c *Client) GetSyncStatus(ctx context.Context) error { } // SendTxs sends a transaction to the peer -func (c *Client) SendTxs(ctx context.Context, peerID types.NodeID, tx types.Tx) error { +func (c *Client) SendTxs(ctx context.Context, peerID types.NodeID, tx ...types.Tx) error { + txs := make([][]byte, len(tx)) + for i := 0; i < len(tx); i++ { + txs[i] = tx[i] + } + return c.Send(ctx, p2p.Envelope{ To: peerID, - Message: &protomem.Txs{Txs: [][]byte{tx}}, + Message: &protomem.Txs{Txs: txs}, }) } // Send sends p2p message to a peer, allowed p2p.Envelope or p2p.PeerError types func (c *Client) Send(ctx context.Context, msg any) error { + return c.SendN(ctx, msg, 1) +} + +// SendN sends p2p message to a peer, consuming `nTokens` from rate limiter. +// +// Allowed `msg` types are: p2p.Envelope or p2p.PeerError +func (c *Client) SendN(ctx context.Context, msg any, nTokens int) error { switch t := msg.(type) { case p2p.PeerError: ch, err := c.chanStore.get(ctx, p2p.ErrorChannel) @@ -252,6 +279,19 @@ func (c *Client) Send(ctx context.Context, msg any) error { if err != nil { return err } + if limiter, ok := c.rateLimit[t.ChannelID]; ok { + ok, err := limiter.Limit(ctx, t.To, nTokens) + if err != nil { + return fmt.Errorf("rate limited when sending message %T on channel %d to %s: %w", + t.Message, t.ChannelID, t.To, err) + } + if !ok { + c.logger.Debug("dropping message due to rate limit", + "channel", t.ChannelID, "peer", t.To, "message", t.Message) + return nil + } + } + return ch.Send(ctx, t) } return fmt.Errorf("cannot send an unsupported message type %T", msg) @@ -267,7 +307,7 @@ func (c *Client) Consume(ctx context.Context, params ConsumerParams) error { return c.iter(ctx, iter, params.Handler) } -func (c *Client) iter(ctx context.Context, iter *p2p.ChannelIterator, handler ConsumerHandler) error { +func (c *Client) iter(ctx context.Context, iter p2p.ChannelIterator, handler ConsumerHandler) error { for iter.Next(ctx) { envelope := iter.Envelope() if isMessageResolvable(envelope.Message) { diff --git a/internal/p2p/client/client_test.go b/internal/p2p/client/client_test.go index 5a02123db0..ee353b0f62 100644 --- a/internal/p2p/client/client_test.go +++ b/internal/p2p/client/client_test.go @@ -54,11 +54,11 @@ func (suite *ChannelTestSuite) SetupTest() { suite.fakeClock = clockwork.NewFakeClock() suite.client = New( suite.descriptors, - func(ctx context.Context, descriptor *p2p.ChannelDescriptor) (p2p.Channel, error) { + func(_ctx context.Context, _descriptor *p2p.ChannelDescriptor) (p2p.Channel, error) { return suite.p2pChannel, nil }, 
WithClock(suite.fakeClock), - WithChanIDResolver(func(msg proto.Message) p2p.ChannelID { + WithChanIDResolver(func(_msg proto.Message) p2p.ChannelID { return testChannelID }), ) @@ -185,7 +185,7 @@ func (suite *ChannelTestSuite) TestConsumeHandle() { suite.p2pChannel. On("Receive", ctx). Once(). - Return(func(ctx context.Context) *p2p.ChannelIterator { + Return(func(_ctx context.Context) p2p.ChannelIterator { return p2p.NewChannelIterator(outCh) }) consumer := newMockConsumer(suite.T()) @@ -226,7 +226,7 @@ func (suite *ChannelTestSuite) TestConsumeResolve() { suite.p2pChannel. On("Receive", ctx). Once(). - Return(func(ctx context.Context) *p2p.ChannelIterator { + Return(func(_ctx context.Context) p2p.ChannelIterator { return p2p.NewChannelIterator(outCh) }) resCh := suite.client.addPending(reqID) @@ -278,7 +278,7 @@ func (suite *ChannelTestSuite) TestConsumeError() { suite.p2pChannel. On("Receive", ctx). Once(). - Return(func(ctx context.Context) *p2p.ChannelIterator { + Return(func(_ctx context.Context) p2p.ChannelIterator { return p2p.NewChannelIterator(outCh) }) consumer := newMockConsumer(suite.T()) diff --git a/internal/p2p/client/consumer.go b/internal/p2p/client/consumer.go index 6038737d7b..976c0590ff 100644 --- a/internal/p2p/client/consumer.go +++ b/internal/p2p/client/consumer.go @@ -9,6 +9,9 @@ import ( "github.com/dashpay/tenderdash/libs/log" ) +// DefaultRecvBurstMultiplier tells how many times burst is bigger than the limit in recvRateLimitPerPeerHandler +const DefaultRecvBurstMultiplier = 10 + var ( ErrRequestIDAttributeRequired = errors.New("envelope requestID attribute is required") ErrResponseIDAttributeRequired = errors.New("envelope responseID attribute is required") @@ -41,6 +44,19 @@ type ( allowedChannelIDs map[p2p.ChannelID]struct{} next ConsumerHandler } + + // TokenNumberFunc is a function that returns number of tokens to consume for a given envelope + TokenNumberFunc func(*p2p.Envelope) uint + + recvRateLimitPerPeerHandler struct { + RateLimit + + // next is the next handler in the chain + next ConsumerHandler + + // nTokens is a function that returns number of tokens to consume for a given envelope; if unsure, return 1 + nTokensFunc TokenNumberFunc + } ) // WithRecoveryMiddleware creates panic recovery middleware @@ -75,6 +91,18 @@ func WithValidateMessageHandler(allowedChannelIDs []p2p.ChannelID) ConsumerMiddl } } +func WithRecvRateLimitPerPeerHandler(ctx context.Context, limit float64, nTokensFunc TokenNumberFunc, drop bool, logger log.Logger) ConsumerMiddlewareFunc { + return func(next ConsumerHandler) ConsumerHandler { + hd := &recvRateLimitPerPeerHandler{ + RateLimit: *NewRateLimit(ctx, limit, drop, logger), + nTokensFunc: nTokensFunc, + } + + hd.next = next + return hd + } +} + // HandlerWithMiddlewares is a function that wraps a handler in middlewares func HandlerWithMiddlewares(handler ConsumerHandler, mws ...ConsumerMiddlewareFunc) ConsumerHandler { for _, mw := range mws { @@ -125,3 +153,16 @@ func (h *validateMessageHandler) Handle(ctx context.Context, client *Client, env } return h.next.Handle(ctx, client, envelope) } + +func (h *recvRateLimitPerPeerHandler) Handle(ctx context.Context, client *Client, envelope *p2p.Envelope) error { + accepted, err := h.RateLimit.Limit(ctx, envelope.From, int(h.nTokensFunc(envelope))) + if err != nil { + return fmt.Errorf("rate limit failed for peer '%s;: %w", envelope.From, err) + } + if !accepted { + h.logger.Debug("silently dropping message due to rate limit", "peer", envelope.From, "envelope", envelope) + return 
nil + } + + return h.next.Handle(ctx, client, envelope) +} diff --git a/internal/p2p/client/consumer_test.go b/internal/p2p/client/consumer_test.go index cfacda0267..31b1347cd3 100644 --- a/internal/p2p/client/consumer_test.go +++ b/internal/p2p/client/consumer_test.go @@ -35,7 +35,7 @@ func TestErrorLoggerP2PMessageHandler(t *testing.T) { wantErr: "error", }, { - mockFn: func(hd *mockConsumer, logger *log.TestingLogger) { + mockFn: func(hd *mockConsumer, _logger *log.TestingLogger) { hd.On("Handle", mock.Anything, mock.Anything, mock.Anything). Once(). Return(nil) diff --git a/internal/p2p/client/mocks/block_client.go b/internal/p2p/client/mocks/block_client.go index b094d38ccb..5d1e5a90ce 100644 --- a/internal/p2p/client/mocks/block_client.go +++ b/internal/p2p/client/mocks/block_client.go @@ -23,6 +23,10 @@ type BlockClient struct { func (_m *BlockClient) GetBlock(ctx context.Context, height int64, peerID types.NodeID) (*promise.Promise[*blocksync.BlockResponse], error) { ret := _m.Called(ctx, height, peerID) + if len(ret) == 0 { + panic("no return value specified for GetBlock") + } + var r0 *promise.Promise[*blocksync.BlockResponse] var r1 error if rf, ok := ret.Get(0).(func(context.Context, int64, types.NodeID) (*promise.Promise[*blocksync.BlockResponse], error)); ok { @@ -49,6 +53,10 @@ func (_m *BlockClient) GetBlock(ctx context.Context, height int64, peerID types. func (_m *BlockClient) GetSyncStatus(ctx context.Context) error { ret := _m.Called(ctx) + if len(ret) == 0 { + panic("no return value specified for GetSyncStatus") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context) error); ok { r0 = rf(ctx) @@ -63,6 +71,10 @@ func (_m *BlockClient) GetSyncStatus(ctx context.Context) error { func (_m *BlockClient) Send(ctx context.Context, msg interface{}) error { ret := _m.Called(ctx, msg) + if len(ret) == 0 { + panic("no return value specified for Send") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, interface{}) error); ok { r0 = rf(ctx, msg) diff --git a/internal/p2p/client/mocks/snapshot_client.go b/internal/p2p/client/mocks/snapshot_client.go index e2eb8d643d..a88f2e269b 100644 --- a/internal/p2p/client/mocks/snapshot_client.go +++ b/internal/p2p/client/mocks/snapshot_client.go @@ -22,6 +22,10 @@ type SnapshotClient struct { func (_m *SnapshotClient) GetChunk(ctx context.Context, peerID types.NodeID, height uint64, format uint32, index uint32) (*promise.Promise[*statesync.ChunkResponse], error) { ret := _m.Called(ctx, peerID, height, format, index) + if len(ret) == 0 { + panic("no return value specified for GetChunk") + } + var r0 *promise.Promise[*statesync.ChunkResponse] var r1 error if rf, ok := ret.Get(0).(func(context.Context, types.NodeID, uint64, uint32, uint32) (*promise.Promise[*statesync.ChunkResponse], error)); ok { @@ -48,6 +52,10 @@ func (_m *SnapshotClient) GetChunk(ctx context.Context, peerID types.NodeID, hei func (_m *SnapshotClient) GetLightBlock(ctx context.Context, peerID types.NodeID, height uint64) (*promise.Promise[*statesync.LightBlockResponse], error) { ret := _m.Called(ctx, peerID, height) + if len(ret) == 0 { + panic("no return value specified for GetLightBlock") + } + var r0 *promise.Promise[*statesync.LightBlockResponse] var r1 error if rf, ok := ret.Get(0).(func(context.Context, types.NodeID, uint64) (*promise.Promise[*statesync.LightBlockResponse], error)); ok { @@ -74,6 +82,10 @@ func (_m *SnapshotClient) GetLightBlock(ctx context.Context, peerID types.NodeID func (_m *SnapshotClient) GetParams(ctx context.Context, 
peerID types.NodeID, height uint64) (*promise.Promise[*statesync.ParamsResponse], error) { ret := _m.Called(ctx, peerID, height) + if len(ret) == 0 { + panic("no return value specified for GetParams") + } + var r0 *promise.Promise[*statesync.ParamsResponse] var r1 error if rf, ok := ret.Get(0).(func(context.Context, types.NodeID, uint64) (*promise.Promise[*statesync.ParamsResponse], error)); ok { @@ -100,6 +112,10 @@ func (_m *SnapshotClient) GetParams(ctx context.Context, peerID types.NodeID, he func (_m *SnapshotClient) GetSnapshots(ctx context.Context, peerID types.NodeID) error { ret := _m.Called(ctx, peerID) + if len(ret) == 0 { + panic("no return value specified for GetSnapshots") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, types.NodeID) error); ok { r0 = rf(ctx, peerID) diff --git a/internal/p2p/client/ratelimit.go b/internal/p2p/client/ratelimit.go new file mode 100644 index 0000000000..a42e22efb3 --- /dev/null +++ b/internal/p2p/client/ratelimit.go @@ -0,0 +1,132 @@ +package client + +import ( + "context" + "fmt" + "sync" + "sync/atomic" + "time" + + "golang.org/x/time/rate" + + "github.com/dashpay/tenderdash/libs/log" + "github.com/dashpay/tenderdash/types" +) + +const PeerRateLimitLifetime = 60 // number of seconds to keep the rate limiter for a peer + +// RateLimit is a rate limiter for p2p messages. +// It is used to limit the rate of incoming messages from a peer. +// Each peer has its own independent limit. +// +// Use NewRateLimit to create a new rate limiter. +// Use [Limit()] to wait for the rate limit to allow the message to be sent. +type RateLimit struct { + // limit is the rate limit per peer per second; 0 means no limit + limit float64 + // burst is the initial number of tokens; see rate module for more details + burst int + // map of peerID to rate.Limiter + limiters sync.Map + // drop is a flag to silently drop the message if the rate limit is exceeded; otherwise we will wait + drop bool + + logger log.Logger +} + +type limiter struct { + *rate.Limiter + // lastAccess is the last time the limiter was accessed, as Unix time (seconds) + lastAccess atomic.Int64 +} + +// NewRateLimit creates a new rate limiter. +// +// # Arguments +// +// * `ctx` - context; used to gracefully shutdown the garbage collection routine +// * `limit` - rate limit per peer per second; 0 means no limit +// * `drop` - silently drop the message if the rate limit is exceeded; otherwise we will wait until the message is allowed +// * `logger` - logger +func NewRateLimit(ctx context.Context, limit float64, drop bool, logger log.Logger) *RateLimit { + h := &RateLimit{ + limiters: sync.Map{}, + limit: limit, + burst: int(DefaultRecvBurstMultiplier * limit), + drop: drop, + logger: logger, + } + + // start the garbage collection routine + go h.gcRoutine(ctx) + + return h +} + +func (h *RateLimit) getLimiter(peerID types.NodeID) *limiter { + var limit *limiter + if l, ok := h.limiters.Load(peerID); ok { + limit = l.(*limiter) + } else { + limit = &limiter{Limiter: rate.NewLimiter(rate.Limit(h.limit), h.burst)} + // we have a slight race condition here, possibly overwriting the limiter, but it's not a big deal + // as the worst case scenario is that we allow one or two more messages than we should + h.limiters.Store(peerID, limit) + } + + limit.lastAccess.Store(time.Now().Unix()) + + return limit +} + +// Limit waits for the rate limit to allow the message to be sent. +// It returns true if the message is allowed, false otherwise. 
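Tying this to the send path, a client with a per-peer send limit on the mempool channel could be wired roughly as follows; the 5 msg/s figure, the newRateLimitedClient name and the creator parameter are assumptions made for the sketch, while the client and p2p identifiers come from this patch:

    package example // hypothetical, in-module consumer of internal/p2p/client

    import (
        "context"

        "github.com/dashpay/tenderdash/config"
        "github.com/dashpay/tenderdash/internal/p2p"
        "github.com/dashpay/tenderdash/internal/p2p/client"
        "github.com/dashpay/tenderdash/libs/log"
    )

    // newRateLimitedClient drops (rather than delays) mempool sends above 5 msg/s per peer.
    func newRateLimitedClient(ctx context.Context, cfg *config.Config, creator p2p.ChannelCreator, logger log.Logger) *client.Client {
        limit := client.NewRateLimit(ctx, 5, true, logger) // drop=true: excess messages are skipped silently
        return client.New(
            p2p.ChannelDescriptors(cfg),
            creator,
            client.WithSendRateLimits(limit, p2p.MempoolChannel),
        )
    }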
+// +// If peerID is empty, the message is always allowed. +// +// Returns true when the message is allowed, false if it should be dropped. +// +// Arguments: +// - ctx: context +// - peerID: peer ID; if empty, the message is always allowed +// - nTokens: number of tokens to consume; use 1 if unsure +func (h *RateLimit) Limit(ctx context.Context, peerID types.NodeID, nTokens int) (allowed bool, err error) { + if h.limit > 0 && peerID != "" { + limiter := h.getLimiter(peerID) + + if h.drop { + return limiter.AllowN(time.Now(), nTokens), nil + } + + if err := limiter.WaitN(ctx, nTokens); err != nil { + return false, fmt.Errorf("rate limit failed for peer %s: %w", peerID, err) + } + } + return true, nil +} + +// gcRoutine is a goroutine that removes unused limiters for peers every `PeerRateLimitLifetime` seconds. +func (h *RateLimit) gcRoutine(ctx context.Context) { + ticker := time.NewTicker(PeerRateLimitLifetime * time.Second) + defer ticker.Stop() + + for { + select { + case <-ctx.Done(): + return + case <-ticker.C: + h.gc() + } + } +} + +// gc removes limiters that have not been used for at least PeerRateLimitLifetime seconds. +func (h *RateLimit) gc() { + now := time.Now().Unix() + h.limiters.Range(func(key, value interface{}) bool { + if value.(*limiter).lastAccess.Load() < now-PeerRateLimitLifetime { + h.limiters.Delete(key) + } + return true + }) +} diff --git a/internal/p2p/client/ratelimit_test.go b/internal/p2p/client/ratelimit_test.go new file mode 100644 index 0000000000..004a330049 --- /dev/null +++ b/internal/p2p/client/ratelimit_test.go @@ -0,0 +1,219 @@ +package client + +import ( + "context" + "errors" + "math" + "runtime" + "strconv" + "sync" + "sync/atomic" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + "github.com/dashpay/tenderdash/internal/p2p" + "github.com/dashpay/tenderdash/internal/p2p/conn" + "github.com/dashpay/tenderdash/libs/log" + "github.com/dashpay/tenderdash/types" +) + +// TestRecvRateLimitHandler tests the rate limit middleware when receiving messages from peers. +// It tests that the rate limit is applied per peer. +// +// GIVEN 5 peers named 1..5 and rate limit of 2/s and burst 4, +// WHEN we send 1, 2, 3, 4 and 5 msgs per second respectively for 3 seconds, +// THEN: +// * peers 1, 2 and 3 receive all of their messages, +// * peers 4 and 5 receive only 2 messages per second plus the 4-message burst. +// +// Reuses testRateLimit from client_test.go +func TestRecvRateLimitHandler(t *testing.T) { + // don't run this if we are in short mode + if testing.Short() { + t.Skip("skipping test in short mode.") + } + + const ( + Limit = 2.0 + Burst = 4 + Peers = 5 + TestTimeSeconds = 3 + ) + + sent := make([]atomic.Uint32, Peers) + + fakeHandler := newMockConsumer(t) + fakeHandler.On("Handle", mock.Anything, mock.Anything, mock.Anything). + Return(nil).
+ Run(func(args mock.Arguments) { + peerID := args.Get(2).(*p2p.Envelope).From + peerNum, err := strconv.Atoi(string(peerID)) + require.NoError(t, err) + sent[peerNum-1].Add(1) + }) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + logger := log.NewTestingLogger(t) + client := &Client{} + + mw := WithRecvRateLimitPerPeerHandler(ctx, + Limit, + func(*p2p.Envelope) uint { return 1 }, + false, + logger, + )(fakeHandler).(*recvRateLimitPerPeerHandler) + + mw.burst = Burst + + sendFn := func(peerID types.NodeID) error { + envelope := p2p.Envelope{ + From: peerID, + ChannelID: testChannelID, + } + return mw.Handle(ctx, client, &envelope) + } + + parallelSendWithLimit(t, ctx, sendFn, Peers, TestTimeSeconds) + assertRateLimits(t, sent, Limit, Burst, TestTimeSeconds) +} + +// TestSendRateLimit tests the rate limit for sending messages using p2p.client. +// +// Each peer should have its own, independent rate limit. +// +// GIVEN 5 peers named 1..5 and rate limit of 2/s and burst 4, +// WHEN we send 1, 2, 3, 4 and 5 msgs per second respectively for 3 seconds, +// THEN: +// * peers 1, 2 and 3 receive all of their messages, +// * peers 4 and 5 receive only 2 messages per second plus the 4-message burst. +func (suite *ChannelTestSuite) TestSendRateLimit() { + if testing.Short() { + suite.T().Skip("skipping test in short mode.") + } + + const ( + Limit = 2.0 + Burst = 4 + Peers = 5 + TestTimeSeconds = 3 + ) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + client := suite.client + + limiter := NewRateLimit(ctx, Limit, false, suite.client.logger) + limiter.burst = Burst + suite.client.rateLimit = map[conn.ChannelID]*RateLimit{ + testChannelID: limiter, + } + + sendFn := func(peerID types.NodeID) error { + envelope := p2p.Envelope{ + To: peerID, + ChannelID: testChannelID, + } + return client.Send(ctx, envelope) + + } + sent := make([]atomic.Uint32, Peers) + + suite.p2pChannel.On("Send", mock.Anything, mock.Anything). + Run(func(args mock.Arguments) { + peerID := args.Get(1).(p2p.Envelope).To + peerNum, err := strconv.Atoi(string(peerID)) + suite.NoError(err) + sent[peerNum-1].Add(1) + }). + Return(nil) + + parallelSendWithLimit(suite.T(), ctx, sendFn, Peers, TestTimeSeconds) + assertRateLimits(suite.T(), sent, Limit, Burst, TestTimeSeconds) +} + +// parallelSendWithLimit sends messages to peers in parallel with a rate limit. +// +// The function sends messages to peers. Each peer gets its number, starting from 1. +// The send rate is equal to the peer number, e.g. peer 1 sends 1 msg/s, peer 2 sends 2 msg/s, etc.
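To make the expected counts concrete: with a limit of 2 msg/s, a burst of 4 and a 3-second run, assertRateLimits below accepts at most int(2)*3 + 4 = 10 messages per peer, capped by what the peer actually sent (peer*3). That works out to 3, 6, 9, 10 and 10 accepted messages for peers 1 through 5, which is why peers 1 to 3 get everything through while peers 4 and 5 are throttled.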
+func parallelSendWithLimit(t *testing.T, ctx context.Context, sendFn func(peerID types.NodeID) error, + peers int, testTimeSeconds int) { + t.Helper() + + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + // all goroutines will wait for the start signal + start := sync.RWMutex{} + start.Lock() + + for peer := 1; peer <= peers; peer++ { + peerID := types.NodeID(strconv.Itoa(peer)) + // peer number is the rate limit + msgsPerSec := peer + + go func(peerID types.NodeID, rate int) { + start.RLock() + defer start.RUnlock() + + for s := 0; s < testTimeSeconds; s++ { + until := time.NewTimer(time.Second) + defer until.Stop() + + for i := 0; i < rate; i++ { + select { + case <-ctx.Done(): + return + default: + } + + if err := sendFn(peerID); !errors.Is(err, context.Canceled) { + require.NoError(t, err) + } + } + + select { + case <-until.C: + // noop, we just sleep until the end of the second + case <-ctx.Done(): + return + } + } + + }(peerID, msgsPerSec) + } + + // start the test + startTime := time.Now() + start.Unlock() + runtime.Gosched() + time.Sleep(time.Duration(testTimeSeconds) * time.Second) + cancel() + // wait for all goroutines to finish, that is - drop RLocks + start.Lock() + defer start.Unlock() + + // check if test ran for the expected time + // note we ignore up to 99 ms to account for any processing time + elapsed := math.Floor(time.Since(startTime).Seconds()*10) / 10 + assert.Equal(t, float64(testTimeSeconds), elapsed, "test should run for %d seconds", testTimeSeconds) +} + +// assertRateLimits checks if the rate limits were applied correctly +// We assume that index of each item in `sent` is the peer number, as described in parallelSendWithLimit. +func assertRateLimits(t *testing.T, sent []atomic.Uint32, limit float64, burst int, seconds int) { + for peer := 1; peer <= len(sent); peer++ { + expected := int(limit)*seconds + burst + if expected > peer*seconds { + expected = peer * seconds + } + + assert.Equal(t, expected, int(sent[peer-1].Load()), "peer %d should receive %d messages", peer, expected) + } +} diff --git a/internal/p2p/conn/connection.go b/internal/p2p/conn/connection.go index 1ecdd1a4dc..82494a5964 100644 --- a/internal/p2p/conn/connection.go +++ b/internal/p2p/conn/connection.go @@ -100,9 +100,6 @@ type MConnection struct { // Closing quitRecvRouting will cause the recvRouting to eventually quit. quitRecvRoutine chan struct{} - // used to ensure FlushStop and OnStop - // are safe to call concurrently. - stopMtx sync.Mutex stopSignal <-chan struct{} cancel context.CancelFunc @@ -231,11 +228,8 @@ func (c *MConnection) getLastMessageAt() time.Time { // stopServices stops the BaseService and timers and closes the quitSendRoutine. // if the quitSendRoutine was already closed, it returns true, otherwise it returns false. -// It uses the stopMtx to ensure only one of FlushStop and OnStop can do this at a time. +// It doesn't lock, as we rely on the caller (eg. BaseService) locking func (c *MConnection) stopServices() (alreadyStopped bool) { - c.stopMtx.Lock() - defer c.stopMtx.Unlock() - select { case <-c.quitSendRoutine: // already quit @@ -612,16 +606,25 @@ type ChannelDescriptor struct { Priority int // TODO: Remove once p2p refactor is complete. - SendQueueCapacity int + SendQueueCapacity int + // RecvMessageCapacity defines the max message size for a given p2p Channel. 
RecvMessageCapacity int - // RecvBufferCapacity defines the max buffer size of inbound messages for a + // RecvBufferCapacity defines the max number of inbound messages for a // given p2p Channel queue. RecvBufferCapacity int // Human readable name of the channel, used in logging and // diagnostics. Name string + + // Timeout for enqueue operations on the incoming queue. + // It is applied to all messages received from remote peer + // and delievered to this channel. + // When timeout expires, messages will be silently dropped. + // + // If zero, enqueue operations will not time out. + EnqueueTimeout time.Duration } func (chDesc ChannelDescriptor) FillDefaults() (filled ChannelDescriptor) { @@ -634,6 +637,7 @@ func (chDesc ChannelDescriptor) FillDefaults() (filled ChannelDescriptor) { if chDesc.RecvMessageCapacity == 0 { chDesc.RecvMessageCapacity = defaultRecvMessageCapacity } + filled = chDesc return } diff --git a/internal/p2p/conn/secret_connection.go b/internal/p2p/conn/secret_connection.go index caf3150528..222fc1b69a 100644 --- a/internal/p2p/conn/secret_connection.go +++ b/internal/p2p/conn/secret_connection.go @@ -247,7 +247,7 @@ func (sc *SecretConnection) Read(data []byte) (n int, err error) { if 0 < len(sc.recvBuffer) { n = copy(data, sc.recvBuffer) sc.recvBuffer = sc.recvBuffer[n:] - return + return n, err } // read off the conn @@ -255,7 +255,7 @@ func (sc *SecretConnection) Read(data []byte) (n int, err error) { defer pool.Put(sealedFrame) _, err = io.ReadFull(sc.conn, sealedFrame) if err != nil { - return + return n, err } // decrypt the frame. @@ -336,7 +336,7 @@ func shareEphPubKey(conn io.ReadWriter, locEphPub *[32]byte) (remEphPub *[32]byt // If error: if trs.FirstError() != nil { err = trs.FirstError() - return + return nil, err } // Otherwise: @@ -442,22 +442,22 @@ func shareAuthSignature(sc io.ReadWriter, pubKey crypto.PubKey, signature []byte return nil, true, err // abort } - _recvMsg := authSigMessage{ + recvMsg2 := authSigMessage{ Key: pk, Sig: pba.Sig, } - return _recvMsg, false, nil + return recvMsg2, false, nil }, ) // If error: if trs.FirstError() != nil { err = trs.FirstError() - return + return recvMsg, err } - var _recvMsg = trs.FirstValue().(authSigMessage) - return _recvMsg, nil + recvMsg = trs.FirstValue().(authSigMessage) + return recvMsg, nil } //-------------------------------------------------------------------------------- diff --git a/internal/p2p/mocks/channel.go b/internal/p2p/mocks/channel.go index ebee0c7670..ae54580e6f 100644 --- a/internal/p2p/mocks/channel.go +++ b/internal/p2p/mocks/channel.go @@ -18,6 +18,10 @@ type Channel struct { func (_m *Channel) Err() error { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Err") + } + var r0 error if rf, ok := ret.Get(0).(func() error); ok { r0 = rf() @@ -29,15 +33,19 @@ func (_m *Channel) Err() error { } // Receive provides a mock function with given fields: _a0 -func (_m *Channel) Receive(_a0 context.Context) *p2p.ChannelIterator { +func (_m *Channel) Receive(_a0 context.Context) p2p.ChannelIterator { ret := _m.Called(_a0) - var r0 *p2p.ChannelIterator - if rf, ok := ret.Get(0).(func(context.Context) *p2p.ChannelIterator); ok { + if len(ret) == 0 { + panic("no return value specified for Receive") + } + + var r0 p2p.ChannelIterator + if rf, ok := ret.Get(0).(func(context.Context) p2p.ChannelIterator); ok { r0 = rf(_a0) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*p2p.ChannelIterator) + r0 = ret.Get(0).(p2p.ChannelIterator) } } @@ -48,6 +56,10 @@ func (_m 
*Channel) Receive(_a0 context.Context) *p2p.ChannelIterator { func (_m *Channel) Send(_a0 context.Context, _a1 p2p.Envelope) error { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for Send") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, p2p.Envelope) error); ok { r0 = rf(_a0, _a1) @@ -62,6 +74,10 @@ func (_m *Channel) Send(_a0 context.Context, _a1 p2p.Envelope) error { func (_m *Channel) SendError(_a0 context.Context, _a1 p2p.PeerError) error { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for SendError") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, p2p.PeerError) error); ok { r0 = rf(_a0, _a1) @@ -76,6 +92,10 @@ func (_m *Channel) SendError(_a0 context.Context, _a1 p2p.PeerError) error { func (_m *Channel) String() string { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for String") + } + var r0 string if rf, ok := ret.Get(0).(func() string); ok { r0 = rf() diff --git a/internal/p2p/mocks/connection.go b/internal/p2p/mocks/connection.go index 447bb925ab..879950b6a1 100644 --- a/internal/p2p/mocks/connection.go +++ b/internal/p2p/mocks/connection.go @@ -27,6 +27,10 @@ type Connection struct { func (_m *Connection) Close() error { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Close") + } + var r0 error if rf, ok := ret.Get(0).(func() error); ok { r0 = rf() @@ -41,6 +45,10 @@ func (_m *Connection) Close() error { func (_m *Connection) Handshake(_a0 context.Context, _a1 time.Duration, _a2 types.NodeInfo, _a3 crypto.PrivKey) (types.NodeInfo, crypto.PubKey, error) { ret := _m.Called(_a0, _a1, _a2, _a3) + if len(ret) == 0 { + panic("no return value specified for Handshake") + } + var r0 types.NodeInfo var r1 crypto.PubKey var r2 error @@ -74,6 +82,10 @@ func (_m *Connection) Handshake(_a0 context.Context, _a1 time.Duration, _a2 type func (_m *Connection) LocalEndpoint() p2p.Endpoint { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for LocalEndpoint") + } + var r0 p2p.Endpoint if rf, ok := ret.Get(0).(func() p2p.Endpoint); ok { r0 = rf() @@ -88,6 +100,10 @@ func (_m *Connection) LocalEndpoint() p2p.Endpoint { func (_m *Connection) ReceiveMessage(_a0 context.Context) (conn.ChannelID, []byte, error) { ret := _m.Called(_a0) + if len(ret) == 0 { + panic("no return value specified for ReceiveMessage") + } + var r0 conn.ChannelID var r1 []byte var r2 error @@ -121,6 +137,10 @@ func (_m *Connection) ReceiveMessage(_a0 context.Context) (conn.ChannelID, []byt func (_m *Connection) RemoteEndpoint() p2p.Endpoint { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for RemoteEndpoint") + } + var r0 p2p.Endpoint if rf, ok := ret.Get(0).(func() p2p.Endpoint); ok { r0 = rf() @@ -135,6 +155,10 @@ func (_m *Connection) RemoteEndpoint() p2p.Endpoint { func (_m *Connection) SendMessage(_a0 context.Context, _a1 conn.ChannelID, _a2 []byte) error { ret := _m.Called(_a0, _a1, _a2) + if len(ret) == 0 { + panic("no return value specified for SendMessage") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, conn.ChannelID, []byte) error); ok { r0 = rf(_a0, _a1, _a2) @@ -149,6 +173,10 @@ func (_m *Connection) SendMessage(_a0 context.Context, _a1 conn.ChannelID, _a2 [ func (_m *Connection) String() string { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for String") + } + var r0 string if rf, ok := ret.Get(0).(func() string); ok { r0 = rf() diff --git 
a/internal/p2p/mocks/transport.go b/internal/p2p/mocks/transport.go index 0901d27c84..6fbaccacaa 100644 --- a/internal/p2p/mocks/transport.go +++ b/internal/p2p/mocks/transport.go @@ -21,6 +21,10 @@ type Transport struct { func (_m *Transport) Accept(_a0 context.Context) (p2p.Connection, error) { ret := _m.Called(_a0) + if len(ret) == 0 { + panic("no return value specified for Accept") + } + var r0 p2p.Connection var r1 error if rf, ok := ret.Get(0).(func(context.Context) (p2p.Connection, error)); ok { @@ -52,6 +56,10 @@ func (_m *Transport) AddChannelDescriptors(_a0 []*conn.ChannelDescriptor) { func (_m *Transport) Close() error { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Close") + } + var r0 error if rf, ok := ret.Get(0).(func() error); ok { r0 = rf() @@ -66,6 +74,10 @@ func (_m *Transport) Close() error { func (_m *Transport) Dial(_a0 context.Context, _a1 *p2p.Endpoint) (p2p.Connection, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for Dial") + } + var r0 p2p.Connection var r1 error if rf, ok := ret.Get(0).(func(context.Context, *p2p.Endpoint) (p2p.Connection, error)); ok { @@ -92,6 +104,10 @@ func (_m *Transport) Dial(_a0 context.Context, _a1 *p2p.Endpoint) (p2p.Connectio func (_m *Transport) Endpoint() (*p2p.Endpoint, error) { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Endpoint") + } + var r0 *p2p.Endpoint var r1 error if rf, ok := ret.Get(0).(func() (*p2p.Endpoint, error)); ok { @@ -118,6 +134,10 @@ func (_m *Transport) Endpoint() (*p2p.Endpoint, error) { func (_m *Transport) Listen(_a0 *p2p.Endpoint) error { ret := _m.Called(_a0) + if len(ret) == 0 { + panic("no return value specified for Listen") + } + var r0 error if rf, ok := ret.Get(0).(func(*p2p.Endpoint) error); ok { r0 = rf(_a0) @@ -132,6 +152,10 @@ func (_m *Transport) Listen(_a0 *p2p.Endpoint) error { func (_m *Transport) Protocols() []p2p.Protocol { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Protocols") + } + var r0 []p2p.Protocol if rf, ok := ret.Get(0).(func() []p2p.Protocol); ok { r0 = rf() @@ -148,6 +172,10 @@ func (_m *Transport) Protocols() []p2p.Protocol { func (_m *Transport) String() string { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for String") + } + var r0 string if rf, ok := ret.Get(0).(func() string); ok { r0 = rf() diff --git a/internal/p2p/p2p_test.go b/internal/p2p/p2p_test.go index 97a008d025..2d2c6c2c48 100644 --- a/internal/p2p/p2p_test.go +++ b/internal/p2p/p2p_test.go @@ -3,6 +3,7 @@ package p2p_test import ( "github.com/dashpay/tenderdash/crypto" "github.com/dashpay/tenderdash/crypto/ed25519" + tmsync "github.com/dashpay/tenderdash/internal/libs/sync" "github.com/dashpay/tenderdash/internal/p2p" "github.com/dashpay/tenderdash/types" ) @@ -25,7 +26,7 @@ var ( ListenAddr: "0.0.0.0:0", Network: "test", Moniker: string(selfID), - Channels: []byte{0x01, 0x02}, + Channels: tmsync.NewConcurrentSlice[uint16](0x01, 0x02), } peerKey crypto.PrivKey = ed25519.GenPrivKeyFromSecret([]byte{0x84, 0xd7, 0x01, 0xbf, 0x83, 0x20, 0x1c, 0xfe}) @@ -35,6 +36,6 @@ var ( ListenAddr: "0.0.0.0:0", Network: "test", Moniker: string(peerID), - Channels: []byte{0x01, 0x02}, + Channels: tmsync.NewConcurrentSlice[uint16](0x01, 0x02), } ) diff --git a/internal/p2p/p2ptest/network.go b/internal/p2p/p2ptest/network.go index 5c0163ab0f..e9f6d9abde 100644 --- a/internal/p2p/p2ptest/network.go +++ b/internal/p2p/p2ptest/network.go @@ -12,6 +12,7 @@ 
import ( "github.com/dashpay/tenderdash/config" "github.com/dashpay/tenderdash/crypto" "github.com/dashpay/tenderdash/crypto/ed25519" + tmsync "github.com/dashpay/tenderdash/internal/libs/sync" "github.com/dashpay/tenderdash/internal/p2p" p2pclient "github.com/dashpay/tenderdash/internal/p2p/client" "github.com/dashpay/tenderdash/libs/log" @@ -53,9 +54,8 @@ func (opts *NetworkOptions) setDefaults() { // MakeNetwork creates a test network with the given number of nodes and // connects them to each other. -func MakeNetwork(ctx context.Context, t *testing.T, opts NetworkOptions) *Network { +func MakeNetwork(ctx context.Context, t *testing.T, opts NetworkOptions, logger log.Logger) *Network { opts.setDefaults() - logger := log.NewNopLogger() network := &Network{ Nodes: map[types.NodeID]*Node{}, logger: logger, @@ -272,6 +272,7 @@ func (n *Network) MakeNode(ctx context.Context, t *testing.T, proTxHash crypto.P ListenAddr: "0.0.0.0:0", // FIXME: We have to fake this for now. Moniker: string(nodeID), ProTxHash: proTxHash.Copy(), + Channels: tmsync.NewConcurrentSlice[uint16](), } transport := n.memoryNetwork.CreateTransport(nodeID) @@ -374,7 +375,7 @@ func (n *Node) MakePeerUpdates(ctx context.Context, t *testing.T) *p2p.PeerUpdat // MakePeerUpdatesNoRequireEmpty opens a peer update subscription, with automatic cleanup. // It does *not* check that all updates have been consumed, but will // close the update channel. -func (n *Node) MakePeerUpdatesNoRequireEmpty(ctx context.Context, t *testing.T) *p2p.PeerUpdates { +func (n *Node) MakePeerUpdatesNoRequireEmpty(ctx context.Context, _t *testing.T) *p2p.PeerUpdates { return n.PeerManager.Subscribe(ctx, "p2ptest") } diff --git a/internal/p2p/p2ptest/require.go b/internal/p2p/p2ptest/require.go index af49bc18eb..16804ae81a 100644 --- a/internal/p2p/p2ptest/require.go +++ b/internal/p2p/p2ptest/require.go @@ -25,7 +25,8 @@ func RequireEmpty(ctx context.Context, t *testing.T, channels ...p2p.Channel) { count := 0 for iter.Next(ctx) { count++ - require.Nil(t, iter.Envelope()) + e := iter.Envelope() + require.Nil(t, e, "received unexpected message %v", e.Message) } require.Zero(t, count) require.Error(t, ctx.Err()) diff --git a/internal/p2p/peermanager.go b/internal/p2p/peermanager.go index e38b7751b1..533a570fe5 100644 --- a/internal/p2p/peermanager.go +++ b/internal/p2p/peermanager.go @@ -328,7 +328,7 @@ type PeerManager struct { } // NewPeerManager creates a new peer manager. 
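Because NodeInfo.Channels is now a concurrent slice of uint16 rather than a raw []byte, fixtures have to construct it explicitly; a minimal sketch mirroring the sample channel IDs used in the tests above (selfID is assumed to be defined by the enclosing test package):

    import (
        tmsync "github.com/dashpay/tenderdash/internal/libs/sync"
        "github.com/dashpay/tenderdash/types"
    )

    var selfInfo = types.NodeInfo{
        NodeID:     selfID, // assumed to exist in the enclosing test package
        ListenAddr: "0.0.0.0:0",
        Network:    "test",
        Moniker:    string(selfID),
        Channels:   tmsync.NewConcurrentSlice[uint16](0x01, 0x02),
    }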
-func NewPeerManager(ctx context.Context, selfID types.NodeID, peerDB dbm.DB, options PeerManagerOptions) (*PeerManager, error) { +func NewPeerManager(_ctx context.Context, selfID types.NodeID, peerDB dbm.DB, options PeerManagerOptions) (*PeerManager, error) { if selfID == "" { return nil, errors.New("self ID not given") } @@ -683,7 +683,7 @@ func (m *PeerManager) DialFailed(ctx context.Context, address NodeAddress) error } // scheduleDial will dial peers after some delay -func (m *PeerManager) scheduleDial(ctx context.Context, delay time.Duration) { +func (m *PeerManager) scheduleDial(_ctx context.Context, delay time.Duration) { if delay > 0 && delay != retryNever { m.dialWaker.WakeAfter(delay) } else { @@ -980,6 +980,7 @@ func (m *PeerManager) Disconnected(ctx context.Context, peerID types.NodeID) { } m.dialWaker.Wake() + m.logger.Debug("peer disconnected", "peer", peerID, "ready", ready) } // Errored reports a peer error, causing the peer to be evicted if it's @@ -991,6 +992,7 @@ func (m *PeerManager) Disconnected(ctx context.Context, peerID types.NodeID) { // FIXME: This will cause the peer manager to immediately try to reconnect to // the peer, which is probably not always what we want. func (m *PeerManager) Errored(peerID types.NodeID, err error) { + m.logger.Error("peer errored", "peer", peerID, "error", err) m.mtx.Lock() defer m.mtx.Unlock() diff --git a/internal/p2p/pex/reactor_test.go b/internal/p2p/pex/reactor_test.go index cd66af1421..cfd8c25288 100644 --- a/internal/p2p/pex/reactor_test.go +++ b/internal/p2p/pex/reactor_test.go @@ -1,4 +1,3 @@ -//nolint:unused package pex_test import ( @@ -381,7 +380,7 @@ func setupNetwork(ctx context.Context, t *testing.T, opts testOptions) *reactorT rts := &reactorTestSuite{ logger: log.NewNopLogger().With("testCase", t.Name()), - network: p2ptest.MakeNetwork(ctx, t, networkOpts), + network: p2ptest.MakeNetwork(ctx, t, networkOpts, log.NewNopLogger()), reactors: make(map[types.NodeID]*pex.Reactor, realNodes), pexChannels: make(map[types.NodeID]p2p.Channel, opts.TotalNodes), peerChans: make(map[types.NodeID]chan p2p.PeerUpdate, opts.TotalNodes), diff --git a/internal/p2p/pqueue.go b/internal/p2p/pqueue.go index 350c38ce0b..eea4ea2142 100644 --- a/internal/p2p/pqueue.go +++ b/internal/p2p/pqueue.go @@ -99,13 +99,16 @@ func newPQScheduler( logger log.Logger, m *Metrics, lc *metricsLabelCache, - chDescs []*ChannelDescriptor, + chDescs map[ChannelID]*ChannelDescriptor, enqueueBuf, dequeueBuf, capacity uint, ) *pqScheduler { // copy each ChannelDescriptor and sort them by ascending channel priority - chDescsCopy := make([]*ChannelDescriptor, len(chDescs)) - copy(chDescsCopy, chDescs) + chDescsCopy := make([]*ChannelDescriptor, 0, len(chDescs)) + for _, chDesc := range chDescs { + chDescsCopy = append(chDescsCopy, chDesc) + } + sort.Slice(chDescsCopy, func(i, j int) bool { return chDescsCopy[i].Priority < chDescsCopy[j].Priority }) var ( diff --git a/internal/p2p/pqueue_test.go b/internal/p2p/pqueue_test.go index 3e1594d79c..7058b7e4cc 100644 --- a/internal/p2p/pqueue_test.go +++ b/internal/p2p/pqueue_test.go @@ -14,8 +14,8 @@ type testMessage = gogotypes.StringValue func TestCloseWhileDequeueFull(t *testing.T) { enqueueLength := 5 - chDescs := []*ChannelDescriptor{ - {ID: 0x01, Priority: 1}, + chDescs := map[ChannelID]*ChannelDescriptor{ + 0x01: {ID: 0x01, Priority: 1}, } pqueue := newPQScheduler(log.NewNopLogger(), NopMetrics(), newMetricsLabelCache(), chDescs, uint(enqueueLength), 1, 120) diff --git a/internal/p2p/router.go 
b/internal/p2p/router.go index ed6e2c0d52..34896d77eb 100644 --- a/internal/p2p/router.go +++ b/internal/p2p/router.go @@ -6,6 +6,7 @@ import ( "fmt" "io" "net" + "reflect" "runtime" "time" @@ -151,7 +152,7 @@ type Router struct { options RouterOptions privKey crypto.PrivKey peerManager *PeerManager - chDescs []*ChannelDescriptor + chDescs map[ChannelID]*ChannelDescriptor transport Transport endpoint *Endpoint connTracker connectionTracker @@ -198,7 +199,7 @@ func NewRouter( options.MaxIncomingConnectionAttempts, options.IncomingConnectionWindow, ), - chDescs: make([]*ChannelDescriptor, 0), + chDescs: make(map[ChannelID]*ChannelDescriptor, 0), transport: transport, endpoint: endpoint, peerManager: peerManager, @@ -213,6 +214,9 @@ func NewRouter( return router, nil } +// createQueueFactory creates a queue factory function based on the queue type +// +// Caller should hold the r.channelMtx RLock. func (r *Router) createQueueFactory(ctx context.Context) (func(int) queue, error) { switch r.options.QueueType { case queueTypeFifo: @@ -223,14 +227,13 @@ func (r *Router) createQueueFactory(ctx context.Context) (func(int) queue, error if size%2 != 0 { size++ } - q := newPQScheduler(r.logger, r.metrics, r.lc, r.chDescs, uint(size)/2, uint(size)/2, defaultCapacity) q.start(ctx) return q }, nil case queueTypeSimplePriority: - return func(size int) queue { return newSimplePriorityQueue(ctx, size, r.chDescs) }, nil + return func(size int) queue { return newSimplePriorityQueue(ctx, size) }, nil default: return nil, fmt.Errorf("cannot construct queue of type %q", r.options.QueueType) @@ -256,7 +259,7 @@ func (r *Router) OpenChannel(ctx context.Context, chDesc *ChannelDescriptor) (Ch if _, ok := r.channelQueues[id]; ok { return nil, fmt.Errorf("channel %v already exists", id) } - r.chDescs = append(r.chDescs, chDesc) + r.chDescs[id] = chDesc queue := r.queueFactory(chDesc.RecvBufferCapacity) outCh := make(chan Envelope, chDesc.RecvBufferCapacity) @@ -432,7 +435,7 @@ func (r *Router) acceptPeers(ctx context.Context, transport Transport) { return case err != nil: // in this case we got an error from the net.Listener. - r.logger.Error("failed to accept connection", "transport", transport, "err", err) + r.logger.Warn("failed to accept connection", "transport", transport, "err", err) continue } @@ -496,12 +499,12 @@ func (r *Router) openConnection(ctx context.Context, conn Connection) { err = r.peerManager.Accepted(peerInfo.NodeID, SetProTxHashToPeerInfo(peerInfo.ProTxHash)) if err != nil { - r.logger.Error("failed to accept connection", + r.logger.Warn("failed to accept connection", "op", "incoming/accepted", "peer", peerInfo.NodeID, "err", err) return } - r.routePeer(ctx, peerInfo.NodeID, conn, toChannelIDs(peerInfo.Channels)) + r.routePeer(ctx, peerInfo.NodeID, conn, toChannelIDs(peerInfo.Channels.ToSlice())) } // dialPeers maintains outbound connections to peers by dialing them. 
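With channel descriptors now stored per ID and EnqueueTimeout enforced in receivePeer further down, a reactor that prefers dropping stale inbound messages over queueing them indefinitely could register its channel roughly like this; the 0x42 ID, the capacities and the 100 ms timeout are illustrative values only, not defaults from this patch:

    import (
        "context"
        "time"

        "github.com/dashpay/tenderdash/internal/p2p"
    )

    // openDroppingChannel opens a channel whose inbound messages are dropped if
    // they cannot be handed to the reactor within 100 ms.
    func openDroppingChannel(ctx context.Context, router *p2p.Router) (p2p.Channel, error) {
        desc := &p2p.ChannelDescriptor{
            ID:                  p2p.ChannelID(0x42),
            Priority:            5,
            SendQueueCapacity:   64,
            RecvBufferCapacity:  128,
            RecvMessageCapacity: 1 << 20,
            Name:                "example",
            EnqueueTimeout:      100 * time.Millisecond,
        }
        return router.OpenChannel(ctx, desc)
    }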
@@ -589,7 +592,7 @@ func (r *Router) connectPeer(ctx context.Context, address NodeAddress) { } // routePeer (also) calls connection close - go r.routePeer(ctx, address.NodeID, conn, toChannelIDs(peerInfo.Channels)) + go r.routePeer(ctx, address.NodeID, conn, toChannelIDs(peerInfo.Channels.ToSlice())) } func (r *Router) getOrMakeQueue(peerID types.NodeID, channels ChannelIDSet) queue { @@ -600,7 +603,10 @@ func (r *Router) getOrMakeQueue(peerID types.NodeID, channels ChannelIDSet) queu return peerQueue } + r.channelMtx.RLock() peerQueue := r.queueFactory(queueBufferDefault) + r.channelMtx.RUnlock() + r.peerQueues[peerID] = peerQueue r.peerChannels[peerID] = channels return peerQueue @@ -698,6 +704,11 @@ func (r *Router) routePeer(ctx context.Context, peerID types.NodeID, conn Connec r.metrics.PeersConnected.Add(1) r.peerManager.Ready(ctx, peerID, channels) + // we use context to manage the lifecycle of the peer + // note that original ctx will be used in cleanup + ioCtx, cancel := context.WithCancel(ctx) + defer cancel() + sendQueue := r.getOrMakeQueue(peerID, channels) defer func() { r.peerMtx.Lock() @@ -705,6 +716,7 @@ func (r *Router) routePeer(ctx context.Context, peerID types.NodeID, conn Connec delete(r.peerChannels, peerID) r.peerMtx.Unlock() + _ = conn.Close() sendQueue.close() r.peerManager.Disconnected(ctx, peerID) @@ -714,43 +726,68 @@ func (r *Router) routePeer(ctx context.Context, peerID types.NodeID, conn Connec r.logger.Info("peer connected", "peer", peerID, "endpoint", conn) errCh := make(chan error, 2) + wg := sync.WaitGroup{} + wg.Add(2) go func() { select { - case errCh <- r.receivePeer(ctx, peerID, conn): - case <-ctx.Done(): + case errCh <- r.receivePeer(ioCtx, peerID, conn): + case <-ioCtx.Done(): } + wg.Done() }() go func() { select { - case errCh <- r.sendPeer(ctx, peerID, conn, sendQueue): - case <-ctx.Done(): + case errCh <- r.sendPeer(ioCtx, peerID, conn, sendQueue): + case <-ioCtx.Done(): } + wg.Done() }() - var err error + // wait for error from first goroutine + + var ( + err error + ctxErr error + ) + select { case err = <-errCh: - case <-ctx.Done(): + r.logger.Debug("routePeer: received error from subroutine 1", "peer", peerID, "err", err) + case <-ioCtx.Done(): + r.logger.Debug("routePeer: ctx done", "peer", peerID) + ctxErr = ioCtx.Err() } + // goroutine 1 has finished, so we can cancel the context and close everything + cancel() _ = conn.Close() sendQueue.close() - select { - case <-ctx.Done(): - case e := <-errCh: - // The first err was nil, so we update it with the second err, which may - // or may not be nil. 
- if err == nil { - err = e + r.logger.Trace("routePeer: closed conn and send queue, waiting for all goroutines to finish", "peer", peerID, "err", err) + wg.Wait() + r.logger.Trace("routePeer: all goroutines finished", "peer", peerID, "err", err) + + // Drain the error channel; these should typically not be interesting +FOR: + for { + select { + case e := <-errCh: + r.logger.Trace("routePeer: received error when draining errCh", "peer", peerID, "err", e) + // if we received non-context error, we should return it + if err == nil && !errors.Is(e, context.Canceled) && !errors.Is(e, context.DeadlineExceeded) { + err = e + } + default: + break FOR } } + close(errCh) - // if the context was canceled - if e := ctx.Err(); err == nil && e != nil { - err = e + // if the context was canceled, and no other error received on errCh + if err == nil { + err = ctxErr } switch err { @@ -764,6 +801,9 @@ func (r *Router) routePeer(ctx context.Context, peerID types.NodeID, conn Connec // receivePeer receives inbound messages from a peer, deserializes them and // passes them on to the appropriate channel. func (r *Router) receivePeer(ctx context.Context, peerID types.NodeID, conn Connection) error { + timeout := time.NewTimer(0) + defer timeout.Stop() + for { chID, bz, err := conn.ReceiveMessage(ctx) if err != nil { @@ -771,11 +811,14 @@ func (r *Router) receivePeer(ctx context.Context, peerID types.NodeID, conn Conn } r.channelMtx.RLock() - queue, ok := r.channelQueues[chID] + queue, queueOk := r.channelQueues[chID] + chDesc, chDescOk := r.chDescs[chID] r.channelMtx.RUnlock() - if !ok { - r.logger.Debug("dropping message for unknown channel", "peer", peerID, "channel", chID) + if !queueOk || !chDescOk { + r.logger.Debug("dropping message for unknown channel", + "peer", peerID, "channel", chID, + "queue", queueOk, "chDesc", chDescOk) continue } @@ -784,7 +827,6 @@ func (r *Router) receivePeer(ctx context.Context, peerID types.NodeID, conn Conn r.logger.Error("message decoding failed, dropping message", "peer", peerID, "err", err) continue } - start := time.Now().UTC() envelope, err := EnvelopeFromProto(protoEnvelope) if err != nil { r.logger.Error("message decoding failed, dropping message", "peer", peerID, "err", err) @@ -792,6 +834,19 @@ func (r *Router) receivePeer(ctx context.Context, peerID types.NodeID, conn Conn } envelope.From = peerID envelope.ChannelID = chID + + // stop previous timeout counter and drain the timeout channel + timeout.Stop() + select { + case <-timeout.C: + default: + } + + if chDesc.EnqueueTimeout > 0 { + timeout.Reset(chDesc.EnqueueTimeout) + } + start := time.Now().UTC() + select { case queue.enqueue() <- envelope: r.metrics.PeerReceiveBytesTotal.With( @@ -804,7 +859,18 @@ func (r *Router) receivePeer(ctx context.Context, peerID types.NodeID, conn Conn case <-queue.closed(): r.logger.Debug("channel closed, dropping message", "peer", peerID, "channel", chID) + case <-timeout.C: + r.logger.Debug("dropping message from peer due to enqueue timeout", + "peer", peerID, + "channel", chID, + "channel_name", chDesc.Name, + "timeout", chDesc.EnqueueTimeout.String(), + "type", reflect.TypeOf((envelope.Message)).Name(), + "took", time.Since(start).String(), + ) + case <-ctx.Done(): + r.logger.Debug("receivePeer: ctx is done", "peer", peerID, "channel", chID) return nil } } @@ -943,9 +1009,9 @@ func (cs ChannelIDSet) Contains(id ChannelID) bool { return ok } -func toChannelIDs(bytes []byte) ChannelIDSet { - c := make(map[ChannelID]struct{}, len(bytes)) - for _, b := range bytes { +func 
toChannelIDs(ids []uint16) ChannelIDSet { + c := make(map[ChannelID]struct{}, len(ids)) + for _, b := range ids { c[ChannelID(b)] = struct{}{} } return c diff --git a/internal/p2p/router_test.go b/internal/p2p/router_test.go index 37aa4e2c76..d8cd615c87 100644 --- a/internal/p2p/router_test.go +++ b/internal/p2p/router_test.go @@ -3,8 +3,10 @@ package p2p_test import ( "context" "errors" + "fmt" "io" "runtime" + "strconv" "strings" "testing" "time" @@ -12,11 +14,13 @@ import ( "github.com/fortytw2/leaktest" gogotypes "github.com/gogo/protobuf/types" sync "github.com/sasha-s/go-deadlock" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" dbm "github.com/tendermint/tm-db" "github.com/dashpay/tenderdash/crypto" + tmsync "github.com/dashpay/tenderdash/internal/libs/sync" "github.com/dashpay/tenderdash/internal/p2p" "github.com/dashpay/tenderdash/internal/p2p/mocks" "github.com/dashpay/tenderdash/internal/p2p/p2ptest" @@ -50,7 +54,7 @@ func TestRouter_Network(t *testing.T) { t.Cleanup(leaktest.Check(t)) // Create a test network and open a channel where all peers run echoReactor. - network := p2ptest.MakeNetwork(ctx, t, p2ptest.NetworkOptions{NumNodes: 8}) + network := p2ptest.MakeNetwork(ctx, t, p2ptest.NetworkOptions{NumNodes: 8}, log.NewNopLogger()) local := network.AnyNode() peers := network.Peers(local.NodeID) channels := network.MakeChannels(ctx, t, chDesc) @@ -106,7 +110,7 @@ func TestRouter_Channel_Basic(t *testing.T) { peerManager, err := p2p.NewPeerManager(ctx, selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{}) require.NoError(t, err) - testnet := p2ptest.MakeNetwork(ctx, t, p2ptest.NetworkOptions{NumNodes: 1}) + testnet := p2ptest.MakeNetwork(ctx, t, p2ptest.NetworkOptions{NumNodes: 1}, log.NewNopLogger()) router, err := p2p.NewRouter( log.NewNopLogger(), @@ -173,7 +177,7 @@ func TestRouter_Channel_SendReceive(t *testing.T) { t.Cleanup(leaktest.Check(t)) // Create a test network and open a channel on all nodes. - network := p2ptest.MakeNetwork(ctx, t, p2ptest.NetworkOptions{NumNodes: 3}) + network := p2ptest.MakeNetwork(ctx, t, p2ptest.NetworkOptions{NumNodes: 3}, log.NewNopLogger()) ids := network.NodeIDs() aID, bID, cID := ids[0], ids[1], ids[2] @@ -239,7 +243,7 @@ func TestRouter_Channel_Broadcast(t *testing.T) { defer cancel() // Create a test network and open a channel on all nodes. - network := p2ptest.MakeNetwork(ctx, t, p2ptest.NetworkOptions{NumNodes: 4}) + network := p2ptest.MakeNetwork(ctx, t, p2ptest.NetworkOptions{NumNodes: 4}, log.NewNopLogger()) ids := network.NodeIDs() aID, bID, cID, dID := ids[0], ids[1], ids[2], ids[3] @@ -270,7 +274,7 @@ func TestRouter_Channel_Error(t *testing.T) { defer cancel() // Create a test network and open a channel on all nodes. 
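With NodeInfo.Channels switching from a byte slice to tmsync.ConcurrentSlice[uint16], callers snapshot the channel list with ToSlice() and feed it to toChannelIDs, as connectPeer does above. A small sketch of that conversion; ChannelID and ChannelIDSet are re-declared locally so the snippet stands alone, and the internal tmsync import assumes the code is built inside the tenderdash module:

package main

import (
    "fmt"

    tmsync "github.com/dashpay/tenderdash/internal/libs/sync"
)

type ChannelID uint16

type ChannelIDSet map[ChannelID]struct{}

func toChannelIDs(ids []uint16) ChannelIDSet {
    c := make(ChannelIDSet, len(ids))
    for _, b := range ids {
        c[ChannelID(b)] = struct{}{}
    }
    return c
}

func main() {
    // The peer's channels are now stored in a concurrency-safe slice.
    channels := tmsync.NewConcurrentSlice[uint16](0x20, 0x21, 0x22)

    // Take a snapshot before converting to a set.
    set := toChannelIDs(channels.ToSlice())
    fmt.Println(len(set)) // 3
}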
- network := p2ptest.MakeNetwork(ctx, t, p2ptest.NetworkOptions{NumNodes: 3}) + network := p2ptest.MakeNetwork(ctx, t, p2ptest.NetworkOptions{NumNodes: 3}, log.NewNopLogger()) network.Start(ctx, t) ids := network.NodeIDs() @@ -303,6 +307,7 @@ func TestRouter_AcceptPeers(t *testing.T) { ListenAddr: "0.0.0.0:0", Network: "other-network", Moniker: string(peerID), + Channels: tmsync.NewConcurrentSlice[uint16](), }, peerKey.PubKey(), false, @@ -504,6 +509,7 @@ func TestRouter_DialPeers(t *testing.T) { ListenAddr: "0.0.0.0:0", Network: "other-network", Moniker: string(peerID), + Channels: tmsync.NewConcurrentSlice[uint16](), }, peerKey.PubKey(), nil, @@ -766,7 +772,7 @@ func TestRouter_ChannelCompatability(t *testing.T) { ListenAddr: "0.0.0.0:0", Network: "test", Moniker: string(peerID), - Channels: []byte{0x03}, + Channels: tmsync.NewConcurrentSlice[uint16](0x03), } mockConnection := &mocks.Connection{} @@ -817,7 +823,7 @@ func TestRouter_DontSendOnInvalidChannel(t *testing.T) { ListenAddr: "0.0.0.0:0", Network: "test", Moniker: string(peerID), - Channels: []byte{0x02}, + Channels: tmsync.NewConcurrentSlice[uint16](0x02), } mockConnection := &mocks.Connection{} @@ -871,3 +877,99 @@ func TestRouter_DontSendOnInvalidChannel(t *testing.T) { router.Stop() mockTransport.AssertExpectations(t) } + +// Given a channel with non-zero enqueue timeout, +// when I send more messages than recv channel capacity, +// and I wait longer than enqueue timeout, +// then I should receive only the messages that fit into the recv channel capacity. +func TestRouter_Channel_Enqueue_Timeout(t *testing.T) { + type testCase struct { + sendCount int + expectedRecvCount int + delay time.Duration + } + + chDesc := &p2p.ChannelDescriptor{ + ID: chID, + Priority: 5, + SendQueueCapacity: 100, + RecvMessageCapacity: 10240, //10kB + EnqueueTimeout: 10 * time.Millisecond, // FIXME: Check if this doesn't affect other tests + RecvBufferCapacity: 10, + } + const processingTime = 10 * time.Millisecond + + testCases := []testCase{ + {sendCount: chDesc.RecvBufferCapacity, expectedRecvCount: chDesc.RecvBufferCapacity, delay: 0}, + {sendCount: chDesc.RecvBufferCapacity * 2, expectedRecvCount: chDesc.RecvBufferCapacity * 2, delay: 0}, + {sendCount: 1, expectedRecvCount: 1, delay: chDesc.EnqueueTimeout + 10*time.Millisecond}, + {sendCount: chDesc.RecvBufferCapacity - 1, expectedRecvCount: chDesc.RecvBufferCapacity - 1, delay: chDesc.EnqueueTimeout + processingTime}, + {sendCount: chDesc.RecvBufferCapacity, expectedRecvCount: chDesc.RecvBufferCapacity, delay: chDesc.EnqueueTimeout + processingTime}, + {sendCount: chDesc.RecvBufferCapacity + 1, expectedRecvCount: chDesc.RecvBufferCapacity, delay: 2*chDesc.EnqueueTimeout + processingTime}, + {sendCount: chDesc.RecvBufferCapacity + 5, expectedRecvCount: chDesc.RecvBufferCapacity, delay: 6*chDesc.EnqueueTimeout + processingTime}, + } + + // how many more messages we send than the recv channel capacity + + logger := log.NewTestingLoggerWithLevel(t, log.LogLevelDebug).WithTimestamp() + + t.Cleanup(leaktest.Check(t)) + + // ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + // defer cancel() + + for _, tc := range testCases { + tc := tc + t.Run(fmt.Sprintf("send=%d,recv=%d,delay=%s", tc.sendCount, tc.expectedRecvCount, tc.delay), func(t *testing.T) { + // timeout that will expire if we don't receive some of the expected messages + ctxTimeout := tc.delay + 200*time.Millisecond + ctx, cancel := context.WithTimeout(context.Background(), ctxTimeout) + defer cancel() + // Create a 
test network and open a channel on all nodes. + network := p2ptest.MakeNetwork(ctx, t, p2ptest.NetworkOptions{NumNodes: 2}, logger) + + ids := network.NodeIDs() + aID, bID := ids[0], ids[1] + channels := network.MakeChannels(ctx, t, chDesc) + a, b := channels[aID], channels[bID] + + network.Start(ctx, t) + + wg := sync.WaitGroup{} + + // Start the test - send messages in a goroutine so that we don't block on a full channel + wg.Add(1) + go func() { + for i := 0; i < tc.sendCount; i++ { + sentEnvelope := p2p.Envelope{To: bID, Message: &p2ptest.Message{Value: strconv.Itoa(i)}} + p2ptest.RequireSend(ctx, t, a, sentEnvelope) + logger.Trace("Sent message", "id", i) + } + + wg.Done() + }() + + // sleep to ensure the timeout has expired and at least some msgs will be dropped + time.Sleep(tc.delay) + count := 0 + + // check if we received all the messages we expected + iter := b.Receive(ctx) + for count < tc.expectedRecvCount && iter.Next(ctx) { + // this will hang if we don't receive the expected number of messages + e := iter.Envelope() + logger.Trace("received message", "message", e.Message) + count++ + } + logger.Info("received messages", "count", count) + + wg.Wait() + + // this will error if we receive too many messages + p2ptest.RequireEmpty(ctx, t, a, b) + + // this will error if we don't receive the expected number of messages + assert.NoError(t, ctx.Err(), "timed out, received %d msgs, expected %d", count, tc.expectedRecvCount) + }) + } +} diff --git a/internal/p2p/rqueue.go b/internal/p2p/rqueue.go index 8d6406864a..a46eefe50f 100644 --- a/internal/p2p/rqueue.go +++ b/internal/p2p/rqueue.go @@ -19,7 +19,7 @@ type simpleQueue struct { chDescs []*ChannelDescriptor } -func newSimplePriorityQueue(ctx context.Context, size int, chDescs []*ChannelDescriptor) *simpleQueue { +func newSimplePriorityQueue(ctx context.Context, size int) *simpleQueue { if size%2 != 0 { size++ } diff --git a/internal/p2p/rqueue_test.go b/internal/p2p/rqueue_test.go index 43c4066e57..0ea137ee89 100644 --- a/internal/p2p/rqueue_test.go +++ b/internal/p2p/rqueue_test.go @@ -13,7 +13,7 @@ func TestSimpleQueue(t *testing.T) { // set up a small queue with very small buffers so we can // watch it shed load, then send a bunch of messages to the // queue, most of which we'll watch it drop.
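The enqueue-timeout path added to receivePeer reuses one time.Timer across loop iterations, which is why it stops the timer and drains its channel before every Reset. The stop/drain/reset idiom on its own, outside the router:

package main

import (
    "fmt"
    "time"
)

func main() {
    timeout := time.NewTimer(0)
    defer timeout.Stop()

    for i := 0; i < 3; i++ {
        // Stop the previous timer and drain its channel so the Reset below
        // starts from a clean state even if the timer already fired.
        timeout.Stop()
        select {
        case <-timeout.C:
        default:
        }
        timeout.Reset(50 * time.Millisecond)

        select {
        case <-timeout.C:
            fmt.Println("iteration", i, "timed out")
        case <-time.After(10 * time.Millisecond): // stands in for a successful enqueue
            fmt.Println("iteration", i, "enqueued in time")
        }
    }
}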
- sq := newSimplePriorityQueue(ctx, 1, nil) + sq := newSimplePriorityQueue(ctx, 1) for i := 0; i < 100; i++ { sq.enqueue() <- Envelope{From: "merlin"} } diff --git a/internal/p2p/transport_mconn.go b/internal/p2p/transport_mconn.go index 7d4b3d046f..3757f8f7d4 100644 --- a/internal/p2p/transport_mconn.go +++ b/internal/p2p/transport_mconn.go @@ -147,11 +147,15 @@ func (m *MConnTransport) Accept(ctx context.Context) (Connection, error) { if err != nil { select { case errCh <- err: + case <-m.doneCh: + m.logger.Trace("MConnTransport Accept: connection closed - doneCh") case <-ctx.Done(): } } select { case conCh <- tcpConn: + case <-m.doneCh: + m.logger.Trace("MConnTransport Accept: connection closed - doneCh") case <-ctx.Done(): } }() @@ -187,12 +191,10 @@ func (m *MConnTransport) Dial(ctx context.Context, endpoint *Endpoint) (Connecti tcpConn, err := dialer.DialContext(ctx, "tcp", net.JoinHostPort( endpoint.IP.String(), strconv.Itoa(int(endpoint.Port)))) if err != nil { - select { - case <-ctx.Done(): - return nil, ctx.Err() - default: - return nil, err + if e := ctx.Err(); e != nil { + return nil, e } + return nil, err } return newMConnConnection(m.logger, tcpConn, m.mConnConfig, m.channelDescs), nil @@ -313,6 +315,7 @@ func (c *mConnConnection) Handshake( select { case errCh <- err: + case <-c.doneCh: case <-handshakeCtx.Done(): } @@ -322,7 +325,8 @@ func (c *mConnConnection) Handshake( case <-handshakeCtx.Done(): _ = c.Close() return types.NodeInfo{}, nil, handshakeCtx.Err() - + case <-c.doneCh: + return types.NodeInfo{}, nil, io.EOF case err := <-errCh: if err != nil { return types.NodeInfo{}, nil, err @@ -365,6 +369,7 @@ func (c *mConnConnection) handshake( _, err := protoio.NewDelimitedWriter(secretConn).WriteMsg(nodeInfo.ToProto()) select { case errCh <- err: + case <-c.doneCh: case <-ctx.Done(): } @@ -375,6 +380,7 @@ func (c *mConnConnection) handshake( _, err := protoio.NewDelimitedReader(secretConn, types.MaxNodeInfoSize()).ReadMsg(&pbPeerInfo) select { case errCh <- err: + case <-c.doneCh: case <-ctx.Done(): } }() @@ -410,6 +416,7 @@ func (c *mConnConnection) handshake( func (c *mConnConnection) onReceive(ctx context.Context, chID ChannelID, payload []byte) { select { case c.receiveCh <- mConnMessage{channelID: chID, payload: payload}: + case <-c.doneCh: case <-ctx.Done(): } } @@ -426,6 +433,7 @@ func (c *mConnConnection) onError(ctx context.Context, e interface{}) { _ = c.Close() select { case c.errorCh <- err: + case <-c.doneCh: case <-ctx.Done(): } } @@ -445,6 +453,8 @@ func (c *mConnConnection) SendMessage(ctx context.Context, chID ChannelID, msg [ return err case <-ctx.Done(): return io.EOF + case <-c.doneCh: + return io.EOF default: if ok := c.mconn.Send(chID, msg); !ok { return errors.New("sending message timed out") @@ -458,10 +468,13 @@ func (c *mConnConnection) SendMessage(ctx context.Context, chID ChannelID, msg [ func (c *mConnConnection) ReceiveMessage(ctx context.Context) (ChannelID, []byte, error) { select { case err := <-c.errorCh: + c.logger.Debug("ReceiveMessage: error occurred", "err", err) return 0, nil, err case <-c.doneCh: + c.logger.Trace("ReceiveMessage: connection closed - doneCh") return 0, nil, io.EOF case <-ctx.Done(): + c.logger.Trace("ReceiveMessage: connection closed - ctx.Done()") return 0, nil, io.EOF case msg := <-c.receiveCh: return msg.channelID, msg.payload, nil @@ -496,6 +509,7 @@ func (c *mConnConnection) RemoteEndpoint() Endpoint { func (c *mConnConnection) Close() error { var err error c.closeOnce.Do(func() { + 
c.logger.Trace("mConnConnection.Close(): closing doneCh") defer close(c.doneCh) if c.mconn != nil && c.mconn.IsRunning() { diff --git a/internal/p2p/transport_test.go b/internal/p2p/transport_test.go index d22dbb9e28..1d87c5179c 100644 --- a/internal/p2p/transport_test.go +++ b/internal/p2p/transport_test.go @@ -12,8 +12,8 @@ import ( "github.com/stretchr/testify/require" "github.com/dashpay/tenderdash/crypto/ed25519" + tmsync "github.com/dashpay/tenderdash/internal/libs/sync" "github.com/dashpay/tenderdash/internal/p2p" - "github.com/dashpay/tenderdash/libs/bytes" "github.com/dashpay/tenderdash/types" ) @@ -283,7 +283,7 @@ func TestConnection_Handshake(t *testing.T) { ListenAddr: "listenaddr", Network: "network", Version: "1.2.3", - Channels: bytes.HexBytes([]byte{0xf0, 0x0f}), + Channels: tmsync.NewConcurrentSlice[uint16](0xf0, 0x0f), Moniker: "moniker", Other: types.NodeInfoOther{ TxIndex: "txindex", @@ -291,7 +291,10 @@ func TestConnection_Handshake(t *testing.T) { }, } bKey := ed25519.GenPrivKey() - bInfo := types.NodeInfo{NodeID: types.NodeIDFromPubKey(bKey.PubKey())} + bInfo := types.NodeInfo{ + NodeID: types.NodeIDFromPubKey(bKey.PubKey()), + Channels: tmsync.NewConcurrentSlice[uint16](), + } errCh := make(chan error, 1) go func() { @@ -641,13 +644,13 @@ func dialAcceptHandshake(ctx context.Context, t *testing.T, a, b p2p.Transport) errCh := make(chan error, 1) go func() { privKey := ed25519.GenPrivKey() - nodeInfo := types.NodeInfo{NodeID: types.NodeIDFromPubKey(privKey.PubKey())} + nodeInfo := types.NodeInfo{NodeID: types.NodeIDFromPubKey(privKey.PubKey()), Channels: tmsync.NewConcurrentSlice[uint16]()} _, _, err := ba.Handshake(ctx, 0, nodeInfo, privKey) errCh <- err }() privKey := ed25519.GenPrivKey() - nodeInfo := types.NodeInfo{NodeID: types.NodeIDFromPubKey(privKey.PubKey())} + nodeInfo := types.NodeInfo{NodeID: types.NodeIDFromPubKey(privKey.PubKey()), Channels: tmsync.NewConcurrentSlice[uint16]()} _, _, err := ab.Handshake(ctx, 0, nodeInfo, privKey) require.NoError(t, err) diff --git a/internal/proxy/client.go b/internal/proxy/client.go index 4ed8395759..9421d6b8ea 100644 --- a/internal/proxy/client.go +++ b/internal/proxy/client.go @@ -4,6 +4,7 @@ import ( "context" "io" "os" + "strconv" "syscall" "time" @@ -12,7 +13,9 @@ import ( abciclient "github.com/dashpay/tenderdash/abci/client" "github.com/dashpay/tenderdash/abci/example/kvstore" "github.com/dashpay/tenderdash/abci/types" + "github.com/dashpay/tenderdash/config" "github.com/dashpay/tenderdash/libs/log" + tmos "github.com/dashpay/tenderdash/libs/os" "github.com/dashpay/tenderdash/libs/service" e2e "github.com/dashpay/tenderdash/test/e2e/app" ) @@ -23,7 +26,9 @@ import ( // // The Closer is a noop except for persistent_kvstore applications, // which will clean up the store. 
-func ClientFactory(logger log.Logger, addr, transport, dbDir string) (abciclient.Client, io.Closer, error) { +func ClientFactory(logger log.Logger, cfg config.AbciConfig, dbDir string) (abciclient.Client, io.Closer, error) { + addr := cfg.Address + switch addr { case "kvstore": app, err := kvstore.NewMemoryApp( @@ -32,7 +37,7 @@ func ClientFactory(logger log.Logger, addr, transport, dbDir string) (abciclient if err != nil { return nil, nil, err } - return abciclient.NewLocalClient(logger, app), noopCloser{}, nil + return abciclient.NewLocalClient(logger, app), tmos.NoopCloser{}, nil case "persistent_kvstore": app, err := kvstore.NewPersistentApp( kvstore.DefaultConfig(dbDir), @@ -45,26 +50,22 @@ func ClientFactory(logger log.Logger, addr, transport, dbDir string) (abciclient case "e2e": app, err := e2e.NewApplication(kvstore.DefaultConfig(dbDir)) if err != nil { - return nil, noopCloser{}, err + return nil, tmos.NoopCloser{}, err } - return abciclient.NewLocalClient(logger, app), noopCloser{}, nil + return abciclient.NewLocalClient(logger, app), tmos.NoopCloser{}, nil case "noop": - return abciclient.NewLocalClient(logger, types.NewBaseApplication()), noopCloser{}, nil + return abciclient.NewLocalClient(logger, types.NewBaseApplication()), tmos.NoopCloser{}, nil default: const mustConnect = false // loop retrying - client, err := abciclient.NewClient(logger, addr, transport, mustConnect) + client, err := abciclient.NewClient(logger, cfg, mustConnect) if err != nil { - return nil, noopCloser{}, err + return nil, tmos.NoopCloser{}, err } - return client, noopCloser{}, nil + return client, tmos.NoopCloser{}, nil } } -type noopCloser struct{} - -func (noopCloser) Close() error { return nil } - // proxyClient provides the application connection. type proxyClient struct { service.BaseService @@ -136,78 +137,78 @@ func kill() error { return p.Signal(syscall.SIGABRT) } -func (app *proxyClient) InitChain(ctx context.Context, req *types.RequestInitChain) (*types.ResponseInitChain, error) { - defer addTimeSample(app.metrics.MethodTiming.With("method", "init_chain", "type", "sync"))() +func (app *proxyClient) InitChain(ctx context.Context, req *types.RequestInitChain) (r *types.ResponseInitChain, err error) { + defer addTimeSample(app.metrics.MethodTiming.With("method", "init_chain", "type", "sync"))(&err) return app.client.InitChain(ctx, req) } -func (app *proxyClient) PrepareProposal(ctx context.Context, req *types.RequestPrepareProposal) (*types.ResponsePrepareProposal, error) { - defer addTimeSample(app.metrics.MethodTiming.With("method", "prepare_proposal", "type", "sync"))() +func (app *proxyClient) PrepareProposal(ctx context.Context, req *types.RequestPrepareProposal) (r *types.ResponsePrepareProposal, err error) { + defer addTimeSample(app.metrics.MethodTiming.With("method", "prepare_proposal", "type", "sync"))(&err) return app.client.PrepareProposal(ctx, req) } -func (app *proxyClient) ProcessProposal(ctx context.Context, req *types.RequestProcessProposal) (*types.ResponseProcessProposal, error) { - defer addTimeSample(app.metrics.MethodTiming.With("method", "process_proposal", "type", "sync"))() +func (app *proxyClient) ProcessProposal(ctx context.Context, req *types.RequestProcessProposal) (r *types.ResponseProcessProposal, err error) { + defer addTimeSample(app.metrics.MethodTiming.With("method", "process_proposal", "type", "sync"))(&err) return app.client.ProcessProposal(ctx, req) } -func (app *proxyClient) ExtendVote(ctx context.Context, req *types.RequestExtendVote) 
(*types.ResponseExtendVote, error) { - defer addTimeSample(app.metrics.MethodTiming.With("method", "extend_vote", "type", "sync"))() +func (app *proxyClient) ExtendVote(ctx context.Context, req *types.RequestExtendVote) (r *types.ResponseExtendVote, err error) { + defer addTimeSample(app.metrics.MethodTiming.With("method", "extend_vote", "type", "sync"))(&err) return app.client.ExtendVote(ctx, req) } -func (app *proxyClient) VerifyVoteExtension(ctx context.Context, req *types.RequestVerifyVoteExtension) (*types.ResponseVerifyVoteExtension, error) { - defer addTimeSample(app.metrics.MethodTiming.With("method", "verify_vote_extension", "type", "sync"))() +func (app *proxyClient) VerifyVoteExtension(ctx context.Context, req *types.RequestVerifyVoteExtension) (r *types.ResponseVerifyVoteExtension, err error) { + defer addTimeSample(app.metrics.MethodTiming.With("method", "verify_vote_extension", "type", "sync"))(&err) return app.client.VerifyVoteExtension(ctx, req) } -func (app *proxyClient) FinalizeBlock(ctx context.Context, req *types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error) { - defer addTimeSample(app.metrics.MethodTiming.With("method", "finalize_block", "type", "sync"))() +func (app *proxyClient) FinalizeBlock(ctx context.Context, req *types.RequestFinalizeBlock) (r *types.ResponseFinalizeBlock, err error) { + defer addTimeSample(app.metrics.MethodTiming.With("method", "finalize_block", "type", "sync"))(&err) return app.client.FinalizeBlock(ctx, req) } -func (app *proxyClient) Flush(ctx context.Context) error { - defer addTimeSample(app.metrics.MethodTiming.With("method", "flush", "type", "sync"))() +func (app *proxyClient) Flush(ctx context.Context) (err error) { + defer addTimeSample(app.metrics.MethodTiming.With("method", "flush", "type", "sync"))(&err) return app.client.Flush(ctx) } -func (app *proxyClient) CheckTx(ctx context.Context, req *types.RequestCheckTx) (*types.ResponseCheckTx, error) { - defer addTimeSample(app.metrics.MethodTiming.With("method", "check_tx", "type", "sync"))() +func (app *proxyClient) CheckTx(ctx context.Context, req *types.RequestCheckTx) (r *types.ResponseCheckTx, err error) { + defer addTimeSample(app.metrics.MethodTiming.With("method", "check_tx", "type", "sync"))(&err) return app.client.CheckTx(ctx, req) } -func (app *proxyClient) Echo(ctx context.Context, msg string) (*types.ResponseEcho, error) { - defer addTimeSample(app.metrics.MethodTiming.With("method", "echo", "type", "sync"))() +func (app *proxyClient) Echo(ctx context.Context, msg string) (r *types.ResponseEcho, err error) { + defer addTimeSample(app.metrics.MethodTiming.With("method", "echo", "type", "sync"))(&err) return app.client.Echo(ctx, msg) } -func (app *proxyClient) Info(ctx context.Context, req *types.RequestInfo) (*types.ResponseInfo, error) { - defer addTimeSample(app.metrics.MethodTiming.With("method", "info", "type", "sync"))() +func (app *proxyClient) Info(ctx context.Context, req *types.RequestInfo) (r *types.ResponseInfo, err error) { + defer addTimeSample(app.metrics.MethodTiming.With("method", "info", "type", "sync"))(&err) return app.client.Info(ctx, req) } -func (app *proxyClient) Query(ctx context.Context, req *types.RequestQuery) (*types.ResponseQuery, error) { - defer addTimeSample(app.metrics.MethodTiming.With("method", "query", "type", "sync"))() +func (app *proxyClient) Query(ctx context.Context, req *types.RequestQuery) (r *types.ResponseQuery, err error) { + defer addTimeSample(app.metrics.MethodTiming.With("method", "query", "type", "sync"))(&err) 
return app.client.Query(ctx, req) } -func (app *proxyClient) ListSnapshots(ctx context.Context, req *types.RequestListSnapshots) (*types.ResponseListSnapshots, error) { - defer addTimeSample(app.metrics.MethodTiming.With("method", "list_snapshots", "type", "sync"))() +func (app *proxyClient) ListSnapshots(ctx context.Context, req *types.RequestListSnapshots) (r *types.ResponseListSnapshots, err error) { + defer addTimeSample(app.metrics.MethodTiming.With("method", "list_snapshots", "type", "sync"))(&err) return app.client.ListSnapshots(ctx, req) } -func (app *proxyClient) OfferSnapshot(ctx context.Context, req *types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) { - defer addTimeSample(app.metrics.MethodTiming.With("method", "offer_snapshot", "type", "sync"))() +func (app *proxyClient) OfferSnapshot(ctx context.Context, req *types.RequestOfferSnapshot) (r *types.ResponseOfferSnapshot, err error) { + defer addTimeSample(app.metrics.MethodTiming.With("method", "offer_snapshot", "type", "sync"))(&err) return app.client.OfferSnapshot(ctx, req) } -func (app *proxyClient) LoadSnapshotChunk(ctx context.Context, req *types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) { - defer addTimeSample(app.metrics.MethodTiming.With("method", "load_snapshot_chunk", "type", "sync"))() +func (app *proxyClient) LoadSnapshotChunk(ctx context.Context, req *types.RequestLoadSnapshotChunk) (r *types.ResponseLoadSnapshotChunk, err error) { + defer addTimeSample(app.metrics.MethodTiming.With("method", "load_snapshot_chunk", "type", "sync"))(&err) return app.client.LoadSnapshotChunk(ctx, req) } -func (app *proxyClient) ApplySnapshotChunk(ctx context.Context, req *types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) { - defer addTimeSample(app.metrics.MethodTiming.With("method", "apply_snapshot_chunk", "type", "sync"))() +func (app *proxyClient) ApplySnapshotChunk(ctx context.Context, req *types.RequestApplySnapshotChunk) (r *types.ResponseApplySnapshotChunk, err error) { + defer addTimeSample(app.metrics.MethodTiming.With("method", "apply_snapshot_chunk", "type", "sync"))(&err) return app.client.ApplySnapshotChunk(ctx, req) } @@ -215,7 +216,11 @@ func (app *proxyClient) ApplySnapshotChunk(ctx context.Context, req *types.Reque // The observation added to m is the number of seconds ellapsed since addTimeSample // was initially called. addTimeSample is meant to be called in a defer to calculate // the amount of time a function takes to complete. 
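addTimeSample now returns a func(*error): the deferred call reads the method's named error return after it has been set, so a single histogram observation can carry both the elapsed time and a success label. A minimal sketch of the same pattern; go-kit's generic in-memory histogram is used here as a stand-in for the proxy's Prometheus metric:

package main

import (
    "errors"
    "fmt"
    "strconv"
    "time"

    "github.com/go-kit/kit/metrics"
    "github.com/go-kit/kit/metrics/generic"
)

// addTimeSample records elapsed seconds plus a success label derived from the
// error the caller eventually returns.
func addTimeSample(m metrics.Histogram) func(*error) {
    start := time.Now()
    return func(err *error) {
        m.With("success", strconv.FormatBool(*err == nil)).Observe(time.Since(start).Seconds())
    }
}

func doWork(m metrics.Histogram, fail bool) (err error) {
    // err is a named return, so the deferred closure sees the final value.
    defer addTimeSample(m.With("method", "do_work", "type", "sync"))(&err)
    if fail {
        return errors.New("boom")
    }
    return nil
}

func main() {
    h := generic.NewHistogram("method_timing", 50)
    fmt.Println(doWork(h, false), doWork(h, true))
}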
-func addTimeSample(m metrics.Histogram) func() { +func addTimeSample(m metrics.Histogram) func(*error) { start := time.Now() - return func() { m.Observe(time.Since(start).Seconds()) } + + // we take err address to simplify usage in defer() + return func(err *error) { + m.With("success", strconv.FormatBool(*err == nil)).Observe(time.Since(start).Seconds()) + } } diff --git a/internal/proxy/client_test.go b/internal/proxy/client_test.go index 700ee47a65..8821ef7ea5 100644 --- a/internal/proxy/client_test.go +++ b/internal/proxy/client_test.go @@ -7,20 +7,23 @@ import ( "os" "os/signal" "strings" + "sync" "syscall" "testing" "time" + "github.com/go-kit/kit/metrics" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" - "gotest.tools/assert" - abciclient "github.com/dashpay/tenderdash/abci/client" abcimocks "github.com/dashpay/tenderdash/abci/client/mocks" "github.com/dashpay/tenderdash/abci/example/kvstore" "github.com/dashpay/tenderdash/abci/server" "github.com/dashpay/tenderdash/abci/types" + "github.com/dashpay/tenderdash/abci/types/mocks" + "github.com/dashpay/tenderdash/config" "github.com/dashpay/tenderdash/libs/log" tmrand "github.com/dashpay/tenderdash/libs/rand" ) @@ -55,12 +58,13 @@ func (app *appConnTest) Info(ctx context.Context, req *types.RequestInfo) (*type //---------------------------------------- -var SOCKET = "socket" +const SOCKET = "socket" func TestEcho(t *testing.T) { sockPath := fmt.Sprintf("unix://%s/echo_%v.sock", t.TempDir(), tmrand.Str(6)) logger := log.NewNopLogger() - client, err := abciclient.NewClient(logger, sockPath, SOCKET, true) + cfg := config.AbciConfig{Address: sockPath, Transport: SOCKET} + client, err := abciclient.NewClient(logger, cfg, true) if err != nil { t.Fatal(err) } @@ -104,7 +108,8 @@ func BenchmarkEcho(b *testing.B) { b.StopTimer() // Initialize sockPath := fmt.Sprintf("unix://%s/echo_%v.sock", b.TempDir(), tmrand.Str(6)) logger := log.NewNopLogger() - client, err := abciclient.NewClient(logger, sockPath, SOCKET, true) + cfg := config.AbciConfig{Address: sockPath, Transport: SOCKET} + client, err := abciclient.NewClient(logger, cfg, true) if err != nil { b.Fatal(err) } @@ -155,7 +160,8 @@ func TestInfo(t *testing.T) { sockPath := fmt.Sprintf("unix://%s/echo_%v.sock", t.TempDir(), tmrand.Str(6)) logger := log.NewNopLogger() - client, err := abciclient.NewClient(logger, sockPath, SOCKET, true) + cfg := config.AbciConfig{Address: sockPath, Transport: SOCKET} + client, err := abciclient.NewClient(logger, cfg, true) require.NoError(t, err) // Start server @@ -239,3 +245,102 @@ func TestAppConns_Failure(t *testing.T) { t.Fatal("expected process to receive SIGTERM signal") } } +func TestFailureMetrics(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + logger := log.NewTestingLogger(t) + mtr := mockMetrics() + hst := mtr.MethodTiming.(*mockMetric) + + app := mocks.NewApplication(t) + app.On("CheckTx", mock.Anything, mock.Anything).Return(&types.ResponseCheckTx{}, errors.New("some error")).Once() + app.On("CheckTx", mock.Anything, mock.Anything).Return(&types.ResponseCheckTx{}, nil).Times(2) + app.On("Info", mock.Anything, mock.Anything).Return(&types.ResponseInfo{}, nil) + + // promtest.ToFloat64(hst) + cli := abciclient.NewLocalClient(logger, app) + + proxy := New(cli, log.NewNopLogger(), mtr) + + var err error + for i := 0; i < 5; i++ { + _, err = proxy.Info(ctx, &types.RequestInfo{}) + assert.NoError(t, err) + } + + for i := 0; i < 3; i++ { + _, 
err = proxy.CheckTx(ctx, &types.RequestCheckTx{}) + if i == 0 { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + } + + cancel() // this should stop all clients + proxy.Wait() + assert.Equal(t, 5, hst.count["method=info,type=sync,success=true"]) + assert.Equal(t, 1, hst.count["method=check_tx,type=sync,success=false"]) + assert.Equal(t, 2, hst.count["method=check_tx,type=sync,success=true"]) +} + +func mockMetrics() *Metrics { + return &Metrics{ + MethodTiming: &mockMetric{ + labels: []string{}, + count: make(map[string]int), + mtx: &sync.Mutex{}, + }, + } +} + +type mockMetric struct { + labels []string + /// count maps concatenated labels to the count of observations. + count map[string]int + mtx *sync.Mutex +} + +var _ = metrics.Histogram(&mockMetric{}) + +func (m *mockMetric) With(labelValues ...string) metrics.Histogram { + m.mtx.Lock() + defer m.mtx.Unlock() + + return &mockMetric{ + labels: append(m.labels, labelValues...), + count: m.count, + mtx: m.mtx, // pointer, as we use the same m.count + } +} + +func (m *mockMetric) Observe(_value float64) { + m.mtx.Lock() + defer m.mtx.Unlock() + + labels := "" + for i, label := range m.labels { + labels += label + if i < len(m.labels)-1 { + if i%2 == 0 { + labels += "=" + } else { + labels += "," + } + } + } + + m.count[labels]++ +} + +func (m *mockMetric) String() (s string) { + m.mtx.Lock() + defer m.mtx.Unlock() + + for labels, total := range m.count { + s += fmt.Sprintf("%s: %d\n", labels, total) + } + + return s +} diff --git a/internal/proxy/metrics.gen.go b/internal/proxy/metrics.gen.go index ea483f83db..d786740c63 100644 --- a/internal/proxy/metrics.gen.go +++ b/internal/proxy/metrics.gen.go @@ -21,7 +21,7 @@ func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics { Help: "Timing for each ABCI method.", Buckets: []float64{.0001, .0004, .002, .009, .02, .1, .65, 2, 6, 25}, - }, append(labels, "method", "type")).With(labelsAndValues...), + }, append(labels, "method", "type", "success")).With(labelsAndValues...), } } diff --git a/internal/proxy/metrics.go b/internal/proxy/metrics.go index b95687a03b..043b191eae 100644 --- a/internal/proxy/metrics.go +++ b/internal/proxy/metrics.go @@ -15,5 +15,5 @@ const ( // Metrics contains the prometheus metrics exposed by the proxy package. type Metrics struct { // Timing for each ABCI method. - MethodTiming metrics.Histogram `metrics_bucketsizes:".0001,.0004,.002,.009,.02,.1,.65,2,6,25" metrics_labels:"method, type"` + MethodTiming metrics.Histogram `metrics_bucketsizes:".0001,.0004,.002,.009,.02,.1,.65,2,6,25" metrics_labels:"method, type, success"` } diff --git a/internal/pubsub/pubsub.go b/internal/pubsub/pubsub.go index 063aba5681..14dc70e694 100644 --- a/internal/pubsub/pubsub.go +++ b/internal/pubsub/pubsub.go @@ -158,7 +158,7 @@ func (s *Server) BufferCapacity() int { return cap(s.queue) } // being forwarded to any subscriber. If no queries are specified, all // messages will be observed. An error is reported if an observer is already // registered. -func (s *Server) Observe(ctx context.Context, observe func(Message) error, queries ...*query.Query) error { +func (s *Server) Observe(_ctx context.Context, observe func(Message) error, queries ...*query.Query) error { s.subs.Lock() defer s.subs.Unlock() if observe == nil { @@ -194,7 +194,7 @@ func (s *Server) Observe(ctx context.Context, observe func(Message) error, queri // SubscribeWithArgs creates a subscription for the given arguments. 
It is an // error if the query is nil, a subscription already exists for the specified // client ID and query, or if the capacity arguments are invalid. -func (s *Server) SubscribeWithArgs(ctx context.Context, args SubscribeArgs) (*Subscription, error) { +func (s *Server) SubscribeWithArgs(_ctx context.Context, args SubscribeArgs) (*Subscription, error) { s.subs.Lock() defer s.subs.Unlock() @@ -222,7 +222,7 @@ func (s *Server) SubscribeWithArgs(ctx context.Context, args SubscribeArgs) (*Su // Unsubscribe removes the subscription for the given client and/or query. It // returns ErrSubscriptionNotFound if no such subscription exists. -func (s *Server) Unsubscribe(ctx context.Context, args UnsubscribeArgs) error { +func (s *Server) Unsubscribe(_ctx context.Context, args UnsubscribeArgs) error { if err := args.Validate(); err != nil { return err } @@ -257,7 +257,7 @@ func (s *Server) Unsubscribe(ctx context.Context, args UnsubscribeArgs) error { // UnsubscribeAll removes all subscriptions for the given client ID. // It returns ErrSubscriptionNotFound if no subscriptions exist for that client. -func (s *Server) UnsubscribeAll(ctx context.Context, clientID string) error { +func (s *Server) UnsubscribeAll(_ctx context.Context, clientID string) error { s.subs.Lock() defer s.subs.Unlock() diff --git a/internal/rpc/core/blocks.go b/internal/rpc/core/blocks.go index 85e6f79903..434410c77e 100644 --- a/internal/rpc/core/blocks.go +++ b/internal/rpc/core/blocks.go @@ -23,7 +23,7 @@ import ( // order (highest first). // // More: https://docs.tendermint.com/master/rpc/#/Info/blockchain -func (env *Environment) BlockchainInfo(ctx context.Context, req *coretypes.RequestBlockchainInfo) (*coretypes.ResultBlockchainInfo, error) { +func (env *Environment) BlockchainInfo(_ctx context.Context, req *coretypes.RequestBlockchainInfo) (*coretypes.ResultBlockchainInfo, error) { const limit = 20 minHeight, maxHeight, err := filterMinMax( env.BlockStore.Base(), @@ -88,7 +88,7 @@ func filterMinMax(base, height, min, max, limit int64) (int64, int64, error) { // Block gets block at a given height. // If no height is provided, it will fetch the latest block. // More: https://docs.tendermint.com/master/rpc/#/Info/block -func (env *Environment) Block(ctx context.Context, req *coretypes.RequestBlockInfo) (*coretypes.ResultBlock, error) { +func (env *Environment) Block(_ctx context.Context, req *coretypes.RequestBlockInfo) (*coretypes.ResultBlock, error) { height, err := env.getHeight(env.BlockStore.Height(), (*int64)(req.Height)) if err != nil { return nil, err @@ -105,7 +105,7 @@ func (env *Environment) Block(ctx context.Context, req *coretypes.RequestBlockIn // BlockByHash gets block by hash. // More: https://docs.tendermint.com/master/rpc/#/Info/block_by_hash -func (env *Environment) BlockByHash(ctx context.Context, req *coretypes.RequestBlockByHash) (*coretypes.ResultBlock, error) { +func (env *Environment) BlockByHash(_ctx context.Context, req *coretypes.RequestBlockByHash) (*coretypes.ResultBlock, error) { block := env.BlockStore.LoadBlockByHash(req.Hash) if block == nil { return &coretypes.ResultBlock{BlockID: types.BlockID{}, Block: nil}, nil @@ -118,7 +118,7 @@ func (env *Environment) BlockByHash(ctx context.Context, req *coretypes.RequestB // Header gets block header at a given height. // If no height is provided, it will fetch the latest header. 
// More: https://docs.tendermint.com/master/rpc/#/Info/header -func (env *Environment) Header(ctx context.Context, req *coretypes.RequestBlockInfo) (*coretypes.ResultHeader, error) { +func (env *Environment) Header(_ctx context.Context, req *coretypes.RequestBlockInfo) (*coretypes.ResultHeader, error) { height, err := env.getHeight(env.BlockStore.Height(), (*int64)(req.Height)) if err != nil { return nil, err @@ -134,7 +134,7 @@ func (env *Environment) Header(ctx context.Context, req *coretypes.RequestBlockI // HeaderByHash gets header by hash. // More: https://docs.tendermint.com/master/rpc/#/Info/header_by_hash -func (env *Environment) HeaderByHash(ctx context.Context, req *coretypes.RequestBlockByHash) (*coretypes.ResultHeader, error) { +func (env *Environment) HeaderByHash(_ctx context.Context, req *coretypes.RequestBlockByHash) (*coretypes.ResultHeader, error) { blockMeta := env.BlockStore.LoadBlockMetaByHash(req.Hash) if blockMeta == nil { return &coretypes.ResultHeader{}, nil @@ -146,7 +146,7 @@ func (env *Environment) HeaderByHash(ctx context.Context, req *coretypes.Request // Commit gets block commit at a given height. // If no height is provided, it will fetch the commit for the latest block. // More: https://docs.tendermint.com/master/rpc/#/Info/commit -func (env *Environment) Commit(ctx context.Context, req *coretypes.RequestBlockInfo) (*coretypes.ResultCommit, error) { +func (env *Environment) Commit(_ctx context.Context, req *coretypes.RequestBlockInfo) (*coretypes.ResultCommit, error) { height, err := env.getHeight(env.BlockStore.Height(), (*int64)(req.Height)) if err != nil { return nil, err @@ -182,7 +182,7 @@ func (env *Environment) Commit(ctx context.Context, req *coretypes.RequestBlockI // // Results are for the height of the block containing the txs. 
// More: https://docs.tendermint.com/master/rpc/#/Info/block_results -func (env *Environment) BlockResults(ctx context.Context, req *coretypes.RequestBlockInfo) (*coretypes.ResultBlockResults, error) { +func (env *Environment) BlockResults(_ctx context.Context, req *coretypes.RequestBlockInfo) (*coretypes.ResultBlockResults, error) { height, err := env.getHeight(env.BlockStore.Height(), (*int64)(req.Height)) if err != nil { return nil, err @@ -202,7 +202,7 @@ func (env *Environment) BlockResults(ctx context.Context, req *coretypes.Request Height: height, TxsResults: results.ProcessProposal.TxResults, TotalGasUsed: totalGasUsed, - FinalizeBlockEvents: results.FinalizeBlock.Events, + FinalizeBlockEvents: results.ProcessProposal.Events, ValidatorSetUpdate: results.ProcessProposal.ValidatorSetUpdate, ConsensusParamUpdates: consensusParamsPtrFromProtoPtr(results.ProcessProposal.ConsensusParamUpdates), }, nil diff --git a/internal/rpc/core/blocks_test.go b/internal/rpc/core/blocks_test.go index 3138210e28..2b761c3bf3 100644 --- a/internal/rpc/core/blocks_test.go +++ b/internal/rpc/core/blocks_test.go @@ -71,7 +71,6 @@ func TestBlockchainInfo(t *testing.T) { func TestBlockResults(t *testing.T) { results := state.ABCIResponses{ - FinalizeBlock: &abci.ResponseFinalizeBlock{}, ProcessProposal: &abci.ResponseProcessProposal{ TxResults: []*abci.ExecTxResult{ {Code: 0, Data: []byte{0x01}, Log: "ok", GasUsed: 10}, @@ -102,7 +101,7 @@ func TestBlockResults(t *testing.T) { Height: 100, TxsResults: results.ProcessProposal.TxResults, TotalGasUsed: 15, - FinalizeBlockEvents: results.FinalizeBlock.Events, + FinalizeBlockEvents: results.ProcessProposal.Events, ValidatorSetUpdate: results.ProcessProposal.ValidatorSetUpdate, ConsensusParamUpdates: consensusParamsPtrFromProtoPtr(results.ProcessProposal.ConsensusParamUpdates), }}, diff --git a/internal/rpc/core/consensus.go b/internal/rpc/core/consensus.go index ecacb6f1c0..ff356e7d8d 100644 --- a/internal/rpc/core/consensus.go +++ b/internal/rpc/core/consensus.go @@ -15,7 +15,7 @@ import ( // for the validators in the set as used in computing their Merkle root. // // More: https://docs.tendermint.com/master/rpc/#/Info/validators -func (env *Environment) Validators(ctx context.Context, req *coretypes.RequestValidators) (*coretypes.ResultValidators, error) { +func (env *Environment) Validators(_ctx context.Context, req *coretypes.RequestValidators) (*coretypes.ResultValidators, error) { // The latest validator that we know is the NextValidator of the last block. height, err := env.getHeight(env.latestUncommittedHeight(), (*int64)(req.Height)) if err != nil { @@ -55,7 +55,7 @@ func (env *Environment) Validators(ctx context.Context, req *coretypes.RequestVa // DumpConsensusState dumps consensus state. // UNSTABLE // More: https://docs.tendermint.com/master/rpc/#/Info/dump_consensus_state -func (env *Environment) DumpConsensusState(ctx context.Context) (*coretypes.ResultDumpConsensusState, error) { +func (env *Environment) DumpConsensusState(_ctx context.Context) (*coretypes.ResultDumpConsensusState, error) { // Get Peer consensus states. var peerStates []coretypes.PeerStateInfo @@ -97,7 +97,7 @@ func (env *Environment) DumpConsensusState(ctx context.Context) (*coretypes.Resu // ConsensusState returns a concise summary of the consensus state. 
// UNSTABLE // More: https://docs.tendermint.com/master/rpc/#/Info/consensus_state -func (env *Environment) GetConsensusState(ctx context.Context) (*coretypes.ResultConsensusState, error) { +func (env *Environment) GetConsensusState(_ctx context.Context) (*coretypes.ResultConsensusState, error) { // Get self round state. bz, err := env.ConsensusState.GetRoundStateSimpleJSON() return &coretypes.ResultConsensusState{RoundState: bz}, err @@ -106,7 +106,7 @@ func (env *Environment) GetConsensusState(ctx context.Context) (*coretypes.Resul // ConsensusParams gets the consensus parameters at the given block height. // If no height is provided, it will fetch the latest consensus params. // More: https://docs.tendermint.com/master/rpc/#/Info/consensus_params -func (env *Environment) ConsensusParams(ctx context.Context, req *coretypes.RequestConsensusParams) (*coretypes.ResultConsensusParams, error) { +func (env *Environment) ConsensusParams(_ctx context.Context, req *coretypes.RequestConsensusParams) (*coretypes.ResultConsensusParams, error) { // The latest consensus params that we know is the consensus params after // the last block. height, err := env.getHeight(env.latestUncommittedHeight(), (*int64)(req.Height)) diff --git a/internal/rpc/core/dev.go b/internal/rpc/core/dev.go index 8d6b099ec7..8a8669bb98 100644 --- a/internal/rpc/core/dev.go +++ b/internal/rpc/core/dev.go @@ -7,7 +7,7 @@ import ( ) // UnsafeFlushMempool removes all transactions from the mempool. -func (env *Environment) UnsafeFlushMempool(ctx context.Context) (*coretypes.ResultUnsafeFlushMempool, error) { +func (env *Environment) UnsafeFlushMempool(_ctx context.Context) (*coretypes.ResultUnsafeFlushMempool, error) { env.Mempool.Flush() return &coretypes.ResultUnsafeFlushMempool{}, nil } diff --git a/internal/rpc/core/env.go b/internal/rpc/core/env.go index fd16d667d4..77b89f5170 100644 --- a/internal/rpc/core/env.go +++ b/internal/rpc/core/env.go @@ -251,7 +251,7 @@ func (env *Environment) StartService(ctx context.Context, conf *config.Config) ( } go func() { // N.B. Use background for unsubscribe, ctx is already terminated. - defer env.EventBus.UnsubscribeAll(context.Background(), subscriberID) // nolint:errcheck + defer env.EventBus.UnsubscribeAll(context.Background(), subscriberID) //nolint:errcheck for { msg, err := sub.Next(ctx) if err != nil { diff --git a/internal/rpc/core/health.go b/internal/rpc/core/health.go index 438f994859..596df180c0 100644 --- a/internal/rpc/core/health.go +++ b/internal/rpc/core/health.go @@ -9,6 +9,6 @@ import ( // Health gets node health. Returns empty result (200 OK) on success, no // response - in case of an error. 
// More: https://docs.tendermint.com/master/rpc/#/Info/health -func (env *Environment) Health(ctx context.Context) (*coretypes.ResultHealth, error) { +func (env *Environment) Health(_ctx context.Context) (*coretypes.ResultHealth, error) { return &coretypes.ResultHealth{}, nil } diff --git a/internal/rpc/core/mempool.go b/internal/rpc/core/mempool.go index adfd86176f..464e15b3ef 100644 --- a/internal/rpc/core/mempool.go +++ b/internal/rpc/core/mempool.go @@ -22,8 +22,25 @@ import ( // More: // https://docs.tendermint.com/master/rpc/#/Tx/broadcast_tx_async // Deprecated and should be removed in 0.37 -func (env *Environment) BroadcastTxAsync(ctx context.Context, req *coretypes.RequestBroadcastTx) (*coretypes.ResultBroadcastTx, error) { - go func() { _ = env.Mempool.CheckTx(ctx, req.Tx, nil, mempool.TxInfo{}) }() +func (env *Environment) BroadcastTxAsync(_ctx context.Context, req *coretypes.RequestBroadcastTx) (*coretypes.ResultBroadcastTx, error) { + go func() { + var ( + ctx context.Context + cancel context.CancelFunc + ) + // We need to create a new context here, because the original context + // may be canceled after parent function returns. + if env.Config.TimeoutBroadcastTx > 0 { + ctx, cancel = context.WithTimeout(context.Background(), env.Config.TimeoutBroadcastTx) + } else { + ctx, cancel = context.WithCancel(context.Background()) + } + defer cancel() + + if res, err := env.BroadcastTx(ctx, req); err != nil || res.Code != abci.CodeTypeOK { + env.Logger.Error("error on broadcastTxAsync", "err", err, "result", res, "tx", req.Tx.Hash()) + } + }() return &coretypes.ResultBroadcastTx{Hash: req.Tx.Hash()}, nil } @@ -37,6 +54,15 @@ func (env *Environment) BroadcastTxSync(ctx context.Context, req *coretypes.Requ // DeliverTx result. // More: https://docs.tendermint.com/master/rpc/#/Tx/broadcast_tx_sync func (env *Environment) BroadcastTx(ctx context.Context, req *coretypes.RequestBroadcastTx) (*coretypes.ResultBroadcastTx, error) { + var cancel context.CancelFunc + + if env.Config.TimeoutBroadcastTx > 0 { + ctx, cancel = context.WithTimeout(ctx, env.Config.TimeoutBroadcastTx) + } else { + ctx, cancel = context.WithCancel(ctx) + } + defer cancel() + resCh := make(chan *abci.ResponseCheckTx, 1) err := env.Mempool.CheckTx( ctx, @@ -128,7 +154,7 @@ func (env *Environment) BroadcastTxCommit(ctx context.Context, req *coretypes.Re Prove: false, }) if err != nil { - jitter := 100*time.Millisecond + time.Duration(rand.Int63n(int64(time.Second))) // nolint: gosec + jitter := 100*time.Millisecond + time.Duration(rand.Int63n(int64(time.Second))) //nolint:gosec backoff := 100 * time.Duration(count) * time.Millisecond timer.Reset(jitter + backoff) continue @@ -147,7 +173,7 @@ func (env *Environment) BroadcastTxCommit(ctx context.Context, req *coretypes.Re // UnconfirmedTxs gets unconfirmed transactions from the mempool in order of priority // More: https://docs.tendermint.com/master/rpc/#/Info/unconfirmed_txs -func (env *Environment) UnconfirmedTxs(ctx context.Context, req *coretypes.RequestUnconfirmedTxs) (*coretypes.ResultUnconfirmedTxs, error) { +func (env *Environment) UnconfirmedTxs(_ctx context.Context, req *coretypes.RequestUnconfirmedTxs) (*coretypes.ResultUnconfirmedTxs, error) { totalCount := env.Mempool.Size() perPage := env.validatePerPage(req.PerPage.IntPtr()) page, err := validatePage(req.Page.IntPtr(), perPage, totalCount) @@ -170,7 +196,7 @@ func (env *Environment) UnconfirmedTxs(ctx context.Context, req *coretypes.Reque // NumUnconfirmedTxs gets number of unconfirmed transactions. 
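BroadcastTxAsync above deliberately detaches from the caller's request context before doing the work in a goroutine, bounding the detached context with TimeoutBroadcastTx when one is configured. The detach-with-optional-timeout step in isolation; the helper name is made up for the example:

package main

import (
    "context"
    "fmt"
    "time"
)

// detachedContext returns a context that is independent of the incoming RPC
// request context, optionally bounded by a timeout.
func detachedContext(timeout time.Duration) (context.Context, context.CancelFunc) {
    if timeout > 0 {
        return context.WithTimeout(context.Background(), timeout)
    }
    return context.WithCancel(context.Background())
}

func main() {
    reqCtx, cancelReq := context.WithCancel(context.Background())
    cancelReq() // the RPC request context may be gone as soon as the handler returns

    ctx, cancel := detachedContext(100 * time.Millisecond)
    defer cancel()

    fmt.Println("request context error:", reqCtx.Err())          // context canceled
    fmt.Println("detached context still alive:", ctx.Err() == nil) // true
}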
// More: https://docs.tendermint.com/master/rpc/#/Info/num_unconfirmed_txs -func (env *Environment) NumUnconfirmedTxs(ctx context.Context) (*coretypes.ResultUnconfirmedTxs, error) { +func (env *Environment) NumUnconfirmedTxs(_ctx context.Context) (*coretypes.ResultUnconfirmedTxs, error) { return &coretypes.ResultUnconfirmedTxs{ Count: env.Mempool.Size(), Total: env.Mempool.Size(), @@ -188,6 +214,6 @@ func (env *Environment) CheckTx(ctx context.Context, req *coretypes.RequestCheck return &coretypes.ResultCheckTx{ResponseCheckTx: *res}, nil } -func (env *Environment) RemoveTx(ctx context.Context, req *coretypes.RequestRemoveTx) error { +func (env *Environment) RemoveTx(_ctx context.Context, req *coretypes.RequestRemoveTx) error { return env.Mempool.RemoveTxByKey(req.TxKey) } diff --git a/internal/rpc/core/net.go b/internal/rpc/core/net.go index 3914ecb85e..c38520e2ba 100644 --- a/internal/rpc/core/net.go +++ b/internal/rpc/core/net.go @@ -10,7 +10,7 @@ import ( // NetInfo returns network info. // More: https://docs.tendermint.com/master/rpc/#/Info/net_info -func (env *Environment) NetInfo(ctx context.Context) (*coretypes.ResultNetInfo, error) { +func (env *Environment) NetInfo(_ctx context.Context) (*coretypes.ResultNetInfo, error) { peerList := env.PeerManager.Peers() peers := make([]coretypes.Peer, 0, len(peerList)) @@ -36,7 +36,7 @@ func (env *Environment) NetInfo(ctx context.Context) (*coretypes.ResultNetInfo, // Genesis returns genesis file. // More: https://docs.tendermint.com/master/rpc/#/Info/genesis -func (env *Environment) Genesis(ctx context.Context) (*coretypes.ResultGenesis, error) { +func (env *Environment) Genesis(_ctx context.Context) (*coretypes.ResultGenesis, error) { if len(env.genChunks) > 1 { return nil, errors.New("genesis response is large, please use the genesis_chunked API instead") } @@ -44,7 +44,7 @@ func (env *Environment) Genesis(ctx context.Context) (*coretypes.ResultGenesis, return &coretypes.ResultGenesis{Genesis: env.GenDoc}, nil } -func (env *Environment) GenesisChunked(ctx context.Context, req *coretypes.RequestGenesisChunked) (*coretypes.ResultGenesisChunk, error) { +func (env *Environment) GenesisChunked(_ctx context.Context, req *coretypes.RequestGenesisChunked) (*coretypes.ResultGenesisChunk, error) { if env.genChunks == nil { return nil, fmt.Errorf("service configuration error, genesis chunks are not initialized") } diff --git a/internal/rpc/core/tx.go b/internal/rpc/core/tx.go index 1258ae41ee..527d2142b3 100644 --- a/internal/rpc/core/tx.go +++ b/internal/rpc/core/tx.go @@ -17,7 +17,7 @@ import ( // transaction is in the mempool, invalidated, or was not sent in the first // place. 
// More: https://docs.tendermint.com/master/rpc/#/Info/tx -func (env *Environment) Tx(ctx context.Context, req *coretypes.RequestTx) (*coretypes.ResultTx, error) { +func (env *Environment) Tx(_ctx context.Context, req *coretypes.RequestTx) (*coretypes.ResultTx, error) { // if index is disabled, return error if !indexer.KVSinkEnabled(env.EventSinks) { return nil, errors.New("transaction querying is disabled due to no kvEventSink") diff --git a/internal/state/current_round_state.go b/internal/state/current_round_state.go index b88ed95a59..f16bc353d7 100644 --- a/internal/state/current_round_state.go +++ b/internal/state/current_round_state.go @@ -4,6 +4,8 @@ import ( "bytes" "fmt" + "github.com/rs/zerolog" + abci "github.com/dashpay/tenderdash/abci/types" tmbytes "github.com/dashpay/tenderdash/libs/bytes" tmtypes "github.com/dashpay/tenderdash/proto/tendermint/types" @@ -193,7 +195,7 @@ func (candidate *CurrentRoundState) populateValsetUpdates() error { base := candidate.Base - newValSet, err := valsetUpdate(candidate.ProTxHash, update, base.Validators, candidate.NextConsensusParams.Validator) + newValSet, err := valsetUpdate(update, base.Validators, candidate.NextConsensusParams.Validator) if err != nil { return fmt.Errorf("validator set updates: %w", err) } @@ -236,6 +238,30 @@ func (rp RoundParams) ToProcessProposal() *abci.ResponseProcessProposal { } } +func (rp *RoundParams) MarshalZerologObject(e *zerolog.Event) { + if rp == nil { + e.Bool("nil", true) + return + } + + e.Str("app_hash", rp.AppHash.ShortString()) + + e.Int("tx_results_len", len(rp.TxResults)) + for i, txResult := range rp.TxResults { + e.Interface(fmt.Sprintf("tx_result[%d]", i), txResult) + if i >= 20 { + e.Str("tx_result[...]", "...") + break + } + } + + e.Interface("consensus_param_updates", rp.ConsensusParamUpdates) + e.Interface("validator_set_update", rp.ValidatorSetUpdate) + e.Interface("core_chain_lock", rp.CoreChainLock) + e.Str("source", rp.Source) + e.Int32("round", rp.Round) +} + // RoundParamsFromPrepareProposal creates RoundParams from ResponsePrepareProposal func RoundParamsFromPrepareProposal(resp *abci.ResponsePrepareProposal, round int32) (RoundParams, error) { rp := RoundParams{ @@ -287,7 +313,6 @@ func RoundParamsFromInitChain(resp *abci.ResponseInitChain) (RoundParams, error) // valsetUpdate processes validator set updates received from ABCI app. 
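RoundParams gains a MarshalZerologObject method above, so zerolog can render it as a structured object instead of a flat string. The same interface on a toy type, with illustrative field names:

package main

import (
    "os"

    "github.com/rs/zerolog"
)

type roundInfo struct {
    Source string
    Round  int32
}

// MarshalZerologObject implements zerolog.LogObjectMarshaler.
func (ri *roundInfo) MarshalZerologObject(e *zerolog.Event) {
    if ri == nil {
        e.Bool("nil", true)
        return
    }
    e.Str("source", ri.Source)
    e.Int32("round", ri.Round)
}

func main() {
    logger := zerolog.New(os.Stdout)
    logger.Info().
        Object("round_params", &roundInfo{Source: "prepare-proposal", Round: 1}).
        Msg("round state")
}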
func valsetUpdate( - nodeProTxHash types.ProTxHash, vu *abci.ValidatorSetUpdate, currentVals *types.ValidatorSet, params types.ValidatorParams, @@ -313,9 +338,19 @@ func valsetUpdate( } } else { // if we don't have proTxHash, NewValidatorSetWithLocalNodeProTxHash behaves like NewValidatorSet - nValSet = types.NewValidatorSetWithLocalNodeProTxHash(validatorUpdates, thresholdPubKey, - currentVals.QuorumType, quorumHash, nodeProTxHash) + nValSet = types.NewValidatorSetCheckPublicKeys(validatorUpdates, thresholdPubKey, + currentVals.QuorumType, quorumHash) + } + } else { + // validators not changed, but we might have a new quorum hash or threshold public key + if !quorumHash.IsZero() { + nValSet.QuorumHash = quorumHash + } + + if thresholdPubKey != nil { + nValSet.ThresholdPublicKey = thresholdPubKey } } + return nValSet, nil } diff --git a/internal/state/events.go b/internal/state/events.go index 50c55e1ee6..104ca2e5aa 100644 --- a/internal/state/events.go +++ b/internal/state/events.go @@ -23,14 +23,13 @@ func NewFullEventSet( block *types.Block, blockID types.BlockID, uncommittedState CurrentRoundState, - fbResp *abci.ResponseFinalizeBlock, validatorsSet *types.ValidatorSet, ) EventSet { - rpp := uncommittedState.Params.ToProcessProposal() + responseProcessProposal := uncommittedState.Params.ToProcessProposal() es := EventSet{} es. - WithNewBlock(block, blockID, *fbResp). - WthNewBlockHeader(block, *rpp, *fbResp). + WithNewBlock(block, blockID, *responseProcessProposal). + WthNewBlockHeader(block, *responseProcessProposal). WithNewEvidences(block). WithTxs(block, uncommittedState.TxResults). WithValidatorSetUpdate(validatorsSet) @@ -41,12 +40,12 @@ func NewFullEventSet( func (e *EventSet) WithNewBlock( block *types.Block, blockID types.BlockID, - fbResp abci.ResponseFinalizeBlock, + responseProcessProposal abci.ResponseProcessProposal, ) *EventSet { e.NewBlock = &types.EventDataNewBlock{ - Block: block, - BlockID: blockID, - ResultFinalizeBlock: fbResp, + Block: block, + BlockID: blockID, + ResultProcessProposal: responseProcessProposal, } return e } @@ -55,13 +54,11 @@ func (e *EventSet) WithNewBlock( func (e *EventSet) WthNewBlockHeader( block *types.Block, ppResp abci.ResponseProcessProposal, - fbResp abci.ResponseFinalizeBlock, ) *EventSet { e.NewBlockHeader = &types.EventDataNewBlockHeader{ Header: block.Header, NumTxs: int64(len(block.Txs)), ResultProcessProposal: ppResp, - ResultFinalizeBlock: fbResp, } return e } diff --git a/internal/state/events_test.go b/internal/state/events_test.go index 14e9486b95..2b4492898d 100644 --- a/internal/state/events_test.go +++ b/internal/state/events_test.go @@ -32,9 +32,8 @@ func TestEventSetError(t *testing.T) { Params: RoundParams{}, TxResults: []*abci.ExecTxResult{{}}, } - fbResp := abci.ResponseFinalizeBlock{} validatorSet := &types.ValidatorSet{} - es := NewFullEventSet(block, blockID, ucs, &fbResp, validatorSet) + es := NewFullEventSet(block, blockID, ucs, validatorSet) publisher := &mocks.BlockEventPublisher{} publisher. On("PublishEventNewBlock", mock.Anything). @@ -75,15 +74,14 @@ func TestEventSet(t *testing.T) { } blockID := types.BlockID{} ppResp := abci.ResponseProcessProposal{} - fbResp := abci.ResponseFinalizeBlock{} validatorSet := &types.ValidatorSet{} txResults := []*abci.ExecTxResult{{}, {}} publisher := &mocks.BlockEventPublisher{} publisher. 
On("PublishEventNewBlock", types.EventDataNewBlock{ - Block: block, - BlockID: blockID, - ResultFinalizeBlock: fbResp, + Block: block, + BlockID: blockID, + ResultProcessProposal: ppResp, }). Once(). Return(nil) @@ -92,7 +90,6 @@ func TestEventSet(t *testing.T) { Header: block.Header, NumTxs: 2, ResultProcessProposal: ppResp, - ResultFinalizeBlock: fbResp, }). Once(). Return(nil) @@ -142,8 +139,8 @@ func TestEventSet(t *testing.T) { Return(nil) es := EventSet{} es. - WithNewBlock(block, blockID, fbResp). - WthNewBlockHeader(block, ppResp, fbResp). + WithNewBlock(block, blockID, ppResp). + WthNewBlockHeader(block, ppResp). WithValidatorSetUpdate(validatorSet). WithNewEvidences(block). WithTxs(block, txResults) diff --git a/internal/state/execution.go b/internal/state/execution.go index e1d7bd220c..fbcbd2620e 100644 --- a/internal/state/execution.go +++ b/internal/state/execution.go @@ -5,6 +5,7 @@ package state import ( "bytes" "context" + "encoding/hex" "errors" "fmt" "time" @@ -103,6 +104,10 @@ type BlockExecutor struct { // cache the verification results over a single height cache map[string]struct{} + + // detect non-deterministic prepare proposal responses + lastRequestPrepareProposalHash []byte + lastResponsePrepareProposalHash []byte } // BlockExecWithLogger is an option function to set a logger to BlockExecutor @@ -205,30 +210,30 @@ func (blockExec *BlockExecutor) CreateProposalBlock( } txs := blockExec.mempool.ReapMaxBytesMaxGas(maxDataBytes, maxGas) + numRequestedTxs := txs.Len() block := state.MakeBlock(height, txs, commit, evidence, proposerProTxHash, proposedAppVersion) localLastCommit := buildLastCommitInfo(block, state.InitialHeight) version := block.Version.ToProto() - rpp, err := blockExec.appClient.PrepareProposal( - ctx, - &abci.RequestPrepareProposal{ - MaxTxBytes: maxDataBytes, - Txs: block.Txs.ToSliceOfBytes(), - LocalLastCommit: localLastCommit, - Misbehavior: block.Evidence.ToABCI(), - Height: block.Height, - Round: round, - Time: block.Time, - NextValidatorsHash: block.NextValidatorsHash, - - // Dash's fields - CoreChainLockedHeight: state.LastCoreChainLockedBlockHeight, - ProposerProTxHash: block.ProposerProTxHash, - ProposedAppVersion: block.ProposedAppVersion, - Version: &version, - QuorumHash: state.Validators.QuorumHash, - }, - ) + request := abci.RequestPrepareProposal{ + MaxTxBytes: maxDataBytes, + Txs: block.Txs.ToSliceOfBytes(), + LocalLastCommit: localLastCommit, + Misbehavior: block.Evidence.ToABCI(), + Height: block.Height, + Round: round, + Time: block.Time, + NextValidatorsHash: block.NextValidatorsHash, + + // Dash's fields + CoreChainLockedHeight: state.LastCoreChainLockedBlockHeight, + ProposerProTxHash: block.ProposerProTxHash, + ProposedAppVersion: block.ProposedAppVersion, + Version: &version, + QuorumHash: state.Validators.QuorumHash, + } + start := time.Now() + response, err := blockExec.appClient.PrepareProposal(ctx, &request) if err != nil { // The App MUST ensure that only valid (and hence 'processable') transactions // enter the mempool. 
Hence, at this point, we can't have any non-processable @@ -241,11 +246,31 @@ func (blockExec *BlockExecutor) CreateProposalBlock( return nil, CurrentRoundState{}, err } - if err := rpp.Validate(); err != nil { + // ensure that the proposal response is deterministic + reqHash, respHash := deterministicPrepareProposalHashes(&request, response) + blockExec.logger.Debug("PrepareProposal executed", + "request_hash", hex.EncodeToString(reqHash), + "response_hash", hex.EncodeToString(respHash), + "height", height, + "round", round, + "requested_txs", numRequestedTxs, + "took", time.Since(start).String(), + ) + if bytes.Equal(blockExec.lastRequestPrepareProposalHash, reqHash) && + !bytes.Equal(blockExec.lastResponsePrepareProposalHash, respHash) { + // we don't panic here, as we don't want to break this node and + // remove it from the voting quorum + blockExec.logger.Error("PrepareProposal response is non-deterministic", + "request_hash", hex.EncodeToString(reqHash), + "response_hash", hex.EncodeToString(respHash), + ) + } + + if err := response.Validate(); err != nil { return nil, CurrentRoundState{}, fmt.Errorf("PrepareProposal responded with invalid response: %w", err) } - txrSet := types.NewTxRecordSet(rpp.TxRecords) + txrSet := types.NewTxRecordSet(response.TxRecords) if err := txrSet.Validate(maxDataBytes, block.Txs); err != nil { return nil, CurrentRoundState{}, err @@ -258,13 +283,17 @@ func (blockExec *BlockExecutor) CreateProposalBlock( } itxs := txrSet.IncludedTxs() - if err := validateExecTxResults(rpp.TxResults, itxs); err != nil { + if err := validateExecTxResults(response.TxResults, itxs); err != nil { return nil, CurrentRoundState{}, fmt.Errorf("invalid tx results: %w", err) } block.SetTxs(itxs) - rp, err := RoundParamsFromPrepareProposal(rpp, round) + if ver := response.GetAppVersion(); ver > 0 { + block.Version.App = ver + } + + rp, err := RoundParamsFromPrepareProposal(response, round) if err != nil { return nil, CurrentRoundState{}, err } @@ -281,6 +310,23 @@ func (blockExec *BlockExecutor) CreateProposalBlock( return block, stateChanges, nil } +func deterministicPrepareProposalHashes(request *abci.RequestPrepareProposal, response *abci.ResponsePrepareProposal) (requestHash, responseHash []byte) { + deterministicReq := *request + deterministicReq.Round = 0 + + reqBytes, err := deterministicReq.Marshal() + if err != nil { + // should never happen, as the request was already marshaled successfully when it was sent to the app + panic("failed to marshal RequestPrepareProposal: " + err.Error()) + } + respBytes, err := response.Marshal() + if err != nil { + // should never happen, as the response was already unmarshaled successfully when it was received from the app + panic("failed to marshal ResponsePrepareProposal: " + err.Error()) + } + return crypto.Checksum(reqBytes), crypto.Checksum(respBytes) +} + // ProcessProposal sends the proposal to ABCI App and verifies the response func (blockExec *BlockExecutor) ProcessProposal( ctx context.Context, @@ -314,12 +360,12 @@ func (blockExec *BlockExecutor) ProcessProposal( if resp.IsStatusUnknown() { return CurrentRoundState{}, fmt.Errorf("ProcessProposal responded with status %s", resp.Status.String()) } - if err := resp.Validate(); err != nil { - return CurrentRoundState{}, fmt.Errorf("ProcessProposal responded with invalid response: %w", err) - } if !resp.IsAccepted() { return CurrentRoundState{}, ErrBlockRejected } + if err := resp.Validate(); err != nil { + return CurrentRoundState{}, fmt.Errorf("ProcessProposal responded with invalid response: %w", err) + } if err := validateExecTxResults(resp.TxResults, block.Data.Txs);
err != nil { return CurrentRoundState{}, fmt.Errorf("invalid tx results: %w", err) } @@ -446,30 +492,25 @@ func (blockExec *BlockExecutor) FinalizeBlock( if err := blockExec.ValidateBlockWithRoundState(ctx, state, uncommittedState, block); err != nil { return state, ErrInvalidBlock{err} } - startTime := time.Now().UnixNano() - fbResp, err := execBlockWithoutState(ctx, blockExec.appClient, block, commit, blockExec.logger) - if err != nil { - return state, ErrInvalidBlock{err} - } - endTime := time.Now().UnixNano() - blockExec.metrics.BlockProcessingTime.Observe(float64(endTime-startTime) / 1000000) - if err != nil { - return state, ErrProxyAppConn(err) - } - // Save the results before we commit. - // We need to save Prepare/ProcessProposal AND FinalizeBlock responses, as we don't have details like validators - // in FinalizeResponse. + // Save ResponseProcessProposal before FinalizeBlock to be able to recover when app state is ahead of tenderdash state + // (eg. when tenderdash fails just after receiving ResponseFinalizeBlock). abciResponses := tmstate.ABCIResponses{ ProcessProposal: uncommittedState.Params.ToProcessProposal(), - FinalizeBlock: fbResp, } if err := blockExec.store.SaveABCIResponses(block.Height, abciResponses); err != nil { return state, err } - state, err = state.Update(blockID, &block.Header, &uncommittedState) + startTime := time.Now().UnixNano() + fbResp, err := execBlockWithoutState(ctx, blockExec.appClient, block, commit, blockExec.logger) if err != nil { + return state, ErrInvalidBlock{err} + } + endTime := time.Now().UnixNano() + blockExec.metrics.BlockProcessingTime.Observe(float64(endTime-startTime) / 1000000) + + if state, err = state.Update(blockID, &block.Header, &uncommittedState); err != nil { return state, fmt.Errorf("commit failed for application: %w", err) } @@ -482,7 +523,7 @@ func (blockExec *BlockExecutor) FinalizeBlock( // Update evpool with the latest state. blockExec.evpool.Update(ctx, state, block.Evidence) - if err := blockExec.store.Save(state); err != nil { + if err = blockExec.store.Save(state); err != nil { return state, err } @@ -494,9 +535,8 @@ func (blockExec *BlockExecutor) FinalizeBlock( // Events are fired after everything else. 
// NOTE: if we crash between Commit and Save, events wont be fired during replay - es := NewFullEventSet(block, blockID, uncommittedState, fbResp, state.Validators) - err = es.Publish(blockExec.eventPublisher) - if err != nil { + es := NewFullEventSet(block, blockID, uncommittedState, state.Validators) + if err = es.Publish(blockExec.eventPublisher); err != nil { blockExec.logger.Error("failed publishing event", "err", err) } @@ -615,7 +655,7 @@ func buildLastCommitInfo(block *types.Block, initialHeight int64) abci.CommitInf Round: block.LastCommit.Round, QuorumHash: block.LastCommit.QuorumHash, BlockSignature: block.LastCommit.ThresholdBlockSignature, - ThresholdVoteExtensions: types.ThresholdExtensionSignToProto(block.LastCommit.ThresholdVoteExtensions), + ThresholdVoteExtensions: block.LastCommit.ThresholdVoteExtensions, } } @@ -673,9 +713,7 @@ func execBlock( } blockID := block.BlockID(nil) protoBlockID := blockID.ToProto() - if err != nil { - return nil, err - } + responseFinalizeBlock, err := appConn.FinalizeBlock( ctx, &abci.RequestFinalizeBlock{ @@ -692,7 +730,7 @@ func execBlock( logger.Error("executing block", "err", err) return responseFinalizeBlock, err } - logger.Info("executed block", "height", block.Height) + logger.Debug("executed block", "height", block.Height) return responseFinalizeBlock, nil } diff --git a/internal/state/execution_test.go b/internal/state/execution_test.go index cdcdbf2bb4..deeca658b2 100644 --- a/internal/state/execution_test.go +++ b/internal/state/execution_test.go @@ -88,7 +88,7 @@ func TestApplyBlock(t *testing.T) { consensusParamsBefore := state.ConsensusParams validatorsBefore := state.Validators.Hash() - block, err := sf.MakeBlock(state, 1, new(types.Commit), 1) + block, err := sf.MakeBlock(state, 1, new(types.Commit), 2) require.NoError(t, err) bps, err := block.MakePartSet(testPartSize) require.NoError(t, err) @@ -100,8 +100,8 @@ func TestApplyBlock(t *testing.T) { // State for next block // nextState, err := state.NewStateChangeset(ctx, nil) // require.NoError(t, err) - assert.EqualValues(t, 0, block.Version.App, "App version should not change in current block") - assert.EqualValues(t, 1, block.ProposedAppVersion, "Block should propose new version") + assert.EqualValues(t, 1, block.Version.App, "App version should not change in current block") + assert.EqualValues(t, 2, block.ProposedAppVersion, "Block should propose new version") assert.Equal(t, consensusParamsBefore.HashConsensusParams(), block.ConsensusHash, "consensus params should change in next block") assert.Equal(t, validatorsBefore, block.ValidatorsHash, "validators should change from the next block") @@ -236,7 +236,7 @@ func TestProcessProposal(t *testing.T) { Signature: make([]byte, bls12381.SignatureSize), } - lastCommit := types.NewCommit(height-1, 0, types.BlockID{}, nil) + lastCommit := types.NewCommit(height-1, 0, types.BlockID{}, nil, nil) block1, err := sf.MakeBlock(state, height, lastCommit, 1) require.NoError(t, err) block1.SetCoreChainLock(&coreChainLockUpdate) @@ -316,7 +316,7 @@ func TestUpdateConsensusParams(t *testing.T) { eventBus, ) - lastCommit := types.NewCommit(height-1, 0, types.BlockID{}, nil) + lastCommit := types.NewCommit(height-1, 0, types.BlockID{}, nil, nil) txResults := factory.ExecTxResults(txs) app.On("PrepareProposal", mock.Anything, mock.Anything).Return(&abci.ResponsePrepareProposal{ @@ -324,6 +324,7 @@ func TestUpdateConsensusParams(t *testing.T) { AppHash: rand.Bytes(crypto.DefaultAppHashSize), TxResults: txResults, ConsensusParamUpdates: 
&tmtypes.ConsensusParams{Block: &tmtypes.BlockParams{MaxBytes: 1024 * 1024}}, + AppVersion: 1, }, nil).Once() block1, _, err := blockExec.CreateProposalBlock( ctx, @@ -350,6 +351,81 @@ func TestUpdateConsensusParams(t *testing.T) { app.AssertCalled(t, "ProcessProposal", mock.Anything, mock.Anything) } +// TestOverrideAppVersion ensures that app_version set in PrepareProposal overrides the one in the block +// and is passed to ProcessProposal. +func TestOverrideAppVersion(t *testing.T) { + const ( + height = 1 + round = int32(12) + appVersion = uint64(12345) + ) + txs := factory.MakeNTxs(height, 10) + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + app := abcimocks.NewApplication(t) + logger := log.NewNopLogger() + cc := abciclient.NewLocalClient(logger, app) + proxyApp := proxy.New(cc, logger, proxy.NopMetrics()) + err := proxyApp.Start(ctx) + require.NoError(t, err) + + state, stateDB, _ := makeState(t, 1, height) + stateStore := sm.NewStore(stateDB) + blockStore := store.NewBlockStore(dbm.NewMemDB()) + + eventBus := eventbus.NewDefault(logger) + require.NoError(t, eventBus.Start(ctx)) + + pool := new(mpmocks.Mempool) + pool.On("ReapMaxBytesMaxGas", mock.Anything, mock.Anything).Return(txs).Once() + + blockExec := sm.NewBlockExecutor( + stateStore, + proxyApp, + pool, + sm.EmptyEvidencePool{}, + blockStore, + eventBus, + ) + + lastCommit := types.NewCommit(height-1, 0, types.BlockID{}, nil, nil) + txResults := factory.ExecTxResults(txs) + + app.On("PrepareProposal", mock.Anything, mock.Anything).Return(&abci.ResponsePrepareProposal{ + TxRecords: txsToTxRecords(txs), + AppHash: rand.Bytes(crypto.DefaultAppHashSize), + TxResults: txResults, + AppVersion: appVersion, + }, nil).Once() + + block1, _, err := blockExec.CreateProposalBlock( + ctx, + height, + round, + state, + lastCommit, + state.Validators.GetProposer().ProTxHash, + 1, + ) + require.NoError(t, err) + + app.On("ProcessProposal", mock.Anything, + mock.MatchedBy(func(r *abci.RequestProcessProposal) bool { + return r.Version.App == appVersion + })).Return(&abci.ResponseProcessProposal{ + AppHash: block1.AppHash, + TxResults: txResults, + Status: abci.ResponseProcessProposal_ACCEPT, + }, nil).Once() + + _, err = blockExec.ProcessProposal(ctx, block1, round, state, true) + require.NoError(t, err) + assert.EqualValues(t, appVersion, block1.Version.App, "App version should be overridden by PrepareProposal") + + app.AssertExpectations(t) +} + func TestValidateValidatorUpdates(t *testing.T) { pubkey1 := bls12381.GenPrivKey().PubKey() pubkey2 := bls12381.GenPrivKey().PubKey() @@ -601,7 +677,7 @@ func TestFinalizeBlockValidatorUpdates(t *testing.T) { 1, round, state, - types.NewCommit(state.LastBlockHeight, 0, state.LastBlockID, nil), + types.NewCommit(state.LastBlockHeight, 0, state.LastBlockID, nil, nil), proTxHashes[0], 1, ) @@ -715,7 +791,7 @@ func TestEmptyPrepareProposal(t *testing.T) { }) app.On("PrepareProposal", mock.Anything, reqPrepProposalMatch).Return(&abci.ResponsePrepareProposal{ - AppHash: make([]byte, crypto.DefaultAppHashSize), + AppHash: make([]byte, crypto.DefaultAppHashSize), AppVersion: 1, }, nil) cc := abciclient.NewLocalClient(logger, app) proxyApp := proxy.New(cc, logger, proxy.NopMetrics()) @@ -783,8 +859,9 @@ func TestPrepareProposalErrorOnNonExistingRemoved(t *testing.T) { Tx: []byte("new tx"), }, }, - TxResults: []*abci.ExecTxResult{{}}, - AppHash: make([]byte, crypto.DefaultAppHashSize), + TxResults: []*abci.ExecTxResult{{}}, + AppHash: make([]byte, crypto.DefaultAppHashSize), + 
AppVersion: 1, } app.On("PrepareProposal", mock.Anything, mock.Anything).Return(rpp, nil) @@ -841,9 +918,10 @@ func TestPrepareProposalRemoveTxs(t *testing.T) { app := abcimocks.NewApplication(t) app.On("PrepareProposal", mock.Anything, mock.Anything).Return(&abci.ResponsePrepareProposal{ - TxRecords: trs, - TxResults: txResults, - AppHash: make([]byte, crypto.DefaultAppHashSize), + TxRecords: trs, + TxResults: txResults, + AppHash: make([]byte, crypto.DefaultAppHashSize), + AppVersion: 1, }, nil) cc := abciclient.NewLocalClient(logger, app) @@ -873,6 +951,67 @@ func TestPrepareProposalRemoveTxs(t *testing.T) { mp.AssertExpectations(t) } +// TestPrepareProposalDelayedTxs tests that any transactions marked as DELAYED +// are not included in the block produced by CreateProposalBlock. The test also +// ensures that delayed transactions are NOT removed from the mempool. +func TestPrepareProposalDelayedTxs(t *testing.T) { + const height = 2 + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + logger := log.NewNopLogger() + eventBus := eventbus.NewDefault(logger) + require.NoError(t, eventBus.Start(ctx)) + + state, stateDB, privVals := makeState(t, 1, height) + stateStore := sm.NewStore(stateDB) + + evpool := &mocks.EvidencePool{} + evpool.On("PendingEvidence", mock.Anything).Return([]types.Evidence{}, int64(0)) + + txs := factory.MakeNTxs(height, 10) + // the first 2 transactions are delayed and excluded from the block, so results only contain info about the remaining 8 txs + txResults := factory.ExecTxResults(txs[2:]) + mp := &mpmocks.Mempool{} + mp.On("ReapMaxBytesMaxGas", mock.Anything, mock.Anything).Return(txs) + + trs := txsToTxRecords(txs) + trs[0].Action = abci.TxRecord_DELAYED + trs[1].Action = abci.TxRecord_DELAYED + + app := abcimocks.NewApplication(t) + app.On("PrepareProposal", mock.Anything, mock.Anything).Return(&abci.ResponsePrepareProposal{ + TxRecords: trs, + TxResults: txResults, + AppHash: make([]byte, crypto.DefaultAppHashSize), + AppVersion: 1, + }, nil) + + cc := abciclient.NewLocalClient(logger, app) + proxyApp := proxy.New(cc, logger, proxy.NopMetrics()) + err := proxyApp.Start(ctx) + require.NoError(t, err) + + blockExec := sm.NewBlockExecutor( + stateStore, + proxyApp, + mp, + evpool, + nil, + eventBus, + ) + val := state.Validators.GetByIndex(0) + commit, _ := makeValidCommit(ctx, t, height, types.BlockID{}, state.Validators, privVals) + block, _, err := blockExec.CreateProposalBlock(ctx, height, 0, state, commit, val.ProTxHash, 0) + require.NoError(t, err) + require.Len(t, block.Data.Txs.ToSliceOfBytes(), len(trs)-2) + + require.Equal(t, -1, block.Data.Txs.Index(types.Tx(trs[0].Tx))) + require.Equal(t, -1, block.Data.Txs.Index(types.Tx(trs[1].Tx))) + + mp.AssertExpectations(t) +} + // TestPrepareProposalAddedTxsIncluded tests that any transactions marked as ADDED // in the prepare proposal response are included in the block.
func TestPrepareProposalAddedTxsIncluded(t *testing.T) { @@ -902,9 +1041,10 @@ func TestPrepareProposalAddedTxsIncluded(t *testing.T) { app := abcimocks.NewApplication(t) app.On("PrepareProposal", mock.Anything, mock.Anything).Return(&abci.ResponsePrepareProposal{ - AppHash: make([]byte, crypto.DefaultAppHashSize), - TxRecords: trs, - TxResults: txres, + AppHash: make([]byte, crypto.DefaultAppHashSize), + TxRecords: trs, + TxResults: txres, + AppVersion: 1, }, nil) cc := abciclient.NewLocalClient(logger, app) @@ -962,9 +1102,10 @@ func TestPrepareProposalReorderTxs(t *testing.T) { app := abcimocks.NewApplication(t) app.On("PrepareProposal", mock.Anything, mock.Anything).Return(&abci.ResponsePrepareProposal{ - AppHash: make([]byte, crypto.DefaultAppHashSize), - TxRecords: trs, - TxResults: txresults, + AppHash: make([]byte, crypto.DefaultAppHashSize), + TxRecords: trs, + TxResults: txresults, + AppVersion: 1, }, nil) cc := abciclient.NewLocalClient(logger, app) @@ -1027,9 +1168,10 @@ func TestPrepareProposalErrorOnTooManyTxs(t *testing.T) { app := abcimocks.NewApplication(t) app.On("PrepareProposal", mock.Anything, mock.Anything).Return(&abci.ResponsePrepareProposal{ - TxRecords: trs, - TxResults: factory.ExecTxResults(txs), - AppHash: make([]byte, crypto.DefaultAppHashSize), + TxRecords: trs, + TxResults: factory.ExecTxResults(txs), + AppHash: make([]byte, crypto.DefaultAppHashSize), + AppVersion: 1, }, nil) cc := abciclient.NewLocalClient(logger, app) diff --git a/internal/state/helpers_test.go b/internal/state/helpers_test.go index 699f855b56..2aa8e74713 100644 --- a/internal/state/helpers_test.go +++ b/internal/state/helpers_test.go @@ -102,6 +102,7 @@ func makeValidCommit( return types.NewCommit( height, 0, blockID, + votes[0].VoteExtensions, &types.CommitSigns{ QuorumSigns: *thresholdSigns, QuorumHash: vals.QuorumHash, @@ -196,7 +197,7 @@ func makeRandomStateFromValidatorSet( } } func makeRandomStateFromConsensusParams( - ctx context.Context, + _ctx context.Context, t *testing.T, consensusParams *types.ConsensusParams, height, @@ -226,23 +227,21 @@ type testApp struct { var _ abci.Application = (*testApp)(nil) -func (app *testApp) Info(_ context.Context, req *abci.RequestInfo) (*abci.ResponseInfo, error) { +func (app *testApp) Info(_ context.Context, _req *abci.RequestInfo) (*abci.ResponseInfo, error) { return &abci.ResponseInfo{}, nil } func (app *testApp) FinalizeBlock(_ context.Context, req *abci.RequestFinalizeBlock) (*abci.ResponseFinalizeBlock, error) { app.Misbehavior = req.Misbehavior - return &abci.ResponseFinalizeBlock{ - Events: []abci.Event{}, - }, nil + return &abci.ResponseFinalizeBlock{}, nil } -func (app *testApp) CheckTx(_ context.Context, req *abci.RequestCheckTx) (*abci.ResponseCheckTx, error) { +func (app *testApp) CheckTx(_ context.Context, _req *abci.RequestCheckTx) (*abci.ResponseCheckTx, error) { return &abci.ResponseCheckTx{}, nil } -func (app *testApp) Query(_ context.Context, req *abci.RequestQuery) (*abci.ResponseQuery, error) { +func (app *testApp) Query(_ context.Context, _req *abci.RequestQuery) (*abci.ResponseQuery, error) { return &abci.ResponseQuery{}, nil } @@ -259,7 +258,8 @@ func (app *testApp) PrepareProposal(_ context.Context, req *abci.RequestPrepareP AppVersion: 1, }, }, - TxResults: resTxs, + AppVersion: 1, + TxResults: resTxs, }, nil } @@ -278,5 +278,6 @@ func (app *testApp) ProcessProposal(_ context.Context, req *abci.RequestProcessP }, TxResults: resTxs, Status: abci.ResponseProcessProposal_ACCEPT, + Events: []abci.Event{}, }, nil } diff 
--git a/internal/state/indexer/block/kv/kv.go b/internal/state/indexer/block/kv/kv.go index ccba695093..dd9f62764c 100644 --- a/internal/state/indexer/block/kv/kv.go +++ b/internal/state/indexer/block/kv/kv.go @@ -65,7 +65,7 @@ func (idx *BlockerIndexer) Index(bh types.EventDataNewBlockHeader) error { } // 2. index FinalizeBlock events - if err := idx.indexEvents(batch, bh.ResultFinalizeBlock.Events, "finalize_block", height); err != nil { + if err := idx.indexEvents(batch, bh.ResultProcessProposal.Events, "finalize_block", height); err != nil { return fmt.Errorf("failed to index FinalizeBlock events: %w", err) } diff --git a/internal/state/indexer/block/kv/kv_test.go b/internal/state/indexer/block/kv/kv_test.go index 20cda5a82c..4d5552745b 100644 --- a/internal/state/indexer/block/kv/kv_test.go +++ b/internal/state/indexer/block/kv/kv_test.go @@ -20,7 +20,7 @@ func TestBlockIndexer(t *testing.T) { require.NoError(t, indexer.Index(types.EventDataNewBlockHeader{ Header: types.Header{Height: 1}, - ResultFinalizeBlock: abci.ResponseFinalizeBlock{ + ResultProcessProposal: abci.ResponseProcessProposal{ Events: []abci.Event{ { Type: "finalize_event1", @@ -53,7 +53,7 @@ func TestBlockIndexer(t *testing.T) { } require.NoError(t, indexer.Index(types.EventDataNewBlockHeader{ Header: types.Header{Height: int64(i)}, - ResultFinalizeBlock: abci.ResponseFinalizeBlock{ + ResultProcessProposal: abci.ResponseProcessProposal{ Events: []abci.Event{ { Type: "finalize_event1", diff --git a/internal/state/indexer/block/null/null.go b/internal/state/indexer/block/null/null.go index e864490aa4..a2ae24c2f1 100644 --- a/internal/state/indexer/block/null/null.go +++ b/internal/state/indexer/block/null/null.go @@ -14,7 +14,7 @@ var _ indexer.BlockIndexer = (*BlockerIndexer)(nil) // TxIndex implements a no-op block indexer. 
type BlockerIndexer struct{} -func (idx *BlockerIndexer) Has(height int64) (bool, error) { +func (idx *BlockerIndexer) Has(_height int64) (bool, error) { return false, errors.New(`indexing is disabled (set 'tx_index = "kv"' in config)`) } @@ -22,6 +22,6 @@ func (idx *BlockerIndexer) Index(types.EventDataNewBlockHeader) error { return nil } -func (idx *BlockerIndexer) Search(ctx context.Context, q *query.Query) ([]int64, error) { +func (idx *BlockerIndexer) Search(_ctx context.Context, _q *query.Query) ([]int64, error) { return []int64{}, nil } diff --git a/internal/state/indexer/mocks/event_sink.go b/internal/state/indexer/mocks/event_sink.go index 45d3f22c8a..8288781bc0 100644 --- a/internal/state/indexer/mocks/event_sink.go +++ b/internal/state/indexer/mocks/event_sink.go @@ -24,6 +24,10 @@ type EventSink struct { func (_m *EventSink) GetTxByHash(_a0 []byte) (*types.TxResult, error) { ret := _m.Called(_a0) + if len(ret) == 0 { + panic("no return value specified for GetTxByHash") + } + var r0 *types.TxResult var r1 error if rf, ok := ret.Get(0).(func([]byte) (*types.TxResult, error)); ok { @@ -50,6 +54,10 @@ func (_m *EventSink) GetTxByHash(_a0 []byte) (*types.TxResult, error) { func (_m *EventSink) HasBlock(_a0 int64) (bool, error) { ret := _m.Called(_a0) + if len(ret) == 0 { + panic("no return value specified for HasBlock") + } + var r0 bool var r1 error if rf, ok := ret.Get(0).(func(int64) (bool, error)); ok { @@ -74,6 +82,10 @@ func (_m *EventSink) HasBlock(_a0 int64) (bool, error) { func (_m *EventSink) IndexBlockEvents(_a0 tenderdashtypes.EventDataNewBlockHeader) error { ret := _m.Called(_a0) + if len(ret) == 0 { + panic("no return value specified for IndexBlockEvents") + } + var r0 error if rf, ok := ret.Get(0).(func(tenderdashtypes.EventDataNewBlockHeader) error); ok { r0 = rf(_a0) @@ -88,6 +100,10 @@ func (_m *EventSink) IndexBlockEvents(_a0 tenderdashtypes.EventDataNewBlockHeade func (_m *EventSink) IndexTxEvents(_a0 []*types.TxResult) error { ret := _m.Called(_a0) + if len(ret) == 0 { + panic("no return value specified for IndexTxEvents") + } + var r0 error if rf, ok := ret.Get(0).(func([]*types.TxResult) error); ok { r0 = rf(_a0) @@ -102,6 +118,10 @@ func (_m *EventSink) IndexTxEvents(_a0 []*types.TxResult) error { func (_m *EventSink) SearchBlockEvents(_a0 context.Context, _a1 *query.Query) ([]int64, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for SearchBlockEvents") + } + var r0 []int64 var r1 error if rf, ok := ret.Get(0).(func(context.Context, *query.Query) ([]int64, error)); ok { @@ -128,6 +148,10 @@ func (_m *EventSink) SearchBlockEvents(_a0 context.Context, _a1 *query.Query) ([ func (_m *EventSink) SearchTxEvents(_a0 context.Context, _a1 *query.Query) ([]*types.TxResult, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for SearchTxEvents") + } + var r0 []*types.TxResult var r1 error if rf, ok := ret.Get(0).(func(context.Context, *query.Query) ([]*types.TxResult, error)); ok { @@ -154,6 +178,10 @@ func (_m *EventSink) SearchTxEvents(_a0 context.Context, _a1 *query.Query) ([]*t func (_m *EventSink) Stop() error { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Stop") + } + var r0 error if rf, ok := ret.Get(0).(func() error); ok { r0 = rf() @@ -168,6 +196,10 @@ func (_m *EventSink) Stop() error { func (_m *EventSink) Type() indexer.EventSinkType { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Type") + } + var r0 
indexer.EventSinkType if rf, ok := ret.Get(0).(func() indexer.EventSinkType); ok { r0 = rf() diff --git a/internal/state/indexer/sink/kv/kv_test.go b/internal/state/indexer/sink/kv/kv_test.go index 690e9d5be0..5ec843672d 100644 --- a/internal/state/indexer/sink/kv/kv_test.go +++ b/internal/state/indexer/sink/kv/kv_test.go @@ -34,7 +34,7 @@ func TestBlockFuncs(t *testing.T) { require.NoError(t, indexer.IndexBlockEvents(types.EventDataNewBlockHeader{ Header: types.Header{Height: 1}, - ResultFinalizeBlock: abci.ResponseFinalizeBlock{ + ResultProcessProposal: abci.ResponseProcessProposal{ Events: []abci.Event{ { Type: "finalize_eventA", @@ -72,7 +72,7 @@ func TestBlockFuncs(t *testing.T) { require.NoError(t, indexer.IndexBlockEvents(types.EventDataNewBlockHeader{ Header: types.Header{Height: int64(i)}, - ResultFinalizeBlock: abci.ResponseFinalizeBlock{ + ResultProcessProposal: abci.ResponseProcessProposal{ Events: []abci.Event{ { Type: "finalize_eventA", diff --git a/internal/state/indexer/sink/null/null.go b/internal/state/indexer/sink/null/null.go index 6632eeb8f5..0e800b4796 100644 --- a/internal/state/indexer/sink/null/null.go +++ b/internal/state/indexer/sink/null/null.go @@ -22,27 +22,27 @@ func (nes *EventSink) Type() indexer.EventSinkType { return indexer.NULL } -func (nes *EventSink) IndexBlockEvents(bh types.EventDataNewBlockHeader) error { +func (nes *EventSink) IndexBlockEvents(_bh types.EventDataNewBlockHeader) error { return nil } -func (nes *EventSink) IndexTxEvents(results []*abci.TxResult) error { +func (nes *EventSink) IndexTxEvents(_results []*abci.TxResult) error { return nil } -func (nes *EventSink) SearchBlockEvents(ctx context.Context, q *query.Query) ([]int64, error) { +func (nes *EventSink) SearchBlockEvents(_ctx context.Context, _q *query.Query) ([]int64, error) { return nil, nil } -func (nes *EventSink) SearchTxEvents(ctx context.Context, q *query.Query) ([]*abci.TxResult, error) { +func (nes *EventSink) SearchTxEvents(_ctx context.Context, _q *query.Query) ([]*abci.TxResult, error) { return nil, nil } -func (nes *EventSink) GetTxByHash(hash []byte) (*abci.TxResult, error) { +func (nes *EventSink) GetTxByHash(_hash []byte) (*abci.TxResult, error) { return nil, nil } -func (nes *EventSink) HasBlock(h int64) (bool, error) { +func (nes *EventSink) HasBlock(_h int64) (bool, error) { return false, nil } diff --git a/internal/state/indexer/sink/psql/psql.go b/internal/state/indexer/sink/psql/psql.go index 9d8c68761f..dd1c46a206 100644 --- a/internal/state/indexer/sink/psql/psql.go +++ b/internal/state/indexer/sink/psql/psql.go @@ -170,7 +170,7 @@ INSERT INTO `+tableBlocks+` (height, chain_id, created_at) return fmt.Errorf("block meta-events: %w", err) } // Insert all the block events. Order is important here, - if err := insertEvents(dbtx, blockID, 0, h.ResultFinalizeBlock.Events); err != nil { + if err := insertEvents(dbtx, blockID, 0, h.ResultProcessProposal.Events); err != nil { return fmt.Errorf("finalize-block events: %w", err) } return nil @@ -238,22 +238,22 @@ INSERT INTO `+tableTxResults+` (block_id, index, created_at, tx_hash, tx_result) } // SearchBlockEvents is not implemented by this sink, and reports an error for all queries. 
-func (es *EventSink) SearchBlockEvents(ctx context.Context, q *query.Query) ([]int64, error) { +func (es *EventSink) SearchBlockEvents(_ctx context.Context, _q *query.Query) ([]int64, error) { return nil, errors.New("block search is not supported via the postgres event sink") } // SearchTxEvents is not implemented by this sink, and reports an error for all queries. -func (es *EventSink) SearchTxEvents(ctx context.Context, q *query.Query) ([]*abci.TxResult, error) { +func (es *EventSink) SearchTxEvents(_ctx context.Context, _q *query.Query) ([]*abci.TxResult, error) { return nil, errors.New("tx search is not supported via the postgres event sink") } // GetTxByHash is not implemented by this sink, and reports an error for all queries. -func (es *EventSink) GetTxByHash(hash []byte) (*abci.TxResult, error) { +func (es *EventSink) GetTxByHash(_hash []byte) (*abci.TxResult, error) { return nil, errors.New("getTxByHash is not supported via the postgres event sink") } // HasBlock is not implemented by this sink, and reports an error for all queries. -func (es *EventSink) HasBlock(h int64) (bool, error) { +func (es *EventSink) HasBlock(_h int64) (bool, error) { return false, errors.New("hasBlock is not supported via the postgres event sink") } diff --git a/internal/state/indexer/sink/psql/psql_test.go b/internal/state/indexer/sink/psql/psql_test.go index 17c457530e..92549c2e77 100644 --- a/internal/state/indexer/sink/psql/psql_test.go +++ b/internal/state/indexer/sink/psql/psql_test.go @@ -227,8 +227,6 @@ func newTestBlockHeader() types.EventDataNewBlockHeader { ResultProcessProposal: abci.ResponseProcessProposal{ Status: abci.ResponseProcessProposal_ACCEPT, AppHash: make([]byte, crypto.DefaultAppHashSize), - }, - ResultFinalizeBlock: abci.ResponseFinalizeBlock{ Events: []abci.Event{ makeIndexedEvent("finalize_event.proposer", "FCAA001"), makeIndexedEvent("thingy.whatzit", "O.O"), diff --git a/internal/state/indexer/tx/null/null.go b/internal/state/indexer/tx/null/null.go index d6a1be056f..969b7e9183 100644 --- a/internal/state/indexer/tx/null/null.go +++ b/internal/state/indexer/tx/null/null.go @@ -15,20 +15,20 @@ var _ indexer.TxIndexer = (*TxIndex)(nil) type TxIndex struct{} // Get on a TxIndex is disabled and panics when invoked. -func (txi *TxIndex) Get(hash []byte) (*abci.TxResult, error) { +func (txi *TxIndex) Get(_hash []byte) (*abci.TxResult, error) { return nil, errors.New(`indexing is disabled (set 'tx_index = "kv"' in config)`) } // AddBatch is a noop and always returns nil. -func (txi *TxIndex) AddBatch(batch *indexer.Batch) error { +func (txi *TxIndex) AddBatch(_batch *indexer.Batch) error { return nil } // Index is a noop and always returns nil. 
-func (txi *TxIndex) Index(results []*abci.TxResult) error { +func (txi *TxIndex) Index(_results []*abci.TxResult) error { return nil } -func (txi *TxIndex) Search(ctx context.Context, q *query.Query) ([]*abci.TxResult, error) { +func (txi *TxIndex) Search(_ctx context.Context, _q *query.Query) ([]*abci.TxResult, error) { return []*abci.TxResult{}, nil } diff --git a/internal/state/mocks/block_store.go b/internal/state/mocks/block_store.go index f4a0636d78..63873d194a 100644 --- a/internal/state/mocks/block_store.go +++ b/internal/state/mocks/block_store.go @@ -17,6 +17,10 @@ type BlockStore struct { func (_m *BlockStore) Base() int64 { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Base") + } + var r0 int64 if rf, ok := ret.Get(0).(func() int64); ok { r0 = rf() @@ -31,6 +35,10 @@ func (_m *BlockStore) Base() int64 { func (_m *BlockStore) CoreChainLockedHeight() uint32 { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for CoreChainLockedHeight") + } + var r0 uint32 if rf, ok := ret.Get(0).(func() uint32); ok { r0 = rf() @@ -45,6 +53,10 @@ func (_m *BlockStore) CoreChainLockedHeight() uint32 { func (_m *BlockStore) Height() int64 { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Height") + } + var r0 int64 if rf, ok := ret.Get(0).(func() int64); ok { r0 = rf() @@ -59,6 +71,10 @@ func (_m *BlockStore) Height() int64 { func (_m *BlockStore) LoadBaseMeta() *types.BlockMeta { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for LoadBaseMeta") + } + var r0 *types.BlockMeta if rf, ok := ret.Get(0).(func() *types.BlockMeta); ok { r0 = rf() @@ -75,6 +91,10 @@ func (_m *BlockStore) LoadBaseMeta() *types.BlockMeta { func (_m *BlockStore) LoadBlock(height int64) *types.Block { ret := _m.Called(height) + if len(ret) == 0 { + panic("no return value specified for LoadBlock") + } + var r0 *types.Block if rf, ok := ret.Get(0).(func(int64) *types.Block); ok { r0 = rf(height) @@ -91,6 +111,10 @@ func (_m *BlockStore) LoadBlock(height int64) *types.Block { func (_m *BlockStore) LoadBlockByHash(hash []byte) *types.Block { ret := _m.Called(hash) + if len(ret) == 0 { + panic("no return value specified for LoadBlockByHash") + } + var r0 *types.Block if rf, ok := ret.Get(0).(func([]byte) *types.Block); ok { r0 = rf(hash) @@ -107,6 +131,10 @@ func (_m *BlockStore) LoadBlockByHash(hash []byte) *types.Block { func (_m *BlockStore) LoadBlockCommit(height int64) *types.Commit { ret := _m.Called(height) + if len(ret) == 0 { + panic("no return value specified for LoadBlockCommit") + } + var r0 *types.Commit if rf, ok := ret.Get(0).(func(int64) *types.Commit); ok { r0 = rf(height) @@ -123,6 +151,10 @@ func (_m *BlockStore) LoadBlockCommit(height int64) *types.Commit { func (_m *BlockStore) LoadBlockMeta(height int64) *types.BlockMeta { ret := _m.Called(height) + if len(ret) == 0 { + panic("no return value specified for LoadBlockMeta") + } + var r0 *types.BlockMeta if rf, ok := ret.Get(0).(func(int64) *types.BlockMeta); ok { r0 = rf(height) @@ -139,6 +171,10 @@ func (_m *BlockStore) LoadBlockMeta(height int64) *types.BlockMeta { func (_m *BlockStore) LoadBlockMetaByHash(hash []byte) *types.BlockMeta { ret := _m.Called(hash) + if len(ret) == 0 { + panic("no return value specified for LoadBlockMetaByHash") + } + var r0 *types.BlockMeta if rf, ok := ret.Get(0).(func([]byte) *types.BlockMeta); ok { r0 = rf(hash) @@ -155,6 +191,10 @@ func (_m *BlockStore) LoadBlockMetaByHash(hash []byte) *types.BlockMeta { func 
(_m *BlockStore) LoadBlockPart(height int64, index int) *types.Part { ret := _m.Called(height, index) + if len(ret) == 0 { + panic("no return value specified for LoadBlockPart") + } + var r0 *types.Part if rf, ok := ret.Get(0).(func(int64, int) *types.Part); ok { r0 = rf(height, index) @@ -171,6 +211,10 @@ func (_m *BlockStore) LoadBlockPart(height int64, index int) *types.Part { func (_m *BlockStore) LoadSeenCommit() *types.Commit { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for LoadSeenCommit") + } + var r0 *types.Commit if rf, ok := ret.Get(0).(func() *types.Commit); ok { r0 = rf() @@ -187,6 +231,10 @@ func (_m *BlockStore) LoadSeenCommit() *types.Commit { func (_m *BlockStore) LoadSeenCommitAt(height int64) *types.Commit { ret := _m.Called(height) + if len(ret) == 0 { + panic("no return value specified for LoadSeenCommitAt") + } + var r0 *types.Commit if rf, ok := ret.Get(0).(func(int64) *types.Commit); ok { r0 = rf(height) @@ -203,6 +251,10 @@ func (_m *BlockStore) LoadSeenCommitAt(height int64) *types.Commit { func (_m *BlockStore) PruneBlocks(height int64) (uint64, error) { ret := _m.Called(height) + if len(ret) == 0 { + panic("no return value specified for PruneBlocks") + } + var r0 uint64 var r1 error if rf, ok := ret.Get(0).(func(int64) (uint64, error)); ok { @@ -232,6 +284,10 @@ func (_m *BlockStore) SaveBlock(block *types.Block, blockParts *types.PartSet, s func (_m *BlockStore) Size() int64 { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Size") + } + var r0 int64 if rf, ok := ret.Get(0).(func() int64); ok { r0 = rf() diff --git a/internal/state/mocks/evidence_pool.go b/internal/state/mocks/evidence_pool.go index 162d1035d3..ff16885fbd 100644 --- a/internal/state/mocks/evidence_pool.go +++ b/internal/state/mocks/evidence_pool.go @@ -20,6 +20,10 @@ type EvidencePool struct { func (_m *EvidencePool) AddEvidence(_a0 context.Context, _a1 types.Evidence) error { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for AddEvidence") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, types.Evidence) error); ok { r0 = rf(_a0, _a1) @@ -34,6 +38,10 @@ func (_m *EvidencePool) AddEvidence(_a0 context.Context, _a1 types.Evidence) err func (_m *EvidencePool) CheckEvidence(_a0 context.Context, _a1 types.EvidenceList) error { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for CheckEvidence") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, types.EvidenceList) error); ok { r0 = rf(_a0, _a1) @@ -48,6 +56,10 @@ func (_m *EvidencePool) CheckEvidence(_a0 context.Context, _a1 types.EvidenceLis func (_m *EvidencePool) PendingEvidence(maxBytes int64) ([]types.Evidence, int64) { ret := _m.Called(maxBytes) + if len(ret) == 0 { + panic("no return value specified for PendingEvidence") + } + var r0 []types.Evidence var r1 int64 if rf, ok := ret.Get(0).(func(int64) ([]types.Evidence, int64)); ok { diff --git a/internal/state/mocks/executor.go b/internal/state/mocks/executor.go index daa92ee0dc..e287c60cc0 100644 --- a/internal/state/mocks/executor.go +++ b/internal/state/mocks/executor.go @@ -20,6 +20,10 @@ type Executor struct { func (_m *Executor) ApplyBlock(ctx context.Context, _a1 state.State, blockID types.BlockID, block *types.Block, commit *types.Commit) (state.State, error) { ret := _m.Called(ctx, _a1, blockID, block, commit) + if len(ret) == 0 { + panic("no return value specified for ApplyBlock") + } + var r0 state.State var r1 error if 
rf, ok := ret.Get(0).(func(context.Context, state.State, types.BlockID, *types.Block, *types.Commit) (state.State, error)); ok { @@ -44,6 +48,10 @@ func (_m *Executor) ApplyBlock(ctx context.Context, _a1 state.State, blockID typ func (_m *Executor) CreateProposalBlock(ctx context.Context, height int64, round int32, _a3 state.State, commit *types.Commit, proposerProTxHash []byte, proposedAppVersion uint64) (*types.Block, state.CurrentRoundState, error) { ret := _m.Called(ctx, height, round, _a3, commit, proposerProTxHash, proposedAppVersion) + if len(ret) == 0 { + panic("no return value specified for CreateProposalBlock") + } + var r0 *types.Block var r1 state.CurrentRoundState var r2 error @@ -82,6 +90,10 @@ func (_m *Executor) ExtendVote(ctx context.Context, vote *types.Vote) { func (_m *Executor) FinalizeBlock(ctx context.Context, _a1 state.State, uncommittedState state.CurrentRoundState, blockID types.BlockID, block *types.Block, commit *types.Commit) (state.State, error) { ret := _m.Called(ctx, _a1, uncommittedState, blockID, block, commit) + if len(ret) == 0 { + panic("no return value specified for FinalizeBlock") + } + var r0 state.State var r1 error if rf, ok := ret.Get(0).(func(context.Context, state.State, state.CurrentRoundState, types.BlockID, *types.Block, *types.Commit) (state.State, error)); ok { @@ -106,6 +118,10 @@ func (_m *Executor) FinalizeBlock(ctx context.Context, _a1 state.State, uncommit func (_m *Executor) ProcessProposal(ctx context.Context, block *types.Block, round int32, _a3 state.State, verify bool) (state.CurrentRoundState, error) { ret := _m.Called(ctx, block, round, _a3, verify) + if len(ret) == 0 { + panic("no return value specified for ProcessProposal") + } + var r0 state.CurrentRoundState var r1 error if rf, ok := ret.Get(0).(func(context.Context, *types.Block, int32, state.State, bool) (state.CurrentRoundState, error)); ok { @@ -130,6 +146,10 @@ func (_m *Executor) ProcessProposal(ctx context.Context, block *types.Block, rou func (_m *Executor) ValidateBlock(ctx context.Context, _a1 state.State, block *types.Block) error { ret := _m.Called(ctx, _a1, block) + if len(ret) == 0 { + panic("no return value specified for ValidateBlock") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, state.State, *types.Block) error); ok { r0 = rf(ctx, _a1, block) @@ -144,6 +164,10 @@ func (_m *Executor) ValidateBlock(ctx context.Context, _a1 state.State, block *t func (_m *Executor) ValidateBlockWithRoundState(ctx context.Context, _a1 state.State, uncommittedState state.CurrentRoundState, block *types.Block) error { ret := _m.Called(ctx, _a1, uncommittedState, block) + if len(ret) == 0 { + panic("no return value specified for ValidateBlockWithRoundState") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, state.State, state.CurrentRoundState, *types.Block) error); ok { r0 = rf(ctx, _a1, uncommittedState, block) @@ -158,6 +182,10 @@ func (_m *Executor) ValidateBlockWithRoundState(ctx context.Context, _a1 state.S func (_m *Executor) VerifyVoteExtension(ctx context.Context, vote *types.Vote) error { ret := _m.Called(ctx, vote) + if len(ret) == 0 { + panic("no return value specified for VerifyVoteExtension") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, *types.Vote) error); ok { r0 = rf(ctx, vote) diff --git a/internal/state/mocks/store.go b/internal/state/mocks/store.go index 8f8fc65857..4a7fa92190 100644 --- a/internal/state/mocks/store.go +++ b/internal/state/mocks/store.go @@ -20,6 +20,10 @@ type Store struct { func (_m 
*Store) Bootstrap(_a0 state.State) error { ret := _m.Called(_a0) + if len(ret) == 0 { + panic("no return value specified for Bootstrap") + } + var r0 error if rf, ok := ret.Get(0).(func(state.State) error); ok { r0 = rf(_a0) @@ -34,6 +38,10 @@ func (_m *Store) Bootstrap(_a0 state.State) error { func (_m *Store) Close() error { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Close") + } + var r0 error if rf, ok := ret.Get(0).(func() error); ok { r0 = rf() @@ -48,6 +56,10 @@ func (_m *Store) Close() error { func (_m *Store) Load() (state.State, error) { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Load") + } + var r0 state.State var r1 error if rf, ok := ret.Get(0).(func() (state.State, error)); ok { @@ -72,6 +84,10 @@ func (_m *Store) Load() (state.State, error) { func (_m *Store) LoadABCIResponses(_a0 int64) (*tendermintstate.ABCIResponses, error) { ret := _m.Called(_a0) + if len(ret) == 0 { + panic("no return value specified for LoadABCIResponses") + } + var r0 *tendermintstate.ABCIResponses var r1 error if rf, ok := ret.Get(0).(func(int64) (*tendermintstate.ABCIResponses, error)); ok { @@ -98,6 +114,10 @@ func (_m *Store) LoadABCIResponses(_a0 int64) (*tendermintstate.ABCIResponses, e func (_m *Store) LoadConsensusParams(_a0 int64) (types.ConsensusParams, error) { ret := _m.Called(_a0) + if len(ret) == 0 { + panic("no return value specified for LoadConsensusParams") + } + var r0 types.ConsensusParams var r1 error if rf, ok := ret.Get(0).(func(int64) (types.ConsensusParams, error)); ok { @@ -122,6 +142,10 @@ func (_m *Store) LoadConsensusParams(_a0 int64) (types.ConsensusParams, error) { func (_m *Store) LoadValidators(_a0 int64) (*types.ValidatorSet, error) { ret := _m.Called(_a0) + if len(ret) == 0 { + panic("no return value specified for LoadValidators") + } + var r0 *types.ValidatorSet var r1 error if rf, ok := ret.Get(0).(func(int64) (*types.ValidatorSet, error)); ok { @@ -148,6 +172,10 @@ func (_m *Store) LoadValidators(_a0 int64) (*types.ValidatorSet, error) { func (_m *Store) PruneStates(_a0 int64) error { ret := _m.Called(_a0) + if len(ret) == 0 { + panic("no return value specified for PruneStates") + } + var r0 error if rf, ok := ret.Get(0).(func(int64) error); ok { r0 = rf(_a0) @@ -162,6 +190,10 @@ func (_m *Store) PruneStates(_a0 int64) error { func (_m *Store) Save(_a0 state.State) error { ret := _m.Called(_a0) + if len(ret) == 0 { + panic("no return value specified for Save") + } + var r0 error if rf, ok := ret.Get(0).(func(state.State) error); ok { r0 = rf(_a0) @@ -176,6 +208,10 @@ func (_m *Store) Save(_a0 state.State) error { func (_m *Store) SaveABCIResponses(_a0 int64, _a1 tendermintstate.ABCIResponses) error { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for SaveABCIResponses") + } + var r0 error if rf, ok := ret.Get(0).(func(int64, tendermintstate.ABCIResponses) error); ok { r0 = rf(_a0, _a1) @@ -190,6 +226,10 @@ func (_m *Store) SaveABCIResponses(_a0 int64, _a1 tendermintstate.ABCIResponses) func (_m *Store) SaveValidatorSets(_a0 int64, _a1 int64, _a2 *types.ValidatorSet) error { ret := _m.Called(_a0, _a1, _a2) + if len(ret) == 0 { + panic("no return value specified for SaveValidatorSets") + } + var r0 error if rf, ok := ret.Get(0).(func(int64, int64, *types.ValidatorSet) error); ok { r0 = rf(_a0, _a1, _a2) diff --git a/internal/state/services.go b/internal/state/services.go index ad104641dc..836e5fa926 100644 --- a/internal/state/services.go +++ 
b/internal/state/services.go @@ -57,12 +57,12 @@ type EvidencePool interface { // to the consensus evidence pool interface type EmptyEvidencePool struct{} -func (EmptyEvidencePool) PendingEvidence(maxBytes int64) (ev []types.Evidence, size int64) { +func (EmptyEvidencePool) PendingEvidence(_maxBytes int64) (ev []types.Evidence, size int64) { return nil, 0 } func (EmptyEvidencePool) AddEvidence(context.Context, types.Evidence) error { return nil } func (EmptyEvidencePool) Update(context.Context, State, types.EvidenceList) {} -func (EmptyEvidencePool) CheckEvidence(ctx context.Context, evList types.EvidenceList) error { +func (EmptyEvidencePool) CheckEvidence(_ctx context.Context, _evList types.EvidenceList) error { return nil } -func (EmptyEvidencePool) ReportConflictingVotes(voteA, voteB *types.Vote) {} +func (EmptyEvidencePool) ReportConflictingVotes(_voteA, _voteB *types.Vote) {} diff --git a/internal/state/state_test.go b/internal/state/state_test.go index 5c2e05056d..8fd12710b5 100644 --- a/internal/state/state_test.go +++ b/internal/state/state_test.go @@ -12,6 +12,7 @@ import ( "github.com/stretchr/testify/require" dbm "github.com/tendermint/tm-db" + "github.com/dashpay/tenderdash/abci/example/kvstore" abci "github.com/dashpay/tenderdash/abci/types" "github.com/dashpay/tenderdash/config" "github.com/dashpay/tenderdash/crypto" @@ -44,7 +45,7 @@ func setupTestCase(t *testing.T) (func(t *testing.T), dbm.DB, sm.State) { err = stateStore.Save(state) require.NoError(t, err) - tearDown := func(t *testing.T) { _ = os.RemoveAll(cfg.RootDir) } + tearDown := func(_ *testing.T) { _ = os.RemoveAll(cfg.RootDir) } return tearDown, stateDB, state } @@ -332,6 +333,39 @@ func TestOneValidatorChangesSaveLoad(t *testing.T) { } } +// TestEmptyValidatorUpdates tests that the validator set is updated correctly when there are no validator updates. 
+func TestEmptyValidatorUpdates(t *testing.T) { + tearDown, _, state := setupTestCase(t) + defer tearDown(t) + + firstNode := state.Validators.GetByIndex(0) + require.NotZero(t, firstNode.ProTxHash) + ctx := dash.ContextWithProTxHash(context.Background(), firstNode.ProTxHash) + + newPrivKey := bls12381.GenPrivKeyFromSecret([]byte("test")) + newPubKey := newPrivKey.PubKey() + newQuorumHash := crypto.RandQuorumHash() + + expectValidators := types.ValidatorListString(state.Validators.Validators) + + resp := abci.ResponseProcessProposal{ + ValidatorSetUpdate: &abci.ValidatorSetUpdate{ + ValidatorUpdates: nil, + ThresholdPublicKey: cryptoenc.MustPubKeyToProto(newPubKey), + QuorumHash: newQuorumHash, + }} + + changes, err := state.NewStateChangeset( + ctx, + sm.RoundParamsFromProcessProposal(&resp, nil, 0), + ) + require.NoError(t, err) + + assert.EqualValues(t, newQuorumHash, changes.NextValidators.QuorumHash, "quorum hash should be updated") + assert.EqualValues(t, newPubKey, changes.NextValidators.ThresholdPublicKey, "threshold public key should be updated") + assert.Equal(t, expectValidators, types.ValidatorListString(changes.NextValidators.Validators), "validator should not change") +} + //func TestProposerFrequency(t *testing.T) { // ctx, cancel := context.WithCancel(context.Background()) // defer cancel() @@ -1000,6 +1034,8 @@ func TestStateMakeBlock(t *testing.T) { proposerProTxHash := state.Validators.GetProposer().ProTxHash stateVersion := state.Version.Consensus + // temporary workaround; state.Version.Consensus is deprecated and will be removed + stateVersion.App = kvstore.ProtocolVersion var height int64 = 2 state.LastBlockHeight = height - 1 block, err := statefactory.MakeBlock(state, height, new(types.Commit), 0) @@ -1100,24 +1136,26 @@ func TestStateProto(t *testing.T) { for _, tt := range tc { tt := tt - pbs, err := tt.state.ToProto() - if !tt.expPass1 { - assert.Error(t, err) - } else { - assert.NoError(t, err, tt.testName) - } + t.Run(tt.testName, func(t *testing.T) { + pbs, err := tt.state.ToProto() + if !tt.expPass1 { + assert.Error(t, err) + } else { + assert.NoError(t, err, tt.testName) + } - smt, err := sm.FromProto(pbs) - if tt.expPass2 { - require.NoError(t, err, tt.testName) - require.Equal(t, tt.state, smt, tt.testName) - } else { - require.Error(t, err, tt.testName) - } + smt, err := sm.FromProto(pbs) + if tt.expPass2 { + require.NoError(t, err, tt.testName) + require.Equal(t, tt.state, smt, tt.testName) + } else { + require.Error(t, err, tt.testName) + } + }) } } -func blockExecutorFunc(ctx context.Context, t *testing.T) func(prevState, state sm.State, ucState sm.CurrentRoundState) sm.State { +func blockExecutorFunc(_ctx context.Context, t *testing.T) func(prevState, state sm.State, ucState sm.CurrentRoundState) sm.State { return func(prevState, state sm.State, ucState sm.CurrentRoundState) sm.State { t.Helper() diff --git a/internal/state/store_test.go b/internal/state/store_test.go index 153fa4bd7b..174f63ce9f 100644 --- a/internal/state/store_test.go +++ b/internal/state/store_test.go @@ -233,7 +233,6 @@ func TestPruneStates(t *testing.T) { {Data: []byte{3}}, }, }, - FinalizeBlock: &abci.ResponseFinalizeBlock{}, }) require.NoError(t, err) } diff --git a/internal/state/test/factory/block.go b/internal/state/test/factory/block.go index d6ff4f237a..49ad77261d 100644 --- a/internal/state/test/factory/block.go +++ b/internal/state/test/factory/block.go @@ -9,6 +9,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + 
"github.com/dashpay/tenderdash/abci/example/kvstore" abci "github.com/dashpay/tenderdash/abci/types" "github.com/dashpay/tenderdash/crypto" sm "github.com/dashpay/tenderdash/internal/state" @@ -67,6 +68,10 @@ func MakeBlock(state sm.State, height int64, c *types.Commit, proposedAppVersion if block.ResultsHash, err = abci.TxResultsHash(factory.ExecTxResults(block.Txs)); err != nil { return nil, err } + // this should be set by PrepareProposal, but we don't always call PrepareProposal + if block.Version.App == 0 { + block.Version.App = kvstore.ProtocolVersion + } return block, nil } @@ -84,7 +89,11 @@ func makeBlockAndPartSet( t.Helper() quorumSigns := &types.CommitSigns{QuorumHash: state.LastValidators.QuorumHash} - lastCommit := types.NewCommit(height-1, 0, types.BlockID{}, quorumSigns) + var ve types.VoteExtensions + if lastBlock != nil && lastBlock.LastCommit != nil { + ve = types.VoteExtensionsFromProto(lastBlock.LastCommit.ThresholdVoteExtensions...) + } + lastCommit := types.NewCommit(height-1, 0, types.BlockID{}, ve, quorumSigns) if height > 1 { var err error votes := make([]*types.Vote, len(privVals)) @@ -106,6 +115,7 @@ func makeBlockAndPartSet( lastBlock.Header.Height, 0, lastBlockMeta.BlockID, + types.VoteExtensionsFromProto(lastBlock.LastCommit.ThresholdVoteExtensions...), &types.CommitSigns{ QuorumSigns: *thresholdSigns, QuorumHash: state.LastValidators.QuorumHash, diff --git a/internal/state/tx_filter_test.go b/internal/state/tx_filter_test.go index bec5162434..3456fed3b1 100644 --- a/internal/state/tx_filter_test.go +++ b/internal/state/tx_filter_test.go @@ -15,7 +15,7 @@ import ( func TestTxFilter(t *testing.T) { const maxBlockBytes = 3241 - maxTxSize := maxBlockBytes - 1131 + maxTxSize := maxBlockBytes - 1132 genDoc := factory.MinimalGenesisDoc() genDoc.ConsensusParams.Block.MaxBytes = maxBlockBytes genDoc.ConsensusParams.Evidence.MaxBytes = 1500 diff --git a/internal/state/validation.go b/internal/state/validation.go index e197b02d59..89914093c8 100644 --- a/internal/state/validation.go +++ b/internal/state/validation.go @@ -27,9 +27,9 @@ func validateBlock(state State, block *types.Block) error { } // Validate basic info. - if block.Version.App != state.Version.Consensus.App || - block.Version.Block != state.Version.Consensus.Block { - return fmt.Errorf("wrong Block.Header.Version. Expected %v, got %v", + // We don't validate app version because proposer can override it on a block-by-block basis. + if block.Version.Block != state.Version.Consensus.Block { + return fmt.Errorf("wrong Block.Header.Version.Block. 
Expected %v, got %v", state.Version.Consensus, block.Version, ) diff --git a/internal/state/validation_test.go b/internal/state/validation_test.go index 281ac4eb7e..4f92fdffa7 100644 --- a/internal/state/validation_test.go +++ b/internal/state/validation_test.go @@ -3,6 +3,7 @@ package state_test import ( "context" "errors" + "fmt" "strings" "testing" "time" @@ -78,7 +79,7 @@ func TestValidateBlockHeader(t *testing.T) { }) require.NoError(t, err) - lastCommit := types.NewCommit(0, 0, types.BlockID{}, nil) + lastCommit := types.NewCommit(0, 0, types.BlockID{}, nil, nil) // some bad values wrongHash := crypto.Checksum([]byte("this hash is wrong")) @@ -93,7 +94,6 @@ func TestValidateBlockHeader(t *testing.T) { malleateBlock func(block *types.Block) }{ {"Version wrong1", func(block *types.Block) { block.Version = wrongVersion1 }}, - {"Version wrong2", func(block *types.Block) { block.Version = wrongVersion2 }}, {"ChainID wrong", func(block *types.Block) { block.ChainID = "not-the-real-one" }}, {"Height wrong", func(block *types.Block) { block.Height += 10 }}, {"Core Height does not match chain lock", func(block *types.Block) { @@ -126,11 +126,6 @@ func TestValidateBlockHeader(t *testing.T) { "Proposer invalid", func(block *types.Block) { block.ProposerProTxHash = []byte("wrong size") }, }, - // Set appVersion to 2 allow "invalid proposed app version" case - { - "Proposed app version is invalid", - func(block *types.Block) { block.ProposedAppVersion = 1; state.Version.Consensus.App = 2 }, - }, } // Set appVersion to 2 allow "invalid proposed app version" case @@ -142,16 +137,18 @@ func TestValidateBlockHeader(t *testing.T) { Invalid blocks don't pass */ for _, tc := range testCases { - block, err := statefactory.MakeBlock(state, height, lastCommit, 0) - require.NoError(t, err) - err = changes.UpdateBlock(block) - assert.NoError(t, err) + t.Run(fmt.Sprintf("H:%d/%s", height, tc.name), func(t *testing.T) { + block, err := statefactory.MakeBlock(state, height, lastCommit, 0) + require.NoError(t, err) + err = changes.UpdateBlock(block) + assert.NoError(t, err) - tc.malleateBlock(block) + tc.malleateBlock(block) - err = blockExec.ValidateBlockWithRoundState(ctx, state, changes, block) - t.Logf("%s: %v", tc.name, err) - require.Error(t, err, tc.name) + err = blockExec.ValidateBlockWithRoundState(ctx, state, changes, block) + t.Logf("%s: %v", tc.name, err) + require.Error(t, err, tc.name) + }) } /* @@ -206,8 +203,8 @@ func TestValidateBlockCommit(t *testing.T) { blockStore, eventBus, ) - lastCommit := types.NewCommit(0, 0, types.BlockID{}, nil) - wrongVoteMessageSignedCommit := types.NewCommit(1, 0, types.BlockID{}, nil) + lastCommit := types.NewCommit(0, 0, types.BlockID{}, nil, nil) + wrongVoteMessageSignedCommit := types.NewCommit(1, 0, types.BlockID{}, nil, nil) badPrivValQuorumHash := crypto.RandQuorumHash() badPrivVal := types.NewMockPVForQuorum(badPrivValQuorumHash) @@ -235,6 +232,7 @@ func TestValidateBlockCommit(t *testing.T) { wrongHeightVote.Height, wrongHeightVote.Round, state.LastBlockID, + wrongHeightVote.VoteExtensions, &types.CommitSigns{ QuorumSigns: *thresholdSigns, QuorumHash: state.Validators.QuorumHash, @@ -328,8 +326,8 @@ func TestValidateBlockCommit(t *testing.T) { require.NoError(t, err, "height %d", height) goodVote.BlockSignature, badVote.BlockSignature = g.BlockSignature, b.BlockSignature - goodVote.VoteExtensions = types.VoteExtensionsFromProto(g.VoteExtensions) - badVote.VoteExtensions = types.VoteExtensionsFromProto(b.VoteExtensions) + goodVote.VoteExtensions = 
types.VoteExtensionsFromProto(g.VoteExtensions...) + badVote.VoteExtensions = types.VoteExtensionsFromProto(b.VoteExtensions...) thresholdSigns, err := types.NewSignsRecoverer([]*types.Vote{badVote}).Recover() require.NoError(t, err) @@ -338,7 +336,7 @@ func TestValidateBlockCommit(t *testing.T) { QuorumHash: state.Validators.QuorumHash, } wrongVoteMessageSignedCommit = types.NewCommit(goodVote.Height, goodVote.Round, - blockID, quorumSigns) + blockID, goodVote.VoteExtensions, quorumSigns) } } @@ -385,7 +383,7 @@ func TestValidateBlockEvidence(t *testing.T) { blockStore, eventBus, ) - lastCommit := types.NewCommit(0, 0, types.BlockID{}, nil) + lastCommit := types.NewCommit(0, 0, types.BlockID{}, nil, nil) for height := int64(1); height < validationTestsStopHeight; height++ { proposerProTxHash := state.Validators.GetProposer().ProTxHash diff --git a/internal/statesync/block_queue.go b/internal/statesync/block_queue.go index 04ff84f6fd..bb6690fbb7 100644 --- a/internal/statesync/block_queue.go +++ b/internal/statesync/block_queue.go @@ -119,6 +119,7 @@ func (q *blockQueue) nextHeight() <-chan int64 { return ch } + // we check initialHeight instead of startHeight as also need to address the startTime which we don't have here if q.terminal == nil && q.fetchHeight >= q.initialHeight { // return and decrement the fetch height ch <- q.fetchHeight diff --git a/internal/statesync/dispatcher.go b/internal/statesync/dispatcher.go index 5ded294ca1..6539285fef 100644 --- a/internal/statesync/dispatcher.go +++ b/internal/statesync/dispatcher.go @@ -229,7 +229,7 @@ func (p *BlockProvider) LightBlock(ctx context.Context, height int64) (*types.Li // attacks. This is a no op as there currently isn't a way to wire this up to // the evidence reactor (we should endeavor to do this in the future but for now // it's not critical for backwards verification) -func (p *BlockProvider) ReportEvidence(ctx context.Context, ev types.Evidence) error { +func (p *BlockProvider) ReportEvidence(_ctx context.Context, _ev types.Evidence) error { return nil } diff --git a/internal/statesync/mocks/state_provider.go b/internal/statesync/mocks/state_provider.go index b4301dffcb..73b6b7b372 100644 --- a/internal/statesync/mocks/state_provider.go +++ b/internal/statesync/mocks/state_provider.go @@ -23,6 +23,10 @@ type StateProvider struct { func (_m *StateProvider) AppHash(ctx context.Context, height uint64) (bytes.HexBytes, error) { ret := _m.Called(ctx, height) + if len(ret) == 0 { + panic("no return value specified for AppHash") + } + var r0 bytes.HexBytes var r1 error if rf, ok := ret.Get(0).(func(context.Context, uint64) (bytes.HexBytes, error)); ok { @@ -49,6 +53,10 @@ func (_m *StateProvider) AppHash(ctx context.Context, height uint64) (bytes.HexB func (_m *StateProvider) Commit(ctx context.Context, height uint64) (*types.Commit, error) { ret := _m.Called(ctx, height) + if len(ret) == 0 { + panic("no return value specified for Commit") + } + var r0 *types.Commit var r1 error if rf, ok := ret.Get(0).(func(context.Context, uint64) (*types.Commit, error)); ok { @@ -75,6 +83,10 @@ func (_m *StateProvider) Commit(ctx context.Context, height uint64) (*types.Comm func (_m *StateProvider) State(ctx context.Context, height uint64) (state.State, error) { ret := _m.Called(ctx, height) + if len(ret) == 0 { + panic("no return value specified for State") + } + var r0 state.State var r1 error if rf, ok := ret.Get(0).(func(context.Context, uint64) (state.State, error)); ok { diff --git a/internal/statesync/peer.go 
b/internal/statesync/peer.go index bc53201de1..589acecacc 100644 --- a/internal/statesync/peer.go +++ b/internal/statesync/peer.go @@ -136,7 +136,7 @@ func (p *PeerManager) Start(ctx context.Context) { } return nil }) - p.peerSubs.On(p2p.PeerStatusDown, func(ctx context.Context, update p2p.PeerUpdate) error { + p.peerSubs.On(p2p.PeerStatusDown, func(_ctx context.Context, update p2p.PeerUpdate) error { p.peerStore.Delete(update.NodeID) return nil }) diff --git a/internal/statesync/peer_test.go b/internal/statesync/peer_test.go index 23d6d0d7f8..61547e1d63 100644 --- a/internal/statesync/peer_test.go +++ b/internal/statesync/peer_test.go @@ -12,7 +12,7 @@ import ( "github.com/dashpay/tenderdash/types" ) -func TestPeerSubscriberBasic(t *testing.T) { +func TestPeerSubscriberBasic(_t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond) defer cancel() inCh := make(chan p2p.PeerUpdate) @@ -26,11 +26,11 @@ func TestPeerSubscriberBasic(t *testing.T) { } peerSub := NewPeerSubscriber(log.NewNopLogger(), p2pSub) outCh := make(chan struct{}) - peerSub.On(p2p.PeerStatusUp, func(ctx context.Context, update p2p.PeerUpdate) error { + peerSub.On(p2p.PeerStatusUp, func(_ctx context.Context, _update p2p.PeerUpdate) error { outCh <- struct{}{} return nil }) - peerSub.On(p2p.PeerStatusDown, func(ctx context.Context, update p2p.PeerUpdate) error { + peerSub.On(p2p.PeerStatusDown, func(_ctx context.Context, _update p2p.PeerUpdate) error { outCh <- struct{}{} return nil }) diff --git a/internal/statesync/reactor.go b/internal/statesync/reactor.go index d1b64b329a..0440e047ba 100644 --- a/internal/statesync/reactor.go +++ b/internal/statesync/reactor.go @@ -48,18 +48,6 @@ const ( // recentSnapshots is the number of recent snapshots to send and receive per peer. 
recentSnapshots = 10 - // snapshotMsgSize is the maximum size of a snapshotResponseMessage - snapshotMsgSize = int(4e6) // ~4MB - - // chunkMsgSize is the maximum size of a chunkResponseMessage - chunkMsgSize = int(16e6) // ~16MB - - // lightBlockMsgSize is the maximum size of a lightBlockResponseMessage - lightBlockMsgSize = int(1e7) // ~1MB - - // paramMsgSize is the maximum size of a paramsResponseMessage - paramMsgSize = int(1e5) // ~100kb - // lightBlockResponseTimeout is how long the dispatcher waits for a peer to // return a light block lightBlockResponseTimeout = 10 * time.Second @@ -83,41 +71,7 @@ const ( ) func getChannelDescriptors() map[p2p.ChannelID]*p2p.ChannelDescriptor { - return map[p2p.ChannelID]*p2p.ChannelDescriptor{ - SnapshotChannel: { - ID: SnapshotChannel, - Priority: 6, - SendQueueCapacity: 10, - RecvMessageCapacity: snapshotMsgSize, - RecvBufferCapacity: 128, - Name: "snapshot", - }, - ChunkChannel: { - ID: ChunkChannel, - Priority: 3, - SendQueueCapacity: 4, - RecvMessageCapacity: chunkMsgSize, - RecvBufferCapacity: 128, - Name: "chunk", - }, - LightBlockChannel: { - ID: LightBlockChannel, - Priority: 5, - SendQueueCapacity: 10, - RecvMessageCapacity: lightBlockMsgSize, - RecvBufferCapacity: 128, - Name: "light-block", - }, - ParamsChannel: { - ID: ParamsChannel, - Priority: 2, - SendQueueCapacity: 10, - RecvMessageCapacity: paramMsgSize, - RecvBufferCapacity: 128, - Name: "params", - }, - } - + return p2p.StatesyncChannelDescriptors() } // Metricer defines an interface used for the rpc sync info query, please see statesync.metrics @@ -389,7 +343,7 @@ func (r *Reactor) Sync(ctx context.Context) (sm.State, error) { return sm.State{}, fmt.Errorf("failed to bootstrap node with new state: %w", err) } - if err := r.blockStore.SaveSeenCommit(state.LastBlockHeight, commit); err != nil { + if err := r.blockStore.SaveSeenCommit(commit); err != nil { return sm.State{}, fmt.Errorf("failed to store last seen commit: %w", err) } @@ -535,38 +489,42 @@ func (r *Reactor) backfill( return } r.logger.Debug("fetching next block", "height", height, "peer", peer) - lb, err := func() (*types.LightBlock, error) { - subCtx, reqCancel := context.WithTimeout(ctxWithCancel, lightBlockResponseTimeout) - defer reqCancel() - // request the light block with a timeout - return r.dispatcher.LightBlock(subCtx, height, peer) - }() - if lb == nil { - r.logger.Info("backfill: peer didn't have block, fetching from another peer", "height", height) - queue.retry(height) - // As we are fetching blocks backwards, if this node doesn't have the block it likely doesn't - // have any prior ones, thus we remove it from the peer list. 
- continue - } - // once the peer has returned a value, add it back to the peer list to be used again - r.peers.Append(peer) - if errors.Is(err, context.Canceled) { - return - } + // request the light block with a timeout + subCtx, subCtxCancel := context.WithTimeout(ctxWithCancel, lightBlockResponseTimeout) + lb, err := r.dispatcher.LightBlock(subCtx, height, peer) + subCtxCancel() + if err != nil { queue.retry(height) if errors.Is(err, errNoConnectedPeers) { r.logger.Info("backfill: no connected peers to fetch light blocks from; sleeping...", "sleepTime", sleepTime) time.Sleep(sleepTime) - } else { + } else if errors.Is(err, context.DeadlineExceeded) { // we don't punish the peer as it might just have not responded in time - r.logger.Info("backfill: error with fetching light block", + // In future, we might want to consider a backoff strategy + r.logger.Debug("backfill: peer didn't respond on time", + "height", height, "peer", peer, "error", err) + r.peers.Append(peer) + } else { + r.logger.Info("backfill: error fetching light block", "height", height, "error", err) } continue } + if lb == nil { + r.logger.Info("backfill: peer didn't have block, fetching from another peer", "height", height, "peers_outstanding", r.peers.Len()) + queue.retry(height) + // As we are fetching blocks backwards, if this node doesn't have the block it likely doesn't + // have any prior ones, thus we remove it from the peer list. + continue + } + // once the peer has returned a value, add it back to the peer list to be used again + r.peers.Append(peer) + if errors.Is(err, context.Canceled) { + return + } // run a validate basic. This checks the validator set and commit // hashes line up @@ -580,6 +538,7 @@ func (r *Reactor) backfill( NodeID: peer, Err: fmt.Errorf("received invalid light block: %w", err), }); serr != nil { + r.logger.Error("backfill: failed to send block error", "error", serr) return } continue @@ -1033,7 +992,7 @@ func (r *Reactor) processPeerUp(ctx context.Context, peerUpdate p2p.PeerUpdate) r.peers.Append(peerUpdate.NodeID) } else { - r.logger.Error("could not use peer for statesync", "peer", peerUpdate.NodeID) + r.logger.Warn("could not use peer for statesync", "peer", peerUpdate.NodeID) } newProvider := NewBlockProvider(peerUpdate.NodeID, r.chainID, r.dispatcher) @@ -1055,7 +1014,7 @@ func (r *Reactor) processPeerUp(ctx context.Context, peerUpdate p2p.PeerUpdate) } } -func (r *Reactor) processPeerDown(ctx context.Context, peerUpdate p2p.PeerUpdate) { +func (r *Reactor) processPeerDown(_ctx context.Context, peerUpdate p2p.PeerUpdate) { r.peers.Remove(peerUpdate.NodeID) syncer := r.getSyncer() if syncer != nil { diff --git a/internal/statesync/reactor_test.go b/internal/statesync/reactor_test.go index 17a6895c1a..8178f313f2 100644 --- a/internal/statesync/reactor_test.go +++ b/internal/statesync/reactor_test.go @@ -154,7 +154,7 @@ func setup( rts.privVal = types.NewMockPV() rts.dashcoreClient = dashcore.NewMockClient(chainID, llmqType, rts.privVal, false) - chCreator := func(ctx context.Context, desc *p2p.ChannelDescriptor) (p2p.Channel, error) { + chCreator := func(_ctx context.Context, desc *p2p.ChannelDescriptor) (p2p.Channel, error) { switch desc.ID { case SnapshotChannel: return rts.snapshotChannel, nil @@ -261,7 +261,7 @@ func TestReactor_Sync(t *testing.T) { appHash := []byte{1, 2, 3} - go handleLightBlockRequests(ctx, t, chain, rts.blockOutCh, rts.blockInCh, closeCh, 0) + go handleLightBlockRequests(ctx, t, chain, rts.blockOutCh, rts.blockInCh, closeCh, 0, 0) go graduallyAddPeers(ctx, 
t, rts.peerUpdateCh, closeCh, 1*time.Second) go handleSnapshotRequests(ctx, t, rts.snapshotOutCh, rts.snapshotInCh, closeCh, []snapshot{ { @@ -549,7 +549,7 @@ func TestReactor_BlockProviders(t *testing.T) { defer close(closeCh) chain := buildLightBlockChain(ctx, t, 1, 10, time.Now(), rts.privVal) - go handleLightBlockRequests(ctx, t, chain, rts.blockOutCh, rts.blockInCh, closeCh, 0) + go handleLightBlockRequests(ctx, t, chain, rts.blockOutCh, rts.blockInCh, closeCh, 0, 0) peers := rts.reactor.peers.All() require.Len(t, peers, 2) @@ -608,7 +608,7 @@ func TestReactor_StateProviderP2P(t *testing.T) { defer close(closeCh) chain := buildLightBlockChain(ctx, t, 1, 10, time.Now(), rts.privVal) - go handleLightBlockRequests(ctx, t, chain, rts.blockOutCh, rts.blockInCh, closeCh, 0) + go handleLightBlockRequests(ctx, t, chain, rts.blockOutCh, rts.blockInCh, closeCh, 0, 0) go handleConsensusParamsRequest(ctx, t, rts.paramsOutCh, rts.paramsInCh, closeCh) rts.reactor.cfg.UseP2P = true @@ -689,7 +689,7 @@ func TestReactor_Backfill(t *testing.T) { for _, tc := range testCases { t.Run(fmt.Sprintf("failure rate: %d", tc.failureRate), func(t *testing.T) { - ctx, cancel := context.WithCancel(ctx) + ctx, cancel := context.WithTimeout(ctx, 5*time.Second) defer cancel() t.Cleanup(leaktest.CheckTimeout(t, 1*time.Minute)) @@ -716,7 +716,7 @@ func TestReactor_Backfill(t *testing.T) { mock.AnythingOfType("int64"), mock.AnythingOfType("*types.ValidatorSet")). Maybe(). - Return(func(lh, uh int64, vals *types.ValidatorSet) error { + Return(func(lh, uh int64, _vals *types.ValidatorSet) error { require.Equal(t, trackingHeight, lh) require.Equal(t, lh, uh) require.GreaterOrEqual(t, lh, stopHeight) @@ -728,7 +728,7 @@ func TestReactor_Backfill(t *testing.T) { closeCh := make(chan struct{}) defer close(closeCh) - go handleLightBlockRequests(ctx, t, chain, rts.blockOutCh, rts.blockInCh, closeCh, tc.failureRate) + go handleLightBlockRequests(ctx, t, chain, rts.blockOutCh, rts.blockInCh, closeCh, tc.failureRate, uint64(stopHeight)) err := rts.reactor.backfill( ctx, @@ -775,6 +775,19 @@ func retryUntil(ctx context.Context, t *testing.T, fn func() bool, timeout time. } } +// handleLightBlockRequests will handle light block requests and respond with the appropriate light block +// based on the height of the request. It will also simulate failures based on the failure rate. +// The function will return when the context is done. +// # Arguments +// * `ctx` - the context +// * `t` - the testing.T instance +// * `chain` - the light block chain +// * `receiving` - the channel to receive requests +// * `sending` - the channel to send responses +// * `close` - the channel to close the function +// * `failureRate` - the rate of failure +// * `stopHeight` - minimum height for which to respond; below this height, the function will not respond to requests, +// causing timeouts. Use 0 to disable this mechanism. 
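// Illustrative usage sketch (mirrors the call sites elsewhere in this file; `rts`,
// `chain` and `closeCh` are the test fixtures set up above): simulate a failure for
// roughly two of every ten requests and ignore anything below height 5, so the
// backfill timeout path is exercised:
//
//	go handleLightBlockRequests(ctx, t, chain, rts.blockOutCh, rts.blockInCh, closeCh, 2, 5)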
func handleLightBlockRequests( ctx context.Context, t *testing.T, @@ -782,7 +795,9 @@ func handleLightBlockRequests( receiving chan p2p.Envelope, sending chan p2p.Envelope, close chan struct{}, - failureRate int) { + failureRate int, + stopHeight uint64, +) { requests := 0 errorCount := 0 for { @@ -791,6 +806,12 @@ func handleLightBlockRequests( return case envelope := <-receiving: if msg, ok := envelope.Message.(*ssproto.LightBlockRequest); ok { + if msg.Height < stopHeight { + // this causes timeout; needed for backfill tests + // to ensure heights below stopHeight are not processed + // before all heights above stopHeight are processed + continue + } if requests%10 >= failureRate { lb, err := chain[int64(msg.Height)].ToProto() require.NoError(t, err) @@ -901,7 +922,7 @@ func mockLB(ctx context.Context, t *testing.T, height int64, time time.Time, las StateID: stateID.Hash(), } voteSet := types.NewVoteSet(factory.DefaultTestChainID, height, 0, tmproto.PrecommitType, currentVals) - commit, err := factory.MakeCommit(ctx, lastBlockID, height, 0, voteSet, currentVals, currentPrivVals, stateID) + commit, err := factory.MakeCommit(ctx, lastBlockID, height, 0, voteSet, currentVals, currentPrivVals) require.NoError(t, err) return nextVals, nextPrivVals, &types.LightBlock{ SignedHeader: &types.SignedHeader{ diff --git a/internal/statesync/stateprovider.go b/internal/statesync/stateprovider.go index 2d98e82c51..f30688c735 100644 --- a/internal/statesync/stateprovider.go +++ b/internal/statesync/stateprovider.go @@ -246,9 +246,6 @@ func (s *stateProviderP2P) AppHash(ctx context.Context, height uint64) (tmbytes. if err != nil { return nil, err } - if err != nil { - return nil, err - } return header.AppHash, nil } diff --git a/internal/store/store.go b/internal/store/store.go index 04d3d8ccdc..02faa0e519 100644 --- a/internal/store/store.go +++ b/internal/store/store.go @@ -545,7 +545,7 @@ func (bs *BlockStore) saveBlockPart(height int64, index int, part *types.Part, b } // SaveSeenCommit saves a seen commit, used by e.g. the state sync reactor when bootstrapping node. -func (bs *BlockStore) SaveSeenCommit(height int64, seenCommit *types.Commit) error { +func (bs *BlockStore) SaveSeenCommit(seenCommit *types.Commit) error { pbc := seenCommit.ToProto() seenCommitBytes, err := proto.Marshal(pbc) if err != nil { diff --git a/internal/store/store_test.go b/internal/store/store_test.go index 1b03e7ef25..f7dbbaa844 100644 --- a/internal/store/store_test.go +++ b/internal/store/store_test.go @@ -7,7 +7,6 @@ import ( "runtime/debug" "strings" "testing" - "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -25,8 +24,8 @@ import ( "github.com/dashpay/tenderdash/version" ) -// make a Commit with a single vote containing just the height and a timestamp -func makeTestCommit(state sm.State, height int64, timestamp time.Time) *types.Commit { +// make a Commit with a single vote containing just the height +func makeTestCommit(state sm.State, height int64) *types.Commit { blockID := types.BlockID{ Hash: []byte(""), PartSetHeader: types.PartSetHeader{ @@ -53,7 +52,7 @@ func makeTestCommit(state sm.State, height int64, timestamp time.Time) *types.Co _ = privVal.SignVote(context.Background(), "chainID", state.Validators.QuorumType, state.Validators.QuorumHash, g, nil) goodVote.BlockSignature = g.BlockSignature - goodVote.VoteExtensions = types.VoteExtensionsFromProto(g.VoteExtensions) + goodVote.VoteExtensions = types.VoteExtensionsFromProto(g.VoteExtensions...) 
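// Note on the updated APIs used just below (inferred from the call sites in this
// diff, with placeholder names; a sketch, not the exact code that follows):
// types.VoteExtensionsFromProto is now variadic, and types.NewCommit takes the
// vote extensions as an extra argument ahead of the CommitSigns, roughly:
//
//	exts := types.VoteExtensionsFromProto(protoVote.VoteExtensions...)
//	commit := types.NewCommit(height, round, blockID, exts,
//		&types.CommitSigns{QuorumSigns: *thresholdSigns, QuorumHash: quorumHash})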
thresholdSigns, _ := types.NewSignsRecoverer([]*types.Vote{goodVote}).Recover() return types.NewCommit(height, 0, @@ -62,6 +61,7 @@ func makeTestCommit(state sm.State, height int64, timestamp time.Time) *types.Co PartSetHeader: types.PartSetHeader{Hash: []byte(""), Total: 2}, StateID: []byte{}, }, + goodVote.VoteExtensions, &types.CommitSigns{ QuorumSigns: *thresholdSigns, QuorumHash: crypto.RandQuorumHash(), @@ -105,7 +105,7 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) { require.NoError(t, err) part2 := validPartSet.GetPart(1) - seenCommit := makeTestCommit(state, block.Header.Height, tmtime.Now()) + seenCommit := makeTestCommit(state, block.Header.Height) bs.SaveBlock(block, validPartSet, seenCommit) require.EqualValues(t, 1, bs.Base(), "expecting the new height to be changed") require.EqualValues(t, block.Header.Height, bs.Height(), "expecting the new height to be changed") @@ -125,7 +125,7 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) { } // End of setup, test data - commitAtH10 := makeTestCommit(state, 10, tmtime.Now()) + commitAtH10 := makeTestCommit(state, 10) tuples := []struct { block *types.Block parts *types.PartSet @@ -159,17 +159,17 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) { Time: tmtime.Now(), ValidatorsHash: tmrand.Bytes(crypto.DefaultHashSize), ProposerProTxHash: tmrand.Bytes(crypto.DefaultHashSize)}, - makeTestCommit(state, 5, tmtime.Now()), + makeTestCommit(state, 5), ), parts: validPartSet, - seenCommit: makeTestCommit(state, 5, tmtime.Now()), + seenCommit: makeTestCommit(state, 5), }, { block: newBlock(header1, commitAtH10), parts: incompletePartSet, wantPanic: "only save complete block", // incomplete parts - seenCommit: makeTestCommit(state, 10, tmtime.Now()), + seenCommit: makeTestCommit(state, 10), }, { @@ -315,7 +315,7 @@ func TestLoadBaseMeta(t *testing.T) { require.NoError(t, err) partSet, err := block.MakePartSet(2) require.NoError(t, err) - seenCommit := makeTestCommit(state, h, tmtime.Now()) + seenCommit := makeTestCommit(state, h) bs.SaveBlock(block, partSet, seenCommit) } @@ -396,7 +396,7 @@ func TestPruneBlocks(t *testing.T) { require.NoError(t, err) partSet, err := block.MakePartSet(2) require.NoError(t, err) - seenCommit := makeTestCommit(state, h, tmtime.Now()) + seenCommit := makeTestCommit(state, h) bs.SaveBlock(block, partSet, seenCommit) } @@ -503,7 +503,7 @@ func TestBlockFetchAtHeight(t *testing.T) { partSet, err := block.MakePartSet(2) require.NoError(t, err) - seenCommit := makeTestCommit(state, block.Header.Height, tmtime.Now()) + seenCommit := makeTestCommit(state, block.Header.Height) bs.SaveBlock(block, partSet, seenCommit) require.Equal(t, bs.Height(), block.Header.Height, "expecting the new height to be changed") @@ -542,12 +542,12 @@ func TestSeenAndCanonicalCommit(t *testing.T) { // are persisted. 
for h := int64(3); h <= 5; h++ { state.LastBlockHeight = h - 1 - blockCommit := makeTestCommit(state, h-1, tmtime.Now()) + blockCommit := makeTestCommit(state, h-1) block, err := factory.MakeBlock(state, h, blockCommit, 0) require.NoError(t, err) partSet, err := block.MakePartSet(2) require.NoError(t, err) - seenCommit := makeTestCommit(state, h, tmtime.Now()) + seenCommit := makeTestCommit(state, h) store.SaveBlock(block, partSet, seenCommit) c3 := store.LoadSeenCommit() require.NotNil(t, c3) diff --git a/internal/test/factory/commit.go b/internal/test/factory/commit.go index 80c52ea7e8..d93898552d 100644 --- a/internal/test/factory/commit.go +++ b/internal/test/factory/commit.go @@ -15,7 +15,6 @@ func MakeCommit( voteSet *types.VoteSet, validatorSet *types.ValidatorSet, validators []types.PrivValidator, - stateID tmproto.StateID, ) (*types.Commit, error) { // all sign for i := 0; i < len(validators); i++ { @@ -38,10 +37,12 @@ func MakeCommit( return nil, err } vote.BlockSignature = v.BlockSignature - err = vote.VoteExtensions.CopySignsFromProto(v.VoteExtensionsToMap()) + + err = vote.VoteExtensions.CopySignsFromProto(v.VoteExtensions) if err != nil { return nil, err } + if _, err := voteSet.AddVote(vote); err != nil { return nil, err } diff --git a/internal/test/factory/params.go b/internal/test/factory/params.go index dcbe137a2e..b4d73727c4 100644 --- a/internal/test/factory/params.go +++ b/internal/test/factory/params.go @@ -11,12 +11,10 @@ import ( func ConsensusParams(opts ...func(*types.ConsensusParams)) *types.ConsensusParams { c := types.DefaultConsensusParams() c.Timeout = types.TimeoutParams{ - Commit: 10 * time.Millisecond, - Propose: 40 * time.Millisecond, - ProposeDelta: 1 * time.Millisecond, - Vote: 10 * time.Millisecond, - VoteDelta: 1 * time.Millisecond, - BypassCommitTimeout: true, + Propose: 40 * time.Millisecond, + ProposeDelta: 1 * time.Millisecond, + Vote: 10 * time.Millisecond, + VoteDelta: 1 * time.Millisecond, } for _, opt := range opts { opt(c) diff --git a/internal/test/factory/vote.go b/internal/test/factory/vote.go index 40c0a33c40..7a5110858f 100644 --- a/internal/test/factory/vote.go +++ b/internal/test/factory/vote.go @@ -39,8 +39,10 @@ func MakeVote( } v.BlockSignature = vpb.BlockSignature - err = v.VoteExtensions.CopySignsFromProto(vpb.VoteExtensionsToMap()) + + err = v.VoteExtensions.CopySignsFromProto(vpb.VoteExtensions) if err != nil { + return nil, err } return v, nil diff --git a/libs/bits/bit_array.go b/libs/bits/bit_array.go index 5083a36d59..41ef071769 100644 --- a/libs/bits/bit_array.go +++ b/libs/bits/bit_array.go @@ -269,7 +269,7 @@ func (bA *BitArray) PickRandom() (int, bool) { // rand.New(rand.NewSeed(time.Now().Unix())).Intn() to // counteract this possibility if it proved to be material. 
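// For reference, the mitigation suggested in the comment above written out; note
// that the standard-library constructor is rand.NewSource, not rand.NewSeed
// (illustrative sketch only, not part of this change):
//
//	r := rand.New(rand.NewSource(time.Now().UnixNano()))
//	return trueIndices[r.Intn(len(trueIndices))], true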
// - // nolint:gosec // G404: Use of weak random number generator + //nolint:gosec // G404: Use of weak random number generator return trueIndices[rand.Intn(len(trueIndices))], true } diff --git a/libs/bits/bit_array_test.go b/libs/bits/bit_array_test.go index fb7af5dd16..e5f8982984 100644 --- a/libs/bits/bit_array_test.go +++ b/libs/bits/bit_array_test.go @@ -189,7 +189,7 @@ func TestEmptyFull(t *testing.T) { } } -func TestUpdateNeverPanics(t *testing.T) { +func TestUpdateNeverPanics(_t *testing.T) { newRandBitArray := func(n int) *BitArray { return randBitArray(n) } pairs := []struct { a, b *BitArray @@ -208,7 +208,7 @@ func TestUpdateNeverPanics(t *testing.T) { } } -func TestNewBitArrayNeverCrashesOnNegatives(t *testing.T) { +func TestNewBitArrayNeverCrashesOnNegatives(_t *testing.T) { bitList := []int{-127, -128, -1 << 31} for _, bits := range bitList { _ = NewBitArray(bits) diff --git a/libs/cli/setup.go b/libs/cli/setup.go index 54ea90358e..63734dd4c0 100644 --- a/libs/cli/setup.go +++ b/libs/cli/setup.go @@ -68,7 +68,7 @@ func concatCobraCmdFuncs(fs ...cobraCmdFunc) cobraCmdFunc { } // Bind all flags and read the config into viper -func BindFlagsLoadViper(cmd *cobra.Command, args []string) error { +func BindFlagsLoadViper(cmd *cobra.Command, _args []string) error { // cmd.Flags() includes flags from this command and all persistent flags from the parent if err := viper.BindPFlags(cmd.Flags()); err != nil { return err diff --git a/libs/ds/ordered_map.go b/libs/ds/ordered_map.go index e7be78ebf0..fa1297c6c0 100644 --- a/libs/ds/ordered_map.go +++ b/libs/ds/ordered_map.go @@ -68,12 +68,15 @@ func (m *OrderedMap[T, V]) Delete(key T) { if !ok { return } - i++ - for ; i < len(m.values); i++ { - m.values[i-1] = m.values[i] - } delete(m.keys, key) + + m.values = append(m.values[:i], m.values[i+1:]...) 
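	// Removing values[i] shifts every later element one slot to the left, so the
	// index stored for each key that pointed beyond i must be decremented as well;
	// the loop below keeps m.keys consistent with m.values.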
m.len-- + for k, v := range m.keys { + if v > i { + m.keys[k] = v - 1 + } + } } // Values returns all values in the map diff --git a/libs/ds/ordered_map_test.go b/libs/ds/ordered_map_test.go index 3ed15647a5..a1c011e8f6 100644 --- a/libs/ds/ordered_map_test.go +++ b/libs/ds/ordered_map_test.go @@ -5,6 +5,7 @@ import ( "sync" "testing" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -59,3 +60,25 @@ func TestOrderedMapMultithread(t *testing.T) { wg.Wait() } + +func TestOrderedMapDelete(t *testing.T) { + m := NewOrderedMap[int, int]() + m.Put(1, 1) + m.Put(2, 2) + m.Put(3, 3) + m.Delete(2) + keys := m.Keys() + if len(keys) != 2 { + t.Errorf("Expected 2 keys, got %d", len(keys)) + } + if keys[0] != 1 && keys[1] != 3 { + t.Errorf("Expected keys [1, 3], got %v", keys) + } + v1, ok := m.Get(1) + assert.Equal(t, v1, 1) + assert.True(t, ok) + + v3, ok := m.Get(3) + assert.Equal(t, v3, 3) + assert.True(t, ok) +} diff --git a/libs/log/default.go b/libs/log/default.go index 6a57b35502..ade3d0d5fd 100644 --- a/libs/log/default.go +++ b/libs/log/default.go @@ -88,6 +88,10 @@ func (l defaultLogger) Info(msg string, keyVals ...interface{}) { l.Logger.Info().Fields(getLogFields(keyVals...)).Msg(msg) } +func (l defaultLogger) Warn(msg string, keyVals ...interface{}) { + l.Logger.Warn().Fields(getLogFields(keyVals...)).Msg(msg) +} + func (l defaultLogger) Error(msg string, keyVals ...interface{}) { l.Logger.Error().Fields(getLogFields(keyVals...)).Msg(msg) } diff --git a/libs/log/logger.go b/libs/log/logger.go index ccad9e0742..3caf0991c1 100644 --- a/libs/log/logger.go +++ b/libs/log/logger.go @@ -34,9 +34,17 @@ const ( type Logger interface { io.Closer + // Trace events are logged from the primary path of execution (eg. when everything goes well) Trace(msg string, keyVals ...interface{}) + // Debug events are used in non-primary path to provide fine-grained information useful for debugging Debug(msg string, keyVals ...interface{}) + // Info events provide general, business-level information about what's happening inside the application, to + // let the user know what the application is doing Info(msg string, keyVals ...interface{}) + // Warn events are used when something unexpected happened, but the application can recover/continue + Warn(msg string, keyVals ...interface{}) + // Error events are used when something unexpected happened and the application cannot recover, or the + // issue is serious and the user needs to take action Error(msg string, keyVals ...interface{}) With(keyVals ...interface{}) Logger diff --git a/libs/log/testing.go b/libs/log/testing.go index e356163255..f25e3c5815 100644 --- a/libs/log/testing.go +++ b/libs/log/testing.go @@ -70,6 +70,20 @@ type TestingLogger struct { assertions []assertion } +// WithTimestamp returns a new TestingLogger with timestamp enabled. +func (tw *TestingLogger) WithTimestamp() *TestingLogger { + l := TestingLogger{ + t: tw.t, + assertions: tw.assertions, + defaultLogger: defaultLogger{ + Logger: tw.defaultLogger.Logger.With().Timestamp().Logger(), + closeFuncs: tw.defaultLogger.closeFuncs, + }, + } + + return &l +} + type assertion struct { match regexp.Regexp passed bool @@ -104,6 +118,16 @@ func (tw *TestingLogger) AssertMatch(re *regexp.Regexp) { tw.Logger = tw.Logger.Level(zerolog.DebugLevel) } +// AssertContains defines assertions to check for each subsequent +// log item. It must be called before the log is generated. +// Assertion will pass if at least one log contains `s`. 
+// +// Note that assertions are only executed on logs matching defined log level. +// Use NewTestingLoggerWithLevel(t, zerolog.LevelDebugValue) to control this. +func (tw *TestingLogger) AssertContains(s string) { + tw.AssertMatch(regexp.MustCompile(regexp.QuoteMeta(s))) +} + // Run implements zerolog.Hook. // Execute all log assertions against a message. func (tw *TestingLogger) checkAssertions(msg string) { diff --git a/libs/os/os.go b/libs/os/os.go index 3d74c22082..fda66fc9ab 100644 --- a/libs/os/os.go +++ b/libs/os/os.go @@ -48,3 +48,8 @@ func CopyFile(src, dst string) error { _, err = io.Copy(dstfile, srcfile) return err } + +// NoopCloser is a no-op io.Closer. +type NoopCloser struct{} + +func (NoopCloser) Close() error { return nil } diff --git a/libs/rand/random.go b/libs/rand/random.go index 6b486a7fdf..032be40f74 100644 --- a/libs/rand/random.go +++ b/libs/rand/random.go @@ -44,7 +44,7 @@ func buildString(length int, picker func() int64) string { func Bytes(n int) []byte { bs := make([]byte, n) for i := 0; i < len(bs); i++ { - // nolint:gosec // G404: Use of weak random number generator + //nolint:gosec // G404: Use of weak random number generator bs[i] = byte(mrand.Int() & 0xFF) } return bs diff --git a/libs/store/mocks/store.go b/libs/store/mocks/store.go index aa0d3779b8..f8d1989f64 100644 --- a/libs/store/mocks/store.go +++ b/libs/store/mocks/store.go @@ -16,6 +16,10 @@ type Store[K comparable, V interface{}] struct { func (_m *Store[K, V]) All() []V { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for All") + } + var r0 []V if rf, ok := ret.Get(0).(func() []V); ok { r0 = rf() @@ -37,6 +41,10 @@ func (_m *Store[K, V]) Delete(key K) { func (_m *Store[K, V]) Get(key K) (V, bool) { ret := _m.Called(key) + if len(ret) == 0 { + panic("no return value specified for Get") + } + var r0 V var r1 bool if rf, ok := ret.Get(0).(func(K) (V, bool)); ok { @@ -61,6 +69,10 @@ func (_m *Store[K, V]) Get(key K) (V, bool) { func (_m *Store[K, V]) GetAndDelete(key K) (V, bool) { ret := _m.Called(key) + if len(ret) == 0 { + panic("no return value specified for GetAndDelete") + } + var r0 V var r1 bool if rf, ok := ret.Get(0).(func(K) (V, bool)); ok { @@ -85,6 +97,10 @@ func (_m *Store[K, V]) GetAndDelete(key K) (V, bool) { func (_m *Store[K, V]) IsZero() bool { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for IsZero") + } + var r0 bool if rf, ok := ret.Get(0).(func() bool); ok { r0 = rf() @@ -99,6 +115,10 @@ func (_m *Store[K, V]) IsZero() bool { func (_m *Store[K, V]) Len() int { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Len") + } + var r0 int if rf, ok := ret.Get(0).(func() int); ok { r0 = rf() @@ -118,6 +138,10 @@ func (_m *Store[K, V]) Put(key K, data V) { func (_m *Store[K, V]) Query(spec store.QueryFunc[K, V], limit int) []V { ret := _m.Called(spec, limit) + if len(ret) == 0 { + panic("no return value specified for Query") + } + var r0 []V if rf, ok := ret.Get(0).(func(store.QueryFunc[K, V], int) []V); ok { r0 = rf(spec, limit) diff --git a/light/client.go b/light/client.go index d31157f51e..e2e10487ec 100644 --- a/light/client.go +++ b/light/client.go @@ -467,7 +467,7 @@ func (c *Client) VerifyHeader(ctx context.Context, newHeader *types.Header, now return c.verifyLightBlock(ctx, l, now) } -func (c *Client) verifyLightBlock(ctx context.Context, newLightBlock *types.LightBlock, now time.Time) error { +func (c *Client) verifyLightBlock(ctx context.Context, newLightBlock *types.LightBlock, _now 
time.Time) error { c.logger.Info("verify light block", "height", newLightBlock.Height, "hash", newLightBlock.Hash()) if err := newLightBlock.ValidateBasic(c.ChainID()); err != nil { @@ -514,7 +514,7 @@ func (c *Client) verifyBlockWithDashCore(ctx context.Context, newLightBlock *typ return nil } -func (c *Client) verifyBlockSignatureWithDashCore(ctx context.Context, newLightBlock *types.LightBlock) error { +func (c *Client) verifyBlockSignatureWithDashCore(_ctx context.Context, newLightBlock *types.LightBlock) error { quorumHash := newLightBlock.ValidatorSet.QuorumHash quorumType := newLightBlock.ValidatorSet.QuorumType @@ -878,7 +878,7 @@ func (c *Client) compareFirstHeaderWithWitnesses(ctx context.Context, h *types.S return c.removeWitnesses(witnessesToRemove) } -func (c *Client) Status(ctx context.Context) *types.LightClientInfo { +func (c *Client) Status(_ctx context.Context) *types.LightClientInfo { chunks := make([]string, len(c.witnesses)) // If primary is in witness list we do not want to count it twice in the number of peers diff --git a/light/client_benchmark_test.go b/light/client_benchmark_test.go index d4fbf86fc5..09bf34ac06 100644 --- a/light/client_benchmark_test.go +++ b/light/client_benchmark_test.go @@ -45,7 +45,7 @@ func newProviderBenchmarkImpl(headers map[int64]*types.SignedHeader, return &impl } -func (impl *providerBenchmarkImpl) LightBlock(ctx context.Context, height int64) (*types.LightBlock, error) { +func (impl *providerBenchmarkImpl) LightBlock(_ctx context.Context, height int64) (*types.LightBlock, error) { if height == 0 { return impl.blocks[impl.currentHeight], nil } diff --git a/light/helpers_test.go b/light/helpers_test.go index 4b68cfc953..9ff02e0ca0 100644 --- a/light/helpers_test.go +++ b/light/helpers_test.go @@ -87,7 +87,7 @@ func (pkz privKeys) signHeader(t testing.TB, header *types.Header, valSet *types QuorumSigns: *thresholdSigns, QuorumHash: valSet.QuorumHash, } - return types.NewCommit(header.Height, 1, blockID, quorumSigns) + return types.NewCommit(header.Height, 1, blockID, votes[0].VoteExtensions, quorumSigns) } func makeVote(t testing.TB, header *types.Header, valset *types.ValidatorSet, proTxHash crypto.ProTxHash, diff --git a/light/provider/mocks/provider.go b/light/provider/mocks/provider.go index 93042ab3dc..24cd675bf9 100644 --- a/light/provider/mocks/provider.go +++ b/light/provider/mocks/provider.go @@ -19,6 +19,10 @@ type Provider struct { func (_m *Provider) ID() string { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for ID") + } + var r0 string if rf, ok := ret.Get(0).(func() string); ok { r0 = rf() @@ -33,6 +37,10 @@ func (_m *Provider) ID() string { func (_m *Provider) LightBlock(ctx context.Context, height int64) (*types.LightBlock, error) { ret := _m.Called(ctx, height) + if len(ret) == 0 { + panic("no return value specified for LightBlock") + } + var r0 *types.LightBlock var r1 error if rf, ok := ret.Get(0).(func(context.Context, int64) (*types.LightBlock, error)); ok { @@ -59,6 +67,10 @@ func (_m *Provider) LightBlock(ctx context.Context, height int64) (*types.LightB func (_m *Provider) ReportEvidence(_a0 context.Context, _a1 types.Evidence) error { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for ReportEvidence") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, types.Evidence) error); ok { r0 = rf(_a0, _a1) diff --git a/light/proxy/routes.go b/light/proxy/routes.go index e00091dc70..3bc23f1251 100644 --- a/light/proxy/routes.go +++ 
b/light/proxy/routes.go @@ -15,7 +15,7 @@ type proxyService struct { Client *lrpc.Client } -func (p proxyService) ABCIInfo(ctx context.Context) (*coretypes.ResultABCIInfo, error) { panic("ok") } +func (p proxyService) ABCIInfo(_ctx context.Context) (*coretypes.ResultABCIInfo, error) { panic("ok") } func (p proxyService) ABCIQuery(ctx context.Context, req *coretypes.RequestABCIQuery) (*coretypes.ResultABCIQuery, error) { return p.Client.ABCIQueryWithOptions(ctx, req.Path, req.Data, rpcclient.ABCIQueryOptions{ diff --git a/light/rpc/mocks/light_client.go b/light/rpc/mocks/light_client.go index deb22f7d2a..8d53cd4f91 100644 --- a/light/rpc/mocks/light_client.go +++ b/light/rpc/mocks/light_client.go @@ -21,6 +21,10 @@ type LightClient struct { func (_m *LightClient) ChainID() string { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for ChainID") + } + var r0 string if rf, ok := ret.Get(0).(func() string); ok { r0 = rf() @@ -35,6 +39,10 @@ func (_m *LightClient) ChainID() string { func (_m *LightClient) Status(ctx context.Context) *types.LightClientInfo { ret := _m.Called(ctx) + if len(ret) == 0 { + panic("no return value specified for Status") + } + var r0 *types.LightClientInfo if rf, ok := ret.Get(0).(func(context.Context) *types.LightClientInfo); ok { r0 = rf(ctx) @@ -51,6 +59,10 @@ func (_m *LightClient) Status(ctx context.Context) *types.LightClientInfo { func (_m *LightClient) TrustedLightBlock(height int64) (*types.LightBlock, error) { ret := _m.Called(height) + if len(ret) == 0 { + panic("no return value specified for TrustedLightBlock") + } + var r0 *types.LightBlock var r1 error if rf, ok := ret.Get(0).(func(int64) (*types.LightBlock, error)); ok { @@ -77,6 +89,10 @@ func (_m *LightClient) TrustedLightBlock(height int64) (*types.LightBlock, error func (_m *LightClient) Update(ctx context.Context, now time.Time) (*types.LightBlock, error) { ret := _m.Called(ctx, now) + if len(ret) == 0 { + panic("no return value specified for Update") + } + var r0 *types.LightBlock var r1 error if rf, ok := ret.Get(0).(func(context.Context, time.Time) (*types.LightBlock, error)); ok { @@ -103,6 +119,10 @@ func (_m *LightClient) Update(ctx context.Context, now time.Time) (*types.LightB func (_m *LightClient) VerifyLightBlockAtHeight(ctx context.Context, height int64, now time.Time) (*types.LightBlock, error) { ret := _m.Called(ctx, height, now) + if len(ret) == 0 { + panic("no return value specified for VerifyLightBlockAtHeight") + } + var r0 *types.LightBlock var r1 error if rf, ok := ret.Get(0).(func(context.Context, int64, time.Time) (*types.LightBlock, error)); ok { diff --git a/light/store/db/db_test.go b/light/store/db/db_test.go index a0742c5af6..16be663975 100644 --- a/light/store/db/db_test.go +++ b/light/store/db/db_test.go @@ -193,7 +193,7 @@ func Test_Concurrency(t *testing.T) { wg.Wait() } -func randLightBlock(ctx context.Context, t *testing.T, height int64) *types.LightBlock { +func randLightBlock(_ctx context.Context, t *testing.T, height int64) *types.LightBlock { t.Helper() vals, _ := types.RandValidatorSet(2) return &types.LightBlock{ diff --git a/node/node.go b/node/node.go index b46ec76b08..67292f777e 100644 --- a/node/node.go +++ b/node/node.go @@ -101,7 +101,8 @@ func newDefaultNode( ) } - appClient, _, err := proxy.ClientFactory(logger, cfg.ProxyApp, cfg.ABCI, cfg.DBDir()) + logger.Debug("Loaded ABCI config", "config", cfg.Abci) + appClient, _, err := proxy.ClientFactory(logger, *cfg.Abci, cfg.DBDir()) if err != nil { return nil, err } @@ -305,7 +306,9 
@@ func makeNode( p2p.ChannelDescriptors(cfg), node.router.OpenChannel, p2pclient.WithLogger(logger), + p2pclient.WithSendRateLimits(p2pclient.NewRateLimit(ctx, cfg.Mempool.TxSendRateLimit, false, logger), p2p.MempoolChannel), ) + evReactor, evPool, edbCloser, err := createEvidenceReactor(logger, cfg, dbProvider, stateStore, blockStore, peerManager.Subscribe, node.router.OpenChannel, nodeMetrics.evidence, eventBus) closers = append(closers, edbCloser) diff --git a/node/node_test.go b/node/node_test.go index 4376202e41..2d085c33f3 100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -378,7 +378,7 @@ func TestCreateProposalBlock(t *testing.T) { ) proposedAppVersion := uint64(1) - commit := types.NewCommit(height-1, 0, types.BlockID{}, nil) + commit := types.NewCommit(height-1, 0, types.BlockID{}, nil, nil) block, _, err := blockExec.CreateProposalBlock( ctx, height, @@ -466,7 +466,7 @@ func TestMaxTxsProposalBlockSize(t *testing.T) { eventBus, ) - commit := types.NewCommit(height-1, 0, types.BlockID{}, nil) + commit := types.NewCommit(height-1, 0, types.BlockID{}, nil, nil) block, _, err := blockExec.CreateProposalBlock( ctx, height, @@ -505,6 +505,7 @@ func TestMaxProposalBlockSize(t *testing.T) { app, err := kvstore.NewMemoryApp( kvstore.WithLogger(logger.With("module", "kvstore")), kvstore.WithState(math.MaxInt64-1, nil), + kvstore.WithAppVersion(math.MaxUint64), ) require.NoError(t, err) @@ -614,7 +615,7 @@ func TestMaxProposalBlockSize(t *testing.T) { require.Equal(t, types.MaxHeaderBytes, int64(pb.Header.Size())) require.Equal(t, types.MaxCommitOverheadBytes, int64(pb.LastCommit.Size())) // make sure that the block is less than the max possible size - assert.Equal(t, int64(1115+cfg.Mempool.MaxTxBytes), int64(pb.Size())) + assert.Equal(t, int64(1116+cfg.Mempool.MaxTxBytes), int64(pb.Size())) // because of the proto overhead we expect the part set bytes to be equal or // less than the pb block size assert.LessOrEqual(t, partSet.ByteSize(), int64(pb.Size())) @@ -780,7 +781,7 @@ func TestLoadStateFromGenesis(t *testing.T) { _ = loadStatefromGenesis(ctx, t) } -func loadStatefromGenesis(ctx context.Context, t *testing.T) sm.State { +func loadStatefromGenesis(_ctx context.Context, t *testing.T) sm.State { t.Helper() stateDB := dbm.NewMemDB() diff --git a/node/setup.go b/node/setup.go index f78d8f4f47..8d3dbf2bef 100644 --- a/node/setup.go +++ b/node/setup.go @@ -20,6 +20,8 @@ import ( "github.com/dashpay/tenderdash/internal/eventbus" "github.com/dashpay/tenderdash/internal/evidence" tmstrings "github.com/dashpay/tenderdash/internal/libs/strings" + tmsync "github.com/dashpay/tenderdash/internal/libs/sync" + "github.com/dashpay/tenderdash/internal/mempool" "github.com/dashpay/tenderdash/internal/p2p" "github.com/dashpay/tenderdash/internal/p2p/client" @@ -363,20 +365,20 @@ func makeNodeInfo( NodeID: nodeKey.ID, Network: genDoc.ChainID, Version: version.TMCoreSemVer, - Channels: []byte{ - byte(p2p.BlockSyncChannel), - byte(consensus.StateChannel), - byte(consensus.DataChannel), - byte(consensus.VoteChannel), - byte(consensus.VoteSetBitsChannel), - byte(p2p.MempoolChannel), - byte(evidence.EvidenceChannel), - byte(statesync.SnapshotChannel), - byte(statesync.ChunkChannel), - byte(statesync.LightBlockChannel), - byte(statesync.ParamsChannel), - byte(pex.PexChannel), - }, + Channels: tmsync.NewConcurrentSlice[uint16]( + uint16(p2p.BlockSyncChannel), + uint16(p2p.ConsensusStateChannel), + uint16(p2p.ConsensusDataChannel), + uint16(p2p.ConsensusVoteChannel), + uint16(p2p.VoteSetBitsChannel), + 
uint16(p2p.MempoolChannel), + uint16(evidence.EvidenceChannel), + uint16(statesync.SnapshotChannel), + uint16(statesync.ChunkChannel), + uint16(statesync.LightBlockChannel), + uint16(statesync.ParamsChannel), + uint16(pex.PexChannel), + ), Moniker: cfg.Moniker, Other: types.NodeInfoOther{ TxIndex: txIndexerStatus, @@ -405,13 +407,11 @@ func makeSeedNodeInfo( Block: state.Version.Consensus.Block, App: state.Version.Consensus.App, }, - NodeID: nodeKey.ID, - Network: genDoc.ChainID, - Version: version.TMCoreSemVer, - Channels: []byte{ - pex.PexChannel, - }, - Moniker: cfg.Moniker, + NodeID: nodeKey.ID, + Network: genDoc.ChainID, + Version: version.TMCoreSemVer, + Channels: tmsync.NewConcurrentSlice[uint16](pex.PexChannel), + Moniker: cfg.Moniker, Other: types.NodeInfoOther{ TxIndex: "off", RPCAddress: cfg.RPC.ListenAddress, diff --git a/privval/dash_consensus_key.go b/privval/dash_consensus_key.go index 5eb6a909d3..60a976c8bb 100644 --- a/privval/dash_consensus_key.go +++ b/privval/dash_consensus_key.go @@ -9,7 +9,7 @@ import ( "github.com/dashpay/dashd-go/btcjson" tmcrypto "github.com/dashpay/tenderdash/crypto" - tmbytes "github.com/dashpay/tenderdash/libs/bytes" + "github.com/dashpay/tenderdash/types" ) type dashConsensusPrivateKey struct { @@ -105,12 +105,7 @@ func (pub DashConsensusPublicKey) VerifySignature(msg []byte, sig []byte) bool { return pub.VerifySignatureDigest(hash, sig) } func (pub DashConsensusPublicKey) VerifySignatureDigest(hash []byte, sig []byte) bool { - signID := tmcrypto.SignID( - pub.quorumType, - tmbytes.Reverse(pub.quorumHash), - tmbytes.Reverse(hash[:]), - tmbytes.Reverse(hash[:]), - ) + signID := types.NewSignItemFromHash(pub.quorumType, pub.quorumHash, hash, hash).SignHash return pub.PubKey.VerifySignatureDigest(signID, sig) } diff --git a/privval/dash_core_mock_signer_server.go b/privval/dash_core_mock_signer_server.go index a096320dff..73faf3bdb9 100644 --- a/privval/dash_core_mock_signer_server.go +++ b/privval/dash_core_mock_signer_server.go @@ -17,7 +17,7 @@ type DashCoreMockSignerServer struct { } func NewDashCoreMockSignerServer( - endpoint *SignerDialerEndpoint, + _endpoint *SignerDialerEndpoint, chainID string, quorumHash crypto.QuorumHash, privVal types.PrivValidator, diff --git a/privval/dash_core_signer_client.go b/privval/dash_core_signer_client.go index 56185952b4..a2f183d169 100644 --- a/privval/dash_core_signer_client.go +++ b/privval/dash_core_signer_client.go @@ -171,11 +171,11 @@ func (sc *DashCoreSignerClient) GetPubKey(ctx context.Context, quorumHash crypto return bls12381.PubKey(decodedPublicKeyShare), nil } -func (sc *DashCoreSignerClient) GetFirstQuorumHash(ctx context.Context) (crypto.QuorumHash, error) { +func (sc *DashCoreSignerClient) GetFirstQuorumHash(_ctx context.Context) (crypto.QuorumHash, error) { return nil, errors.New("getFirstQuorumHash should not be called on a dash core signer client") } -func (sc *DashCoreSignerClient) GetThresholdPublicKey(ctx context.Context, quorumHash crypto.QuorumHash) (crypto.PubKey, error) { +func (sc *DashCoreSignerClient) GetThresholdPublicKey(_ctx context.Context, quorumHash crypto.QuorumHash) (crypto.PubKey, error) { if len(quorumHash.Bytes()) != crypto.DefaultHashSize { return nil, fmt.Errorf("quorum hash must be 32 bytes long if requesting public key from dash core") } @@ -200,11 +200,11 @@ func (sc *DashCoreSignerClient) GetThresholdPublicKey(ctx context.Context, quoru return bls12381.PubKey(decodedThresholdPublicKey), nil } -func (sc *DashCoreSignerClient) GetHeight(ctx context.Context, 
quorumHash crypto.QuorumHash) (int64, error) { +func (sc *DashCoreSignerClient) GetHeight(_ctx context.Context, quorumHash crypto.QuorumHash) (int64, error) { return 0, fmt.Errorf("getHeight should not be called on a dash core signer client %s", quorumHash.String()) } -func (sc *DashCoreSignerClient) GetProTxHash(ctx context.Context) (crypto.ProTxHash, error) { +func (sc *DashCoreSignerClient) GetProTxHash(_ctx context.Context) (crypto.ProTxHash, error) { if sc.cachedProTxHash != nil { return sc.cachedProTxHash, nil } @@ -275,10 +275,10 @@ func (sc *DashCoreSignerClient) SignVote( "voteType", protoVote.Type, "quorumType", quorumType, "quorumHash", quorumHash, - "signature", qs.sign, + "signature", hex.EncodeToString(qs.sign), "proTxHash", proTxHash, "coreBlockRequestId", qs.ID, - "coreSignId", tmbytes.Reverse(qs.signHash), + "coreSignId", hex.EncodeToString(tmbytes.Reverse(qs.signHash)), "signItem", quorumSigns, "signResult", qs, ) @@ -314,11 +314,8 @@ func (sc *DashCoreSignerClient) QuorumSign( quorumType btcjson.LLMQType, quorumHash crypto.QuorumHash, ) ([]byte, []byte, error) { - signItem := types.SignItem{ - ReqID: requestIDHash, - ID: types.MakeSignID(msgHash, requestIDHash, quorumType, quorumHash), - Hash: msgHash, - } + signItem := types.NewSignItemFromHash(quorumType, quorumHash, requestIDHash, msgHash) + qs, err := sc.quorumSignAndVerify(ctx, quorumType, quorumHash, signItem) if err != nil { return nil, nil, err @@ -327,16 +324,16 @@ func (sc *DashCoreSignerClient) QuorumSign( } func (sc *DashCoreSignerClient) UpdatePrivateKey( - ctx context.Context, - privateKey crypto.PrivKey, - quorumHash crypto.QuorumHash, - thresholdPublicKey crypto.PubKey, - height int64, + _ctx context.Context, + _privateKey crypto.PrivKey, + _quorumHash crypto.QuorumHash, + _thresholdPublicKey crypto.PubKey, + _height int64, ) { } -func (sc *DashCoreSignerClient) GetPrivateKey(ctx context.Context, quorumHash crypto.QuorumHash) (crypto.PrivKey, error) { +func (sc *DashCoreSignerClient) GetPrivateKey(_ctx context.Context, quorumHash crypto.QuorumHash) (crypto.PrivKey, error) { key := &dashConsensusPrivateKey{ quorumHash: quorumHash, quorumType: sc.defaultQuorumType, @@ -372,22 +369,27 @@ func (sc *DashCoreSignerClient) signVoteExtensions( protoVote *tmproto.Vote, quorumSignData types.QuorumSignData, ) error { + sc.logger.Trace("signing vote extensions", "vote", protoVote) + if protoVote.Type != tmproto.PrecommitType { if len(protoVote.VoteExtensions) > 0 { return errors.New("unexpected vote extension - extensions are only allowed in precommits") } return nil } - for et, extensions := range protoVote.VoteExtensionsToMap() { - for i, ext := range extensions { - signItem := quorumSignData.Extensions[et][i] - resp, err := sc.quorumSignAndVerify(ctx, quorumType, quorumHash, signItem) - if err != nil { - return err - } - ext.Signature = resp.sign + + for i, ext := range quorumSignData.VoteExtensionSignItems { + signItem := ext + resp, err := sc.quorumSignAndVerify(ctx, quorumType, quorumHash, signItem) + if err != nil { + return err } + + protoVote.VoteExtensions[i].Signature = resp.sign } + + sc.logger.Trace("vote extensions signed", "extensions", protoVote.VoteExtensions) + return nil } @@ -404,16 +406,16 @@ func (sc *DashCoreSignerClient) quorumSignAndVerify( sc.logger.Trace("quorum sign result", "sign", hex.EncodeToString(qs.sign), "sign_hash", hex.EncodeToString(qs.signHash), - "req_id", hex.EncodeToString(signItem.ReqID), - "id", hex.EncodeToString(signItem.ID), - "raw", hex.EncodeToString(signItem.Raw), - 
"hash", hex.EncodeToString(signItem.Hash), + "req_id", hex.EncodeToString(signItem.ID), + "id", hex.EncodeToString(signItem.SignHash), + "raw", hex.EncodeToString(signItem.Msg), + "hash", hex.EncodeToString(signItem.MsgHash), "quorum_sign_result", *qs.QuorumSignResult) pubKey, err := sc.GetPubKey(ctx, quorumHash) if err != nil { return nil, &RemoteSignerError{Code: 500, Description: err.Error()} } - verified := pubKey.VerifySignatureDigest(signItem.ID, qs.sign) + verified := pubKey.VerifySignatureDigest(signItem.SignHash, qs.sign) if !verified { return nil, fmt.Errorf("unable to verify signature with pubkey %s", pubKey.String()) } @@ -425,7 +427,7 @@ func (sc *DashCoreSignerClient) quorumSign( quorumHash crypto.QuorumHash, signItem types.SignItem, ) (*quorumSignResult, error) { - resp, err := sc.dashCoreRPCClient.QuorumSign(quorumType, signItem.ReqID, signItem.Hash, quorumHash) + resp, err := sc.dashCoreRPCClient.QuorumSign(quorumType, signItem.ID, signItem.MsgHash, quorumHash) if err != nil { return nil, &RemoteSignerError{Code: 500, Description: "cannot sign vote: " + err.Error()} } diff --git a/privval/file.go b/privval/file.go index 3f6492841f..29f15b5462 100644 --- a/privval/file.go +++ b/privval/file.go @@ -423,7 +423,7 @@ func LoadOrGenFilePV(keyFilePath, stateFilePath string) (*FilePV, error) { // GetPubKey returns the public key of the validator. // Implements PrivValidator. -func (pv *FilePV) GetPubKey(context context.Context, quorumHash crypto.QuorumHash) (crypto.PubKey, error) { +func (pv *FilePV) GetPubKey(_ctx context.Context, quorumHash crypto.QuorumHash) (crypto.PubKey, error) { pv.mtx.RLock() defer pv.mtx.RUnlock() @@ -435,7 +435,7 @@ func (pv *FilePV) GetPubKey(context context.Context, quorumHash crypto.QuorumHas // GetFirstPubKey returns the first public key of the validator. // Implements PrivValidator. -func (pv *FilePV) GetFirstPubKey(context context.Context) (crypto.PubKey, error) { +func (pv *FilePV) GetFirstPubKey(_ctx context.Context) (crypto.PubKey, error) { pv.mtx.RLock() defer pv.mtx.RUnlock() @@ -445,7 +445,7 @@ func (pv *FilePV) GetFirstPubKey(context context.Context) (crypto.PubKey, error) return nil, nil } -func (pv *FilePV) GetQuorumHashes(context context.Context) ([]crypto.QuorumHash, error) { +func (pv *FilePV) GetQuorumHashes(_ctx context.Context) ([]crypto.QuorumHash, error) { pv.mtx.RLock() defer pv.mtx.RUnlock() @@ -462,7 +462,7 @@ func (pv *FilePV) GetQuorumHashes(context context.Context) ([]crypto.QuorumHash, return quorumHashes, nil } -func (pv *FilePV) GetFirstQuorumHash(context context.Context) (crypto.QuorumHash, error) { +func (pv *FilePV) GetFirstQuorumHash(_ctx context.Context) (crypto.QuorumHash, error) { pv.mtx.RLock() defer pv.mtx.RUnlock() @@ -473,7 +473,7 @@ func (pv *FilePV) GetFirstQuorumHash(context context.Context) (crypto.QuorumHash } // GetThresholdPublicKey ... 
-func (pv *FilePV) GetThresholdPublicKey(context context.Context, quorumHash crypto.QuorumHash) (crypto.PubKey, error) { +func (pv *FilePV) GetThresholdPublicKey(_ctx context.Context, quorumHash crypto.QuorumHash) (crypto.PubKey, error) { pv.mtx.RLock() defer pv.mtx.RUnlock() @@ -491,7 +491,7 @@ func (pv *FilePV) GetPrivateKey(context context.Context, quorumHash crypto.Quoru return pv.getPrivateKey(context, quorumHash) } -func (pv *FilePV) getPrivateKey(context context.Context, quorumHash crypto.QuorumHash) (crypto.PrivKey, error) { +func (pv *FilePV) getPrivateKey(_ctx context.Context, quorumHash crypto.QuorumHash) (crypto.PrivKey, error) { if keys, ok := pv.Key.PrivateKeys[quorumHash.String()]; ok { return keys.PrivKey, nil } @@ -515,7 +515,7 @@ func (pv *FilePV) GetPublicKey(context context.Context, quorumHash crypto.Quorum } // GetHeight ... -func (pv *FilePV) GetHeight(_ context.Context, quorumHash crypto.QuorumHash) (int64, error) { +func (pv *FilePV) GetHeight(_ctx context.Context, quorumHash crypto.QuorumHash) (int64, error) { pv.mtx.RLock() defer pv.mtx.RUnlock() @@ -543,7 +543,7 @@ func (pv *FilePV) ExtractIntoValidator(ctx context.Context, quorumHash crypto.Qu // GetProTxHash returns the pro tx hash of the validator. // Implements PrivValidator. -func (pv *FilePV) GetProTxHash(context context.Context) (crypto.ProTxHash, error) { +func (pv *FilePV) GetProTxHash(_ctx context.Context) (crypto.ProTxHash, error) { pv.mtx.RLock() defer pv.mtx.RUnlock() @@ -561,7 +561,7 @@ func (pv *FilePV) SignVote( quorumType btcjson.LLMQType, quorumHash crypto.QuorumHash, vote *tmproto.Vote, - logger log.Logger, + _logger log.Logger, ) error { pv.mtx.RLock() defer pv.mtx.RUnlock() @@ -626,7 +626,7 @@ func (pv *FilePV) String() string { } func (pv *FilePV) UpdatePrivateKey( - context context.Context, + _ctx context.Context, privateKey crypto.PrivKey, quorumHash crypto.QuorumHash, thresholdPublicKey crypto.PubKey, @@ -686,16 +686,14 @@ func (pv *FilePV) signVote( // application may have created a different extension. We therefore always // re-sign the vote extensions of precommits. For prevotes, the extension // signature will always be empty. 
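// Illustrative sketch of the flattened extension-signing flow (names taken from
// the call sites in this diff; placeholder error handling): each precommit
// extension is signed against the SignHash of its sign item, and the same items
// can be re-derived later for verification:
//
//	items, err := types.VoteExtensionsFromProto(vpb.VoteExtensions...).
//		SignItems(chainID, quorumType, quorumHash, vpb.Height, vpb.Round)
//	if err != nil {
//		return err
//	}
//	for i, item := range items {
//		sig, err := privKey.SignDigest(item.SignHash)
//		if err != nil {
//			return err
//		}
//		vpb.VoteExtensions[i].Signature = sig
//	}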
- extSigns := make(map[tmproto.VoteExtensionType][]tmbytes.HexBytes) + extSigns := make([]tmbytes.HexBytes, 0, len(quorumSigns.VoteExtensionSignItems)) if vote.Type == tmproto.PrecommitType { - for et, signItems := range quorumSigns.Extensions { - for _, signItem := range signItems { - extSig, err := privKey.SignDigest(signItem.ID) - if err != nil { - return err - } - extSigns[et] = append(extSigns[et], extSig) + for _, signItem := range quorumSigns.VoteExtensionSignItems { + extSig, err := privKey.SignDigest(signItem.SignHash) + if err != nil { + return err } + extSigns = append(extSigns, extSig) } } else if len(vote.VoteExtensions) > 0 { return errors.New("unexpected vote extension - extensions are only allowed in precommits") @@ -707,16 +705,16 @@ func (pv *FilePV) signVote( // If they only differ by timestamp, use last timestamp and signature // Otherwise, return error if sameHRS { - if bytes.Equal(quorumSigns.Block.Raw, lss.BlockSignBytes) { + if bytes.Equal(quorumSigns.Block.Msg, lss.BlockSignBytes) { vote.BlockSignature = lss.BlockSignature } else { return errors.New("conflicting data") } - fillProtoVoteExtensionSigns(vote.VoteExtensionsToMap(), extSigns) + fillProtoVoteExtensionSigns(vote.VoteExtensions, extSigns) return nil } - sigBlock, err := privKey.SignDigest(quorumSigns.Block.ID) + sigBlock, err := privKey.SignDigest(quorumSigns.Block.SignHash) if err != nil { return err } @@ -729,13 +727,13 @@ func (pv *FilePV) signVote( // sigBlock, vote) // } - err = pv.saveSigned(height, round, step, quorumSigns.Block.Raw, sigBlock) + err = pv.saveSigned(height, round, step, quorumSigns.Block.Msg, sigBlock) if err != nil { return err } vote.BlockSignature = sigBlock - fillProtoVoteExtensionSigns(vote.VoteExtensionsToMap(), extSigns) + fillProtoVoteExtensionSigns(vote.VoteExtensions, extSigns) return nil } @@ -815,12 +813,12 @@ func (pv *FilePV) saveSigned( } func fillProtoVoteExtensionSigns( - voteExtensions map[tmproto.VoteExtensionType][]*tmproto.VoteExtension, - signs map[tmproto.VoteExtensionType][]tmbytes.HexBytes, + extensions []*tmproto.VoteExtension, + signs []tmbytes.HexBytes, ) { - for et, extensions := range voteExtensions { - for i, ext := range extensions { - ext.Signature = signs[et][i] - } + + for i, ext := range extensions { + ext.Signature = signs[i] } + } diff --git a/privval/file_test.go b/privval/file_test.go index 2a89fa1925..ef51f62482 100644 --- a/privval/file_test.go +++ b/privval/file_test.go @@ -238,7 +238,7 @@ func TestSignVote(t *testing.T) { } for _, c := range cases { - assert.Error(t, privVal.SignVote(ctx, "mychainid", 0, crypto.QuorumHash{}, c.ToProto(), nil), + assert.Error(t, privVal.SignVote(ctx, "mychainid", 0, quorumHash, c.ToProto(), nil), "expected error on signing conflicting vote") } @@ -289,7 +289,7 @@ func TestSignProposal(t *testing.T) { } for _, c := range cases { - _, err = privVal.SignProposal(ctx, "mychainid", 0, crypto.QuorumHash{}, c.ToProto()) + _, err = privVal.SignProposal(ctx, "mychainid", 0, quorumHash, c.ToProto()) assert.Error(t, err, "expected error on signing conflicting proposal") } } @@ -359,9 +359,11 @@ func TestVoteExtensionsAreAlwaysSigned(t *testing.T) { } voteType := tmproto.PrecommitType - exts := types.VoteExtensions{ - tmproto.VoteExtensionType_DEFAULT: []types.VoteExtension{{Extension: []byte("extension")}}, - } + exts := types.VoteExtensionsFromProto(&tmproto.VoteExtension{ + Type: tmproto.VoteExtensionType_THRESHOLD_RECOVER, + Extension: []byte("extension"), + }) + // We initially sign this vote without an extension 
vote1 := newVote(proTxHash, 0, height, round, voteType, blockID, exts) vpb1 := vote1.ToProto() @@ -370,16 +372,17 @@ func TestVoteExtensionsAreAlwaysSigned(t *testing.T) { assert.NoError(t, err, "expected no error signing vote") assert.NotNil(t, vpb1.VoteExtensions[0].Signature) - extSignItem1, err := types.MakeVoteExtensionSignItems(chainID, vpb1, quorumType, quorumHash) + extSignItem1, err := types.VoteExtensionsFromProto(vpb1.VoteExtensions...).SignItems(chainID, quorumType, quorumHash, vpb1.Height, vpb1.Round) require.NoError(t, err) - assert.True(t, pubKey.VerifySignatureDigest(extSignItem1[tmproto.VoteExtensionType_DEFAULT][0].ID, vpb1.VoteExtensions[0].Signature)) + assert.True(t, pubKey.VerifySignatureDigest(extSignItem1[0].SignHash, vpb1.VoteExtensions[0].Signature)) // We duplicate this vote precisely, including its timestamp, but change // its extension vote2 := vote1.Copy() - vote2.VoteExtensions = types.VoteExtensions{ - tmproto.VoteExtensionType_DEFAULT: []types.VoteExtension{{Extension: []byte("new extension")}}, - } + vote2.VoteExtensions = types.VoteExtensionsFromProto(&tmproto.VoteExtension{ + Type: tmproto.VoteExtensionType_THRESHOLD_RECOVER, + Extension: []byte("new extension")}) + vpb2 := vote2.ToProto() err = privVal.SignVote(ctx, chainID, quorumType, quorumHash, vpb2, logger) @@ -389,10 +392,10 @@ func TestVoteExtensionsAreAlwaysSigned(t *testing.T) { // that validates against the vote extension sign bytes with the new // extension, and does not validate against the vote extension sign bytes // with the old extension. - extSignItem2, err := types.MakeVoteExtensionSignItems(chainID, vpb2, quorumType, quorumHash) + extSignItem2, err := types.VoteExtensionsFromProto(vpb2.VoteExtensions...).SignItems(chainID, quorumType, quorumHash, vpb2.Height, vpb2.Round) require.NoError(t, err) - assert.True(t, pubKey.VerifySignatureDigest(extSignItem2[tmproto.VoteExtensionType_DEFAULT][0].ID, vpb2.VoteExtensions[0].Signature)) - assert.False(t, pubKey.VerifySignatureDigest(extSignItem1[tmproto.VoteExtensionType_DEFAULT][0].ID, vpb2.VoteExtensions[0].Signature)) + assert.True(t, pubKey.VerifySignatureDigest(extSignItem2[0].SignHash, vpb2.VoteExtensions[0].Signature)) + assert.False(t, pubKey.VerifySignatureDigest(extSignItem1[0].SignHash, vpb2.VoteExtensions[0].Signature)) vpb2.BlockSignature = nil vpb2.VoteExtensions[0].Signature = nil @@ -400,10 +403,10 @@ func TestVoteExtensionsAreAlwaysSigned(t *testing.T) { err = privVal.SignVote(ctx, chainID, quorumType, quorumHash, vpb2, logger) assert.NoError(t, err, "expected no error signing same vote with manipulated timestamp and vote extension") - extSignItem3, err := types.MakeVoteExtensionSignItems(chainID, vpb2, quorumType, quorumHash) + extSignItem3, err := types.VoteExtensionsFromProto(vpb2.VoteExtensions...).SignItems(chainID, quorumType, quorumHash, vpb2.Height, vpb2.Round) require.NoError(t, err) - assert.True(t, pubKey.VerifySignatureDigest(extSignItem3[tmproto.VoteExtensionType_DEFAULT][0].ID, vpb2.VoteExtensions[0].Signature)) - assert.False(t, pubKey.VerifySignatureDigest(extSignItem1[tmproto.VoteExtensionType_DEFAULT][0].ID, vpb2.VoteExtensions[0].Signature)) + assert.True(t, pubKey.VerifySignatureDigest(extSignItem3[0].SignHash, vpb2.VoteExtensions[0].Signature)) + assert.False(t, pubKey.VerifySignatureDigest(extSignItem1[0].SignHash, vpb2.VoteExtensions[0].Signature)) } func newVote(proTxHash types.ProTxHash, idx int32, height int64, round int32, diff --git a/privval/grpc/client.go b/privval/grpc/client.go index 
7a031f3680..99ba6506d7 100644 --- a/privval/grpc/client.go +++ b/privval/grpc/client.go @@ -104,7 +104,7 @@ func (sc *SignerClient) GetProTxHash(ctx context.Context) (crypto.ProTxHash, err return resp.ProTxHash, nil } -func (sc *SignerClient) GetFirstQuorumHash(ctx context.Context) (crypto.QuorumHash, error) { +func (sc *SignerClient) GetFirstQuorumHash(_ctx context.Context) (crypto.QuorumHash, error) { return nil, errors.New("getFirstQuorumHash should not be called on a signer client") } @@ -126,14 +126,14 @@ func (sc *SignerClient) GetThresholdPublicKey(ctx context.Context, quorumHash cr return pk, nil } -func (sc *SignerClient) GetHeight(ctx context.Context, quorumHash crypto.QuorumHash) (int64, error) { +func (sc *SignerClient) GetHeight(_ctx context.Context, quorumHash crypto.QuorumHash) (int64, error) { return 0, fmt.Errorf("getHeight should not be called on asigner client %s", quorumHash.String()) } // SignVote requests a remote signer to sign a vote func (sc *SignerClient) SignVote( - ctx context.Context, chainID string, quorumType btcjson.LLMQType, quorumHash crypto.QuorumHash, - vote *tmproto.Vote, logger log.Logger) error { + ctx context.Context, _chainID string, quorumType btcjson.LLMQType, quorumHash crypto.QuorumHash, + vote *tmproto.Vote, _logger log.Logger) error { if len(quorumHash.Bytes()) != crypto.DefaultHashSize { return fmt.Errorf("quorum hash must be 32 bytes long when signing vote") } @@ -173,11 +173,11 @@ func (sc *SignerClient) SignProposal( } func (sc *SignerClient) UpdatePrivateKey( - ctx context.Context, privateKey crypto.PrivKey, quorumHash crypto.QuorumHash, thresholdPublicKey crypto.PubKey, height int64, + _ctx context.Context, _privateKey crypto.PrivKey, _quorumHash crypto.QuorumHash, _thresholdPublicKey crypto.PubKey, _height int64, ) { } -func (sc *SignerClient) GetPrivateKey(ctx context.Context, quorumHash crypto.QuorumHash) (crypto.PrivKey, error) { +func (sc *SignerClient) GetPrivateKey(_ctx context.Context, _quorumHash crypto.QuorumHash) (crypto.PrivKey, error) { return nil, nil } diff --git a/privval/grpc/client_test.go b/privval/grpc/client_test.go index 9b3fca568d..18df566cd8 100644 --- a/privval/grpc/client_test.go +++ b/privval/grpc/client_test.go @@ -50,7 +50,7 @@ func TestSignerClient_GetPubKey(t *testing.T) { srv, dialer := dialer(t, mockPV, logger) defer srv.Stop() - conn, err := grpc.DialContext(ctx, "", + conn, err := grpc.NewClient("localhost", grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithContextDialer(dialer), ) @@ -78,7 +78,7 @@ func TestSignerClient_SignVote(t *testing.T) { srv, dialer := dialer(t, mockPV, logger) defer srv.Stop() - conn, err := grpc.DialContext(ctx, "", + conn, err := grpc.NewClient("localhost", grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithContextDialer(dialer), ) @@ -139,11 +139,11 @@ func TestSignerClient_SignProposal(t *testing.T) { logger := log.NewTestingLogger(t) srv, dialer := dialer(t, mockPV, logger) defer srv.Stop() - - conn, err := grpc.DialContext(ctx, "", + conn, err := grpc.NewClient("localhost", grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithContextDialer(dialer), ) + require.NoError(t, err) defer conn.Close() diff --git a/privval/grpc/server.go b/privval/grpc/server.go index 928f031691..00a4552224 100644 --- a/privval/grpc/server.go +++ b/privval/grpc/server.go @@ -76,7 +76,7 @@ func (ss *SignerServer) GetThresholdPubKey(ctx context.Context, req *privvalprot // GetProTxHash receives a request for the proTxHash // returns the proTxHash on 
success and error on failure -func (ss *SignerServer) GetProTxHash(ctx context.Context, req *privvalproto.ProTxHashRequest) ( +func (ss *SignerServer) GetProTxHash(ctx context.Context, _req *privvalproto.ProTxHashRequest) ( *privvalproto.ProTxHashResponse, error) { var proTxHash crypto.ProTxHash diff --git a/privval/grpc/util.go b/privval/grpc/util.go index 46b0ba12de..9ab209510e 100644 --- a/privval/grpc/util.go +++ b/privval/grpc/util.go @@ -89,7 +89,7 @@ func GenerateTLS(certPath, keyPath, ca string, log log.Logger) grpc.DialOption { // DialRemoteSigner is a generalized function to dial the gRPC server. func DialRemoteSigner( - ctx context.Context, + _ctx context.Context, cfg *config.PrivValidatorConfig, chainID string, logger log.Logger, @@ -113,9 +113,10 @@ func DialRemoteSigner( dialOptions = append(dialOptions, transportSecurity) _, address := tmnet.ProtocolAndAddress(cfg.ListenAddr) - conn, err := grpc.DialContext(ctx, address, dialOptions...) + conn, err := grpc.NewClient(address, dialOptions...) if err != nil { logger.Error("unable to connect to server", "target", address, "err", err) + return nil, err } return NewSignerClient(conn, chainID, logger) diff --git a/privval/msgs_test.go b/privval/msgs_test.go index bbb4d5f339..32ba1354e3 100644 --- a/privval/msgs_test.go +++ b/privval/msgs_test.go @@ -44,9 +44,10 @@ func exampleVote() *types.Vote { }, ValidatorProTxHash: crypto.ProTxHashFromSeedBytes([]byte("validator_pro_tx_hash")), ValidatorIndex: 56789, - VoteExtensions: types.VoteExtensions{ - tmproto.VoteExtensionType_DEFAULT: []types.VoteExtension{{Extension: []byte("extension")}}, - }, + VoteExtensions: types.VoteExtensionsFromProto(&tmproto.VoteExtension{ + Type: tmproto.VoteExtensionType_THRESHOLD_RECOVER, + Extension: []byte("extension"), + }), } } @@ -95,8 +96,8 @@ func TestPrivvalVectors(t *testing.T) { {"pubKey request", &privproto.PubKeyRequest{}, "0a00"}, {"pubKey response", &privproto.PubKeyResponse{PubKey: ppk, Error: nil}, "12340a321a30991a1c4f159f8e4730bf897e97e27c11f27ba0c1337111a3c102e1081a19372832b596623b1a248a0e00b156d80690cf"}, {"pubKey response with error", &privproto.PubKeyResponse{PubKey: cryptoproto.PublicKey{}, Error: remoteError}, "12140a0012100801120c697427732061206572726f72"}, - {"Vote Request", &privproto.SignVoteRequest{Vote: votepb}, "1aaa010aa701080210031802226c0a208b01023386c371778ecb6368573e539afc3cc860ec3a2f614e54fe5652f4fc80122608c0843d122072db3d959635dff1bb567bedaa70573392c5159666a3f8caf11e413aac52207a1a20b583d49b95a0a5526966b519d8b7bba2aefc800b370a7438c7063728904e58ee2a20959a8f5ef2be68d0ed3a07ed8cff85991ee7995c2ac17030f742c135f9729fbe30d5bb03420b1209657874656e73696f6e"}, - {"Vote Response", &privproto.SignedVoteResponse{Vote: *votepb, Error: nil}, "22aa010aa701080210031802226c0a208b01023386c371778ecb6368573e539afc3cc860ec3a2f614e54fe5652f4fc80122608c0843d122072db3d959635dff1bb567bedaa70573392c5159666a3f8caf11e413aac52207a1a20b583d49b95a0a5526966b519d8b7bba2aefc800b370a7438c7063728904e58ee2a20959a8f5ef2be68d0ed3a07ed8cff85991ee7995c2ac17030f742c135f9729fbe30d5bb03420b1209657874656e73696f6e"}, + {"Vote Request", &privproto.SignVoteRequest{Vote: votepb}, "1aac010aa901080210031802226c0a208b01023386c371778ecb6368573e539afc3cc860ec3a2f614e54fe5652f4fc80122608c0843d122072db3d959635dff1bb567bedaa70573392c5159666a3f8caf11e413aac52207a1a20b583d49b95a0a5526966b519d8b7bba2aefc800b370a7438c7063728904e58ee2a20959a8f5ef2be68d0ed3a07ed8cff85991ee7995c2ac17030f742c135f9729fbe30d5bb03420d08011209657874656e73696f6e"}, + {"Vote Response", 
&privproto.SignedVoteResponse{Vote: *votepb, Error: nil}, "22ac010aa901080210031802226c0a208b01023386c371778ecb6368573e539afc3cc860ec3a2f614e54fe5652f4fc80122608c0843d122072db3d959635dff1bb567bedaa70573392c5159666a3f8caf11e413aac52207a1a20b583d49b95a0a5526966b519d8b7bba2aefc800b370a7438c7063728904e58ee2a20959a8f5ef2be68d0ed3a07ed8cff85991ee7995c2ac17030f742c135f9729fbe30d5bb03420d08011209657874656e73696f6e"}, {"Vote Response with error", &privproto.SignedVoteResponse{Vote: tmproto.Vote{}, Error: remoteError}, "22180a042202120012100801120c697427732061206572726f72"}, {"Proposal Request", &privproto.SignProposalRequest{Proposal: proposalpb}, "2a700a6e08011003180220022a4a0a208b01023386c371778ecb6368573e539afc3cc860ec3a2f614e54fe5652f4fc80122608c0843d122072db3d959635dff1bb567bedaa70573392c5159666a3f8caf11e413aac52207a320608f49a8ded053a10697427732061207369676e6174757265"}, {"Proposal Response", &privproto.SignedProposalResponse{Proposal: *proposalpb, Error: nil}, "32700a6e08011003180220022a4a0a208b01023386c371778ecb6368573e539afc3cc860ec3a2f614e54fe5652f4fc80122608c0843d122072db3d959635dff1bb567bedaa70573392c5159666a3f8caf11e413aac52207a320608f49a8ded053a10697427732061207369676e6174757265"}, diff --git a/privval/retry_signer_client.go b/privval/retry_signer_client.go index c3590f85fd..35427c74a8 100644 --- a/privval/retry_signer_client.go +++ b/privval/retry_signer_client.go @@ -104,7 +104,7 @@ func (sc *RetrySignerClient) GetProTxHash(ctx context.Context) (crypto.ProTxHash return nil, fmt.Errorf("exhausted all attempts to get protxhash: %w", err) } -func (sc *RetrySignerClient) GetFirstQuorumHash(context context.Context) (crypto.QuorumHash, error) { +func (sc *RetrySignerClient) GetFirstQuorumHash(_ctx context.Context) (crypto.QuorumHash, error) { return nil, errors.New("getFirstQuorumHash should not be called on a signer client") } @@ -133,13 +133,13 @@ func (sc *RetrySignerClient) GetThresholdPublicKey(ctx context.Context, quorumHa } return nil, fmt.Errorf("exhausted all attempts to get pubkey: %w", err) } -func (sc *RetrySignerClient) GetHeight(ctx context.Context, quorumHash crypto.QuorumHash) (int64, error) { +func (sc *RetrySignerClient) GetHeight(_ctx context.Context, quorumHash crypto.QuorumHash) (int64, error) { return 0, fmt.Errorf("getHeight should not be called on asigner client %s", quorumHash.String()) } func (sc *RetrySignerClient) SignVote( ctx context.Context, chainID string, quorumType btcjson.LLMQType, quorumHash crypto.QuorumHash, - vote *tmproto.Vote, logger log.Logger) error { + vote *tmproto.Vote, _logger log.Logger) error { var err error for i := 0; i < sc.retries || sc.retries == 0; i++ { err = sc.next.SignVote(ctx, chainID, quorumType, quorumHash, vote, nil) @@ -175,11 +175,11 @@ func (sc *RetrySignerClient) SignProposal( } func (sc *RetrySignerClient) UpdatePrivateKey( - ctx context.Context, privateKey crypto.PrivKey, quorumHash crypto.QuorumHash, thresholdPublicKey crypto.PubKey, height int64, + _ctx context.Context, _privateKey crypto.PrivKey, _quorumHash crypto.QuorumHash, _thresholdPublicKey crypto.PubKey, _height int64, ) { } -func (sc *RetrySignerClient) GetPrivateKey(ctx context.Context, quorumHash crypto.QuorumHash) (crypto.PrivKey, error) { +func (sc *RetrySignerClient) GetPrivateKey(_ctx context.Context, _quorumHash crypto.QuorumHash) (crypto.PrivKey, error) { return nil, nil } diff --git a/privval/secret_connection.go b/privval/secret_connection.go index 2c69517a39..3095125804 100644 --- a/privval/secret_connection.go +++ b/privval/secret_connection.go @@ 
-249,7 +249,7 @@ func (sc *SecretConnection) Read(data []byte) (n int, err error) { if 0 < len(sc.recvBuffer) { n = copy(data, sc.recvBuffer) sc.recvBuffer = sc.recvBuffer[n:] - return + return n, err } // read off the conn @@ -257,7 +257,7 @@ func (sc *SecretConnection) Read(data []byte) (n int, err error) { defer pool.Put(sealedFrame) _, err = io.ReadFull(sc.conn, sealedFrame) if err != nil { - return + return n, err } // decrypt the frame. @@ -269,7 +269,7 @@ func (sc *SecretConnection) Read(data []byte) (n int, err error) { return n, fmt.Errorf("failed to decrypt SecretConnection: %w", err) } if err = incrNonce(sc.recvNonce); err != nil { - return + return n, err } // end decryption @@ -339,7 +339,7 @@ func shareEphPubKey(conn io.ReadWriter, locEphPub *[32]byte) (remEphPub *[32]byt // If error: if trs.FirstError() != nil { err = trs.FirstError() - return + return nil, err } // Otherwise: @@ -452,11 +452,10 @@ func shareAuthSignature(sc io.ReadWriter, pubKey crypto.PubKey, signature []byte // If error: if trs.FirstError() != nil { err = trs.FirstError() - return + return recvMsg, err } - var _recvMsg = trs.FirstValue().(authSigMessage) - return _recvMsg, nil + return trs.FirstValue().(authSigMessage), nil } //-------------------------------------------------------------------------------- diff --git a/privval/signer_client.go b/privval/signer_client.go index 8b5a74c80f..dbc00b6596 100644 --- a/privval/signer_client.go +++ b/privval/signer_client.go @@ -142,7 +142,7 @@ func (sc *SignerClient) GetProTxHash(ctx context.Context) (crypto.ProTxHash, err return resp.ProTxHash, nil } -func (sc *SignerClient) GetFirstQuorumHash(ctx context.Context) (crypto.QuorumHash, error) { +func (sc *SignerClient) GetFirstQuorumHash(_ctx context.Context) (crypto.QuorumHash, error) { return nil, errors.New("getFirstQuorumHash should not be called on a signer client") } @@ -173,7 +173,7 @@ func (sc *SignerClient) GetThresholdPublicKey(ctx context.Context, quorumHash cr return pk, nil } -func (sc *SignerClient) GetHeight(ctx context.Context, quorumHash crypto.QuorumHash) (int64, error) { +func (sc *SignerClient) GetHeight(_ctx context.Context, quorumHash crypto.QuorumHash) (int64, error) { return 0, fmt.Errorf("getHeight should not be called on asigner client %s", quorumHash.String()) } @@ -184,7 +184,7 @@ func (sc *SignerClient) SignVote( quorumType btcjson.LLMQType, quorumHash crypto.QuorumHash, vote *tmproto.Vote, - logger log.Logger, + _logger log.Logger, ) error { // fmt.Printf("--> sending request to sign vote (%d/%d) %v - %v", vote.Height, vote.Round, vote.BlockID, vote) voteRequest := privvalproto.SignVoteRequest{ @@ -245,11 +245,11 @@ func (sc *SignerClient) SignProposal( } func (sc *SignerClient) UpdatePrivateKey( - ctx context.Context, privateKey crypto.PrivKey, quorumHash crypto.QuorumHash, thresholdPublicKey crypto.PubKey, height int64, + _ctx context.Context, _privateKey crypto.PrivKey, _quorumHash crypto.QuorumHash, _thresholdPublicKey crypto.PubKey, _height int64, ) { } -func (sc *SignerClient) GetPrivateKey(ctx context.Context, quorumHash crypto.QuorumHash) (crypto.PrivKey, error) { +func (sc *SignerClient) GetPrivateKey(_ctx context.Context, _quorumHash crypto.QuorumHash) (crypto.PrivKey, error) { return nil, nil } diff --git a/privval/signer_client_test.go b/privval/signer_client_test.go index 73496b82fd..dff5fc1fb7 100644 --- a/privval/signer_client_test.go +++ b/privval/signer_client_test.go @@ -408,7 +408,7 @@ func TestSignerSignVoteErrors(t *testing.T) { } } -func brokenHandler(ctx 
context.Context, privVal types.PrivValidator, request privvalproto.Message, chainID string) (privvalproto.Message, error) { +func brokenHandler(_ctx context.Context, _privVal types.PrivValidator, request privvalproto.Message, _chainID string) (privvalproto.Message, error) { var res privvalproto.Message var err error diff --git a/privval/signer_requestHandler.go b/privval/signer_requestHandler.go index 5431e17c7c..a74e1b3fd0 100644 --- a/privval/signer_requestHandler.go +++ b/privval/signer_requestHandler.go @@ -155,7 +155,7 @@ func handleKeyRequest( Description: err.Error(), }, )) - return + return res, err } var pk cryptoproto.PublicKey diff --git a/privval/utils.go b/privval/utils.go index 5b51da4f19..f37ab797a5 100644 --- a/privval/utils.go +++ b/privval/utils.go @@ -28,7 +28,7 @@ func IsConnTimeout(err error) bool { // NewSignerListener creates a new SignerListenerEndpoint using the corresponding listen address func NewSignerListener(listenAddr string, logger log.Logger) (*SignerListenerEndpoint, error) { protocol, address := tmnet.ProtocolAndAddress(listenAddr) - if protocol != "unix" && protocol != "tcp" { //nolint:goconst + if protocol != "unix" && protocol != "tcp" { return nil, fmt.Errorf("unsupported address family %q, want unix or tcp", protocol) } diff --git a/proto/README.md b/proto/README.md index a0701d3bca..c2df2ef261 100644 --- a/proto/README.md +++ b/proto/README.md @@ -19,3 +19,4 @@ The `.proto` files within this section are core to the protocol and updates must 1b. Merge the RFC. 2. Make the necessary changes to the `.proto` file(s), [core data structures](../spec/core/data_structures.md) and/or [ABCI protocol](../spec/abci/apps.md). 3. Rebuild the Go protocol buffers by running `make proto-gen`. Ensure that the project builds correctly by running `make build`. + diff --git a/proto/tendermint/abci/types.proto b/proto/tendermint/abci/types.proto index e5d66e39f6..24bb19df35 100644 --- a/proto/tendermint/abci/types.proto +++ b/proto/tendermint/abci/types.proto @@ -54,7 +54,7 @@ message RequestFlush {} // // Used to sync Tenderdash with the application during a handshake that happens on startup. // The returned app_version will be included in the Header of every block. -// Tenderdsah expects last_block_app_hash and last_block_height to be updated during Commit, +// Tenderdash expects last_block_app_hash and last_block_height to be updated during Commit, // ensuring that Commit is never called twice for the same block height. message RequestInfo { string version = 1; // The Tenderdash software semantic version. @@ -219,7 +219,7 @@ message RequestApplySnapshotChunk { // their propose timeout goes off. // - As a result of executing the prepared proposal, the Application may produce header events or transaction events. // The Application must keep those events until a block is decided and then pass them on to Tenderdash via -// `ResponseFinalizeBlock`. +// `ResponsePrepareProposal`. // - As a sanity check, Tenderdash will check the returned parameters for validity if the Application modified them. // In particular, `ResponsePrepareProposal.tx_records` will be deemed invalid if // - There is a duplicate transaction in the list. @@ -290,6 +290,7 @@ message RequestPrepareProposal { // Proposer's latest available app protocol version. uint64 proposed_app_version = 11; // App and block version used to generate the block. + // App version included in the block can be modified by setting ResponsePrepareProposal.app_version. 
tendermint.version.Consensus version = 12; // quorum_hash contains hash of validator quorum that will sign the block bytes quorum_hash = 13; @@ -365,6 +366,7 @@ message RequestProcessProposal { // Proposer's latest available app protocol version. uint64 proposed_app_version = 12; // App and block version used to generate the block. + // App version MUST be verified by the app. tendermint.version.Consensus version = 13; // quorum_hash contains hash of validator quorum that will sign the block bytes quorum_hash = 14; @@ -463,8 +465,8 @@ message RequestVerifyVoteExtension { // - The application must execute the transactions in full, in the order they appear in `RequestFinalizeBlock.txs`, // before returning control to Tenderdash. Alternatively, it can commit the candidate state corresponding to the same block // previously executed via `PrepareProposal` or `ProcessProposal`. -// - `ResponseFinalizeBlock.tx_results[i].Code == 0` only if the _i_-th transaction is fully valid. -// - Application is expected to persist its state at the end of this call, before calling `ResponseFinalizeBlock`. +// - If ProcessProposal for the same arguments has succeeded, FinalizeBlock MUST always succeed. +// - Application is expected to persist its state at the end of this call, before returning `ResponseFinalizeBlock`. // - Later calls to `Query` can return proofs about the application state anchored // in this Merkle root hash. // - Use `ResponseFinalizeBlock.retain_height` with caution! If all nodes in the network remove historical @@ -637,6 +639,8 @@ message ResponsePrepareProposal { tendermint.types.CoreChainLock core_chain_lock_update = 5; // Changes to validator set that will be applied at next height. ValidatorSetUpdate validator_set_update = 6 [(gogoproto.nullable) = true]; + // Application version that was used to create the current proposal. + uint64 app_version = 7; } message ResponseProcessProposal { @@ -658,15 +662,32 @@ message ResponseProcessProposal { // Changes to validator set (set voting power to 0 to remove). ValidatorSetUpdate validator_set_update = 5 [(gogoproto.nullable) = true]; + + // Type & Key-Value events for indexing + repeated Event events = 6 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "events,omitempty"]; } -// Provides a vote extension for signing. Each field is mandatory for filling +// Provides a vote extension for signing. `type` and `extension` fields are mandatory for filling message ExtendVoteExtension { - // Vote extension type can be either DEFAULT or THRESHOLD_RECOVER. - // The Tenderdash supports only THRESHOLD_RECOVER at this moment. + // Vote extension type can be either DEFAULT, THRESHOLD_RECOVER or THRESHOLD_RECOVER_RAW. + // The Tenderdash supports only THRESHOLD_RECOVER and THRESHOLD_RECOVER_RAW at this moment. tendermint.types.VoteExtensionType type = 1; // Deterministic or (Non-Deterministic) extension provided by the sending validator's Application. + // + // For THRESHOLD_RECOVER_RAW, it MUST be 32 bytes. bytes extension = 2; + // Sign request ID that will be used to sign the vote extensions. + // Only applicable for THRESHOLD_RECOVER_RAW vote extension type. + // + // Tenderdash will use SHA256 checksum of `sign_request_id` when generating quorum signatures of + // THRESHOLD_RECOVER_RAW vote extensions. It MUST NOT be set for any other vote extension types. + // + // If not set, Tenderdash will generate it based on height and round. + // + // If set, it SHOULD be unique per voting round, and it MUST start with `dpevote` or `\x06plwdtx` prefix. 
+ // + // Use with caution - it can have severe security consequences. + optional bytes sign_request_id = 3; } message ResponseExtendVote { @@ -684,8 +705,10 @@ message ResponseVerifyVoteExtension { } message ResponseFinalizeBlock { - // Type & Key-Value events for indexing - repeated Event events = 1 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "events,omitempty"]; + // Events are moved to ProcessProposal + reserved "events"; + reserved 1; + // Blocks below this height may be removed. Defaults to `0` (retain all). int64 retain_height = 2; } @@ -719,12 +742,13 @@ message EventAttribute { // ExecTxResult contains results of executing one individual transaction. message ExecTxResult { - uint32 code = 1; // Response code within codespace; by convention, 0 means success. - bytes data = 2; // Result bytes, if any (arbitrary data, not interpreted by Tenderdash). - string log = 3; // The output of the application's logger. May be non-deterministic. - string info = 4; // Additional information. May be non-deterministic. - int64 gas_wanted = 5; // Amount of gas requested for transaction. - int64 gas_used = 6; // Amount of gas consumed by transaction. + uint32 code = 1; // Response code within codespace; by convention, 0 means success. + bytes data = 2; // Result bytes, if any (arbitrary data, not interpreted by Tenderdash). + string log = 3; // The output of the application's logger. May be non-deterministic. + string info = 4; // Additional information. May be non-deterministic. + reserved "gas_wanted"; + reserved 5; + int64 gas_used = 6; // Amount of gas consumed by transaction. // Type & Key-Value events for indexing transactions (e.g. by account). repeated Event events = 7 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "events,omitempty"]; @@ -751,6 +775,7 @@ message TxRecord { UNMODIFIED = 1; // The Application did not modify this transaction. ADDED = 2; // The Application added this transaction. REMOVED = 3; // The Application wants this transaction removed from the proposal and the mempool. + DELAYED = 4; // The Application wants this transaction removed from the proposal but not the mempool. } } @@ -773,6 +798,21 @@ message ValidatorUpdate { string node_address = 4 [(gogoproto.nullable) = true]; } +// ValidatorSetUpdate represents a change in the validator set. +// It can be used to add, remove, or update a validator. +// +// Validator set update consists of multiple ValidatorUpdate records, +// each of them can be used to add, remove, or update a validator, according to the +// following rules: +// +// 1. If a validator with the same public key already exists in the validator set +// and power is greater than 0, the existing validator will be updated with the new power. +// 2. If a validator with the same public key already exists in the validator set +// and power is 0, the existing validator will be removed from the validator set. +// 3. If a validator with the same public key does not exist in the validator set and the power is greater than 0, +// a new validator will be added to the validator set. +// 4. As a special case, if quorum hash has changed, all existing validators will be removed before applying +// the new validator set update. 
message ValidatorSetUpdate { repeated ValidatorUpdate validator_updates = 1 [(gogoproto.nullable) = false]; tendermint.crypto.PublicKey threshold_public_key = 2 [(gogoproto.nullable) = false]; diff --git a/proto/tendermint/p2p/types.pb.go b/proto/tendermint/p2p/types.pb.go index 14aeefe3cb..0f1e210ac6 100644 --- a/proto/tendermint/p2p/types.pb.go +++ b/proto/tendermint/p2p/types.pb.go @@ -98,7 +98,7 @@ type NodeInfo struct { ListenAddr string `protobuf:"bytes,3,opt,name=listen_addr,json=listenAddr,proto3" json:"listen_addr,omitempty"` Network string `protobuf:"bytes,4,opt,name=network,proto3" json:"network,omitempty"` Version string `protobuf:"bytes,5,opt,name=version,proto3" json:"version,omitempty"` - Channels []byte `protobuf:"bytes,6,opt,name=channels,proto3" json:"channels,omitempty"` + Channels []uint32 `protobuf:"varint,6,rep,packed,name=channels,proto3" json:"channels,omitempty"` Moniker string `protobuf:"bytes,7,opt,name=moniker,proto3" json:"moniker,omitempty"` Other NodeInfoOther `protobuf:"bytes,8,opt,name=other,proto3" json:"other"` ProTxHash []byte `protobuf:"bytes,9,opt,name=pro_tx_hash,json=proTxHash,proto3" json:"pro_tx_hash,omitempty"` @@ -172,7 +172,7 @@ func (m *NodeInfo) GetVersion() string { return "" } -func (m *NodeInfo) GetChannels() []byte { +func (m *NodeInfo) GetChannels() []uint32 { if m != nil { return m.Channels } @@ -899,7 +899,7 @@ func init() { func init() { proto.RegisterFile("tendermint/p2p/types.proto", fileDescriptor_c8a29e659aeca578) } var fileDescriptor_c8a29e659aeca578 = []byte{ - // 1436 bytes of a gzipped FileDescriptorProto + // 1438 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x57, 0x4b, 0x6f, 0xdb, 0x46, 0x17, 0x95, 0xe4, 0x97, 0x74, 0x65, 0x59, 0xf6, 0xc4, 0x5f, 0xc2, 0x38, 0x89, 0xe4, 0xcf, 0xe9, 0xc3, 0xe8, 0x42, 0x2a, 0x14, 0xa0, 0x48, 0x83, 0x06, 0x48, 0xe4, 0x38, 0x91, 0x0b, 0x27, 0x11, @@ -923,73 +923,73 @@ var fileDescriptor_c8a29e659aeca578 = []byte{ 0x3d, 0xf0, 0x89, 0xb9, 0x2c, 0xb7, 0x0e, 0x5c, 0xd4, 0x84, 0x6a, 0xe0, 0x73, 0x81, 0x89, 0x65, 0xbb, 0x6e, 0xa8, 0xaa, 0xab, 0x98, 0x10, 0xa5, 0x1e, 0xbb, 0x6e, 0x88, 0x0c, 0x58, 0x21, 0x58, 0xbc, 0xa6, 0xe1, 0x99, 0xb1, 0xa8, 0x36, 0xe3, 0x50, 0xee, 0xc4, 0x85, 0x2e, 0x45, 0x3b, 0x3a, - 0x44, 0x5b, 0x50, 0x76, 0x3c, 0x9b, 0x10, 0x1c, 0x70, 0x63, 0x79, 0xbb, 0xb8, 0xbb, 0x6a, 0x26, + 0x44, 0x5b, 0x50, 0x76, 0x3c, 0x9b, 0x10, 0x1c, 0x70, 0x63, 0x79, 0x7b, 0x61, 0xb7, 0x66, 0x26, 0xb1, 0x64, 0x0d, 0x29, 0xf1, 0xcf, 0x70, 0x68, 0xac, 0x44, 0x2c, 0x1d, 0xa2, 0xaf, 0x61, 0x89, 0x0a, 0x0f, 0x87, 0x46, 0x59, 0x1d, 0xfb, 0x4e, 0xfe, 0xd8, 0xb1, 0x55, 0x2f, 0x25, 0x48, 0x1f, - 0x3a, 0x62, 0xa0, 0x06, 0x54, 0x59, 0x48, 0x2d, 0x31, 0xb1, 0x3c, 0x9b, 0x7b, 0x46, 0x45, 0x3d, - 0xb3, 0xc2, 0x42, 0x7a, 0x3c, 0xe9, 0xd9, 0xdc, 0xdb, 0xf9, 0x09, 0x6a, 0x53, 0x6c, 0x74, 0x13, - 0xca, 0x62, 0x62, 0xf9, 0xc4, 0xc5, 0x13, 0xe5, 0x72, 0xc5, 0x5c, 0x11, 0x93, 0x03, 0x19, 0xa2, - 0x36, 0x54, 0x43, 0xe6, 0x28, 0x3b, 0x30, 0xe7, 0xda, 0xba, 0xb5, 0xcb, 0x8b, 0x26, 0x98, 0xfd, - 0xbd, 0xc7, 0x51, 0xd6, 0x84, 0x90, 0x39, 0x7a, 0xbd, 0xf3, 0x77, 0x11, 0xca, 0x7d, 0x8c, 0x43, - 0xd5, 0xc6, 0xeb, 0x50, 0xf2, 0xdd, 0x48, 0xb2, 0xbb, 0x7c, 0x79, 0xd1, 0x2c, 0x1d, 0x3c, 0x31, - 0x4b, 0xbe, 0x8b, 0xba, 0xb0, 0xaa, 0x15, 0x2d, 0x9f, 0x9c, 0x50, 0xa3, 0xb4, 0xbd, 0x70, 0x65, - 0x6b, 0x31, 0x0e, 0xb5, 0xae, 0x94, 0x33, 0xab, 0x76, 0x1a, 0xa0, 0x67, 0xb0, 0x16, 0xd8, 0x5c, - 0x58, 0x0e, 0x25, 0x04, 0x3b, 0x02, 0xbb, 0xaa, 0x5d, 0xd5, 0xce, 0x56, 0x2b, 0x9a, 0xdf, 
0x56, - 0x3c, 0xbf, 0xad, 0xe3, 0x78, 0x7e, 0xbb, 0x8b, 0x6f, 0xfe, 0x68, 0x16, 0xcd, 0x9a, 0xe4, 0xed, - 0xc5, 0x34, 0xd9, 0x1f, 0x9f, 0xd8, 0x8e, 0xf0, 0xc7, 0x58, 0x35, 0xb5, 0x6c, 0x26, 0x71, 0xde, - 0xca, 0xa5, 0xbc, 0x95, 0xff, 0x14, 0xa1, 0x9e, 0xab, 0x52, 0xf6, 0x34, 0xb6, 0x4b, 0x9b, 0xa9, - 0x43, 0x74, 0x08, 0x1b, 0xaa, 0x64, 0xd7, 0xb7, 0x03, 0x8b, 0x8f, 0x1c, 0x27, 0xb6, 0xf4, 0x63, - 0xaa, 0xae, 0x4b, 0xea, 0x13, 0xdf, 0x0e, 0x8e, 0x22, 0xe2, 0xb4, 0xda, 0x89, 0xed, 0x07, 0xa3, - 0x10, 0x7f, 0xb4, 0x07, 0x89, 0xda, 0xd3, 0x88, 0x88, 0xee, 0x42, 0x2d, 0x2b, 0xc4, 0x95, 0x15, - 0x35, 0x73, 0xd5, 0x4d, 0x31, 0x7c, 0xe7, 0x36, 0x2c, 0xee, 0x3b, 0x1e, 0x95, 0xbf, 0xe9, 0xb1, - 0x1d, 0x8c, 0xb0, 0x3e, 0x60, 0x14, 0xec, 0xfc, 0xb6, 0x01, 0xe5, 0x7d, 0x32, 0xc6, 0x01, 0x65, - 0x18, 0xf5, 0x00, 0x6c, 0x21, 0x42, 0x7f, 0x30, 0x12, 0x58, 0x1a, 0x21, 0x1b, 0xbc, 0x9b, 0x6f, - 0x70, 0x8c, 0x6e, 0x3d, 0x4e, 0xa0, 0xfb, 0x44, 0x84, 0xe7, 0x66, 0x86, 0x8b, 0xbe, 0x80, 0x45, - 0xec, 0x78, 0x54, 0x1b, 0xb5, 0x39, 0xa3, 0xe1, 0x78, 0xb4, 0x57, 0x30, 0x15, 0x06, 0x3d, 0x84, - 0x2a, 0xc3, 0x13, 0x2b, 0xc4, 0xbf, 0x8e, 0x30, 0x17, 0x89, 0x1b, 0x33, 0x73, 0x35, 0x31, 0x23, - 0x44, 0xaf, 0x60, 0x02, 0x4b, 0x22, 0xf4, 0x08, 0x56, 0x23, 0x3a, 0x67, 0xf2, 0x5e, 0x56, 0x1e, - 0x54, 0x3b, 0xb7, 0xae, 0xe4, 0x47, 0x90, 0x5e, 0xc1, 0xac, 0xb2, 0x34, 0x44, 0xf7, 0xa1, 0x1c, - 0x5f, 0xc4, 0x6a, 0x5a, 0x72, 0x4f, 0x8f, 0x6e, 0xe0, 0x7d, 0x8d, 0xe8, 0x15, 0xcc, 0x04, 0x8d, - 0x1e, 0x40, 0x55, 0xdf, 0xd3, 0x96, 0x98, 0x44, 0x37, 0x45, 0xb5, 0x73, 0x23, 0x4b, 0xd6, 0xdb, - 0xad, 0xe3, 0x09, 0x97, 0x75, 0xeb, 0xf0, 0x78, 0xc2, 0xd1, 0x01, 0xd4, 0xd4, 0xb5, 0x9a, 0x1c, - 0x7c, 0x45, 0xb1, 0x77, 0xb2, 0xec, 0xe4, 0x5d, 0xd4, 0xea, 0xca, 0x55, 0x6a, 0xc0, 0xea, 0x20, - 0x13, 0xa3, 0x23, 0xd8, 0x20, 0xd4, 0x8a, 0xd5, 0xb4, 0x0f, 0xd1, 0x1d, 0xf4, 0xe9, 0xd5, 0x72, - 0x2f, 0xa8, 0x16, 0x4c, 0x1c, 0xa9, 0x93, 0xe9, 0x14, 0x3a, 0x84, 0xb5, 0x9c, 0x62, 0x45, 0x29, - 0xde, 0x9d, 0x5b, 0x60, 0xa2, 0x57, 0x1b, 0xe4, 0xd5, 0xe4, 0x4b, 0x71, 0xc4, 0x93, 0xe3, 0xc2, - 0x3c, 0xb5, 0x23, 0x85, 0x4d, 0xcf, 0x5b, 0xe3, 0xd9, 0x04, 0x7a, 0x09, 0xf5, 0x44, 0x4d, 0x17, - 0x57, 0x55, 0x72, 0x9f, 0xcc, 0x97, 0x4b, 0xaa, 0x5b, 0xe3, 0x53, 0x19, 0xf4, 0x1d, 0x6c, 0x70, - 0x62, 0x33, 0xee, 0x51, 0x91, 0x56, 0xb8, 0xaa, 0x24, 0x3f, 0xcb, 0x4a, 0x26, 0x2f, 0xf6, 0xd6, - 0x51, 0x0c, 0x4f, 0x8b, 0x5c, 0xe7, 0xb9, 0x1c, 0xfa, 0x1e, 0x50, 0x56, 0x56, 0x97, 0x5a, 0x53, - 0xba, 0x9f, 0x7f, 0x50, 0x37, 0xa9, 0x76, 0x83, 0xe7, 0x93, 0x72, 0x7a, 0x1c, 0x6f, 0x44, 0xd2, - 0xe9, 0x59, 0x9b, 0x9d, 0x9e, 0x54, 0x74, 0x4f, 0x42, 0x33, 0xd3, 0xe3, 0x64, 0x62, 0xd9, 0x9a, - 0x58, 0x4a, 0x17, 0x58, 0x9f, 0x6d, 0xcd, 0x8c, 0x56, 0xda, 0x68, 0x27, 0x9b, 0x40, 0x3f, 0xc0, - 0xb5, 0xc0, 0x3f, 0xf5, 0x84, 0x35, 0x3d, 0xdc, 0xeb, 0xf3, 0xce, 0x7c, 0x28, 0x09, 0xb9, 0x09, - 0xdf, 0x08, 0xf2, 0x49, 0xf4, 0x33, 0x6c, 0x4e, 0x4b, 0xeb, 0x72, 0x37, 0x94, 0xf6, 0xee, 0x87, - 0xb5, 0x93, 0x9a, 0x51, 0x30, 0x93, 0x95, 0x36, 0x30, 0x3b, 0xb4, 0x87, 0x69, 0xff, 0xd1, 0x3c, - 0x1b, 0xfa, 0x0a, 0x9b, 0x99, 0x50, 0x96, 0x4d, 0xc8, 0x09, 0x4d, 0xd4, 0x74, 0x99, 0xd7, 0x66, - 0x27, 0x74, 0x56, 0x2e, 0x9d, 0x50, 0x36, 0x95, 0x41, 0xdf, 0xc2, 0x1a, 0xc1, 0xaf, 0xad, 0x90, - 0x8e, 0x88, 0x6b, 0x71, 0x81, 0x99, 0xb1, 0x39, 0xdb, 0xf1, 0xe4, 0xcb, 0xb4, 0xf5, 0x02, 0xbf, - 0x36, 0x25, 0xf4, 0x48, 0x60, 0x26, 0x3b, 0x4e, 0x32, 0x31, 0x7a, 0x0e, 0x75, 0xa9, 0x35, 0xb6, - 0x03, 0xdf, 0x8d, 0xcc, 0x34, 0xfe, 0x37, 0x7b, 0xd6, 0x29, 0xb1, 0x57, 0x12, 0xab, 0x0c, 0x93, - 0x67, 0x25, 0xd9, 
0x04, 0xfa, 0x06, 0xca, 0x2c, 0xa4, 0x8c, 0x72, 0x3b, 0x30, 0xae, 0x2b, 0x9d, - 0xc6, 0xd5, 0x3a, 0x7d, 0x8d, 0x92, 0x77, 0x68, 0xcc, 0x40, 0x4f, 0x61, 0x35, 0x5e, 0x5b, 0x8c, - 0x06, 0xc6, 0x0d, 0xa5, 0xf0, 0xff, 0xf9, 0x0a, 0xfd, 0x97, 0x87, 0xea, 0x16, 0x8f, 0x43, 0x1a, - 0xa0, 0x47, 0x00, 0xd1, 0x5c, 0x30, 0x3b, 0x14, 0x86, 0x31, 0xfb, 0xe1, 0x99, 0xaa, 0xa8, 0xb2, - 0xfb, 0x76, 0x28, 0xfb, 0x56, 0x19, 0xc4, 0x01, 0xfa, 0x12, 0x16, 0xc7, 0x54, 0x60, 0xe3, 0xe6, - 0xec, 0x3b, 0x20, 0xe5, 0xbe, 0xa2, 0x42, 0xb6, 0x47, 0x21, 0xd1, 0x03, 0x28, 0x7b, 0x36, 0xb7, - 0x14, 0x6b, 0x6b, 0xf6, 0x9b, 0x2f, 0x65, 0xf5, 0x6c, 0xae, 0x89, 0x2b, 0x5e, 0xb4, 0x94, 0x0d, - 0x95, 0x3c, 0x8b, 0x63, 0x61, 0x0d, 0xed, 0x5f, 0x3a, 0xf7, 0x8c, 0x5b, 0xf3, 0x1a, 0x2a, 0x39, - 0x47, 0x58, 0x3c, 0x97, 0x48, 0xd9, 0xd0, 0x71, 0x26, 0x46, 0xcf, 0xa0, 0x96, 0x68, 0x0d, 0x7c, - 0xc1, 0x8d, 0xdb, 0xf3, 0x4c, 0xd4, 0x52, 0x5d, 0x5f, 0xc8, 0x77, 0x52, 0x75, 0x9c, 0x86, 0xe8, - 0x2b, 0x58, 0x76, 0xe8, 0x70, 0xe8, 0x0b, 0xe3, 0x8e, 0x52, 0xb8, 0x7d, 0xb5, 0xc2, 0x9e, 0xc2, - 0xf4, 0x0a, 0xa6, 0x46, 0x4b, 0xf3, 0xa5, 0x11, 0x9a, 0xdb, 0x98, 0x67, 0x7e, 0xcf, 0xe6, 0x09, - 0xbd, 0xe2, 0xc5, 0xc1, 0xd6, 0x43, 0xa8, 0xe7, 0x3e, 0x28, 0xe4, 0xff, 0x8d, 0x33, 0x7c, 0xae, - 0xbf, 0x57, 0xe4, 0x32, 0xfd, 0x86, 0x29, 0x65, 0xbe, 0x61, 0x1e, 0x94, 0xee, 0x17, 0xbb, 0x4b, - 0xb0, 0xc0, 0x47, 0xc3, 0xee, 0xe1, 0xdb, 0xcb, 0x46, 0xf1, 0xdd, 0x65, 0xa3, 0xf8, 0xe7, 0x65, - 0xa3, 0xf8, 0xe6, 0x7d, 0xa3, 0xf0, 0xee, 0x7d, 0xa3, 0xf0, 0xfb, 0xfb, 0x46, 0xe1, 0xc7, 0xce, - 0xa9, 0x2f, 0xbc, 0xd1, 0xa0, 0xe5, 0xd0, 0x61, 0xdb, 0xb5, 0xb9, 0xc7, 0xec, 0xf3, 0x76, 0x54, - 0x9f, 0x8c, 0xa2, 0x3f, 0x4e, 0xed, 0xe9, 0xff, 0x57, 0x83, 0x65, 0x95, 0xbd, 0xf7, 0x6f, 0x00, - 0x00, 0x00, 0xff, 0xff, 0x5d, 0xf9, 0xc2, 0xa1, 0x61, 0x0e, 0x00, 0x00, + 0x3a, 0x62, 0xa0, 0x06, 0x54, 0x59, 0x48, 0x2d, 0x31, 0xb1, 0x3c, 0x9b, 0x7b, 0x46, 0x65, 0xbb, + 0xb8, 0xbb, 0x6a, 0x56, 0x58, 0x48, 0x8f, 0x27, 0x3d, 0x9b, 0x7b, 0x3b, 0x3f, 0x41, 0x6d, 0x8a, + 0x8d, 0x6e, 0x42, 0x59, 0x4c, 0x2c, 0x9f, 0xb8, 0x78, 0xa2, 0x5c, 0xae, 0x98, 0x2b, 0x62, 0x72, + 0x20, 0x43, 0xd4, 0x86, 0x6a, 0xc8, 0x1c, 0x65, 0x07, 0xe6, 0x5c, 0x5b, 0xb7, 0x76, 0x79, 0xd1, + 0x04, 0xb3, 0xbf, 0xf7, 0x38, 0xca, 0x9a, 0x10, 0x32, 0x47, 0xaf, 0x77, 0xfe, 0x2e, 0x42, 0xb9, + 0x8f, 0x71, 0xa8, 0xda, 0x78, 0x1d, 0x4a, 0xbe, 0x1b, 0x49, 0x76, 0x97, 0x2f, 0x2f, 0x9a, 0xa5, + 0x83, 0x27, 0x66, 0xc9, 0x77, 0x51, 0x17, 0x56, 0xb5, 0xa2, 0xe5, 0x93, 0x13, 0x6a, 0x94, 0xb6, + 0x17, 0xae, 0x6c, 0x2d, 0xc6, 0xa1, 0xd6, 0x95, 0x72, 0x66, 0xd5, 0x4e, 0x03, 0xf4, 0x0c, 0xd6, + 0x02, 0x9b, 0x0b, 0xcb, 0xa1, 0x84, 0x60, 0x47, 0x60, 0x57, 0xb5, 0xab, 0xda, 0xd9, 0x6a, 0x45, + 0xf3, 0xdb, 0x8a, 0xe7, 0xb7, 0x75, 0x1c, 0xcf, 0x6f, 0x77, 0xf1, 0xcd, 0x1f, 0xcd, 0xa2, 0x59, + 0x93, 0xbc, 0xbd, 0x98, 0x26, 0xfb, 0xe3, 0x13, 0xdb, 0x11, 0xfe, 0x18, 0xab, 0xa6, 0x96, 0xcd, + 0x24, 0xce, 0x5b, 0xb9, 0x94, 0xb7, 0xf2, 0x9f, 0x22, 0xd4, 0x73, 0x55, 0xca, 0x9e, 0xc6, 0x76, + 0x69, 0x33, 0x75, 0x88, 0x0e, 0x61, 0x43, 0x95, 0xec, 0xfa, 0x76, 0x60, 0xf1, 0x91, 0xe3, 0xc4, + 0x96, 0x7e, 0x4c, 0xd5, 0x75, 0x49, 0x7d, 0xe2, 0xdb, 0xc1, 0x51, 0x44, 0x9c, 0x56, 0x3b, 0xb1, + 0xfd, 0x60, 0x14, 0xe2, 0x8f, 0xf6, 0x20, 0x51, 0x7b, 0x1a, 0x11, 0xd1, 0x5d, 0xa8, 0x65, 0x85, + 0xb8, 0xb2, 0xa2, 0x66, 0xae, 0xba, 0x29, 0x86, 0xef, 0xdc, 0x86, 0xc5, 0x7d, 0xc7, 0xa3, 0xf2, + 0x37, 0x3d, 0xb6, 0x83, 0x11, 0xd6, 0x07, 0x8c, 0x82, 0x9d, 0xdf, 0x36, 0xa0, 0xbc, 0x4f, 0xc6, + 0x38, 0xa0, 0x0c, 0xa3, 0x1e, 0x80, 0x2d, 0x44, 0xe8, 0x0f, 0x46, 
0x02, 0x4b, 0x23, 0x64, 0x83, + 0x77, 0xf3, 0x0d, 0x8e, 0xd1, 0xad, 0xc7, 0x09, 0x74, 0x9f, 0x88, 0xf0, 0xdc, 0xcc, 0x70, 0xd1, + 0x17, 0xb0, 0x88, 0x1d, 0x8f, 0x6a, 0xa3, 0x36, 0x67, 0x34, 0x1c, 0x8f, 0xf6, 0x0a, 0xa6, 0xc2, + 0xa0, 0x87, 0x50, 0x65, 0x78, 0x62, 0x85, 0xf8, 0xd7, 0x11, 0xe6, 0x22, 0x71, 0x63, 0x66, 0xae, + 0x26, 0x66, 0x84, 0xe8, 0x15, 0x4c, 0x60, 0x49, 0x84, 0x1e, 0xc1, 0x6a, 0x44, 0xe7, 0x4c, 0xde, + 0xcb, 0xca, 0x83, 0x6a, 0xe7, 0xd6, 0x95, 0xfc, 0x08, 0xd2, 0x2b, 0x98, 0x55, 0x96, 0x86, 0xe8, + 0x3e, 0x94, 0xe3, 0x8b, 0x58, 0x4d, 0x4b, 0xee, 0xe9, 0xd1, 0x0d, 0xbc, 0xaf, 0x11, 0xbd, 0x82, + 0x99, 0xa0, 0xd1, 0x03, 0xa8, 0xea, 0x7b, 0xda, 0x12, 0x13, 0x79, 0x53, 0x48, 0xf2, 0x8d, 0x2c, + 0x59, 0x6f, 0xb7, 0x8e, 0x27, 0x5c, 0xd6, 0xad, 0xc3, 0xe3, 0x09, 0x47, 0x07, 0x50, 0x53, 0xd7, + 0x6a, 0x72, 0xf0, 0x15, 0xc5, 0xde, 0xc9, 0xb2, 0x93, 0x77, 0x51, 0xab, 0x2b, 0x57, 0xa9, 0x01, + 0xab, 0x83, 0x4c, 0x8c, 0x8e, 0x60, 0x83, 0x50, 0x2b, 0x56, 0xd3, 0x3e, 0x44, 0x77, 0xd0, 0xa7, + 0x57, 0xcb, 0xbd, 0xa0, 0x5a, 0x30, 0x71, 0xa4, 0x4e, 0xa6, 0x53, 0xe8, 0x10, 0xd6, 0x72, 0x8a, + 0x15, 0xa5, 0x78, 0x77, 0x6e, 0x81, 0x89, 0x5e, 0x6d, 0x90, 0x57, 0x93, 0x2f, 0xc5, 0x11, 0x4f, + 0x8e, 0x0b, 0xf3, 0xd4, 0x8e, 0x14, 0x36, 0x3d, 0x6f, 0x8d, 0x67, 0x13, 0xe8, 0x25, 0xd4, 0x13, + 0x35, 0x5d, 0x5c, 0x55, 0xc9, 0x7d, 0x32, 0x5f, 0x2e, 0xa9, 0x6e, 0x8d, 0x4f, 0x65, 0xd0, 0x77, + 0xb0, 0xc1, 0x89, 0xcd, 0xb8, 0x47, 0x45, 0x5a, 0xe1, 0xaa, 0x92, 0xfc, 0x2c, 0x2b, 0x99, 0xbc, + 0xd8, 0x5b, 0x47, 0x31, 0x3c, 0x2d, 0x72, 0x9d, 0xe7, 0x72, 0xe8, 0x7b, 0x40, 0x59, 0x59, 0x5d, + 0x6a, 0x4d, 0xe9, 0x7e, 0xfe, 0x41, 0xdd, 0xa4, 0xda, 0x0d, 0x9e, 0x4f, 0xca, 0xe9, 0x71, 0xbc, + 0x11, 0x49, 0xa7, 0x67, 0x6d, 0x76, 0x7a, 0x52, 0xd1, 0x3d, 0x09, 0xcd, 0x4c, 0x8f, 0x93, 0x89, + 0x65, 0x6b, 0x62, 0x29, 0x5d, 0x60, 0x7d, 0xb6, 0x35, 0x33, 0x5a, 0x69, 0xa3, 0x9d, 0x6c, 0x02, + 0xfd, 0x00, 0xd7, 0x02, 0xff, 0xd4, 0x13, 0xd6, 0xf4, 0x70, 0xaf, 0xcf, 0x3b, 0xf3, 0xa1, 0x24, + 0xe4, 0x26, 0x7c, 0x23, 0xc8, 0x27, 0xd1, 0xcf, 0xb0, 0x39, 0x2d, 0xad, 0xcb, 0xdd, 0x50, 0xda, + 0xbb, 0x1f, 0xd6, 0x4e, 0x6a, 0x46, 0xc1, 0x4c, 0x56, 0xda, 0xc0, 0xec, 0xd0, 0x1e, 0xa6, 0xfd, + 0x47, 0xf3, 0x6c, 0xe8, 0x2b, 0x6c, 0x66, 0x42, 0x59, 0x36, 0x21, 0x27, 0x34, 0x51, 0xd3, 0x65, + 0x5e, 0x9b, 0x9d, 0xd0, 0x59, 0xb9, 0x74, 0x42, 0xd9, 0x54, 0x06, 0x7d, 0x0b, 0x6b, 0x04, 0xbf, + 0xb6, 0x42, 0x3a, 0x22, 0xae, 0xc5, 0x05, 0x66, 0xc6, 0xe6, 0x6c, 0xc7, 0x93, 0x2f, 0xd3, 0xd6, + 0x0b, 0xfc, 0xda, 0x94, 0xd0, 0x23, 0x81, 0x99, 0xec, 0x38, 0xc9, 0xc4, 0xe8, 0x39, 0xd4, 0xa5, + 0xd6, 0xd8, 0x0e, 0x7c, 0x37, 0x32, 0xd3, 0xf8, 0xdf, 0xec, 0x59, 0xa7, 0xc4, 0x5e, 0x49, 0xac, + 0x32, 0x4c, 0x9e, 0x95, 0x64, 0x13, 0xe8, 0x1b, 0x28, 0xb3, 0x90, 0x32, 0xca, 0xed, 0xc0, 0xb8, + 0xae, 0x74, 0x1a, 0x57, 0xeb, 0xf4, 0x35, 0x4a, 0xde, 0xa1, 0x31, 0x03, 0x3d, 0x85, 0xd5, 0x78, + 0x6d, 0x31, 0x1a, 0x18, 0x37, 0x94, 0xc2, 0xff, 0xe7, 0x2b, 0xf4, 0x5f, 0x1e, 0xaa, 0x5b, 0x3c, + 0x0e, 0x69, 0x80, 0x1e, 0x01, 0x44, 0x73, 0xc1, 0xec, 0x50, 0x18, 0xc6, 0xec, 0x87, 0x67, 0xaa, + 0xa2, 0xca, 0xee, 0xdb, 0xa1, 0xec, 0x5b, 0x65, 0x10, 0x07, 0xe8, 0x4b, 0x58, 0x1c, 0x53, 0x81, + 0x8d, 0x9b, 0xb3, 0xef, 0x80, 0x94, 0xfb, 0x8a, 0x0a, 0xd9, 0x1e, 0x85, 0x44, 0x0f, 0xa0, 0xec, + 0xd9, 0xdc, 0x52, 0xac, 0xad, 0xd9, 0x6f, 0xbe, 0x94, 0xd5, 0xb3, 0xb9, 0x26, 0xae, 0x78, 0xd1, + 0x52, 0x36, 0x54, 0xf2, 0x2c, 0x8e, 0x85, 0x35, 0xb4, 0x7f, 0xe9, 0xdc, 0x33, 0x6e, 0xcd, 0x6b, + 0xa8, 0xe4, 0x1c, 0x61, 0xf1, 0x5c, 0x22, 0x65, 0x43, 0xc7, 0x99, 0x18, 0x3d, 0x83, 0x5a, 
0xa2, + 0x35, 0xf0, 0x05, 0x37, 0x6e, 0xcf, 0x33, 0x51, 0x4b, 0x75, 0x7d, 0x21, 0xdf, 0x49, 0xd5, 0x71, + 0x1a, 0xa2, 0xaf, 0x60, 0xd9, 0xa1, 0xc3, 0xa1, 0x2f, 0x8c, 0x3b, 0x4a, 0xe1, 0xf6, 0xd5, 0x0a, + 0x7b, 0x0a, 0xd3, 0x2b, 0x98, 0x1a, 0x2d, 0xcd, 0x97, 0x46, 0x68, 0x6e, 0x63, 0x9e, 0xf9, 0x3d, + 0x9b, 0x27, 0xf4, 0x8a, 0x17, 0x07, 0x5b, 0x0f, 0xa1, 0x9e, 0xfb, 0xa0, 0x90, 0xff, 0x37, 0xce, + 0xf0, 0xb9, 0xfe, 0x5e, 0x91, 0xcb, 0xf4, 0x1b, 0xa6, 0x94, 0xf9, 0x86, 0x79, 0x50, 0xba, 0x5f, + 0xec, 0x2e, 0xc1, 0x02, 0x1f, 0x0d, 0xbb, 0x87, 0x6f, 0x2f, 0x1b, 0xc5, 0x77, 0x97, 0x8d, 0xe2, + 0x9f, 0x97, 0x8d, 0xe2, 0x9b, 0xf7, 0x8d, 0xc2, 0xbb, 0xf7, 0x8d, 0xc2, 0xef, 0xef, 0x1b, 0x85, + 0x1f, 0x3b, 0xa7, 0xbe, 0xf0, 0x46, 0x83, 0x96, 0x43, 0x87, 0x6d, 0xd7, 0xe6, 0x1e, 0xb3, 0xcf, + 0xdb, 0x51, 0x7d, 0x32, 0x8a, 0xfe, 0x38, 0xb5, 0xa7, 0xff, 0x5f, 0x0d, 0x96, 0x55, 0xf6, 0xde, + 0xbf, 0x01, 0x00, 0x00, 0xff, 0xff, 0x4c, 0xe1, 0x61, 0x1a, 0x61, 0x0e, 0x00, 0x00, } func (m *ProtocolVersion) Marshal() (dAtA []byte, err error) { @@ -1075,9 +1075,20 @@ func (m *NodeInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { dAtA[i] = 0x3a } if len(m.Channels) > 0 { - i -= len(m.Channels) - copy(dAtA[i:], m.Channels) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Channels))) + dAtA3 := make([]byte, len(m.Channels)*10) + var j2 int + for _, num := range m.Channels { + for num >= 1<<7 { + dAtA3[j2] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j2++ + } + dAtA3[j2] = uint8(num) + j2++ + } + i -= j2 + copy(dAtA[i:], dAtA3[:j2]) + i = encodeVarintTypes(dAtA, i, uint64(j2)) i-- dAtA[i] = 0x32 } @@ -1197,12 +1208,12 @@ func (m *PeerInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { dAtA[i] = 0x20 } if m.LastConnected != nil { - n3, err3 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.LastConnected, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.LastConnected):]) - if err3 != nil { - return 0, err3 + n5, err5 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.LastConnected, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.LastConnected):]) + if err5 != nil { + return 0, err5 } - i -= n3 - i = encodeVarintTypes(dAtA, i, uint64(n3)) + i -= n5 + i = encodeVarintTypes(dAtA, i, uint64(n5)) i-- dAtA[i] = 0x1a } @@ -1256,22 +1267,22 @@ func (m *PeerAddressInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { dAtA[i] = 0x20 } if m.LastDialFailure != nil { - n4, err4 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.LastDialFailure, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.LastDialFailure):]) - if err4 != nil { - return 0, err4 + n6, err6 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.LastDialFailure, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.LastDialFailure):]) + if err6 != nil { + return 0, err6 } - i -= n4 - i = encodeVarintTypes(dAtA, i, uint64(n4)) + i -= n6 + i = encodeVarintTypes(dAtA, i, uint64(n6)) i-- dAtA[i] = 0x1a } if m.LastDialSuccess != nil { - n5, err5 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.LastDialSuccess, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.LastDialSuccess):]) - if err5 != nil { - return 0, err5 + n7, err7 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.LastDialSuccess, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.LastDialSuccess):]) + if err7 != nil { + return 0, err7 } - i -= n5 - i = encodeVarintTypes(dAtA, i, uint64(n5)) + i -= n7 + i = encodeVarintTypes(dAtA, i, uint64(n7)) i-- dAtA[i] = 0x12 } @@ -2058,9 +2069,12 @@ func (m *NodeInfo) Size() (n int) { if l > 0 { n += 1 + l + 
sovTypes(uint64(l)) } - l = len(m.Channels) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) + if len(m.Channels) > 0 { + l = 0 + for _, e := range m.Channels { + l += sovTypes(uint64(e)) + } + n += 1 + sovTypes(uint64(l)) + l } l = len(m.Moniker) if l > 0 { @@ -2832,39 +2846,81 @@ func (m *NodeInfo) Unmarshal(dAtA []byte) error { m.Version = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Channels", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes + if wireType == 0 { + var v uint32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { + m.Channels = append(m.Channels, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break + var elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ + } } + elementCount = count + if elementCount != 0 && len(m.Channels) == 0 { + m.Channels = make([]uint32, 0, elementCount) + } + for iNdEx < postIndex { + var v uint32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Channels = append(m.Channels, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field Channels", wireType) } - if byteLen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Channels = append(m.Channels[:0], dAtA[iNdEx:postIndex]...) - if m.Channels == nil { - m.Channels = []byte{} - } - iNdEx = postIndex case 7: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Moniker", wireType) diff --git a/proto/tendermint/p2p/types.proto b/proto/tendermint/p2p/types.proto index 9baa663eca..2b37dcdde8 100644 --- a/proto/tendermint/p2p/types.proto +++ b/proto/tendermint/p2p/types.proto @@ -24,7 +24,7 @@ message NodeInfo { string listen_addr = 3; string network = 4; string version = 5; - bytes channels = 6; + repeated uint32 channels = 6; string moniker = 7; NodeInfoOther other = 8 [(gogoproto.nullable) = false]; bytes pro_tx_hash = 9; diff --git a/proto/tendermint/state/types.pb.go b/proto/tendermint/state/types.pb.go index 69ed892f8c..1a53d15db6 100644 --- a/proto/tendermint/state/types.pb.go +++ b/proto/tendermint/state/types.pb.go @@ -35,7 +35,6 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package // It is persisted to disk for each height before calling Commit. 
type ABCIResponses struct { ProcessProposal *types.ResponseProcessProposal `protobuf:"bytes,1,opt,name=process_proposal,json=processProposal,proto3" json:"process_proposal,omitempty"` - FinalizeBlock *types.ResponseFinalizeBlock `protobuf:"bytes,2,opt,name=finalize_block,json=finalizeBlock,proto3" json:"finalize_block,omitempty"` } func (m *ABCIResponses) Reset() { *m = ABCIResponses{} } @@ -78,13 +77,6 @@ func (m *ABCIResponses) GetProcessProposal() *types.ResponseProcessProposal { return nil } -func (m *ABCIResponses) GetFinalizeBlock() *types.ResponseFinalizeBlock { - if m != nil { - return m.FinalizeBlock - } - return nil -} - // ValidatorsInfo represents the latest validator set, or the last height it changed type ValidatorsInfo struct { ValidatorSet *types1.ValidatorSet `protobuf:"bytes,1,opt,name=validator_set,json=validatorSet,proto3" json:"validator_set,omitempty"` @@ -421,57 +413,56 @@ func init() { func init() { proto.RegisterFile("tendermint/state/types.proto", fileDescriptor_ccfacf933f22bf93) } var fileDescriptor_ccfacf933f22bf93 = []byte{ - // 799 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x55, 0x3f, 0x6f, 0xfb, 0x44, - 0x18, 0x8e, 0xf9, 0xb5, 0x4d, 0x72, 0xa9, 0x93, 0xf6, 0xda, 0xc1, 0x4d, 0xa9, 0x13, 0x02, 0x54, - 0x11, 0x83, 0x23, 0x01, 0x0b, 0x0b, 0x52, 0x93, 0x0a, 0x1a, 0x51, 0xaa, 0xca, 0x45, 0x1d, 0x58, - 0xac, 0x8b, 0x7d, 0x89, 0x2d, 0x1c, 0x9f, 0xe5, 0xbb, 0x14, 0xca, 0xce, 0xde, 0x95, 0xcf, 0xc1, - 0xcc, 0xde, 0xb1, 0x23, 0x53, 0x41, 0xe9, 0x17, 0x41, 0xf7, 0xc7, 0xce, 0x39, 0x29, 0x52, 0xd1, - 0x6f, 0xcb, 0xbd, 0xcf, 0xf3, 0x3e, 0xf7, 0xe4, 0xbd, 0xe7, 0x95, 0xc1, 0x87, 0x0c, 0x27, 0x01, - 0xce, 0xe6, 0x51, 0xc2, 0x06, 0x94, 0x21, 0x86, 0x07, 0xec, 0x3e, 0xc5, 0xd4, 0x49, 0x33, 0xc2, - 0x08, 0xdc, 0x5b, 0xa1, 0x8e, 0x40, 0xdb, 0x87, 0x33, 0x32, 0x23, 0x02, 0x1c, 0xf0, 0x5f, 0x92, - 0xd7, 0x3e, 0xd6, 0x54, 0xd0, 0xc4, 0x8f, 0x74, 0x91, 0xb6, 0x7e, 0x85, 0xa8, 0x97, 0xd0, 0xe3, - 0x0d, 0x34, 0x40, 0x34, 0x54, 0x60, 0x77, 0x03, 0xbc, 0x43, 0x71, 0x14, 0x20, 0x46, 0x32, 0xc5, - 0x38, 0xd9, 0x60, 0xa4, 0x28, 0x43, 0xf3, 0x5c, 0xdd, 0xd6, 0xe0, 0x3b, 0x9c, 0xd1, 0x88, 0x24, - 0xa5, 0xdb, 0x3b, 0x33, 0x42, 0x66, 0x31, 0x1e, 0x88, 0xd3, 0x64, 0x31, 0x1d, 0xb0, 0x68, 0x8e, - 0x29, 0x43, 0xf3, 0x54, 0x12, 0x7a, 0x7f, 0x18, 0xc0, 0x3c, 0x1b, 0x8e, 0xc6, 0x2e, 0xa6, 0x29, - 0x49, 0x28, 0xa6, 0xf0, 0x06, 0xec, 0xa5, 0x19, 0xf1, 0x31, 0xa5, 0x5e, 0x9a, 0x91, 0x94, 0x50, - 0x14, 0x5b, 0x46, 0xd7, 0xe8, 0x37, 0x3e, 0xef, 0x3b, 0xda, 0xb8, 0xf8, 0x18, 0x9c, 0xbc, 0xeb, - 0x5a, 0x36, 0x5c, 0x2b, 0xbe, 0xdb, 0x4a, 0xcb, 0x05, 0xf8, 0x3d, 0x68, 0x4e, 0xa3, 0x04, 0xc5, - 0xd1, 0xaf, 0xd8, 0x9b, 0xc4, 0xc4, 0xff, 0xc9, 0xfa, 0x40, 0x48, 0x9e, 0xfe, 0xa7, 0xe4, 0x37, - 0x8a, 0x3e, 0xe4, 0x6c, 0xd7, 0x9c, 0xea, 0xc7, 0xde, 0x6f, 0x06, 0x68, 0xde, 0xe6, 0x93, 0xa2, - 0xe3, 0x64, 0x4a, 0xe0, 0x08, 0x98, 0xc5, 0xec, 0x3c, 0x8a, 0x99, 0xf2, 0x6c, 0xeb, 0x17, 0xc8, - 0xc9, 0x14, 0x8d, 0x37, 0x98, 0xb9, 0xbb, 0x77, 0xda, 0x09, 0x3a, 0xe0, 0x20, 0x46, 0x94, 0x79, - 0x21, 0x8e, 0x66, 0x21, 0xf3, 0xfc, 0x10, 0x25, 0x33, 0x1c, 0x08, 0xaf, 0xef, 0xdc, 0x7d, 0x0e, - 0x5d, 0x08, 0x64, 0x24, 0x81, 0xde, 0xef, 0x06, 0x38, 0x18, 0x71, 0xb7, 0x09, 0x5d, 0xd0, 0x6b, - 0xf1, 0x30, 0xc2, 0x8c, 0x0b, 0xf6, 0xfc, 0xbc, 0xec, 0xc9, 0x07, 0x53, 0x7e, 0x3e, 0xda, 0xf4, - 0xb3, 0x26, 0x30, 0xdc, 0x7a, 0x7c, 0xee, 0x54, 0xdc, 0x96, 0x5f, 0x2e, 0xff, 0x6f, 0x6f, 0x21, - 0xa8, 0xde, 0xca, 0x44, 0xc0, 0x33, 0x50, 0x2f, 0xd4, 0x94, 0x8f, 0x13, 0xdd, 0x87, 
0x4a, 0xce, - 0xca, 0x89, 0xf2, 0xb0, 0xea, 0x82, 0x6d, 0x50, 0xa3, 0x64, 0xca, 0x7e, 0x46, 0x19, 0x16, 0x57, - 0xd6, 0xdd, 0xe2, 0xdc, 0xfb, 0xb3, 0x0a, 0xb6, 0x6f, 0xf8, 0xf6, 0xc0, 0xaf, 0x40, 0x55, 0x69, - 0xa9, 0x6b, 0x8e, 0x9c, 0xf5, 0x0d, 0x73, 0x94, 0x29, 0x75, 0x45, 0xce, 0x87, 0xa7, 0xa0, 0xe6, - 0x87, 0x28, 0x4a, 0xbc, 0x48, 0xfe, 0xa7, 0xfa, 0xb0, 0xb1, 0x7c, 0xee, 0x54, 0x47, 0xbc, 0x36, - 0x3e, 0x77, 0xab, 0x02, 0x1c, 0x07, 0xf0, 0x53, 0xd0, 0x8c, 0x92, 0x88, 0x45, 0x28, 0x56, 0x93, - 0xb0, 0xde, 0x89, 0x09, 0x98, 0xaa, 0x2a, 0x87, 0x00, 0x3f, 0x03, 0x62, 0x24, 0x32, 0x6c, 0x39, - 0x73, 0x4b, 0x30, 0x5b, 0x1c, 0x10, 0x39, 0x52, 0x5c, 0x17, 0x98, 0x1a, 0x37, 0x0a, 0xac, 0xed, - 0x4d, 0xef, 0xf2, 0xa9, 0x44, 0xd7, 0xf8, 0x7c, 0x78, 0xc0, 0xbd, 0x2f, 0x9f, 0x3b, 0x8d, 0xcb, - 0x5c, 0x6a, 0x7c, 0xee, 0x36, 0x0a, 0xdd, 0x71, 0x00, 0x2f, 0x41, 0x4b, 0xd3, 0xe4, 0x5b, 0x67, - 0xed, 0x08, 0xd5, 0xb6, 0x23, 0x57, 0xd2, 0xc9, 0x57, 0xd2, 0xf9, 0x21, 0x5f, 0xc9, 0x61, 0x8d, - 0xcb, 0x3e, 0xfc, 0xdd, 0x31, 0x5c, 0xb3, 0xd0, 0xe2, 0x28, 0xfc, 0x1a, 0x80, 0x22, 0xa7, 0xd4, - 0xaa, 0xbe, 0x29, 0xd9, 0x5a, 0x07, 0xfc, 0x56, 0xb9, 0xd1, 0x44, 0x6a, 0x6f, 0x12, 0x69, 0xf2, - 0xb6, 0xd5, 0xa6, 0xc1, 0x11, 0xb0, 0xf5, 0x10, 0xae, 0xf4, 0x8a, 0x3c, 0xd6, 0xc5, 0x8c, 0x8f, - 0x57, 0x79, 0x5c, 0x75, 0xab, 0x64, 0xbe, 0xba, 0x1d, 0xe0, 0x3d, 0xb7, 0xe3, 0x0a, 0x7c, 0x52, - 0xda, 0x8e, 0x35, 0xfd, 0xc2, 0x5e, 0x43, 0xd8, 0xeb, 0x6a, 0xeb, 0x52, 0x16, 0xca, 0x3d, 0xe6, - 0xf9, 0xc9, 0x30, 0x5d, 0xc4, 0x8c, 0x7a, 0x21, 0xa2, 0xa1, 0xb5, 0xdb, 0x35, 0xfa, 0xbb, 0x32, - 0x3f, 0xae, 0xac, 0x5f, 0x20, 0x1a, 0xc2, 0x23, 0x50, 0x43, 0x69, 0x2a, 0x29, 0xa6, 0xa0, 0x54, - 0x51, 0x9a, 0x0a, 0xe8, 0x3b, 0xf0, 0xb1, 0x90, 0xf1, 0x49, 0x86, 0x3d, 0x99, 0x6f, 0xfe, 0xa8, - 0x38, 0x28, 0x07, 0xb3, 0xd9, 0x35, 0xfa, 0xa6, 0x2b, 0x46, 0x3b, 0x22, 0x19, 0x16, 0xa1, 0xbf, - 0x14, 0x3c, 0x3d, 0xa7, 0xb7, 0xe0, 0x30, 0xc1, 0xbf, 0x6c, 0x88, 0x59, 0x2d, 0x31, 0xbb, 0xce, - 0x6b, 0xb3, 0xd3, 0xb4, 0xc4, 0xe4, 0x0c, 0x77, 0x9f, 0x4b, 0x94, 0x81, 0xab, 0xc7, 0xa5, 0x6d, - 0x3c, 0x2d, 0x6d, 0xe3, 0x9f, 0xa5, 0x6d, 0x3c, 0xbc, 0xd8, 0x95, 0xa7, 0x17, 0xbb, 0xf2, 0xd7, - 0x8b, 0x5d, 0xf9, 0xf1, 0xcb, 0x59, 0xc4, 0xc2, 0xc5, 0xc4, 0xf1, 0xc9, 0x5c, 0x7c, 0xb6, 0x52, - 0x74, 0x3f, 0x90, 0xb7, 0xf0, 0x93, 0xfc, 0xaa, 0x0c, 0xd6, 0xbf, 0xb0, 0x93, 0x1d, 0x51, 0xff, - 0xe2, 0xdf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x72, 0x4b, 0x4a, 0xb6, 0x7c, 0x07, 0x00, 0x00, + // 771 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x55, 0xcd, 0x4e, 0xdb, 0x4c, + 0x14, 0x8d, 0x3f, 0x7e, 0x92, 0x4c, 0x48, 0x02, 0x03, 0x0b, 0x13, 0x3e, 0x9c, 0x34, 0xfd, 0x51, + 0xd4, 0x85, 0x23, 0xb5, 0xdd, 0x74, 0x53, 0x89, 0x04, 0xa9, 0x44, 0x45, 0x08, 0x99, 0x8a, 0x45, + 0x37, 0xd6, 0xc4, 0x1e, 0x62, 0xab, 0x89, 0xc7, 0xf2, 0x4c, 0x68, 0x79, 0x80, 0xee, 0xd9, 0xf6, + 0x61, 0xba, 0x67, 0xc9, 0xb2, 0x2b, 0x5a, 0x85, 0x17, 0xa9, 0xe6, 0xc7, 0xce, 0x38, 0x61, 0x41, + 0xd5, 0x9d, 0xe7, 0x9e, 0x7b, 0xcf, 0x3d, 0x73, 0xe7, 0x5c, 0x19, 0xfc, 0xcf, 0x70, 0xe4, 0xe3, + 0x64, 0x12, 0x46, 0xac, 0x4b, 0x19, 0x62, 0xb8, 0xcb, 0xae, 0x62, 0x4c, 0xed, 0x38, 0x21, 0x8c, + 0xc0, 0xcd, 0x39, 0x6a, 0x0b, 0xb4, 0xb1, 0x33, 0x22, 0x23, 0x22, 0xc0, 0x2e, 0xff, 0x92, 0x79, + 0x8d, 0x3d, 0x8d, 0x05, 0x0d, 0xbd, 0x50, 0x27, 0x69, 0xe8, 0x2d, 0x44, 0x3c, 0x87, 0xee, 0x2d, + 0xa1, 0x3e, 0xa2, 0x81, 0x02, 0x5b, 0x4b, 0xe0, 0x25, 0x1a, 0x87, 0x3e, 0x62, 0x24, 0x51, 0x19, + 0xfb, 0x4b, 0x19, 0x31, 0x4a, 0xd0, 0x24, 0x65, 0xb7, 0x34, 0xf8, 
0x12, 0x27, 0x34, 0x24, 0x51, + 0xae, 0x7b, 0x73, 0x44, 0xc8, 0x68, 0x8c, 0xbb, 0xe2, 0x34, 0x9c, 0x5e, 0x74, 0x59, 0x38, 0xc1, + 0x94, 0xa1, 0x49, 0x2c, 0x13, 0xda, 0x3e, 0xa8, 0x1e, 0xf4, 0xfa, 0x03, 0x07, 0xd3, 0x98, 0x44, + 0x14, 0x53, 0x78, 0x06, 0x36, 0xe3, 0x84, 0x78, 0x98, 0x52, 0x37, 0x4e, 0x48, 0x4c, 0x28, 0x1a, + 0x9b, 0x46, 0xcb, 0xe8, 0x54, 0x5e, 0x75, 0x6c, 0x6d, 0x5a, 0x7c, 0x0a, 0x76, 0x5a, 0x75, 0x2a, + 0x0b, 0x4e, 0x55, 0xbe, 0x53, 0x8f, 0xf3, 0x81, 0xf6, 0x37, 0x03, 0xd4, 0xce, 0xd3, 0x9b, 0xd1, + 0x41, 0x74, 0x41, 0x60, 0x1f, 0x54, 0xb3, 0xbb, 0xba, 0x14, 0x33, 0xd5, 0xc4, 0xd2, 0x9b, 0xc8, + 0x9b, 0x64, 0x85, 0x67, 0x98, 0x39, 0x1b, 0x97, 0xda, 0x09, 0xda, 0x60, 0x7b, 0x8c, 0x28, 0x73, + 0x03, 0x1c, 0x8e, 0x02, 0xe6, 0x7a, 0x01, 0x8a, 0x46, 0xd8, 0x37, 0xff, 0x6b, 0x19, 0x9d, 0x15, + 0x67, 0x8b, 0x43, 0x47, 0x02, 0xe9, 0x4b, 0xa0, 0xfd, 0xdd, 0x00, 0xdb, 0x7d, 0xae, 0x38, 0xa2, + 0x53, 0x7a, 0x2a, 0x06, 0x29, 0xc4, 0x38, 0x60, 0xd3, 0x4b, 0xc3, 0xae, 0x1c, 0xb0, 0xd2, 0xf3, + 0x64, 0x59, 0xcf, 0x02, 0x41, 0x6f, 0xf5, 0xe6, 0xae, 0x59, 0x70, 0xea, 0x5e, 0x3e, 0xfc, 0xd7, + 0xda, 0x02, 0x50, 0x3c, 0x97, 0x2f, 0x08, 0x0f, 0x40, 0x39, 0x63, 0x53, 0x3a, 0xf6, 0x75, 0x1d, + 0xea, 0xa5, 0xe7, 0x4a, 0x94, 0x86, 0x79, 0x15, 0x6c, 0x80, 0x12, 0x25, 0x17, 0xec, 0x0b, 0x4a, + 0xb0, 0x68, 0x59, 0x76, 0xb2, 0x73, 0xfb, 0x47, 0x11, 0xac, 0x9d, 0x71, 0xb7, 0xc3, 0xb7, 0xa0, + 0xa8, 0xb8, 0x54, 0x9b, 0x5d, 0x7b, 0x71, 0x23, 0x6c, 0x25, 0x4a, 0xb5, 0x48, 0xf3, 0xe1, 0x0b, + 0x50, 0xf2, 0x02, 0x14, 0x46, 0x6e, 0x28, 0xef, 0x54, 0xee, 0x55, 0x66, 0x77, 0xcd, 0x62, 0x9f, + 0xc7, 0x06, 0x87, 0x4e, 0x51, 0x80, 0x03, 0x1f, 0x3e, 0x07, 0xb5, 0x30, 0x0a, 0x59, 0x88, 0xc6, + 0x6a, 0x12, 0xe6, 0x8a, 0x98, 0x40, 0x55, 0x45, 0xe5, 0x10, 0xe0, 0x4b, 0x20, 0x46, 0xe2, 0x0e, + 0xc7, 0xc4, 0xfb, 0x9c, 0x66, 0xae, 0x8a, 0xcc, 0x3a, 0x07, 0x7a, 0x3c, 0xae, 0x72, 0x1d, 0x50, + 0xd5, 0x72, 0x43, 0xdf, 0x5c, 0x5b, 0xd6, 0x2e, 0x9f, 0x4a, 0x54, 0x0d, 0x0e, 0x7b, 0xdb, 0x5c, + 0xfb, 0xec, 0xae, 0x59, 0x39, 0x4e, 0xa9, 0x06, 0x87, 0x4e, 0x25, 0xe3, 0x1d, 0xf8, 0xf0, 0x18, + 0xd4, 0x35, 0x4e, 0xbe, 0x25, 0xe6, 0xba, 0x60, 0x6d, 0xd8, 0x72, 0x85, 0xec, 0x74, 0x85, 0xec, + 0x8f, 0xe9, 0x0a, 0xf5, 0x4a, 0x9c, 0xf6, 0xfa, 0x57, 0xd3, 0x70, 0xaa, 0x19, 0x17, 0x47, 0xe1, + 0x3b, 0x00, 0x32, 0x9f, 0x52, 0xb3, 0xf8, 0x28, 0x67, 0x6b, 0x15, 0xf0, 0xbd, 0x52, 0xa3, 0x91, + 0x94, 0x1e, 0x45, 0x52, 0xe3, 0x65, 0xf3, 0x4d, 0x83, 0x7d, 0x60, 0xe9, 0x26, 0x9c, 0xf3, 0x65, + 0x7e, 0x2c, 0x8b, 0x19, 0xef, 0xcd, 0xfd, 0x38, 0xaf, 0x56, 0xce, 0x7c, 0x70, 0x3b, 0xc0, 0x3f, + 0x6e, 0xc7, 0x09, 0x78, 0x96, 0xdb, 0x8e, 0x05, 0xfe, 0x4c, 0x5e, 0x45, 0xc8, 0x6b, 0x69, 0xeb, + 0x92, 0x27, 0x4a, 0x35, 0xa6, 0xfe, 0x49, 0x30, 0x9d, 0x8e, 0x19, 0x75, 0x03, 0x44, 0x03, 0x73, + 0xa3, 0x65, 0x74, 0x36, 0xa4, 0x7f, 0x1c, 0x19, 0x3f, 0x42, 0x34, 0x80, 0xbb, 0xa0, 0x84, 0xe2, + 0x58, 0xa6, 0x54, 0x45, 0x4a, 0x11, 0xc5, 0xb1, 0x80, 0x3e, 0x80, 0xa7, 0x82, 0xc6, 0x23, 0x09, + 0x76, 0xa5, 0xbf, 0xf9, 0xa3, 0x62, 0x3f, 0x6f, 0xcc, 0x5a, 0xcb, 0xe8, 0x54, 0x1d, 0x31, 0xda, + 0x3e, 0x49, 0xb0, 0x30, 0xfd, 0xb1, 0xc8, 0xd3, 0x7d, 0x7a, 0x0e, 0x76, 0x22, 0xfc, 0x75, 0x89, + 0xcc, 0xac, 0x8b, 0xd9, 0x35, 0x1f, 0x9a, 0x9d, 0xc6, 0x25, 0x26, 0x67, 0x38, 0x5b, 0x9c, 0x22, + 0x0f, 0x9c, 0xdc, 0xcc, 0x2c, 0xe3, 0x76, 0x66, 0x19, 0xbf, 0x67, 0x96, 0x71, 0x7d, 0x6f, 0x15, + 0x6e, 0xef, 0xad, 0xc2, 0xcf, 0x7b, 0xab, 0xf0, 0xe9, 0xcd, 0x28, 0x64, 0xc1, 0x74, 0x68, 0x7b, + 0x64, 0x22, 0x7e, 0x33, 0x31, 0xba, 0xea, 0xca, 0x2e, 0xfc, 0x24, 0xff, 0x02, 0xdd, 0xc5, 
0x3f, + 0xe2, 0x70, 0x5d, 0xc4, 0x5f, 0xff, 0x09, 0x00, 0x00, 0xff, 0xff, 0x0b, 0x4e, 0x41, 0x27, 0x2c, + 0x07, 0x00, 0x00, } func (m *ABCIResponses) Marshal() (dAtA []byte, err error) { @@ -494,18 +485,6 @@ func (m *ABCIResponses) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l - if m.FinalizeBlock != nil { - { - size, err := m.FinalizeBlock.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } if m.ProcessProposal != nil { { size, err := m.ProcessProposal.MarshalToSizedBuffer(dAtA[:i]) @@ -734,12 +713,12 @@ func (m *State) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x3a } - n10, err10 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.LastBlockTime, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.LastBlockTime):]) - if err10 != nil { - return 0, err10 + n9, err9 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.LastBlockTime, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.LastBlockTime):]) + if err9 != nil { + return 0, err9 } - i -= n10 - i = encodeVarintTypes(dAtA, i, uint64(n10)) + i -= n9 + i = encodeVarintTypes(dAtA, i, uint64(n9)) i-- dAtA[i] = 0x32 { @@ -803,10 +782,6 @@ func (m *ABCIResponses) Size() (n int) { l = m.ProcessProposal.Size() n += 1 + l + sovTypes(uint64(l)) } - if m.FinalizeBlock != nil { - l = m.FinalizeBlock.Size() - n += 1 + l + sovTypes(uint64(l)) - } return n } @@ -982,42 +957,6 @@ func (m *ABCIResponses) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FinalizeBlock", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.FinalizeBlock == nil { - m.FinalizeBlock = &types.ResponseFinalizeBlock{} - } - if err := m.FinalizeBlock.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipTypes(dAtA[iNdEx:]) diff --git a/proto/tendermint/state/types.proto b/proto/tendermint/state/types.proto index 5445ac9922..7b87241a36 100644 --- a/proto/tendermint/state/types.proto +++ b/proto/tendermint/state/types.proto @@ -17,7 +17,6 @@ import "google/protobuf/timestamp.proto"; // It is persisted to disk for each height before calling Commit. 
 message ABCIResponses {
   tendermint.abci.ResponseProcessProposal process_proposal = 1;
-  tendermint.abci.ResponseFinalizeBlock finalize_block = 2;
 }
 
 // ValidatorsInfo represents the latest validator set, or the last height it changed
diff --git a/proto/tendermint/types/dash.go b/proto/tendermint/types/dash.go
index 46e0f7f41b..7ac0d9939a 100644
--- a/proto/tendermint/types/dash.go
+++ b/proto/tendermint/types/dash.go
@@ -1,4 +1,152 @@
 package types
 
+import (
+	"bytes"
+	"errors"
+	fmt "fmt"
+
+	"github.com/dashpay/tenderdash/crypto"
+	"github.com/dashpay/tenderdash/crypto/bls12381"
+)
+
 // VoteExtensions is a container type for grouped vote extensions by type
-type VoteExtensions map[VoteExtensionType][]*VoteExtension
+type VoteExtensions []*VoteExtension
+
+var (
+	errExtensionNil                       = errors.New("vote extension is nil")
+	errExtensionSignEmpty                 = errors.New("vote extension signature is missing")
+	errExtensionSignTooBig                = fmt.Errorf("vote extension signature is too big (max: %d)", bls12381.SignatureSize)
+	errExtensionSignRequestIDNotSupported = errors.New("vote extension sign request id is not supported")
+	errExtensionSignRequestIDWrongPrefix  = errors.New("vote extension sign request id must have dpevote or \\x06plwdtx prefix")
+)
+
+// Clone returns a shallow copy of the current vote extension.
+//
+// Clone of a nil vote extension panics.
+//
+func (v *VoteExtension) Clone() VoteExtension {
+	if v == nil {
+		panic("cannot clone nil vote-extension")
+	}
+
+	ve := VoteExtension{
+		Type:      v.Type,
+		Extension: v.Extension,
+		Signature: v.Signature,
+	}
+
+	if v.XSignRequestId != nil && v.XSignRequestId.Size() > 0 {
+		ve.XSignRequestId = &VoteExtension_SignRequestId{
+			SignRequestId: v.GetSignRequestId(),
+		}
+	}
+
+	return ve
+}
+
+// Copy returns a deep copy of the current vote extension.
+func (v *VoteExtension) Copy() VoteExtension { + if v == nil { + panic("cannot copy nil vote-extension") + } + + ve := VoteExtension{ + Type: v.Type, + Extension: bytes.Clone(v.Extension), + Signature: bytes.Clone(v.Signature), + } + + if v.XSignRequestId != nil && v.XSignRequestId.Size() > 0 { + ve.XSignRequestId = &VoteExtension_SignRequestId{ + SignRequestId: bytes.Clone(v.GetSignRequestId()), + } + } + + return ve +} + +func (v *VoteExtension) Equal(other *VoteExtension) bool { + if v == nil || other == nil { + return false + } + + if v.Type != other.Type { + return false + } + + if !bytes.Equal(v.Extension, other.Extension) { + return false + } + + if !bytes.Equal(v.Signature, other.Signature) { + return false + } + + // one of them is nil, but not both + if (v.XSignRequestId != nil) != (other.XSignRequestId != nil) { + return false + } + + if v.XSignRequestId != nil && other.XSignRequestId != nil { + if !bytes.Equal(v.GetSignRequestId(), other.GetSignRequestId()) { + return false + } + } + + return true +} + +// Validate checks the validity of the vote-extension +func (v *VoteExtension) Validate() error { + if v == nil { + return errExtensionNil + } + + if v.Type == VoteExtensionType_DEFAULT { + return fmt.Errorf("vote extension type %s is not supported", v.Type.String()) + } + + if v.Type == VoteExtensionType_THRESHOLD_RECOVER_RAW { + if len(v.Extension) != crypto.HashSize { + return fmt.Errorf("invalid %s vote extension size: got %d, expected %d", + v.Type.String(), len(v.Extension), crypto.HashSize) + } + } + + if len(v.Extension) > 0 && len(v.Signature) == 0 { + return errExtensionSignEmpty + } + if len(v.Signature) > bls12381.SignatureSize { + return errExtensionSignTooBig + } + + if v.XSignRequestId != nil && v.XSignRequestId.Size() > 0 { + if v.Type != VoteExtensionType_THRESHOLD_RECOVER_RAW { + return errExtensionSignRequestIDNotSupported + } + var validPrefixes = []string{"\x06plwdtx", "dpevote"} + requestID := v.GetSignRequestId() + + var validPrefix bool + for _, prefix := range validPrefixes { + if bytes.HasPrefix(requestID, []byte(prefix)) { + validPrefix = true + break + } + } + + if !validPrefix { + return errExtensionSignRequestIDWrongPrefix + } + } + + return nil +} +func (v VoteExtensions) Contains(other VoteExtension) bool { + for _, ext := range v { + if ext.Equal(&other) { + return true + } + } + return false +} diff --git a/proto/tendermint/types/dash.pb.go b/proto/tendermint/types/dash.pb.go index 661526d33a..ad693ce5ab 100644 --- a/proto/tendermint/types/dash.pb.go +++ b/proto/tendermint/types/dash.pb.go @@ -29,18 +29,34 @@ type VoteExtensionType int32 const ( // Unsupported VoteExtensionType_DEFAULT VoteExtensionType = 0 + // Sign canonical form of vote extension and threshold-recover signatures. + // // Deterministic vote extension - each validator in a quorum must provide the same vote extension data. VoteExtensionType_THRESHOLD_RECOVER VoteExtensionType = 1 + // Sign raw form of vote extension and threshold-recover signatures. + // + // Deterministic vote extension - each validator in a quorum must provide the same vote extension data. + // Use with caution - it can have severe security consequences, like replay attacks. + // + // THRESHOLD_RECOVER_RAW alows overriding sign request ID with `sign_request_id` field + // of ExtendVoteExtension.sign_request_id. If sign_request_id is provided, SHA256(sign_request_id) will be used as + // a sign request ID. + // + // It also changes how threshold-recover signatures are generated. 
Instead of signing canonical form of + // threshold-recover signatures, it signs SHA256 of raw form of the vote extension (`ExtendVoteExtension.extension`). + VoteExtensionType_THRESHOLD_RECOVER_RAW VoteExtensionType = 2 ) var VoteExtensionType_name = map[int32]string{ 0: "DEFAULT", 1: "THRESHOLD_RECOVER", + 2: "THRESHOLD_RECOVER_RAW", } var VoteExtensionType_value = map[string]int32{ - "DEFAULT": 0, - "THRESHOLD_RECOVER": 1, + "DEFAULT": 0, + "THRESHOLD_RECOVER": 1, + "THRESHOLD_RECOVER_RAW": 2, } func (x VoteExtensionType) String() string { @@ -116,6 +132,9 @@ type VoteExtension struct { Type VoteExtensionType `protobuf:"varint,1,opt,name=type,proto3,enum=tendermint.types.VoteExtensionType" json:"type,omitempty"` Extension []byte `protobuf:"bytes,2,opt,name=extension,proto3" json:"extension,omitempty"` Signature []byte `protobuf:"bytes,3,opt,name=signature,proto3" json:"signature,omitempty"` + // Types that are valid to be assigned to XSignRequestId: + // *VoteExtension_SignRequestId + XSignRequestId isVoteExtension_XSignRequestId `protobuf_oneof:"_sign_request_id"` } func (m *VoteExtension) Reset() { *m = VoteExtension{} } @@ -151,6 +170,25 @@ func (m *VoteExtension) XXX_DiscardUnknown() { var xxx_messageInfo_VoteExtension proto.InternalMessageInfo +type isVoteExtension_XSignRequestId interface { + isVoteExtension_XSignRequestId() + MarshalTo([]byte) (int, error) + Size() int +} + +type VoteExtension_SignRequestId struct { + SignRequestId []byte `protobuf:"bytes,4,opt,name=sign_request_id,json=signRequestId,proto3,oneof" json:"sign_request_id,omitempty"` +} + +func (*VoteExtension_SignRequestId) isVoteExtension_XSignRequestId() {} + +func (m *VoteExtension) GetXSignRequestId() isVoteExtension_XSignRequestId { + if m != nil { + return m.XSignRequestId + } + return nil +} + func (m *VoteExtension) GetType() VoteExtensionType { if m != nil { return m.Type @@ -172,6 +210,20 @@ func (m *VoteExtension) GetSignature() []byte { return nil } +func (m *VoteExtension) GetSignRequestId() []byte { + if x, ok := m.GetXSignRequestId().(*VoteExtension_SignRequestId); ok { + return x.SignRequestId + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. 
+func (*VoteExtension) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*VoteExtension_SignRequestId)(nil), + } +} + func init() { proto.RegisterEnum("tendermint.types.VoteExtensionType", VoteExtensionType_name, VoteExtensionType_value) proto.RegisterType((*CoreChainLock)(nil), "tendermint.types.CoreChainLock") @@ -181,28 +233,31 @@ func init() { func init() { proto.RegisterFile("tendermint/types/dash.proto", fileDescriptor_098b09a14a95d15e) } var fileDescriptor_098b09a14a95d15e = []byte{ - // 332 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x2e, 0x49, 0xcd, 0x4b, - 0x49, 0x2d, 0xca, 0xcd, 0xcc, 0x2b, 0xd1, 0x2f, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x4f, 0x49, 0x2c, - 0xce, 0xd0, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0x40, 0x48, 0xea, 0x81, 0x25, 0xa5, 0x44, - 0xd2, 0xf3, 0xd3, 0xf3, 0xc1, 0x92, 0xfa, 0x20, 0x16, 0x44, 0x9d, 0x52, 0x3b, 0x23, 0x17, 0xaf, - 0x73, 0x7e, 0x51, 0xaa, 0x73, 0x46, 0x62, 0x66, 0x9e, 0x4f, 0x7e, 0x72, 0xb6, 0x90, 0x16, 0x97, - 0x60, 0x72, 0x7e, 0x51, 0x6a, 0x7c, 0x52, 0x4e, 0x7e, 0x72, 0x76, 0x7c, 0x46, 0x6a, 0x66, 0x7a, - 0x46, 0x89, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0x6f, 0x10, 0x3f, 0x48, 0xc2, 0x09, 0x24, 0xee, 0x01, - 0x16, 0x16, 0x52, 0xe3, 0xe2, 0x47, 0x56, 0x9b, 0x58, 0x9c, 0x21, 0xc1, 0xa4, 0xc0, 0xa8, 0xc1, - 0x13, 0xc4, 0x8b, 0x50, 0x99, 0x58, 0x9c, 0x21, 0x24, 0xc3, 0xc5, 0x59, 0x9c, 0x99, 0x9e, 0x97, - 0x58, 0x52, 0x5a, 0x94, 0x2a, 0xc1, 0x0c, 0x56, 0x81, 0x10, 0xb0, 0x62, 0x79, 0xb1, 0x40, 0x9e, - 0x51, 0xa9, 0x85, 0x91, 0x8b, 0x37, 0x2c, 0xbf, 0x24, 0xd5, 0xb5, 0xa2, 0x24, 0x35, 0xaf, 0x38, - 0x33, 0x3f, 0x4f, 0xc8, 0x9c, 0x8b, 0x05, 0xe4, 0x74, 0xb0, 0xe5, 0x7c, 0x46, 0xca, 0x7a, 0xe8, - 0x5e, 0xd2, 0x43, 0x51, 0x1e, 0x52, 0x59, 0x90, 0x1a, 0x04, 0xd6, 0x00, 0xb2, 0x2e, 0x15, 0x26, - 0x0c, 0x75, 0x10, 0x42, 0x00, 0xbf, 0x63, 0xb4, 0xcc, 0xb9, 0x04, 0x31, 0x8c, 0x15, 0xe2, 0xe6, - 0x62, 0x77, 0x71, 0x75, 0x73, 0x0c, 0xf5, 0x09, 0x11, 0x60, 0x10, 0x12, 0xe5, 0x12, 0x0c, 0xf1, - 0x08, 0x72, 0x0d, 0xf6, 0xf0, 0xf7, 0x71, 0x89, 0x0f, 0x72, 0x75, 0xf6, 0x0f, 0x73, 0x0d, 0x12, - 0x60, 0x74, 0xf2, 0x3b, 0xf1, 0x48, 0x8e, 0xf1, 0xc2, 0x23, 0x39, 0xc6, 0x07, 0x8f, 0xe4, 0x18, - 0x27, 0x3c, 0x96, 0x63, 0xb8, 0xf0, 0x58, 0x8e, 0xe1, 0xc6, 0x63, 0x39, 0x86, 0x28, 0x93, 0xf4, - 0xcc, 0x92, 0x8c, 0xd2, 0x24, 0xbd, 0xe4, 0xfc, 0x5c, 0x70, 0x14, 0x15, 0x24, 0x56, 0xea, 0x43, - 0xfc, 0x02, 0xe2, 0xe9, 0x43, 0xe2, 0x04, 0x3d, 0x32, 0x93, 0xd8, 0xc0, 0xe2, 0xc6, 0x80, 0x00, - 0x00, 0x00, 0xff, 0xff, 0x57, 0xe9, 0x12, 0xcd, 0xe7, 0x01, 0x00, 0x00, + // 384 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x92, 0xcf, 0x4a, 0xe3, 0x40, + 0x1c, 0xc7, 0x33, 0xdd, 0xb2, 0xcb, 0xce, 0x6e, 0xb6, 0xe9, 0xb0, 0x85, 0xf8, 0x87, 0x58, 0x2a, + 0x48, 0xa9, 0x90, 0x80, 0x0a, 0x82, 0xb7, 0xfe, 0x89, 0x44, 0x28, 0x16, 0xc6, 0x5a, 0xc1, 0x4b, + 0x48, 0x93, 0x21, 0x09, 0xb5, 0x99, 0x98, 0x4c, 0xc1, 0x3e, 0x81, 0x1e, 0x7d, 0x04, 0x5f, 0x46, + 0xf0, 0xd8, 0xa3, 0x47, 0x69, 0x2f, 0x3e, 0x86, 0xcc, 0x44, 0x89, 0xb6, 0xe0, 0x2d, 0xf3, 0xf9, + 0x7e, 0xc2, 0xef, 0x9b, 0xfc, 0x06, 0x6e, 0x30, 0x12, 0x79, 0x24, 0x19, 0x87, 0x11, 0x33, 0xd8, + 0x34, 0x26, 0xa9, 0xe1, 0x39, 0x69, 0xa0, 0xc7, 0x09, 0x65, 0x14, 0x29, 0x79, 0xa8, 0x8b, 0x70, + 0xfd, 0xbf, 0x4f, 0x7d, 0x2a, 0x42, 0x83, 0x3f, 0x65, 0x5e, 0xed, 0x16, 0x40, 0xb9, 0x4d, 0x13, + 0xd2, 0x0e, 0x9c, 0x30, 0xea, 0x52, 0x77, 0x84, 0x1a, 0xb0, 0xec, 0xd2, 0x84, 0xd8, 0xc3, 0x2b, + 0xea, 0x8e, 0xec, 0x80, 0x84, 
0x7e, 0xc0, 0x54, 0x50, 0x05, 0x75, 0x19, 0x97, 0x78, 0xd0, 0xe2, + 0xdc, 0x12, 0x18, 0xed, 0xc0, 0xd2, 0x67, 0xd7, 0x49, 0x03, 0xb5, 0x50, 0x05, 0xf5, 0xbf, 0x58, + 0xce, 0x4d, 0x27, 0x0d, 0xd0, 0x26, 0xfc, 0x9d, 0x86, 0x7e, 0xe4, 0xb0, 0x49, 0x42, 0xd4, 0x1f, + 0xc2, 0xc8, 0xc1, 0x51, 0xf1, 0xf5, 0x61, 0x0b, 0xd4, 0x1e, 0x01, 0x94, 0x07, 0x94, 0x11, 0xf3, + 0x86, 0x91, 0x28, 0x0d, 0x69, 0x84, 0x0e, 0x61, 0x91, 0x57, 0x17, 0xc3, 0xff, 0xed, 0x6d, 0xeb, + 0xcb, 0x9f, 0xa4, 0x7f, 0xd1, 0xfb, 0xd3, 0x98, 0x60, 0xf1, 0x02, 0x1f, 0x47, 0x3e, 0xf0, 0x7b, + 0xa1, 0x1c, 0x7c, 0x5f, 0x06, 0xed, 0xc2, 0x12, 0x3f, 0xd8, 0x09, 0xb9, 0x9e, 0x90, 0x94, 0xd9, + 0xa1, 0xa7, 0x16, 0xb9, 0x63, 0x49, 0x58, 0xe6, 0x01, 0xce, 0xf8, 0x89, 0x77, 0x07, 0x40, 0x0b, + 0x41, 0xc5, 0x5e, 0xb2, 0x1b, 0x18, 0x96, 0x57, 0x7a, 0xa1, 0x3f, 0xf0, 0x57, 0xc7, 0x3c, 0x6e, + 0x9e, 0x77, 0xfb, 0x8a, 0x84, 0x2a, 0xb0, 0xdc, 0xb7, 0xb0, 0x79, 0x66, 0xf5, 0xba, 0x1d, 0x1b, + 0x9b, 0xed, 0xde, 0xc0, 0xc4, 0x0a, 0x40, 0x6b, 0xb0, 0xb2, 0x82, 0x6d, 0xdc, 0xbc, 0x50, 0x0a, + 0xad, 0xd3, 0xa7, 0xb9, 0x06, 0x66, 0x73, 0x0d, 0xbc, 0xcc, 0x35, 0x70, 0xbf, 0xd0, 0xa4, 0xd9, + 0x42, 0x93, 0x9e, 0x17, 0x9a, 0x74, 0x79, 0xe0, 0x87, 0x2c, 0x98, 0x0c, 0x75, 0x97, 0x8e, 0xc5, + 0xfa, 0x63, 0x67, 0x6a, 0x64, 0xff, 0x89, 0x9f, 0x8c, 0x6c, 0xdf, 0xcb, 0x17, 0x65, 0xf8, 0x53, + 0xf0, 0xfd, 0xb7, 0x00, 0x00, 0x00, 0xff, 0xff, 0x1b, 0x70, 0x1d, 0xe4, 0x43, 0x02, 0x00, 0x00, } func (this *CoreChainLock) Equal(that interface{}) bool { @@ -297,6 +352,15 @@ func (m *VoteExtension) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.XSignRequestId != nil { + { + size := m.XSignRequestId.Size() + i -= size + if _, err := m.XSignRequestId.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + } + } if len(m.Signature) > 0 { i -= len(m.Signature) copy(dAtA[i:], m.Signature) @@ -319,6 +383,22 @@ func (m *VoteExtension) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *VoteExtension_SignRequestId) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VoteExtension_SignRequestId) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.SignRequestId != nil { + i -= len(m.SignRequestId) + copy(dAtA[i:], m.SignRequestId) + i = encodeVarintDash(dAtA, i, uint64(len(m.SignRequestId))) + i-- + dAtA[i] = 0x22 + } + return len(dAtA) - i, nil +} func encodeVarintDash(dAtA []byte, offset int, v uint64) int { offset -= sovDash(v) base := offset @@ -367,6 +447,22 @@ func (m *VoteExtension) Size() (n int) { if l > 0 { n += 1 + l + sovDash(uint64(l)) } + if m.XSignRequestId != nil { + n += m.XSignRequestId.Size() + } + return n +} + +func (m *VoteExtension_SignRequestId) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.SignRequestId != nil { + l = len(m.SignRequestId) + n += 1 + l + sovDash(uint64(l)) + } return n } @@ -629,6 +725,39 @@ func (m *VoteExtension) Unmarshal(dAtA []byte) error { m.Signature = []byte{} } iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SignRequestId", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDash + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthDash + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + 
+				return ErrInvalidLengthDash
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			v := make([]byte, postIndex-iNdEx)
+			copy(v, dAtA[iNdEx:postIndex])
+			m.XSignRequestId = &VoteExtension_SignRequestId{v}
+			iNdEx = postIndex
 		default:
 			iNdEx = preIndex
 			skippy, err := skipDash(dAtA[iNdEx:])
diff --git a/proto/tendermint/types/dash.proto b/proto/tendermint/types/dash.proto
index 237c3ed768..2e42a12fbb 100644
--- a/proto/tendermint/types/dash.proto
+++ b/proto/tendermint/types/dash.proto
@@ -16,12 +16,36 @@ message CoreChainLock {
 enum VoteExtensionType {
   // Unsupported
   DEFAULT = 0;
+  // Sign the canonical form of the vote extension and threshold-recover the signatures.
+  //
   // Deterministic vote extension - each validator in a quorum must provide the same vote extension data.
   THRESHOLD_RECOVER = 1;
+  // Sign the raw form of the vote extension and threshold-recover the signatures.
+  //
+  // Deterministic vote extension - each validator in a quorum must provide the same vote extension data.
+  // Use with caution - it can have severe security consequences, such as replay attacks.
+  //
+  // THRESHOLD_RECOVER_RAW allows overriding the sign request ID with the `sign_request_id` field
+  // of ExtendVoteExtension. If `sign_request_id` is provided, SHA256(sign_request_id) will be used as
+  // the sign request ID.
+  //
+  // It also changes how threshold-recovered signatures are generated: instead of signing the canonical form,
+  // Tenderdash signs the SHA256 checksum of the raw vote extension (`ExtendVoteExtension.extension`).
+  THRESHOLD_RECOVER_RAW = 2;
 }
 
 message VoteExtension {
   VoteExtensionType type = 1;
   bytes extension = 2;
   bytes signature = 3;
+  // Sign request ID that will be used to sign the vote extension.
+  // Tenderdash will use the checksum of `sign_request_id` when generating quorum signatures of
+  // THRESHOLD_RECOVER_RAW vote extensions.
+  //
+  // If not set, Tenderdash will generate it based on the height and round.
+  //
+  // If set, it SHOULD be unique per voting round, and it MUST start with the `dpevote` or `\x06plwdtx` prefix.
+  //
+  // Use with caution - it can have severe security consequences.
+ optional bytes sign_request_id = 4; } diff --git a/proto/tendermint/types/dash_test.go b/proto/tendermint/types/dash_test.go new file mode 100644 index 0000000000..0191df6952 --- /dev/null +++ b/proto/tendermint/types/dash_test.go @@ -0,0 +1,75 @@ +package types_test + +import ( + "testing" + + "github.com/dashpay/tenderdash/proto/tendermint/types" + "github.com/stretchr/testify/assert" +) + +func TestMarshalVoteExtension(t *testing.T) { + testCases := []struct { + extension types.VoteExtension + expectPanic bool + }{ + { + extension: types.VoteExtension{ + Type: types.VoteExtensionType_THRESHOLD_RECOVER_RAW, + Extension: []byte("threshold"), + XSignRequestId: &types.VoteExtension_SignRequestId{ + SignRequestId: []byte("sign-request-id"), + }}}, + { + extension: types.VoteExtension{ + Type: types.VoteExtensionType_THRESHOLD_RECOVER_RAW, + Extension: []byte("threshold"), + XSignRequestId: nil, + }}, + { + extension: types.VoteExtension{ + Type: types.VoteExtensionType_THRESHOLD_RECOVER_RAW, + Extension: []byte("threshold"), + XSignRequestId: &types.VoteExtension_SignRequestId{nil}, + }}, + // Test below panics because of nil pointer dereference bug in gogoproto + // FIXME: remove expectPanic when we replace gogoproto + { + expectPanic: true, + extension: types.VoteExtension{ + Type: types.VoteExtensionType_THRESHOLD_RECOVER_RAW, + Extension: []byte("threshold"), + XSignRequestId: (*types.VoteExtension_SignRequestId)(nil), + }}, + { + extension: (&types.VoteExtension{ + Type: types.VoteExtensionType_THRESHOLD_RECOVER_RAW, + Extension: []byte("threshold"), + XSignRequestId: (*types.VoteExtension_SignRequestId)(nil), + }).Clone()}, + } + + for _, tc := range testCases { + t.Run("", func(t *testing.T) { + // marshaled, err := protoio.MarshalDelimited(tc) + // assert.NoError(t, err) + // assert.NotEmpty(t, marshaled) + + v := types.Vote{ + Type: types.PrecommitType, + VoteExtensions: []*types.VoteExtension{&tc.extension}, + } + f := func() { + marshaled, err := v.Marshal() + assert.NoError(t, err) + assert.NotEmpty(t, marshaled) + } + + if tc.expectPanic { + assert.Panics(t, f) + } else { + assert.NotPanics(t, f) + } + }) + } + +} diff --git a/proto/tendermint/types/params.pb.go b/proto/tendermint/types/params.pb.go index 1d48708289..0ae29224bd 100644 --- a/proto/tendermint/types/params.pb.go +++ b/proto/tendermint/types/params.pb.go @@ -33,6 +33,8 @@ type ConsensusParams struct { Block *BlockParams `protobuf:"bytes,1,opt,name=block,proto3" json:"block,omitempty"` Evidence *EvidenceParams `protobuf:"bytes,2,opt,name=evidence,proto3" json:"evidence,omitempty"` Validator *ValidatorParams `protobuf:"bytes,3,opt,name=validator,proto3" json:"validator,omitempty"` + // DEPRECATED. This will be removed in a future release. + // Replaced by ResponsePrepareProposal.app_version Version *VersionParams `protobuf:"bytes,4,opt,name=version,proto3" json:"version,omitempty"` Synchrony *SynchronyParams `protobuf:"bytes,5,opt,name=synchrony,proto3" json:"synchrony,omitempty"` Timeout *TimeoutParams `protobuf:"bytes,6,opt,name=timeout,proto3" json:"timeout,omitempty"` @@ -298,6 +300,7 @@ func (m *ValidatorParams) GetPubKeyTypes() []string { } // VersionParams contains the ABCI application version. +// DEPRECATED. This will be removed in a future release. type VersionParams struct { AppVersion uint64 `protobuf:"varint,1,opt,name=app_version,json=appVersion,proto3" json:"app_version,omitempty"` } @@ -487,16 +490,6 @@ type TimeoutParams struct { // to the next step immediately. 
Vote *time.Duration `protobuf:"bytes,3,opt,name=vote,proto3,stdduration" json:"vote,omitempty"` VoteDelta *time.Duration `protobuf:"bytes,4,opt,name=vote_delta,json=voteDelta,proto3,stdduration" json:"vote_delta,omitempty"` - // commit configures how long Tendermint will wait after receiving a quorum of - // precommits before beginning consensus for the next height. This can be - // used to allow slow precommits to arrive for inclusion in the next height before progressing. - Commit *time.Duration `protobuf:"bytes,5,opt,name=commit,proto3,stdduration" json:"commit,omitempty"` - // bypass_commit_timeout configures the node to proceed immediately to - // the next height once the node has received all precommits for a block, forgoing - // the remaining commit timeout. - // Setting bypass_commit_timeout false (the default) causes Tendermint to wait - // for the full commit timeout. - BypassCommitTimeout bool `protobuf:"varint,6,opt,name=bypass_commit_timeout,json=bypassCommitTimeout,proto3" json:"bypass_commit_timeout,omitempty"` } func (m *TimeoutParams) Reset() { *m = TimeoutParams{} } @@ -560,20 +553,6 @@ func (m *TimeoutParams) GetVoteDelta() *time.Duration { return nil } -func (m *TimeoutParams) GetCommit() *time.Duration { - if m != nil { - return m.Commit - } - return nil -} - -func (m *TimeoutParams) GetBypassCommitTimeout() bool { - if m != nil { - return m.BypassCommitTimeout - } - return false -} - // ABCIParams configure functionality specific to the Application Blockchain Interface. type ABCIParams struct { // Indicates if CheckTx should be called on all the transactions @@ -636,53 +615,53 @@ func init() { func init() { proto.RegisterFile("tendermint/types/params.proto", fileDescriptor_e12598271a686f57) } var fileDescriptor_e12598271a686f57 = []byte{ - // 731 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x95, 0xcf, 0x6e, 0xd3, 0x4a, - 0x14, 0xc6, 0xe3, 0x26, 0x4d, 0x93, 0x93, 0xa6, 0xa9, 0xe6, 0xde, 0xab, 0xeb, 0xdb, 0x4b, 0x9d, - 0xe2, 0x05, 0xaa, 0x54, 0xc9, 0xae, 0x5a, 0x10, 0x42, 0xe2, 0x8f, 0x9a, 0x04, 0x01, 0x42, 0x45, - 0xc8, 0x54, 0x2c, 0xba, 0xb1, 0xc6, 0xce, 0xe0, 0x58, 0x8d, 0x3d, 0x96, 0xc7, 0x8e, 0xe2, 0xb7, - 0x60, 0x85, 0x78, 0x04, 0xd8, 0xf0, 0x1c, 0x5d, 0x76, 0xc9, 0x0a, 0x50, 0xfa, 0x06, 0x3c, 0x01, - 0x9a, 0xf1, 0xb8, 0x69, 0x52, 0x4a, 0xb3, 0x8a, 0x3d, 0xe7, 0xfb, 0xf9, 0xf3, 0x7c, 0xe7, 0x78, - 0x02, 0x9b, 0x09, 0x09, 0xfb, 0x24, 0x0e, 0xfc, 0x30, 0x31, 0x93, 0x2c, 0x22, 0xcc, 0x8c, 0x70, - 0x8c, 0x03, 0x66, 0x44, 0x31, 0x4d, 0x28, 0x5a, 0x9f, 0x96, 0x0d, 0x51, 0xde, 0xf8, 0xdb, 0xa3, - 0x1e, 0x15, 0x45, 0x93, 0x5f, 0xe5, 0xba, 0x0d, 0xcd, 0xa3, 0xd4, 0x1b, 0x12, 0x53, 0xdc, 0x39, - 0xe9, 0x3b, 0xb3, 0x9f, 0xc6, 0x38, 0xf1, 0x69, 0x98, 0xd7, 0xf5, 0x2f, 0x65, 0x68, 0x75, 0x69, - 0xc8, 0x48, 0xc8, 0x52, 0xf6, 0x5a, 0x38, 0xa0, 0x7d, 0x58, 0x76, 0x86, 0xd4, 0x3d, 0x51, 0x95, - 0x2d, 0x65, 0xbb, 0xb1, 0xb7, 0x69, 0xcc, 0x7b, 0x19, 0x1d, 0x5e, 0xce, 0xd5, 0x56, 0xae, 0x45, - 0x0f, 0xa1, 0x46, 0x46, 0x7e, 0x9f, 0x84, 0x2e, 0x51, 0x97, 0x04, 0xb7, 0x75, 0x95, 0x7b, 0x2a, - 0x15, 0x12, 0xbd, 0x20, 0xd0, 0x13, 0xa8, 0x8f, 0xf0, 0xd0, 0xef, 0xe3, 0x84, 0xc6, 0x6a, 0x59, - 0xe0, 0xb7, 0xaf, 0xe2, 0x6f, 0x0b, 0x89, 0xe4, 0xa7, 0x0c, 0x7a, 0x00, 0x2b, 0x23, 0x12, 0x33, - 0x9f, 0x86, 0x6a, 0x45, 0xe0, 0xed, 0xdf, 0xe0, 0xb9, 0x40, 0xc2, 0x85, 0x9e, 0x7b, 0xb3, 0x2c, - 0x74, 0x07, 0x31, 0x0d, 0x33, 0x75, 0xf9, 0x3a, 0xef, 0x37, 0x85, 0xa4, 0xf0, 0xbe, 0x60, 0xb8, - 0x77, 0xe2, 0x07, 0x84, 0xa6, 0x89, 0x5a, 0xbd, 
0xce, 0xfb, 0x28, 0x17, 0x14, 0xde, 0x52, 0x8f, - 0x76, 0xa1, 0x82, 0x1d, 0xd7, 0x57, 0x57, 0x04, 0x77, 0xeb, 0x2a, 0x77, 0xd0, 0xe9, 0xbe, 0x90, - 0x90, 0x50, 0xea, 0x5d, 0x68, 0x5c, 0x4a, 0x1f, 0xfd, 0x0f, 0xf5, 0x00, 0x8f, 0x6d, 0x27, 0x4b, - 0x08, 0x13, 0xfd, 0x2a, 0x5b, 0xb5, 0x00, 0x8f, 0x3b, 0xfc, 0x1e, 0xfd, 0x0b, 0x2b, 0xbc, 0xe8, - 0x61, 0x26, 0x5a, 0x52, 0xb6, 0xaa, 0x01, 0x1e, 0x3f, 0xc3, 0x4c, 0xff, 0xac, 0xc0, 0xda, 0x6c, - 0x2f, 0xd0, 0x0e, 0x20, 0xae, 0xc5, 0x1e, 0xb1, 0xc3, 0x34, 0xb0, 0x45, 0x53, 0x8b, 0x27, 0xb6, - 0x02, 0x3c, 0x3e, 0xf0, 0xc8, 0xab, 0x34, 0x10, 0xd6, 0x0c, 0x1d, 0xc2, 0x7a, 0x21, 0x2e, 0xe6, - 0x49, 0x36, 0xfd, 0x3f, 0x23, 0x1f, 0x38, 0xa3, 0x18, 0x38, 0xa3, 0x27, 0x05, 0x9d, 0xda, 0xe9, - 0xb7, 0x76, 0xe9, 0xe3, 0xf7, 0xb6, 0x62, 0xad, 0xe5, 0xcf, 0x2b, 0x2a, 0xb3, 0x9b, 0x28, 0xcf, - 0x6e, 0x42, 0xbf, 0x07, 0xad, 0xb9, 0xbe, 0x23, 0x1d, 0x9a, 0x51, 0xea, 0xd8, 0x27, 0x24, 0xb3, - 0x45, 0x4a, 0xaa, 0xb2, 0x55, 0xde, 0xae, 0x5b, 0x8d, 0x28, 0x75, 0x5e, 0x92, 0xec, 0x88, 0x2f, - 0xe9, 0xbb, 0xd0, 0x9c, 0xe9, 0x37, 0x6a, 0x43, 0x03, 0x47, 0x91, 0x5d, 0x4c, 0x09, 0xdf, 0x59, - 0xc5, 0x02, 0x1c, 0x45, 0x52, 0xa6, 0x1f, 0xc3, 0xea, 0x73, 0xcc, 0x06, 0xa4, 0x2f, 0x81, 0x3b, - 0xd0, 0x12, 0x29, 0xd8, 0xf3, 0x01, 0x37, 0xc5, 0xf2, 0x61, 0x91, 0xb2, 0x0e, 0xcd, 0xa9, 0x6e, - 0x9a, 0x75, 0xa3, 0x50, 0xf1, 0xc0, 0x3f, 0x28, 0xd0, 0x9a, 0x9b, 0x20, 0xd4, 0x83, 0x66, 0x40, - 0x18, 0x13, 0x21, 0x92, 0x21, 0xce, 0xe4, 0xe7, 0xf6, 0x87, 0x04, 0x2b, 0x22, 0xbd, 0x55, 0x49, - 0xf5, 0x38, 0x84, 0x1e, 0x41, 0x3d, 0x8a, 0x89, 0xeb, 0xb3, 0x85, 0x7a, 0x90, 0x3f, 0x61, 0x4a, - 0xe8, 0x3f, 0x97, 0xa0, 0x39, 0x33, 0x9b, 0x7c, 0x9a, 0xa3, 0x98, 0x46, 0x94, 0x91, 0x45, 0x5f, - 0xa8, 0xd0, 0xf3, 0x1d, 0xc9, 0x4b, 0xbe, 0xa3, 0x04, 0x2f, 0xfa, 0x3e, 0xab, 0x92, 0xea, 0x71, - 0x08, 0xed, 0x43, 0x65, 0x44, 0x13, 0x22, 0x8f, 0x81, 0x1b, 0x61, 0x21, 0x46, 0x8f, 0x01, 0xf8, - 0xaf, 0xf4, 0xad, 0x2c, 0x98, 0x03, 0x47, 0x72, 0xd3, 0xfb, 0x50, 0x75, 0x69, 0x10, 0xf8, 0x89, - 0x3c, 0x01, 0x6e, 0x64, 0xa5, 0x1c, 0xed, 0xc1, 0x3f, 0x4e, 0x16, 0x61, 0xc6, 0xec, 0x7c, 0xc1, - 0xbe, 0x7c, 0x14, 0xd4, 0xac, 0xbf, 0xf2, 0x62, 0x57, 0xd4, 0x64, 0xd0, 0xfa, 0x0e, 0xc0, 0xf4, - 0xbb, 0x46, 0x9b, 0x00, 0x31, 0x71, 0x07, 0xc4, 0x3d, 0xb1, 0x93, 0xb1, 0xc8, 0xbc, 0x66, 0xd5, - 0xe5, 0xca, 0xd1, 0xb8, 0x63, 0x7d, 0x9a, 0x68, 0xca, 0xe9, 0x44, 0x53, 0xce, 0x26, 0x9a, 0xf2, - 0x63, 0xa2, 0x29, 0xef, 0xcf, 0xb5, 0xd2, 0xd9, 0xb9, 0x56, 0xfa, 0x7a, 0xae, 0x95, 0x8e, 0xef, - 0x7a, 0x7e, 0x32, 0x48, 0x1d, 0xc3, 0xa5, 0x81, 0xd9, 0xc7, 0x6c, 0x10, 0xe1, 0xcc, 0xcc, 0x0f, - 0x11, 0x7e, 0x97, 0x1f, 0xfb, 0xe6, 0xfc, 0x5f, 0x89, 0x53, 0x15, 0xeb, 0xfb, 0xbf, 0x02, 0x00, - 0x00, 0xff, 0xff, 0xaa, 0x0f, 0x78, 0xde, 0x65, 0x06, 0x00, 0x00, + // 724 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x95, 0x4f, 0x6f, 0xd3, 0x48, + 0x18, 0xc6, 0xe3, 0xc6, 0xcd, 0x9f, 0x37, 0x4d, 0x13, 0x8d, 0x76, 0xb5, 0xde, 0xee, 0xd6, 0xe9, + 0xfa, 0xb0, 0xaa, 0x54, 0xc9, 0xae, 0xb6, 0xbb, 0x87, 0x95, 0xf8, 0xa3, 0xa6, 0x41, 0x40, 0x51, + 0x11, 0x32, 0x15, 0x87, 0x5e, 0xac, 0xb1, 0x33, 0x38, 0x56, 0x63, 0x8f, 0xe5, 0xb1, 0xa3, 0xf8, + 0x5b, 0x70, 0x42, 0x7c, 0x04, 0xb8, 0x20, 0x3e, 0x46, 0x8f, 0x3d, 0x72, 0x02, 0x94, 0x7e, 0x11, + 0x34, 0xe3, 0x71, 0x43, 0x52, 0x0a, 0x39, 0xc5, 0x33, 0xef, 0xf3, 0xf3, 0xe3, 0x79, 0xde, 0xd7, + 0x0e, 0x6c, 0xa7, 0x24, 0x1a, 0x92, 0x24, 0x0c, 0xa2, 0xd4, 0x4a, 0xf3, 0x98, 0x30, 0x2b, 0xc6, + 0x09, 0x0e, 0x99, 0x19, 0x27, 0x34, 0xa5, 0xa8, 0x3b, 
0x2f, 0x9b, 0xa2, 0xbc, 0xf5, 0x8b, 0x4f, + 0x7d, 0x2a, 0x8a, 0x16, 0xbf, 0x2a, 0x74, 0x5b, 0xba, 0x4f, 0xa9, 0x3f, 0x26, 0x96, 0x58, 0xb9, + 0xd9, 0x4b, 0x6b, 0x98, 0x25, 0x38, 0x0d, 0x68, 0x54, 0xd4, 0x8d, 0xf7, 0x55, 0xe8, 0x1c, 0xd1, + 0x88, 0x91, 0x88, 0x65, 0xec, 0x99, 0x70, 0x40, 0x07, 0xb0, 0xee, 0x8e, 0xa9, 0x77, 0xae, 0x29, + 0x3b, 0xca, 0x6e, 0xeb, 0x9f, 0x6d, 0x73, 0xd9, 0xcb, 0xec, 0xf3, 0x72, 0xa1, 0xb6, 0x0b, 0x2d, + 0xba, 0x03, 0x0d, 0x32, 0x09, 0x86, 0x24, 0xf2, 0x88, 0xb6, 0x26, 0xb8, 0x9d, 0x9b, 0xdc, 0x03, + 0xa9, 0x90, 0xe8, 0x35, 0x81, 0xee, 0x43, 0x73, 0x82, 0xc7, 0xc1, 0x10, 0xa7, 0x34, 0xd1, 0xaa, + 0x02, 0xff, 0xeb, 0x26, 0xfe, 0xa2, 0x94, 0x48, 0x7e, 0xce, 0xa0, 0xff, 0xa1, 0x3e, 0x21, 0x09, + 0x0b, 0x68, 0xa4, 0xa9, 0x02, 0xef, 0x7d, 0x07, 0x2f, 0x04, 0x12, 0x2e, 0xf5, 0xdc, 0x9b, 0xe5, + 0x91, 0x37, 0x4a, 0x68, 0x94, 0x6b, 0xeb, 0xb7, 0x79, 0x3f, 0x2f, 0x25, 0xa5, 0xf7, 0x35, 0xc3, + 0xbd, 0xd3, 0x20, 0x24, 0x34, 0x4b, 0xb5, 0xda, 0x6d, 0xde, 0xa7, 0x85, 0xa0, 0xf4, 0x96, 0x7a, + 0xb4, 0x0f, 0x2a, 0x76, 0xbd, 0x40, 0xab, 0x0b, 0xee, 0xcf, 0x9b, 0xdc, 0x61, 0xff, 0xe8, 0xb1, + 0x84, 0x84, 0xd2, 0x38, 0x82, 0xd6, 0x37, 0xe9, 0xa3, 0x3f, 0xa0, 0x19, 0xe2, 0xa9, 0xe3, 0xe6, + 0x29, 0x61, 0xa2, 0x5f, 0x55, 0xbb, 0x11, 0xe2, 0x69, 0x9f, 0xaf, 0xd1, 0x6f, 0x50, 0xe7, 0x45, + 0x1f, 0x33, 0xd1, 0x92, 0xaa, 0x5d, 0x0b, 0xf1, 0xf4, 0x21, 0x66, 0xc6, 0x3b, 0x05, 0x36, 0x17, + 0x7b, 0x81, 0xf6, 0x00, 0x71, 0x2d, 0xf6, 0x89, 0x13, 0x65, 0xa1, 0x23, 0x9a, 0x5a, 0xde, 0xb1, + 0x13, 0xe2, 0xe9, 0xa1, 0x4f, 0x9e, 0x66, 0xa1, 0xb0, 0x66, 0xe8, 0x04, 0xba, 0xa5, 0xb8, 0x9c, + 0x27, 0xd9, 0xf4, 0xdf, 0xcd, 0x62, 0xe0, 0xcc, 0x72, 0xe0, 0xcc, 0x81, 0x14, 0xf4, 0x1b, 0x17, + 0x9f, 0x7a, 0x95, 0x37, 0x9f, 0x7b, 0x8a, 0xbd, 0x59, 0xdc, 0xaf, 0xac, 0x2c, 0x1e, 0xa2, 0xba, + 0x78, 0x08, 0xe3, 0x3f, 0xe8, 0x2c, 0xf5, 0x1d, 0x19, 0xd0, 0x8e, 0x33, 0xd7, 0x39, 0x27, 0xb9, + 0x23, 0x52, 0xd2, 0x94, 0x9d, 0xea, 0x6e, 0xd3, 0x6e, 0xc5, 0x99, 0xfb, 0x84, 0xe4, 0xa7, 0x7c, + 0xcb, 0xd8, 0x87, 0xf6, 0x42, 0xbf, 0x51, 0x0f, 0x5a, 0x38, 0x8e, 0x9d, 0x72, 0x4a, 0xf8, 0xc9, + 0x54, 0x1b, 0x70, 0x1c, 0x4b, 0x99, 0x71, 0x06, 0x1b, 0x8f, 0x30, 0x1b, 0x91, 0xa1, 0x04, 0xfe, + 0x86, 0x8e, 0x48, 0xc1, 0x59, 0x0e, 0xb8, 0x2d, 0xb6, 0x4f, 0xca, 0x94, 0x0d, 0x68, 0xcf, 0x75, + 0xf3, 0xac, 0x5b, 0xa5, 0x8a, 0x07, 0xfe, 0x5a, 0x81, 0xce, 0xd2, 0x04, 0xa1, 0x01, 0xb4, 0x43, + 0xc2, 0x98, 0x08, 0x91, 0x8c, 0x71, 0x2e, 0x5f, 0xb7, 0x1f, 0x24, 0xa8, 0x8a, 0xf4, 0x36, 0x24, + 0x35, 0xe0, 0x10, 0xba, 0x0b, 0xcd, 0x38, 0x21, 0x5e, 0xc0, 0x56, 0xea, 0x41, 0x71, 0x87, 0x39, + 0x61, 0x7c, 0x58, 0x83, 0xf6, 0xc2, 0x6c, 0xf2, 0x69, 0x8e, 0x13, 0x1a, 0x53, 0x46, 0x56, 0x7d, + 0xa0, 0x52, 0xcf, 0x4f, 0x24, 0x2f, 0xf9, 0x89, 0x52, 0xbc, 0xea, 0xf3, 0x6c, 0x48, 0x6a, 0xc0, + 0x21, 0x74, 0x00, 0xea, 0x84, 0xa6, 0x44, 0x7e, 0x06, 0x7e, 0x0a, 0x0b, 0x31, 0xba, 0x07, 0xc0, + 0x7f, 0xa5, 0xaf, 0xba, 0x62, 0x0e, 0x1c, 0x11, 0xa6, 0xc7, 0x6a, 0x63, 0xbd, 0x5b, 0x3b, 0x56, + 0x1b, 0xb5, 0x6e, 0xdd, 0xae, 0x79, 0x34, 0x0c, 0x83, 0xd4, 0xfe, 0xd5, 0xcd, 0x63, 0xcc, 0x98, + 0x53, 0x2c, 0x1d, 0xf9, 0xce, 0x1a, 0x7b, 0x00, 0xf3, 0xb7, 0x12, 0x6d, 0x03, 0x24, 0xc4, 0x1b, + 0x11, 0xef, 0xdc, 0x49, 0xa7, 0x22, 0xb1, 0x86, 0xdd, 0x94, 0x3b, 0xa7, 0xd3, 0xbe, 0xfd, 0x76, + 0xa6, 0x2b, 0x17, 0x33, 0x5d, 0xb9, 0x9c, 0xe9, 0xca, 0x97, 0x99, 0xae, 0xbc, 0xba, 0xd2, 0x2b, + 0x97, 0x57, 0x7a, 0xe5, 0xe3, 0x95, 0x5e, 0x39, 0xfb, 0xd7, 0x0f, 0xd2, 0x51, 0xe6, 0x9a, 0x1e, + 0x0d, 0xad, 0x21, 0x66, 0xa3, 0x18, 0xe7, 0x56, 0xf1, 0x09, 0xe0, 0xab, 0xe2, 
0xa3, 0x6d, 0x2d, + 0xff, 0x11, 0xb8, 0x35, 0xb1, 0x7f, 0xf0, 0x35, 0x00, 0x00, 0xff, 0xff, 0x17, 0x03, 0x59, 0x60, + 0x23, 0x06, 0x00, 0x00, } func (this *ConsensusParams) Equal(that interface{}) bool { @@ -958,18 +937,6 @@ func (this *TimeoutParams) Equal(that interface{}) bool { } else if that1.VoteDelta != nil { return false } - if this.Commit != nil && that1.Commit != nil { - if *this.Commit != *that1.Commit { - return false - } - } else if this.Commit != nil { - return false - } else if that1.Commit != nil { - return false - } - if this.BypassCommitTimeout != that1.BypassCommitTimeout { - return false - } return true } func (this *ABCIParams) Equal(that interface{}) bool { @@ -1333,64 +1300,44 @@ func (m *TimeoutParams) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l - if m.BypassCommitTimeout { - i-- - if m.BypassCommitTimeout { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x30 - } - if m.Commit != nil { - n11, err11 := github_com_gogo_protobuf_types.StdDurationMarshalTo(*m.Commit, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdDuration(*m.Commit):]) + if m.VoteDelta != nil { + n11, err11 := github_com_gogo_protobuf_types.StdDurationMarshalTo(*m.VoteDelta, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdDuration(*m.VoteDelta):]) if err11 != nil { return 0, err11 } i -= n11 i = encodeVarintParams(dAtA, i, uint64(n11)) i-- - dAtA[i] = 0x2a + dAtA[i] = 0x22 } - if m.VoteDelta != nil { - n12, err12 := github_com_gogo_protobuf_types.StdDurationMarshalTo(*m.VoteDelta, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdDuration(*m.VoteDelta):]) + if m.Vote != nil { + n12, err12 := github_com_gogo_protobuf_types.StdDurationMarshalTo(*m.Vote, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdDuration(*m.Vote):]) if err12 != nil { return 0, err12 } i -= n12 i = encodeVarintParams(dAtA, i, uint64(n12)) i-- - dAtA[i] = 0x22 + dAtA[i] = 0x1a } - if m.Vote != nil { - n13, err13 := github_com_gogo_protobuf_types.StdDurationMarshalTo(*m.Vote, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdDuration(*m.Vote):]) + if m.ProposeDelta != nil { + n13, err13 := github_com_gogo_protobuf_types.StdDurationMarshalTo(*m.ProposeDelta, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdDuration(*m.ProposeDelta):]) if err13 != nil { return 0, err13 } i -= n13 i = encodeVarintParams(dAtA, i, uint64(n13)) i-- - dAtA[i] = 0x1a + dAtA[i] = 0x12 } - if m.ProposeDelta != nil { - n14, err14 := github_com_gogo_protobuf_types.StdDurationMarshalTo(*m.ProposeDelta, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdDuration(*m.ProposeDelta):]) + if m.Propose != nil { + n14, err14 := github_com_gogo_protobuf_types.StdDurationMarshalTo(*m.Propose, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdDuration(*m.Propose):]) if err14 != nil { return 0, err14 } i -= n14 i = encodeVarintParams(dAtA, i, uint64(n14)) i-- - dAtA[i] = 0x12 - } - if m.Propose != nil { - n15, err15 := github_com_gogo_protobuf_types.StdDurationMarshalTo(*m.Propose, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdDuration(*m.Propose):]) - if err15 != nil { - return 0, err15 - } - i -= n15 - i = encodeVarintParams(dAtA, i, uint64(n15)) - i-- dAtA[i] = 0xa } return len(dAtA) - i, nil @@ -1590,13 +1537,6 @@ func (m *TimeoutParams) Size() (n int) { l = github_com_gogo_protobuf_types.SizeOfStdDuration(*m.VoteDelta) n += 1 + l + sovParams(uint64(l)) } - if m.Commit != nil { - l = github_com_gogo_protobuf_types.SizeOfStdDuration(*m.Commit) - n += 1 + l + sovParams(uint64(l)) - } - if m.BypassCommitTimeout { - n += 2 - } return n } @@ -2663,62 
+2603,6 @@ func (m *TimeoutParams) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Commit", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowParams - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthParams - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthParams - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Commit == nil { - m.Commit = new(time.Duration) - } - if err := github_com_gogo_protobuf_types.StdDurationUnmarshal(m.Commit, dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field BypassCommitTimeout", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowParams - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.BypassCommitTimeout = bool(v != 0) default: iNdEx = preIndex skippy, err := skipParams(dAtA[iNdEx:]) diff --git a/proto/tendermint/types/params.proto b/proto/tendermint/types/params.proto index 302ca71971..e69beb9364 100644 --- a/proto/tendermint/types/params.proto +++ b/proto/tendermint/types/params.proto @@ -14,6 +14,8 @@ message ConsensusParams { BlockParams block = 1; EvidenceParams evidence = 2; ValidatorParams validator = 3; + // DEPRECATED. This will be removed in a future release. + // Replaced by ResponsePrepareProposal.app_version VersionParams version = 4; SynchronyParams synchrony = 5; TimeoutParams timeout = 6; @@ -59,6 +61,7 @@ message ValidatorParams { } // VersionParams contains the ABCI application version. +// DEPRECATED. This will be removed in a future release. message VersionParams { uint64 app_version = 1; } @@ -116,17 +119,9 @@ message TimeoutParams { google.protobuf.Duration vote = 3 [(gogoproto.stdduration) = true]; google.protobuf.Duration vote_delta = 4 [(gogoproto.stdduration) = true]; - // commit configures how long Tendermint will wait after receiving a quorum of - // precommits before beginning consensus for the next height. This can be - // used to allow slow precommits to arrive for inclusion in the next height before progressing. - google.protobuf.Duration commit = 5 [(gogoproto.stdduration) = true]; - - // bypass_commit_timeout configures the node to proceed immediately to - // the next height once the node has received all precommits for a block, forgoing - // the remaining commit timeout. - // Setting bypass_commit_timeout false (the default) causes Tendermint to wait - // for the full commit timeout. - bool bypass_commit_timeout = 6; + // removed fields in 0.14 + reserved "commit", "bypass_commit_timeout"; + reserved 5, 6; } // ABCIParams configure functionality specific to the Application Blockchain Interface. 
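The snippet below is a minimal usage sketch, not part of the patch, showing how the new VoteExtension helpers added in proto/tendermint/types/dash.go above fit together. The payload, the sign request ID, and the zero-filled placeholder signature are invented for illustration; a real extension carries a BLS threshold signature produced by the validator quorum.

package main

import (
	"crypto/sha256"
	"fmt"

	"github.com/dashpay/tenderdash/crypto/bls12381"
	"github.com/dashpay/tenderdash/proto/tendermint/types"
)

func main() {
	// A THRESHOLD_RECOVER_RAW extension must be exactly crypto.HashSize (32)
	// bytes, so hash the raw application payload first.
	digest := sha256.Sum256([]byte("example application payload"))

	// Placeholder signature: Validate only checks that it is non-empty and
	// no longer than bls12381.SignatureSize.
	sig := make([]byte, bls12381.SignatureSize)

	ext := types.VoteExtension{
		Type:      types.VoteExtensionType_THRESHOLD_RECOVER_RAW,
		Extension: digest[:],
		Signature: sig,
		// The sign request ID must start with "dpevote" or "\x06plwdtx";
		// this value is illustrative only.
		XSignRequestId: &types.VoteExtension_SignRequestId{
			SignRequestId: []byte("dpevote-example-request"),
		},
	}

	if err := ext.Validate(); err != nil {
		fmt.Println("invalid vote extension:", err)
		return
	}

	// VoteExtensions is now a plain slice; Contains compares field by field
	// via Equal, so a Clone of ext is reported as present.
	exts := types.VoteExtensions{&ext}
	fmt.Println("contains clone:", exts.Contains(ext.Clone()))
}

Validate enforces the rules documented in dash.proto: the type cannot be DEFAULT, a THRESHOLD_RECOVER_RAW extension must be exactly crypto.HashSize bytes, a non-empty extension must carry a signature no larger than bls12381.SignatureSize, and a sign request ID is only accepted for THRESHOLD_RECOVER_RAW and must use one of the two allowed prefixes.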
diff --git a/proto/tendermint/types/types.go b/proto/tendermint/types/types.go index c32a9a1406..0a27c46b8a 100644 --- a/proto/tendermint/types/types.go +++ b/proto/tendermint/types/types.go @@ -39,14 +39,6 @@ func (m *PartSetHeader) IsZero() bool { return m == nil || len(m.Hash) == 0 } -// VoteExtensionsToMap creates a map where a key is vote-extension type and value is the extensions grouped by type -func (m *Vote) VoteExtensionsToMap() VoteExtensions { - if m == nil { - return nil - } - return VoteExtensionsToMap(m.VoteExtensions) -} - // SignBytes represent data to be signed for the given vote. // It's a 64-byte slice containing concatenation of: // * Checksum of CanonicalVote @@ -171,11 +163,3 @@ func (s StateID) ValidateBasic() error { return nil } - -func VoteExtensionsToMap(voteExtensions []*VoteExtension) VoteExtensions { - res := make(map[VoteExtensionType][]*VoteExtension) - for _, ext := range voteExtensions { - res[ext.Type] = append(res[ext.Type], ext) - } - return res -} diff --git a/rpc/client/http/ws.go b/rpc/client/http/ws.go index 088beb2aae..3b43a7ee9b 100644 --- a/rpc/client/http/ws.go +++ b/rpc/client/http/ws.go @@ -77,7 +77,7 @@ func (w *wsEvents) Stop() error { return w.ws.Stop() } // event. // // It returns an error if wsEvents is not running. -func (w *wsEvents) Subscribe(ctx context.Context, subscriber, query string, +func (w *wsEvents) Subscribe(ctx context.Context, _subscriber, query string, outCapacity ...int) (out <-chan coretypes.ResultEvent, err error) { if err := w.ws.Subscribe(ctx, query); err != nil { return nil, err @@ -102,7 +102,7 @@ func (w *wsEvents) Subscribe(ctx context.Context, subscriber, query string, // given subscriber from query. // // It returns an error if wsEvents is not running. -func (w *wsEvents) Unsubscribe(ctx context.Context, subscriber, query string) error { +func (w *wsEvents) Unsubscribe(ctx context.Context, _subscriber, query string) error { if err := w.ws.Unsubscribe(ctx, query); err != nil { return err } @@ -124,7 +124,7 @@ func (w *wsEvents) Unsubscribe(ctx context.Context, subscriber, query string) er // unsubscribe given subscriber from all the queries. // // It returns an error if wsEvents is not running. 
-func (w *wsEvents) UnsubscribeAll(ctx context.Context, subscriber string) error { +func (w *wsEvents) UnsubscribeAll(ctx context.Context, _subscriber string) error { if err := w.ws.UnsubscribeAll(ctx); err != nil { return err } diff --git a/rpc/client/local/local.go b/rpc/client/local/local.go index bfcb39d590..6408a377ba 100644 --- a/rpc/client/local/local.go +++ b/rpc/client/local/local.go @@ -114,7 +114,7 @@ func (c *Local) CheckTx(ctx context.Context, tx types.Tx) (*coretypes.ResultChec return c.env.CheckTx(ctx, &coretypes.RequestCheckTx{Tx: tx}) } -func (c *Local) RemoveTx(ctx context.Context, txKey types.TxKey) error { +func (c *Local) RemoveTx(_ctx context.Context, txKey types.TxKey) error { return c.env.Mempool.RemoveTxByKey(txKey) } diff --git a/rpc/client/mock/abci.go b/rpc/client/mock/abci.go index 1bdbfe2d8c..fea049ad11 100644 --- a/rpc/client/mock/abci.go +++ b/rpc/client/mock/abci.go @@ -3,13 +3,16 @@ package mock import ( "context" + "github.com/dashpay/tenderdash/abci/example/kvstore" abci "github.com/dashpay/tenderdash/abci/types" "github.com/dashpay/tenderdash/internal/proxy" "github.com/dashpay/tenderdash/libs/bytes" tmproto "github.com/dashpay/tenderdash/proto/tendermint/types" + "github.com/dashpay/tenderdash/proto/tendermint/version" "github.com/dashpay/tenderdash/rpc/client" "github.com/dashpay/tenderdash/rpc/coretypes" "github.com/dashpay/tenderdash/types" + tmversion "github.com/dashpay/tenderdash/version" ) // ABCIApp will send all abci related request to the named app, @@ -65,7 +68,11 @@ func (a ABCIApp) BroadcastTxCommit(ctx context.Context, tx types.Tx) (*coretypes if res.CheckTx.IsErr() { return res, nil } - propResp, err := a.App.ProcessProposal(ctx, &abci.RequestProcessProposal{Height: 1, Txs: [][]byte{tx}}) + propResp, err := a.App.ProcessProposal(ctx, &abci.RequestProcessProposal{ + Height: 1, + Txs: [][]byte{tx}, + Version: &version.Consensus{Block: tmversion.BlockProtocol, App: kvstore.ProtocolVersion}, + }) if err != nil { return nil, err } @@ -146,7 +153,7 @@ type ABCIMock struct { Broadcast Call } -func (m ABCIMock) ABCIInfo(ctx context.Context) (*coretypes.ResultABCIInfo, error) { +func (m ABCIMock) ABCIInfo(_ctx context.Context) (*coretypes.ResultABCIInfo, error) { res, err := m.Info.GetResponse(nil) if err != nil { return nil, err @@ -158,7 +165,7 @@ func (m ABCIMock) ABCIQuery(ctx context.Context, path string, data bytes.HexByte return m.ABCIQueryWithOptions(ctx, path, data, client.DefaultABCIQueryOptions) } -func (m ABCIMock) ABCIQueryWithOptions(ctx context.Context, path string, data bytes.HexBytes, opts client.ABCIQueryOptions) (*coretypes.ResultABCIQuery, error) { +func (m ABCIMock) ABCIQueryWithOptions(_ctx context.Context, path string, data bytes.HexBytes, opts client.ABCIQueryOptions) (*coretypes.ResultABCIQuery, error) { res, err := m.Query.GetResponse(QueryArgs{path, data, opts.Height, opts.Prove}) if err != nil { return nil, err @@ -167,7 +174,7 @@ func (m ABCIMock) ABCIQueryWithOptions(ctx context.Context, path string, data by return &coretypes.ResultABCIQuery{Response: resQuery}, nil } -func (m ABCIMock) BroadcastTxCommit(ctx context.Context, tx types.Tx) (*coretypes.ResultBroadcastTxCommit, error) { +func (m ABCIMock) BroadcastTxCommit(_ctx context.Context, tx types.Tx) (*coretypes.ResultBroadcastTxCommit, error) { res, err := m.BroadcastCommit.GetResponse(tx) if err != nil { return nil, err @@ -175,7 +182,7 @@ func (m ABCIMock) BroadcastTxCommit(ctx context.Context, tx types.Tx) (*coretype return 
res.(*coretypes.ResultBroadcastTxCommit), nil } -func (m ABCIMock) BroadcastTxAsync(ctx context.Context, tx types.Tx) (*coretypes.ResultBroadcastTx, error) { +func (m ABCIMock) BroadcastTxAsync(_ctx context.Context, tx types.Tx) (*coretypes.ResultBroadcastTx, error) { res, err := m.Broadcast.GetResponse(tx) if err != nil { return nil, err @@ -183,7 +190,7 @@ func (m ABCIMock) BroadcastTxAsync(ctx context.Context, tx types.Tx) (*coretypes return res.(*coretypes.ResultBroadcastTx), nil } -func (m ABCIMock) BroadcastTx(ctx context.Context, tx types.Tx) (*coretypes.ResultBroadcastTx, error) { +func (m ABCIMock) BroadcastTx(_ctx context.Context, tx types.Tx) (*coretypes.ResultBroadcastTx, error) { res, err := m.Broadcast.GetResponse(tx) if err != nil { return nil, err @@ -191,7 +198,7 @@ func (m ABCIMock) BroadcastTx(ctx context.Context, tx types.Tx) (*coretypes.Resu return res.(*coretypes.ResultBroadcastTx), nil } -func (m ABCIMock) BroadcastTxSync(ctx context.Context, tx types.Tx) (*coretypes.ResultBroadcastTx, error) { +func (m ABCIMock) BroadcastTxSync(_ctx context.Context, tx types.Tx) (*coretypes.ResultBroadcastTx, error) { res, err := m.Broadcast.GetResponse(tx) if err != nil { return nil, err diff --git a/rpc/client/mock/status.go b/rpc/client/mock/status.go index 1a446896e8..3e7f16db43 100644 --- a/rpc/client/mock/status.go +++ b/rpc/client/mock/status.go @@ -17,7 +17,7 @@ var ( _ client.StatusClient = (*StatusRecorder)(nil) ) -func (m *StatusMock) Status(ctx context.Context) (*coretypes.ResultStatus, error) { +func (m *StatusMock) Status(_ctx context.Context) (*coretypes.ResultStatus, error) { res, err := m.GetResponse(nil) if err != nil { return nil, err diff --git a/rpc/client/mocks/abci_client.go b/rpc/client/mocks/abci_client.go index fc06ed551d..3d0cc35ca5 100644 --- a/rpc/client/mocks/abci_client.go +++ b/rpc/client/mocks/abci_client.go @@ -24,6 +24,10 @@ type ABCIClient struct { func (_m *ABCIClient) ABCIInfo(_a0 context.Context) (*coretypes.ResultABCIInfo, error) { ret := _m.Called(_a0) + if len(ret) == 0 { + panic("no return value specified for ABCIInfo") + } + var r0 *coretypes.ResultABCIInfo var r1 error if rf, ok := ret.Get(0).(func(context.Context) (*coretypes.ResultABCIInfo, error)); ok { @@ -50,6 +54,10 @@ func (_m *ABCIClient) ABCIInfo(_a0 context.Context) (*coretypes.ResultABCIInfo, func (_m *ABCIClient) ABCIQuery(ctx context.Context, path string, data bytes.HexBytes) (*coretypes.ResultABCIQuery, error) { ret := _m.Called(ctx, path, data) + if len(ret) == 0 { + panic("no return value specified for ABCIQuery") + } + var r0 *coretypes.ResultABCIQuery var r1 error if rf, ok := ret.Get(0).(func(context.Context, string, bytes.HexBytes) (*coretypes.ResultABCIQuery, error)); ok { @@ -76,6 +84,10 @@ func (_m *ABCIClient) ABCIQuery(ctx context.Context, path string, data bytes.Hex func (_m *ABCIClient) ABCIQueryWithOptions(ctx context.Context, path string, data bytes.HexBytes, opts client.ABCIQueryOptions) (*coretypes.ResultABCIQuery, error) { ret := _m.Called(ctx, path, data, opts) + if len(ret) == 0 { + panic("no return value specified for ABCIQueryWithOptions") + } + var r0 *coretypes.ResultABCIQuery var r1 error if rf, ok := ret.Get(0).(func(context.Context, string, bytes.HexBytes, client.ABCIQueryOptions) (*coretypes.ResultABCIQuery, error)); ok { @@ -102,6 +114,10 @@ func (_m *ABCIClient) ABCIQueryWithOptions(ctx context.Context, path string, dat func (_m *ABCIClient) BroadcastTx(_a0 context.Context, _a1 types.Tx) (*coretypes.ResultBroadcastTx, error) { ret := 
_m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for BroadcastTx") + } + var r0 *coretypes.ResultBroadcastTx var r1 error if rf, ok := ret.Get(0).(func(context.Context, types.Tx) (*coretypes.ResultBroadcastTx, error)); ok { @@ -128,6 +144,10 @@ func (_m *ABCIClient) BroadcastTx(_a0 context.Context, _a1 types.Tx) (*coretypes func (_m *ABCIClient) BroadcastTxAsync(_a0 context.Context, _a1 types.Tx) (*coretypes.ResultBroadcastTx, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for BroadcastTxAsync") + } + var r0 *coretypes.ResultBroadcastTx var r1 error if rf, ok := ret.Get(0).(func(context.Context, types.Tx) (*coretypes.ResultBroadcastTx, error)); ok { @@ -154,6 +174,10 @@ func (_m *ABCIClient) BroadcastTxAsync(_a0 context.Context, _a1 types.Tx) (*core func (_m *ABCIClient) BroadcastTxCommit(_a0 context.Context, _a1 types.Tx) (*coretypes.ResultBroadcastTxCommit, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for BroadcastTxCommit") + } + var r0 *coretypes.ResultBroadcastTxCommit var r1 error if rf, ok := ret.Get(0).(func(context.Context, types.Tx) (*coretypes.ResultBroadcastTxCommit, error)); ok { @@ -180,6 +204,10 @@ func (_m *ABCIClient) BroadcastTxCommit(_a0 context.Context, _a1 types.Tx) (*cor func (_m *ABCIClient) BroadcastTxSync(_a0 context.Context, _a1 types.Tx) (*coretypes.ResultBroadcastTx, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for BroadcastTxSync") + } + var r0 *coretypes.ResultBroadcastTx var r1 error if rf, ok := ret.Get(0).(func(context.Context, types.Tx) (*coretypes.ResultBroadcastTx, error)); ok { diff --git a/rpc/client/mocks/client.go b/rpc/client/mocks/client.go index 22533b3666..c62a3347a0 100644 --- a/rpc/client/mocks/client.go +++ b/rpc/client/mocks/client.go @@ -24,6 +24,10 @@ type Client struct { func (_m *Client) ABCIInfo(_a0 context.Context) (*coretypes.ResultABCIInfo, error) { ret := _m.Called(_a0) + if len(ret) == 0 { + panic("no return value specified for ABCIInfo") + } + var r0 *coretypes.ResultABCIInfo var r1 error if rf, ok := ret.Get(0).(func(context.Context) (*coretypes.ResultABCIInfo, error)); ok { @@ -50,6 +54,10 @@ func (_m *Client) ABCIInfo(_a0 context.Context) (*coretypes.ResultABCIInfo, erro func (_m *Client) ABCIQuery(ctx context.Context, path string, data bytes.HexBytes) (*coretypes.ResultABCIQuery, error) { ret := _m.Called(ctx, path, data) + if len(ret) == 0 { + panic("no return value specified for ABCIQuery") + } + var r0 *coretypes.ResultABCIQuery var r1 error if rf, ok := ret.Get(0).(func(context.Context, string, bytes.HexBytes) (*coretypes.ResultABCIQuery, error)); ok { @@ -76,6 +84,10 @@ func (_m *Client) ABCIQuery(ctx context.Context, path string, data bytes.HexByte func (_m *Client) ABCIQueryWithOptions(ctx context.Context, path string, data bytes.HexBytes, opts client.ABCIQueryOptions) (*coretypes.ResultABCIQuery, error) { ret := _m.Called(ctx, path, data, opts) + if len(ret) == 0 { + panic("no return value specified for ABCIQueryWithOptions") + } + var r0 *coretypes.ResultABCIQuery var r1 error if rf, ok := ret.Get(0).(func(context.Context, string, bytes.HexBytes, client.ABCIQueryOptions) (*coretypes.ResultABCIQuery, error)); ok { @@ -102,6 +114,10 @@ func (_m *Client) ABCIQueryWithOptions(ctx context.Context, path string, data by func (_m *Client) Block(ctx context.Context, height *int64) (*coretypes.ResultBlock, error) { ret := _m.Called(ctx, height) + if len(ret) == 0 { 
+ panic("no return value specified for Block") + } + var r0 *coretypes.ResultBlock var r1 error if rf, ok := ret.Get(0).(func(context.Context, *int64) (*coretypes.ResultBlock, error)); ok { @@ -128,6 +144,10 @@ func (_m *Client) Block(ctx context.Context, height *int64) (*coretypes.ResultBl func (_m *Client) BlockByHash(ctx context.Context, hash bytes.HexBytes) (*coretypes.ResultBlock, error) { ret := _m.Called(ctx, hash) + if len(ret) == 0 { + panic("no return value specified for BlockByHash") + } + var r0 *coretypes.ResultBlock var r1 error if rf, ok := ret.Get(0).(func(context.Context, bytes.HexBytes) (*coretypes.ResultBlock, error)); ok { @@ -154,6 +174,10 @@ func (_m *Client) BlockByHash(ctx context.Context, hash bytes.HexBytes) (*corety func (_m *Client) BlockResults(ctx context.Context, height *int64) (*coretypes.ResultBlockResults, error) { ret := _m.Called(ctx, height) + if len(ret) == 0 { + panic("no return value specified for BlockResults") + } + var r0 *coretypes.ResultBlockResults var r1 error if rf, ok := ret.Get(0).(func(context.Context, *int64) (*coretypes.ResultBlockResults, error)); ok { @@ -180,6 +204,10 @@ func (_m *Client) BlockResults(ctx context.Context, height *int64) (*coretypes.R func (_m *Client) BlockSearch(ctx context.Context, query string, page *int, perPage *int, orderBy string) (*coretypes.ResultBlockSearch, error) { ret := _m.Called(ctx, query, page, perPage, orderBy) + if len(ret) == 0 { + panic("no return value specified for BlockSearch") + } + var r0 *coretypes.ResultBlockSearch var r1 error if rf, ok := ret.Get(0).(func(context.Context, string, *int, *int, string) (*coretypes.ResultBlockSearch, error)); ok { @@ -206,6 +234,10 @@ func (_m *Client) BlockSearch(ctx context.Context, query string, page *int, perP func (_m *Client) BlockchainInfo(ctx context.Context, minHeight int64, maxHeight int64) (*coretypes.ResultBlockchainInfo, error) { ret := _m.Called(ctx, minHeight, maxHeight) + if len(ret) == 0 { + panic("no return value specified for BlockchainInfo") + } + var r0 *coretypes.ResultBlockchainInfo var r1 error if rf, ok := ret.Get(0).(func(context.Context, int64, int64) (*coretypes.ResultBlockchainInfo, error)); ok { @@ -232,6 +264,10 @@ func (_m *Client) BlockchainInfo(ctx context.Context, minHeight int64, maxHeight func (_m *Client) BroadcastEvidence(_a0 context.Context, _a1 types.Evidence) (*coretypes.ResultBroadcastEvidence, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for BroadcastEvidence") + } + var r0 *coretypes.ResultBroadcastEvidence var r1 error if rf, ok := ret.Get(0).(func(context.Context, types.Evidence) (*coretypes.ResultBroadcastEvidence, error)); ok { @@ -258,6 +294,10 @@ func (_m *Client) BroadcastEvidence(_a0 context.Context, _a1 types.Evidence) (*c func (_m *Client) BroadcastTx(_a0 context.Context, _a1 types.Tx) (*coretypes.ResultBroadcastTx, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for BroadcastTx") + } + var r0 *coretypes.ResultBroadcastTx var r1 error if rf, ok := ret.Get(0).(func(context.Context, types.Tx) (*coretypes.ResultBroadcastTx, error)); ok { @@ -284,6 +324,10 @@ func (_m *Client) BroadcastTx(_a0 context.Context, _a1 types.Tx) (*coretypes.Res func (_m *Client) BroadcastTxAsync(_a0 context.Context, _a1 types.Tx) (*coretypes.ResultBroadcastTx, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for BroadcastTxAsync") + } + var r0 *coretypes.ResultBroadcastTx var r1 error if rf, ok := 
ret.Get(0).(func(context.Context, types.Tx) (*coretypes.ResultBroadcastTx, error)); ok { @@ -310,6 +354,10 @@ func (_m *Client) BroadcastTxAsync(_a0 context.Context, _a1 types.Tx) (*coretype func (_m *Client) BroadcastTxCommit(_a0 context.Context, _a1 types.Tx) (*coretypes.ResultBroadcastTxCommit, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for BroadcastTxCommit") + } + var r0 *coretypes.ResultBroadcastTxCommit var r1 error if rf, ok := ret.Get(0).(func(context.Context, types.Tx) (*coretypes.ResultBroadcastTxCommit, error)); ok { @@ -336,6 +384,10 @@ func (_m *Client) BroadcastTxCommit(_a0 context.Context, _a1 types.Tx) (*coretyp func (_m *Client) BroadcastTxSync(_a0 context.Context, _a1 types.Tx) (*coretypes.ResultBroadcastTx, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for BroadcastTxSync") + } + var r0 *coretypes.ResultBroadcastTx var r1 error if rf, ok := ret.Get(0).(func(context.Context, types.Tx) (*coretypes.ResultBroadcastTx, error)); ok { @@ -362,6 +414,10 @@ func (_m *Client) BroadcastTxSync(_a0 context.Context, _a1 types.Tx) (*coretypes func (_m *Client) CheckTx(_a0 context.Context, _a1 types.Tx) (*coretypes.ResultCheckTx, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for CheckTx") + } + var r0 *coretypes.ResultCheckTx var r1 error if rf, ok := ret.Get(0).(func(context.Context, types.Tx) (*coretypes.ResultCheckTx, error)); ok { @@ -388,6 +444,10 @@ func (_m *Client) CheckTx(_a0 context.Context, _a1 types.Tx) (*coretypes.ResultC func (_m *Client) Commit(ctx context.Context, height *int64) (*coretypes.ResultCommit, error) { ret := _m.Called(ctx, height) + if len(ret) == 0 { + panic("no return value specified for Commit") + } + var r0 *coretypes.ResultCommit var r1 error if rf, ok := ret.Get(0).(func(context.Context, *int64) (*coretypes.ResultCommit, error)); ok { @@ -414,6 +474,10 @@ func (_m *Client) Commit(ctx context.Context, height *int64) (*coretypes.ResultC func (_m *Client) ConsensusParams(ctx context.Context, height *int64) (*coretypes.ResultConsensusParams, error) { ret := _m.Called(ctx, height) + if len(ret) == 0 { + panic("no return value specified for ConsensusParams") + } + var r0 *coretypes.ResultConsensusParams var r1 error if rf, ok := ret.Get(0).(func(context.Context, *int64) (*coretypes.ResultConsensusParams, error)); ok { @@ -440,6 +504,10 @@ func (_m *Client) ConsensusParams(ctx context.Context, height *int64) (*coretype func (_m *Client) ConsensusState(_a0 context.Context) (*coretypes.ResultConsensusState, error) { ret := _m.Called(_a0) + if len(ret) == 0 { + panic("no return value specified for ConsensusState") + } + var r0 *coretypes.ResultConsensusState var r1 error if rf, ok := ret.Get(0).(func(context.Context) (*coretypes.ResultConsensusState, error)); ok { @@ -466,6 +534,10 @@ func (_m *Client) ConsensusState(_a0 context.Context) (*coretypes.ResultConsensu func (_m *Client) DumpConsensusState(_a0 context.Context) (*coretypes.ResultDumpConsensusState, error) { ret := _m.Called(_a0) + if len(ret) == 0 { + panic("no return value specified for DumpConsensusState") + } + var r0 *coretypes.ResultDumpConsensusState var r1 error if rf, ok := ret.Get(0).(func(context.Context) (*coretypes.ResultDumpConsensusState, error)); ok { @@ -492,6 +564,10 @@ func (_m *Client) DumpConsensusState(_a0 context.Context) (*coretypes.ResultDump func (_m *Client) Events(ctx context.Context, req *coretypes.RequestEvents) 
(*coretypes.ResultEvents, error) { ret := _m.Called(ctx, req) + if len(ret) == 0 { + panic("no return value specified for Events") + } + var r0 *coretypes.ResultEvents var r1 error if rf, ok := ret.Get(0).(func(context.Context, *coretypes.RequestEvents) (*coretypes.ResultEvents, error)); ok { @@ -518,6 +594,10 @@ func (_m *Client) Events(ctx context.Context, req *coretypes.RequestEvents) (*co func (_m *Client) Genesis(_a0 context.Context) (*coretypes.ResultGenesis, error) { ret := _m.Called(_a0) + if len(ret) == 0 { + panic("no return value specified for Genesis") + } + var r0 *coretypes.ResultGenesis var r1 error if rf, ok := ret.Get(0).(func(context.Context) (*coretypes.ResultGenesis, error)); ok { @@ -544,6 +624,10 @@ func (_m *Client) Genesis(_a0 context.Context) (*coretypes.ResultGenesis, error) func (_m *Client) GenesisChunked(_a0 context.Context, _a1 uint) (*coretypes.ResultGenesisChunk, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for GenesisChunked") + } + var r0 *coretypes.ResultGenesisChunk var r1 error if rf, ok := ret.Get(0).(func(context.Context, uint) (*coretypes.ResultGenesisChunk, error)); ok { @@ -570,6 +654,10 @@ func (_m *Client) GenesisChunked(_a0 context.Context, _a1 uint) (*coretypes.Resu func (_m *Client) Header(ctx context.Context, height *int64) (*coretypes.ResultHeader, error) { ret := _m.Called(ctx, height) + if len(ret) == 0 { + panic("no return value specified for Header") + } + var r0 *coretypes.ResultHeader var r1 error if rf, ok := ret.Get(0).(func(context.Context, *int64) (*coretypes.ResultHeader, error)); ok { @@ -596,6 +684,10 @@ func (_m *Client) Header(ctx context.Context, height *int64) (*coretypes.ResultH func (_m *Client) HeaderByHash(ctx context.Context, hash bytes.HexBytes) (*coretypes.ResultHeader, error) { ret := _m.Called(ctx, hash) + if len(ret) == 0 { + panic("no return value specified for HeaderByHash") + } + var r0 *coretypes.ResultHeader var r1 error if rf, ok := ret.Get(0).(func(context.Context, bytes.HexBytes) (*coretypes.ResultHeader, error)); ok { @@ -622,6 +714,10 @@ func (_m *Client) HeaderByHash(ctx context.Context, hash bytes.HexBytes) (*coret func (_m *Client) Health(_a0 context.Context) (*coretypes.ResultHealth, error) { ret := _m.Called(_a0) + if len(ret) == 0 { + panic("no return value specified for Health") + } + var r0 *coretypes.ResultHealth var r1 error if rf, ok := ret.Get(0).(func(context.Context) (*coretypes.ResultHealth, error)); ok { @@ -648,6 +744,10 @@ func (_m *Client) Health(_a0 context.Context) (*coretypes.ResultHealth, error) { func (_m *Client) NetInfo(_a0 context.Context) (*coretypes.ResultNetInfo, error) { ret := _m.Called(_a0) + if len(ret) == 0 { + panic("no return value specified for NetInfo") + } + var r0 *coretypes.ResultNetInfo var r1 error if rf, ok := ret.Get(0).(func(context.Context) (*coretypes.ResultNetInfo, error)); ok { @@ -674,6 +774,10 @@ func (_m *Client) NetInfo(_a0 context.Context) (*coretypes.ResultNetInfo, error) func (_m *Client) NumUnconfirmedTxs(_a0 context.Context) (*coretypes.ResultUnconfirmedTxs, error) { ret := _m.Called(_a0) + if len(ret) == 0 { + panic("no return value specified for NumUnconfirmedTxs") + } + var r0 *coretypes.ResultUnconfirmedTxs var r1 error if rf, ok := ret.Get(0).(func(context.Context) (*coretypes.ResultUnconfirmedTxs, error)); ok { @@ -700,6 +804,10 @@ func (_m *Client) NumUnconfirmedTxs(_a0 context.Context) (*coretypes.ResultUncon func (_m *Client) RemoveTx(_a0 context.Context, _a1 types.TxKey) error { ret := 
_m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for RemoveTx") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, types.TxKey) error); ok { r0 = rf(_a0, _a1) @@ -714,6 +822,10 @@ func (_m *Client) RemoveTx(_a0 context.Context, _a1 types.TxKey) error { func (_m *Client) Start(_a0 context.Context) error { ret := _m.Called(_a0) + if len(ret) == 0 { + panic("no return value specified for Start") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context) error); ok { r0 = rf(_a0) @@ -728,6 +840,10 @@ func (_m *Client) Start(_a0 context.Context) error { func (_m *Client) Status(_a0 context.Context) (*coretypes.ResultStatus, error) { ret := _m.Called(_a0) + if len(ret) == 0 { + panic("no return value specified for Status") + } + var r0 *coretypes.ResultStatus var r1 error if rf, ok := ret.Get(0).(func(context.Context) (*coretypes.ResultStatus, error)); ok { @@ -761,6 +877,10 @@ func (_m *Client) Subscribe(ctx context.Context, subscriber string, query string _ca = append(_ca, _va...) ret := _m.Called(_ca...) + if len(ret) == 0 { + panic("no return value specified for Subscribe") + } + var r0 <-chan coretypes.ResultEvent var r1 error if rf, ok := ret.Get(0).(func(context.Context, string, string, ...int) (<-chan coretypes.ResultEvent, error)); ok { @@ -787,6 +907,10 @@ func (_m *Client) Subscribe(ctx context.Context, subscriber string, query string func (_m *Client) Tx(ctx context.Context, hash bytes.HexBytes, prove bool) (*coretypes.ResultTx, error) { ret := _m.Called(ctx, hash, prove) + if len(ret) == 0 { + panic("no return value specified for Tx") + } + var r0 *coretypes.ResultTx var r1 error if rf, ok := ret.Get(0).(func(context.Context, bytes.HexBytes, bool) (*coretypes.ResultTx, error)); ok { @@ -813,6 +937,10 @@ func (_m *Client) Tx(ctx context.Context, hash bytes.HexBytes, prove bool) (*cor func (_m *Client) TxSearch(ctx context.Context, query string, prove bool, page *int, perPage *int, orderBy string) (*coretypes.ResultTxSearch, error) { ret := _m.Called(ctx, query, prove, page, perPage, orderBy) + if len(ret) == 0 { + panic("no return value specified for TxSearch") + } + var r0 *coretypes.ResultTxSearch var r1 error if rf, ok := ret.Get(0).(func(context.Context, string, bool, *int, *int, string) (*coretypes.ResultTxSearch, error)); ok { @@ -839,6 +967,10 @@ func (_m *Client) TxSearch(ctx context.Context, query string, prove bool, page * func (_m *Client) UnconfirmedTxs(ctx context.Context, page *int, perPage *int) (*coretypes.ResultUnconfirmedTxs, error) { ret := _m.Called(ctx, page, perPage) + if len(ret) == 0 { + panic("no return value specified for UnconfirmedTxs") + } + var r0 *coretypes.ResultUnconfirmedTxs var r1 error if rf, ok := ret.Get(0).(func(context.Context, *int, *int) (*coretypes.ResultUnconfirmedTxs, error)); ok { @@ -865,6 +997,10 @@ func (_m *Client) UnconfirmedTxs(ctx context.Context, page *int, perPage *int) ( func (_m *Client) Unsubscribe(ctx context.Context, subscriber string, query string) error { ret := _m.Called(ctx, subscriber, query) + if len(ret) == 0 { + panic("no return value specified for Unsubscribe") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, string, string) error); ok { r0 = rf(ctx, subscriber, query) @@ -879,6 +1015,10 @@ func (_m *Client) Unsubscribe(ctx context.Context, subscriber string, query stri func (_m *Client) UnsubscribeAll(ctx context.Context, subscriber string) error { ret := _m.Called(ctx, subscriber) + if len(ret) == 0 { + panic("no return value specified for 
UnsubscribeAll") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, string) error); ok { r0 = rf(ctx, subscriber) @@ -893,6 +1033,10 @@ func (_m *Client) UnsubscribeAll(ctx context.Context, subscriber string) error { func (_m *Client) Validators(ctx context.Context, height *int64, page *int, perPage *int, requestQuorumInfo *bool) (*coretypes.ResultValidators, error) { ret := _m.Called(ctx, height, page, perPage, requestQuorumInfo) + if len(ret) == 0 { + panic("no return value specified for Validators") + } + var r0 *coretypes.ResultValidators var r1 error if rf, ok := ret.Get(0).(func(context.Context, *int64, *int, *int, *bool) (*coretypes.ResultValidators, error)); ok { diff --git a/rpc/client/mocks/events_client.go b/rpc/client/mocks/events_client.go index 092e7b15e6..469ce79ada 100644 --- a/rpc/client/mocks/events_client.go +++ b/rpc/client/mocks/events_client.go @@ -18,6 +18,10 @@ type EventsClient struct { func (_m *EventsClient) Events(ctx context.Context, req *coretypes.RequestEvents) (*coretypes.ResultEvents, error) { ret := _m.Called(ctx, req) + if len(ret) == 0 { + panic("no return value specified for Events") + } + var r0 *coretypes.ResultEvents var r1 error if rf, ok := ret.Get(0).(func(context.Context, *coretypes.RequestEvents) (*coretypes.ResultEvents, error)); ok { diff --git a/rpc/client/mocks/evidence_client.go b/rpc/client/mocks/evidence_client.go index 1b7817a0f1..fbd8316844 100644 --- a/rpc/client/mocks/evidence_client.go +++ b/rpc/client/mocks/evidence_client.go @@ -20,6 +20,10 @@ type EvidenceClient struct { func (_m *EvidenceClient) BroadcastEvidence(_a0 context.Context, _a1 types.Evidence) (*coretypes.ResultBroadcastEvidence, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for BroadcastEvidence") + } + var r0 *coretypes.ResultBroadcastEvidence var r1 error if rf, ok := ret.Get(0).(func(context.Context, types.Evidence) (*coretypes.ResultBroadcastEvidence, error)); ok { diff --git a/rpc/client/mocks/history_client.go b/rpc/client/mocks/history_client.go index 280e1738a1..f4ae961c72 100644 --- a/rpc/client/mocks/history_client.go +++ b/rpc/client/mocks/history_client.go @@ -18,6 +18,10 @@ type HistoryClient struct { func (_m *HistoryClient) BlockchainInfo(ctx context.Context, minHeight int64, maxHeight int64) (*coretypes.ResultBlockchainInfo, error) { ret := _m.Called(ctx, minHeight, maxHeight) + if len(ret) == 0 { + panic("no return value specified for BlockchainInfo") + } + var r0 *coretypes.ResultBlockchainInfo var r1 error if rf, ok := ret.Get(0).(func(context.Context, int64, int64) (*coretypes.ResultBlockchainInfo, error)); ok { @@ -44,6 +48,10 @@ func (_m *HistoryClient) BlockchainInfo(ctx context.Context, minHeight int64, ma func (_m *HistoryClient) Genesis(_a0 context.Context) (*coretypes.ResultGenesis, error) { ret := _m.Called(_a0) + if len(ret) == 0 { + panic("no return value specified for Genesis") + } + var r0 *coretypes.ResultGenesis var r1 error if rf, ok := ret.Get(0).(func(context.Context) (*coretypes.ResultGenesis, error)); ok { @@ -70,6 +78,10 @@ func (_m *HistoryClient) Genesis(_a0 context.Context) (*coretypes.ResultGenesis, func (_m *HistoryClient) GenesisChunked(_a0 context.Context, _a1 uint) (*coretypes.ResultGenesisChunk, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for GenesisChunked") + } + var r0 *coretypes.ResultGenesisChunk var r1 error if rf, ok := ret.Get(0).(func(context.Context, uint) (*coretypes.ResultGenesisChunk, error)); ok { 
diff --git a/rpc/client/mocks/mempool_client.go b/rpc/client/mocks/mempool_client.go index 66c296e1fd..f294b23de2 100644 --- a/rpc/client/mocks/mempool_client.go +++ b/rpc/client/mocks/mempool_client.go @@ -20,6 +20,10 @@ type MempoolClient struct { func (_m *MempoolClient) CheckTx(_a0 context.Context, _a1 types.Tx) (*coretypes.ResultCheckTx, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for CheckTx") + } + var r0 *coretypes.ResultCheckTx var r1 error if rf, ok := ret.Get(0).(func(context.Context, types.Tx) (*coretypes.ResultCheckTx, error)); ok { @@ -46,6 +50,10 @@ func (_m *MempoolClient) CheckTx(_a0 context.Context, _a1 types.Tx) (*coretypes. func (_m *MempoolClient) NumUnconfirmedTxs(_a0 context.Context) (*coretypes.ResultUnconfirmedTxs, error) { ret := _m.Called(_a0) + if len(ret) == 0 { + panic("no return value specified for NumUnconfirmedTxs") + } + var r0 *coretypes.ResultUnconfirmedTxs var r1 error if rf, ok := ret.Get(0).(func(context.Context) (*coretypes.ResultUnconfirmedTxs, error)); ok { @@ -72,6 +80,10 @@ func (_m *MempoolClient) NumUnconfirmedTxs(_a0 context.Context) (*coretypes.Resu func (_m *MempoolClient) RemoveTx(_a0 context.Context, _a1 types.TxKey) error { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for RemoveTx") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, types.TxKey) error); ok { r0 = rf(_a0, _a1) @@ -86,6 +98,10 @@ func (_m *MempoolClient) RemoveTx(_a0 context.Context, _a1 types.TxKey) error { func (_m *MempoolClient) UnconfirmedTxs(ctx context.Context, page *int, perPage *int) (*coretypes.ResultUnconfirmedTxs, error) { ret := _m.Called(ctx, page, perPage) + if len(ret) == 0 { + panic("no return value specified for UnconfirmedTxs") + } + var r0 *coretypes.ResultUnconfirmedTxs var r1 error if rf, ok := ret.Get(0).(func(context.Context, *int, *int) (*coretypes.ResultUnconfirmedTxs, error)); ok { diff --git a/rpc/client/mocks/network_client.go b/rpc/client/mocks/network_client.go index f365a8a4c3..f9ae2ea916 100644 --- a/rpc/client/mocks/network_client.go +++ b/rpc/client/mocks/network_client.go @@ -18,6 +18,10 @@ type NetworkClient struct { func (_m *NetworkClient) ConsensusParams(ctx context.Context, height *int64) (*coretypes.ResultConsensusParams, error) { ret := _m.Called(ctx, height) + if len(ret) == 0 { + panic("no return value specified for ConsensusParams") + } + var r0 *coretypes.ResultConsensusParams var r1 error if rf, ok := ret.Get(0).(func(context.Context, *int64) (*coretypes.ResultConsensusParams, error)); ok { @@ -44,6 +48,10 @@ func (_m *NetworkClient) ConsensusParams(ctx context.Context, height *int64) (*c func (_m *NetworkClient) ConsensusState(_a0 context.Context) (*coretypes.ResultConsensusState, error) { ret := _m.Called(_a0) + if len(ret) == 0 { + panic("no return value specified for ConsensusState") + } + var r0 *coretypes.ResultConsensusState var r1 error if rf, ok := ret.Get(0).(func(context.Context) (*coretypes.ResultConsensusState, error)); ok { @@ -70,6 +78,10 @@ func (_m *NetworkClient) ConsensusState(_a0 context.Context) (*coretypes.ResultC func (_m *NetworkClient) DumpConsensusState(_a0 context.Context) (*coretypes.ResultDumpConsensusState, error) { ret := _m.Called(_a0) + if len(ret) == 0 { + panic("no return value specified for DumpConsensusState") + } + var r0 *coretypes.ResultDumpConsensusState var r1 error if rf, ok := ret.Get(0).(func(context.Context) (*coretypes.ResultDumpConsensusState, error)); ok { @@ -96,6 +108,10 @@ 
func (_m *NetworkClient) DumpConsensusState(_a0 context.Context) (*coretypes.Res func (_m *NetworkClient) Health(_a0 context.Context) (*coretypes.ResultHealth, error) { ret := _m.Called(_a0) + if len(ret) == 0 { + panic("no return value specified for Health") + } + var r0 *coretypes.ResultHealth var r1 error if rf, ok := ret.Get(0).(func(context.Context) (*coretypes.ResultHealth, error)); ok { @@ -122,6 +138,10 @@ func (_m *NetworkClient) Health(_a0 context.Context) (*coretypes.ResultHealth, e func (_m *NetworkClient) NetInfo(_a0 context.Context) (*coretypes.ResultNetInfo, error) { ret := _m.Called(_a0) + if len(ret) == 0 { + panic("no return value specified for NetInfo") + } + var r0 *coretypes.ResultNetInfo var r1 error if rf, ok := ret.Get(0).(func(context.Context) (*coretypes.ResultNetInfo, error)); ok { diff --git a/rpc/client/mocks/remote_client.go b/rpc/client/mocks/remote_client.go index a09c863c8f..1708786162 100644 --- a/rpc/client/mocks/remote_client.go +++ b/rpc/client/mocks/remote_client.go @@ -24,6 +24,10 @@ type RemoteClient struct { func (_m *RemoteClient) ABCIInfo(_a0 context.Context) (*coretypes.ResultABCIInfo, error) { ret := _m.Called(_a0) + if len(ret) == 0 { + panic("no return value specified for ABCIInfo") + } + var r0 *coretypes.ResultABCIInfo var r1 error if rf, ok := ret.Get(0).(func(context.Context) (*coretypes.ResultABCIInfo, error)); ok { @@ -50,6 +54,10 @@ func (_m *RemoteClient) ABCIInfo(_a0 context.Context) (*coretypes.ResultABCIInfo func (_m *RemoteClient) ABCIQuery(ctx context.Context, path string, data bytes.HexBytes) (*coretypes.ResultABCIQuery, error) { ret := _m.Called(ctx, path, data) + if len(ret) == 0 { + panic("no return value specified for ABCIQuery") + } + var r0 *coretypes.ResultABCIQuery var r1 error if rf, ok := ret.Get(0).(func(context.Context, string, bytes.HexBytes) (*coretypes.ResultABCIQuery, error)); ok { @@ -76,6 +84,10 @@ func (_m *RemoteClient) ABCIQuery(ctx context.Context, path string, data bytes.H func (_m *RemoteClient) ABCIQueryWithOptions(ctx context.Context, path string, data bytes.HexBytes, opts client.ABCIQueryOptions) (*coretypes.ResultABCIQuery, error) { ret := _m.Called(ctx, path, data, opts) + if len(ret) == 0 { + panic("no return value specified for ABCIQueryWithOptions") + } + var r0 *coretypes.ResultABCIQuery var r1 error if rf, ok := ret.Get(0).(func(context.Context, string, bytes.HexBytes, client.ABCIQueryOptions) (*coretypes.ResultABCIQuery, error)); ok { @@ -102,6 +114,10 @@ func (_m *RemoteClient) ABCIQueryWithOptions(ctx context.Context, path string, d func (_m *RemoteClient) Block(ctx context.Context, height *int64) (*coretypes.ResultBlock, error) { ret := _m.Called(ctx, height) + if len(ret) == 0 { + panic("no return value specified for Block") + } + var r0 *coretypes.ResultBlock var r1 error if rf, ok := ret.Get(0).(func(context.Context, *int64) (*coretypes.ResultBlock, error)); ok { @@ -128,6 +144,10 @@ func (_m *RemoteClient) Block(ctx context.Context, height *int64) (*coretypes.Re func (_m *RemoteClient) BlockByHash(ctx context.Context, hash bytes.HexBytes) (*coretypes.ResultBlock, error) { ret := _m.Called(ctx, hash) + if len(ret) == 0 { + panic("no return value specified for BlockByHash") + } + var r0 *coretypes.ResultBlock var r1 error if rf, ok := ret.Get(0).(func(context.Context, bytes.HexBytes) (*coretypes.ResultBlock, error)); ok { @@ -154,6 +174,10 @@ func (_m *RemoteClient) BlockByHash(ctx context.Context, hash bytes.HexBytes) (* func (_m *RemoteClient) BlockResults(ctx context.Context, height 
*int64) (*coretypes.ResultBlockResults, error) { ret := _m.Called(ctx, height) + if len(ret) == 0 { + panic("no return value specified for BlockResults") + } + var r0 *coretypes.ResultBlockResults var r1 error if rf, ok := ret.Get(0).(func(context.Context, *int64) (*coretypes.ResultBlockResults, error)); ok { @@ -180,6 +204,10 @@ func (_m *RemoteClient) BlockResults(ctx context.Context, height *int64) (*coret func (_m *RemoteClient) BlockSearch(ctx context.Context, query string, page *int, perPage *int, orderBy string) (*coretypes.ResultBlockSearch, error) { ret := _m.Called(ctx, query, page, perPage, orderBy) + if len(ret) == 0 { + panic("no return value specified for BlockSearch") + } + var r0 *coretypes.ResultBlockSearch var r1 error if rf, ok := ret.Get(0).(func(context.Context, string, *int, *int, string) (*coretypes.ResultBlockSearch, error)); ok { @@ -206,6 +234,10 @@ func (_m *RemoteClient) BlockSearch(ctx context.Context, query string, page *int func (_m *RemoteClient) BlockchainInfo(ctx context.Context, minHeight int64, maxHeight int64) (*coretypes.ResultBlockchainInfo, error) { ret := _m.Called(ctx, minHeight, maxHeight) + if len(ret) == 0 { + panic("no return value specified for BlockchainInfo") + } + var r0 *coretypes.ResultBlockchainInfo var r1 error if rf, ok := ret.Get(0).(func(context.Context, int64, int64) (*coretypes.ResultBlockchainInfo, error)); ok { @@ -232,6 +264,10 @@ func (_m *RemoteClient) BlockchainInfo(ctx context.Context, minHeight int64, max func (_m *RemoteClient) BroadcastEvidence(_a0 context.Context, _a1 types.Evidence) (*coretypes.ResultBroadcastEvidence, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for BroadcastEvidence") + } + var r0 *coretypes.ResultBroadcastEvidence var r1 error if rf, ok := ret.Get(0).(func(context.Context, types.Evidence) (*coretypes.ResultBroadcastEvidence, error)); ok { @@ -258,6 +294,10 @@ func (_m *RemoteClient) BroadcastEvidence(_a0 context.Context, _a1 types.Evidenc func (_m *RemoteClient) BroadcastTx(_a0 context.Context, _a1 types.Tx) (*coretypes.ResultBroadcastTx, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for BroadcastTx") + } + var r0 *coretypes.ResultBroadcastTx var r1 error if rf, ok := ret.Get(0).(func(context.Context, types.Tx) (*coretypes.ResultBroadcastTx, error)); ok { @@ -284,6 +324,10 @@ func (_m *RemoteClient) BroadcastTx(_a0 context.Context, _a1 types.Tx) (*coretyp func (_m *RemoteClient) BroadcastTxAsync(_a0 context.Context, _a1 types.Tx) (*coretypes.ResultBroadcastTx, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for BroadcastTxAsync") + } + var r0 *coretypes.ResultBroadcastTx var r1 error if rf, ok := ret.Get(0).(func(context.Context, types.Tx) (*coretypes.ResultBroadcastTx, error)); ok { @@ -310,6 +354,10 @@ func (_m *RemoteClient) BroadcastTxAsync(_a0 context.Context, _a1 types.Tx) (*co func (_m *RemoteClient) BroadcastTxCommit(_a0 context.Context, _a1 types.Tx) (*coretypes.ResultBroadcastTxCommit, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for BroadcastTxCommit") + } + var r0 *coretypes.ResultBroadcastTxCommit var r1 error if rf, ok := ret.Get(0).(func(context.Context, types.Tx) (*coretypes.ResultBroadcastTxCommit, error)); ok { @@ -336,6 +384,10 @@ func (_m *RemoteClient) BroadcastTxCommit(_a0 context.Context, _a1 types.Tx) (*c func (_m *RemoteClient) BroadcastTxSync(_a0 context.Context, _a1 types.Tx) 
(*coretypes.ResultBroadcastTx, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for BroadcastTxSync") + } + var r0 *coretypes.ResultBroadcastTx var r1 error if rf, ok := ret.Get(0).(func(context.Context, types.Tx) (*coretypes.ResultBroadcastTx, error)); ok { @@ -362,6 +414,10 @@ func (_m *RemoteClient) BroadcastTxSync(_a0 context.Context, _a1 types.Tx) (*cor func (_m *RemoteClient) CheckTx(_a0 context.Context, _a1 types.Tx) (*coretypes.ResultCheckTx, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for CheckTx") + } + var r0 *coretypes.ResultCheckTx var r1 error if rf, ok := ret.Get(0).(func(context.Context, types.Tx) (*coretypes.ResultCheckTx, error)); ok { @@ -388,6 +444,10 @@ func (_m *RemoteClient) CheckTx(_a0 context.Context, _a1 types.Tx) (*coretypes.R func (_m *RemoteClient) Commit(ctx context.Context, height *int64) (*coretypes.ResultCommit, error) { ret := _m.Called(ctx, height) + if len(ret) == 0 { + panic("no return value specified for Commit") + } + var r0 *coretypes.ResultCommit var r1 error if rf, ok := ret.Get(0).(func(context.Context, *int64) (*coretypes.ResultCommit, error)); ok { @@ -414,6 +474,10 @@ func (_m *RemoteClient) Commit(ctx context.Context, height *int64) (*coretypes.R func (_m *RemoteClient) ConsensusParams(ctx context.Context, height *int64) (*coretypes.ResultConsensusParams, error) { ret := _m.Called(ctx, height) + if len(ret) == 0 { + panic("no return value specified for ConsensusParams") + } + var r0 *coretypes.ResultConsensusParams var r1 error if rf, ok := ret.Get(0).(func(context.Context, *int64) (*coretypes.ResultConsensusParams, error)); ok { @@ -440,6 +504,10 @@ func (_m *RemoteClient) ConsensusParams(ctx context.Context, height *int64) (*co func (_m *RemoteClient) ConsensusState(_a0 context.Context) (*coretypes.ResultConsensusState, error) { ret := _m.Called(_a0) + if len(ret) == 0 { + panic("no return value specified for ConsensusState") + } + var r0 *coretypes.ResultConsensusState var r1 error if rf, ok := ret.Get(0).(func(context.Context) (*coretypes.ResultConsensusState, error)); ok { @@ -466,6 +534,10 @@ func (_m *RemoteClient) ConsensusState(_a0 context.Context) (*coretypes.ResultCo func (_m *RemoteClient) DumpConsensusState(_a0 context.Context) (*coretypes.ResultDumpConsensusState, error) { ret := _m.Called(_a0) + if len(ret) == 0 { + panic("no return value specified for DumpConsensusState") + } + var r0 *coretypes.ResultDumpConsensusState var r1 error if rf, ok := ret.Get(0).(func(context.Context) (*coretypes.ResultDumpConsensusState, error)); ok { @@ -492,6 +564,10 @@ func (_m *RemoteClient) DumpConsensusState(_a0 context.Context) (*coretypes.Resu func (_m *RemoteClient) Events(ctx context.Context, req *coretypes.RequestEvents) (*coretypes.ResultEvents, error) { ret := _m.Called(ctx, req) + if len(ret) == 0 { + panic("no return value specified for Events") + } + var r0 *coretypes.ResultEvents var r1 error if rf, ok := ret.Get(0).(func(context.Context, *coretypes.RequestEvents) (*coretypes.ResultEvents, error)); ok { @@ -518,6 +594,10 @@ func (_m *RemoteClient) Events(ctx context.Context, req *coretypes.RequestEvents func (_m *RemoteClient) Genesis(_a0 context.Context) (*coretypes.ResultGenesis, error) { ret := _m.Called(_a0) + if len(ret) == 0 { + panic("no return value specified for Genesis") + } + var r0 *coretypes.ResultGenesis var r1 error if rf, ok := ret.Get(0).(func(context.Context) (*coretypes.ResultGenesis, error)); ok { @@ -544,6 +624,10 @@ func 
(_m *RemoteClient) Genesis(_a0 context.Context) (*coretypes.ResultGenesis, func (_m *RemoteClient) GenesisChunked(_a0 context.Context, _a1 uint) (*coretypes.ResultGenesisChunk, error) { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for GenesisChunked") + } + var r0 *coretypes.ResultGenesisChunk var r1 error if rf, ok := ret.Get(0).(func(context.Context, uint) (*coretypes.ResultGenesisChunk, error)); ok { @@ -570,6 +654,10 @@ func (_m *RemoteClient) GenesisChunked(_a0 context.Context, _a1 uint) (*coretype func (_m *RemoteClient) Header(ctx context.Context, height *int64) (*coretypes.ResultHeader, error) { ret := _m.Called(ctx, height) + if len(ret) == 0 { + panic("no return value specified for Header") + } + var r0 *coretypes.ResultHeader var r1 error if rf, ok := ret.Get(0).(func(context.Context, *int64) (*coretypes.ResultHeader, error)); ok { @@ -596,6 +684,10 @@ func (_m *RemoteClient) Header(ctx context.Context, height *int64) (*coretypes.R func (_m *RemoteClient) HeaderByHash(ctx context.Context, hash bytes.HexBytes) (*coretypes.ResultHeader, error) { ret := _m.Called(ctx, hash) + if len(ret) == 0 { + panic("no return value specified for HeaderByHash") + } + var r0 *coretypes.ResultHeader var r1 error if rf, ok := ret.Get(0).(func(context.Context, bytes.HexBytes) (*coretypes.ResultHeader, error)); ok { @@ -622,6 +714,10 @@ func (_m *RemoteClient) HeaderByHash(ctx context.Context, hash bytes.HexBytes) ( func (_m *RemoteClient) Health(_a0 context.Context) (*coretypes.ResultHealth, error) { ret := _m.Called(_a0) + if len(ret) == 0 { + panic("no return value specified for Health") + } + var r0 *coretypes.ResultHealth var r1 error if rf, ok := ret.Get(0).(func(context.Context) (*coretypes.ResultHealth, error)); ok { @@ -648,6 +744,10 @@ func (_m *RemoteClient) Health(_a0 context.Context) (*coretypes.ResultHealth, er func (_m *RemoteClient) NetInfo(_a0 context.Context) (*coretypes.ResultNetInfo, error) { ret := _m.Called(_a0) + if len(ret) == 0 { + panic("no return value specified for NetInfo") + } + var r0 *coretypes.ResultNetInfo var r1 error if rf, ok := ret.Get(0).(func(context.Context) (*coretypes.ResultNetInfo, error)); ok { @@ -674,6 +774,10 @@ func (_m *RemoteClient) NetInfo(_a0 context.Context) (*coretypes.ResultNetInfo, func (_m *RemoteClient) NumUnconfirmedTxs(_a0 context.Context) (*coretypes.ResultUnconfirmedTxs, error) { ret := _m.Called(_a0) + if len(ret) == 0 { + panic("no return value specified for NumUnconfirmedTxs") + } + var r0 *coretypes.ResultUnconfirmedTxs var r1 error if rf, ok := ret.Get(0).(func(context.Context) (*coretypes.ResultUnconfirmedTxs, error)); ok { @@ -700,6 +804,10 @@ func (_m *RemoteClient) NumUnconfirmedTxs(_a0 context.Context) (*coretypes.Resul func (_m *RemoteClient) Remote() string { ret := _m.Called() + if len(ret) == 0 { + panic("no return value specified for Remote") + } + var r0 string if rf, ok := ret.Get(0).(func() string); ok { r0 = rf() @@ -714,6 +822,10 @@ func (_m *RemoteClient) Remote() string { func (_m *RemoteClient) RemoveTx(_a0 context.Context, _a1 types.TxKey) error { ret := _m.Called(_a0, _a1) + if len(ret) == 0 { + panic("no return value specified for RemoveTx") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, types.TxKey) error); ok { r0 = rf(_a0, _a1) @@ -728,6 +840,10 @@ func (_m *RemoteClient) RemoveTx(_a0 context.Context, _a1 types.TxKey) error { func (_m *RemoteClient) Start(_a0 context.Context) error { ret := _m.Called(_a0) + if len(ret) == 0 { + panic("no return value 
specified for Start") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context) error); ok { r0 = rf(_a0) @@ -742,6 +858,10 @@ func (_m *RemoteClient) Start(_a0 context.Context) error { func (_m *RemoteClient) Status(_a0 context.Context) (*coretypes.ResultStatus, error) { ret := _m.Called(_a0) + if len(ret) == 0 { + panic("no return value specified for Status") + } + var r0 *coretypes.ResultStatus var r1 error if rf, ok := ret.Get(0).(func(context.Context) (*coretypes.ResultStatus, error)); ok { @@ -775,6 +895,10 @@ func (_m *RemoteClient) Subscribe(ctx context.Context, subscriber string, query _ca = append(_ca, _va...) ret := _m.Called(_ca...) + if len(ret) == 0 { + panic("no return value specified for Subscribe") + } + var r0 <-chan coretypes.ResultEvent var r1 error if rf, ok := ret.Get(0).(func(context.Context, string, string, ...int) (<-chan coretypes.ResultEvent, error)); ok { @@ -801,6 +925,10 @@ func (_m *RemoteClient) Subscribe(ctx context.Context, subscriber string, query func (_m *RemoteClient) Tx(ctx context.Context, hash bytes.HexBytes, prove bool) (*coretypes.ResultTx, error) { ret := _m.Called(ctx, hash, prove) + if len(ret) == 0 { + panic("no return value specified for Tx") + } + var r0 *coretypes.ResultTx var r1 error if rf, ok := ret.Get(0).(func(context.Context, bytes.HexBytes, bool) (*coretypes.ResultTx, error)); ok { @@ -827,6 +955,10 @@ func (_m *RemoteClient) Tx(ctx context.Context, hash bytes.HexBytes, prove bool) func (_m *RemoteClient) TxSearch(ctx context.Context, query string, prove bool, page *int, perPage *int, orderBy string) (*coretypes.ResultTxSearch, error) { ret := _m.Called(ctx, query, prove, page, perPage, orderBy) + if len(ret) == 0 { + panic("no return value specified for TxSearch") + } + var r0 *coretypes.ResultTxSearch var r1 error if rf, ok := ret.Get(0).(func(context.Context, string, bool, *int, *int, string) (*coretypes.ResultTxSearch, error)); ok { @@ -853,6 +985,10 @@ func (_m *RemoteClient) TxSearch(ctx context.Context, query string, prove bool, func (_m *RemoteClient) UnconfirmedTxs(ctx context.Context, page *int, perPage *int) (*coretypes.ResultUnconfirmedTxs, error) { ret := _m.Called(ctx, page, perPage) + if len(ret) == 0 { + panic("no return value specified for UnconfirmedTxs") + } + var r0 *coretypes.ResultUnconfirmedTxs var r1 error if rf, ok := ret.Get(0).(func(context.Context, *int, *int) (*coretypes.ResultUnconfirmedTxs, error)); ok { @@ -879,6 +1015,10 @@ func (_m *RemoteClient) UnconfirmedTxs(ctx context.Context, page *int, perPage * func (_m *RemoteClient) Unsubscribe(ctx context.Context, subscriber string, query string) error { ret := _m.Called(ctx, subscriber, query) + if len(ret) == 0 { + panic("no return value specified for Unsubscribe") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, string, string) error); ok { r0 = rf(ctx, subscriber, query) @@ -893,6 +1033,10 @@ func (_m *RemoteClient) Unsubscribe(ctx context.Context, subscriber string, quer func (_m *RemoteClient) UnsubscribeAll(ctx context.Context, subscriber string) error { ret := _m.Called(ctx, subscriber) + if len(ret) == 0 { + panic("no return value specified for UnsubscribeAll") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, string) error); ok { r0 = rf(ctx, subscriber) @@ -907,6 +1051,10 @@ func (_m *RemoteClient) UnsubscribeAll(ctx context.Context, subscriber string) e func (_m *RemoteClient) Validators(ctx context.Context, height *int64, page *int, perPage *int, requestQuorumInfo *bool) (*coretypes.ResultValidators, 
error) { ret := _m.Called(ctx, height, page, perPage, requestQuorumInfo) + if len(ret) == 0 { + panic("no return value specified for Validators") + } + var r0 *coretypes.ResultValidators var r1 error if rf, ok := ret.Get(0).(func(context.Context, *int64, *int, *int, *bool) (*coretypes.ResultValidators, error)); ok { diff --git a/rpc/client/mocks/sign_client.go b/rpc/client/mocks/sign_client.go index 9d312d993d..dea02efa1d 100644 --- a/rpc/client/mocks/sign_client.go +++ b/rpc/client/mocks/sign_client.go @@ -21,6 +21,10 @@ type SignClient struct { func (_m *SignClient) Block(ctx context.Context, height *int64) (*coretypes.ResultBlock, error) { ret := _m.Called(ctx, height) + if len(ret) == 0 { + panic("no return value specified for Block") + } + var r0 *coretypes.ResultBlock var r1 error if rf, ok := ret.Get(0).(func(context.Context, *int64) (*coretypes.ResultBlock, error)); ok { @@ -47,6 +51,10 @@ func (_m *SignClient) Block(ctx context.Context, height *int64) (*coretypes.Resu func (_m *SignClient) BlockByHash(ctx context.Context, hash bytes.HexBytes) (*coretypes.ResultBlock, error) { ret := _m.Called(ctx, hash) + if len(ret) == 0 { + panic("no return value specified for BlockByHash") + } + var r0 *coretypes.ResultBlock var r1 error if rf, ok := ret.Get(0).(func(context.Context, bytes.HexBytes) (*coretypes.ResultBlock, error)); ok { @@ -73,6 +81,10 @@ func (_m *SignClient) BlockByHash(ctx context.Context, hash bytes.HexBytes) (*co func (_m *SignClient) BlockResults(ctx context.Context, height *int64) (*coretypes.ResultBlockResults, error) { ret := _m.Called(ctx, height) + if len(ret) == 0 { + panic("no return value specified for BlockResults") + } + var r0 *coretypes.ResultBlockResults var r1 error if rf, ok := ret.Get(0).(func(context.Context, *int64) (*coretypes.ResultBlockResults, error)); ok { @@ -99,6 +111,10 @@ func (_m *SignClient) BlockResults(ctx context.Context, height *int64) (*coretyp func (_m *SignClient) BlockSearch(ctx context.Context, query string, page *int, perPage *int, orderBy string) (*coretypes.ResultBlockSearch, error) { ret := _m.Called(ctx, query, page, perPage, orderBy) + if len(ret) == 0 { + panic("no return value specified for BlockSearch") + } + var r0 *coretypes.ResultBlockSearch var r1 error if rf, ok := ret.Get(0).(func(context.Context, string, *int, *int, string) (*coretypes.ResultBlockSearch, error)); ok { @@ -125,6 +141,10 @@ func (_m *SignClient) BlockSearch(ctx context.Context, query string, page *int, func (_m *SignClient) Commit(ctx context.Context, height *int64) (*coretypes.ResultCommit, error) { ret := _m.Called(ctx, height) + if len(ret) == 0 { + panic("no return value specified for Commit") + } + var r0 *coretypes.ResultCommit var r1 error if rf, ok := ret.Get(0).(func(context.Context, *int64) (*coretypes.ResultCommit, error)); ok { @@ -151,6 +171,10 @@ func (_m *SignClient) Commit(ctx context.Context, height *int64) (*coretypes.Res func (_m *SignClient) Header(ctx context.Context, height *int64) (*coretypes.ResultHeader, error) { ret := _m.Called(ctx, height) + if len(ret) == 0 { + panic("no return value specified for Header") + } + var r0 *coretypes.ResultHeader var r1 error if rf, ok := ret.Get(0).(func(context.Context, *int64) (*coretypes.ResultHeader, error)); ok { @@ -177,6 +201,10 @@ func (_m *SignClient) Header(ctx context.Context, height *int64) (*coretypes.Res func (_m *SignClient) HeaderByHash(ctx context.Context, hash bytes.HexBytes) (*coretypes.ResultHeader, error) { ret := _m.Called(ctx, hash) + if len(ret) == 0 { + panic("no return 
value specified for HeaderByHash") + } + var r0 *coretypes.ResultHeader var r1 error if rf, ok := ret.Get(0).(func(context.Context, bytes.HexBytes) (*coretypes.ResultHeader, error)); ok { @@ -203,6 +231,10 @@ func (_m *SignClient) HeaderByHash(ctx context.Context, hash bytes.HexBytes) (*c func (_m *SignClient) Tx(ctx context.Context, hash bytes.HexBytes, prove bool) (*coretypes.ResultTx, error) { ret := _m.Called(ctx, hash, prove) + if len(ret) == 0 { + panic("no return value specified for Tx") + } + var r0 *coretypes.ResultTx var r1 error if rf, ok := ret.Get(0).(func(context.Context, bytes.HexBytes, bool) (*coretypes.ResultTx, error)); ok { @@ -229,6 +261,10 @@ func (_m *SignClient) Tx(ctx context.Context, hash bytes.HexBytes, prove bool) ( func (_m *SignClient) TxSearch(ctx context.Context, query string, prove bool, page *int, perPage *int, orderBy string) (*coretypes.ResultTxSearch, error) { ret := _m.Called(ctx, query, prove, page, perPage, orderBy) + if len(ret) == 0 { + panic("no return value specified for TxSearch") + } + var r0 *coretypes.ResultTxSearch var r1 error if rf, ok := ret.Get(0).(func(context.Context, string, bool, *int, *int, string) (*coretypes.ResultTxSearch, error)); ok { @@ -255,6 +291,10 @@ func (_m *SignClient) TxSearch(ctx context.Context, query string, prove bool, pa func (_m *SignClient) Validators(ctx context.Context, height *int64, page *int, perPage *int, requestQuorumInfo *bool) (*coretypes.ResultValidators, error) { ret := _m.Called(ctx, height, page, perPage, requestQuorumInfo) + if len(ret) == 0 { + panic("no return value specified for Validators") + } + var r0 *coretypes.ResultValidators var r1 error if rf, ok := ret.Get(0).(func(context.Context, *int64, *int, *int, *bool) (*coretypes.ResultValidators, error)); ok { diff --git a/rpc/client/mocks/status_client.go b/rpc/client/mocks/status_client.go index 7094efff07..71f5a6f6a5 100644 --- a/rpc/client/mocks/status_client.go +++ b/rpc/client/mocks/status_client.go @@ -18,6 +18,10 @@ type StatusClient struct { func (_m *StatusClient) Status(_a0 context.Context) (*coretypes.ResultStatus, error) { ret := _m.Called(_a0) + if len(ret) == 0 { + panic("no return value specified for Status") + } + var r0 *coretypes.ResultStatus var r1 error if rf, ok := ret.Get(0).(func(context.Context) (*coretypes.ResultStatus, error)); ok { diff --git a/rpc/client/mocks/subscription_client.go b/rpc/client/mocks/subscription_client.go index 512da36df5..84095a2b03 100644 --- a/rpc/client/mocks/subscription_client.go +++ b/rpc/client/mocks/subscription_client.go @@ -25,6 +25,10 @@ func (_m *SubscriptionClient) Subscribe(ctx context.Context, subscriber string, _ca = append(_ca, _va...) ret := _m.Called(_ca...) 
+ if len(ret) == 0 { + panic("no return value specified for Subscribe") + } + var r0 <-chan coretypes.ResultEvent var r1 error if rf, ok := ret.Get(0).(func(context.Context, string, string, ...int) (<-chan coretypes.ResultEvent, error)); ok { @@ -51,6 +55,10 @@ func (_m *SubscriptionClient) Subscribe(ctx context.Context, subscriber string, func (_m *SubscriptionClient) Unsubscribe(ctx context.Context, subscriber string, query string) error { ret := _m.Called(ctx, subscriber, query) + if len(ret) == 0 { + panic("no return value specified for Unsubscribe") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, string, string) error); ok { r0 = rf(ctx, subscriber, query) @@ -65,6 +73,10 @@ func (_m *SubscriptionClient) Unsubscribe(ctx context.Context, subscriber string func (_m *SubscriptionClient) UnsubscribeAll(ctx context.Context, subscriber string) error { ret := _m.Called(ctx, subscriber) + if len(ret) == 0 { + panic("no return value specified for UnsubscribeAll") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, string) error); ok { r0 = rf(ctx, subscriber) diff --git a/rpc/jsonrpc/client/ws_client.go b/rpc/jsonrpc/client/ws_client.go index ed91fdec15..8082e9ded3 100644 --- a/rpc/jsonrpc/client/ws_client.go +++ b/rpc/jsonrpc/client/ws_client.go @@ -37,7 +37,7 @@ var defaultWSOptions = wsOptions{ // the remote server. // // WSClient is safe for concurrent use by multiple goroutines. -type WSClient struct { // nolint: maligned +type WSClient struct { //nolint: maligned Logger log.Logger conn *websocket.Conn @@ -206,7 +206,7 @@ func (c *WSClient) dial() error { Proxy: http.ProxyFromEnvironment, } rHeader := http.Header{} - conn, _, err := dialer.Dial(c.protocol+"://"+c.Address+c.Endpoint, rHeader) // nolint:nolintlint,bodyclose + conn, _, err := dialer.Dial(c.protocol+"://"+c.Address+c.Endpoint, rHeader) //nolint:nolintlint,bodyclose if err != nil { return err } @@ -232,7 +232,7 @@ func (c *WSClient) reconnect(ctx context.Context) error { defer timer.Stop() for { - // nolint:gosec // G404: Use of weak random number generator + //nolint:gosec // G404: Use of weak random number generator jitter := time.Duration(mrand.Float64() * float64(time.Second)) // 1s == (1e9 ns) backoffDuration := jitter + ((1 << attempt) * time.Second) diff --git a/rpc/jsonrpc/jsonrpc_test.go b/rpc/jsonrpc/jsonrpc_test.go index b171de3cc3..4bf9d5a537 100644 --- a/rpc/jsonrpc/jsonrpc_test.go +++ b/rpc/jsonrpc/jsonrpc_test.go @@ -75,23 +75,23 @@ var Routes = map[string]*server.RPCFunc{ "echo_int": server.NewRPCFunc(EchoIntResult), } -func EchoResult(ctx context.Context, v *RequestEcho) (*ResultEcho, error) { +func EchoResult(_ctx context.Context, v *RequestEcho) (*ResultEcho, error) { return &ResultEcho{v.Value}, nil } -func EchoWSResult(ctx context.Context, v *RequestEcho) (*ResultEcho, error) { +func EchoWSResult(_ctx context.Context, v *RequestEcho) (*ResultEcho, error) { return &ResultEcho{v.Value}, nil } -func EchoIntResult(ctx context.Context, v *RequestEchoInt) (*ResultEchoInt, error) { +func EchoIntResult(_ctx context.Context, v *RequestEchoInt) (*ResultEchoInt, error) { return &ResultEchoInt{v.Value}, nil } -func EchoBytesResult(ctx context.Context, v *RequestEchoBytes) (*ResultEchoBytes, error) { +func EchoBytesResult(_ctx context.Context, v *RequestEchoBytes) (*ResultEchoBytes, error) { return &ResultEchoBytes{v.Value}, nil } -func EchoDataBytesResult(ctx context.Context, v *RequestEchoDataBytes) (*ResultEchoDataBytes, error) { +func EchoDataBytesResult(_ctx context.Context, v 
*RequestEchoDataBytes) (*ResultEchoDataBytes, error) { return &ResultEchoDataBytes{v.Value}, nil } diff --git a/rpc/jsonrpc/test/main.go b/rpc/jsonrpc/test/main.go index d1b5928981..6b265b8fa9 100644 --- a/rpc/jsonrpc/test/main.go +++ b/rpc/jsonrpc/test/main.go @@ -17,7 +17,7 @@ var routes = map[string]*rpcserver.RPCFunc{ "hello_world": rpcserver.NewRPCFunc(HelloWorld), } -func HelloWorld(ctx context.Context, name string, num int) (Result, error) { +func HelloWorld(_ctx context.Context, name string, num int) (Result, error) { return Result{fmt.Sprintf("hi %s %d", name, num)}, nil } diff --git a/scripts/keymigrate/migrate.go b/scripts/keymigrate/migrate.go index 02ad486c7b..b5b56e8392 100644 --- a/scripts/keymigrate/migrate.go +++ b/scripts/keymigrate/migrate.go @@ -597,7 +597,7 @@ func replaceKey(db dbm.DB, storeName string, key keyID) error { // 10% of the time, force a write to disk, but mostly don't, // because it's faster. - if rand.Intn(100)%10 == 0 { // nolint:gosec + if rand.Intn(100)%10 == 0 { //nolint:gosec if err = batch.WriteSync(); err != nil { return err } diff --git a/scripts/mockery_generate.sh b/scripts/mockery_generate.sh index 3c7e8d3e46..1d00aef13f 100755 --- a/scripts/mockery_generate.sh +++ b/scripts/mockery_generate.sh @@ -6,7 +6,7 @@ # runs the published Docker container. This legerdemain is so that the CI build # and a local build can work off the same script. # -VERSION=v2.33.2 +VERSION=v2.41.0 if ! mockery --version 2>/dev/null | grep $VERSION; then echo "Please install mockery $VERSION" diff --git a/scripts/release/cliff-pre.toml b/scripts/release/cliff-pre.toml index c5aac65826..14331286f0 100644 --- a/scripts/release/cliff-pre.toml +++ b/scripts/release/cliff-pre.toml @@ -35,21 +35,21 @@ conventional_commits = true filter_unconventional = true # regex for parsing and grouping commits commit_parsers = [ - { message = "^feat", group = "Features"}, - { message = "^fix", group = "Bug Fixes"}, - { message = "^doc", group = "Documentation"}, - { message = "^perf", group = "Performance"}, - { message = "^refactor", group = "Refactor"}, - { message = "^style", group = "Styling"}, - { message = "^test", group = "Testing"}, - { message = "^chore\\(release\\): update changelog and bump version to", skip = true}, - { message = "^chore", group = "Miscellaneous Tasks"}, - { body = ".*security", group = "Security"}, + { message = "^feat", group = "Features" }, + { message = "^fix", group = "Bug Fixes" }, + { message = "^doc", group = "Documentation" }, + { message = "^perf", group = "Performance" }, + { message = "^refactor", group = "Refactor" }, + { message = "^style", group = "Styling" }, + { message = "^test", group = "Testing" }, + { message = "^chore\\(release\\): update changelog and bump version to", skip = true }, + { message = "^chore", group = "Miscellaneous Tasks" }, + { body = ".*security", group = "Security" }, ] # filter out the commits that are not matched by commit parsers filter_commits = false # glob pattern for matching git tags -tag_pattern = 'v0.[0-9]*' +tag_pattern = 'v[0-9].[0-9]*' # regex for skipping tags # skip_tags = "v0.1.0-beta.1" # regex for ignoring tags diff --git a/scripts/release/cliff.toml b/scripts/release/cliff.toml index d7ddda9e9b..550800ea1a 100644 --- a/scripts/release/cliff.toml +++ b/scripts/release/cliff.toml @@ -35,21 +35,21 @@ conventional_commits = true filter_unconventional = true # regex for parsing and grouping commits commit_parsers = [ - { message = "^feat", group = "Features"}, - { message = "^fix", group = "Bug Fixes"}, - 
{ message = "^doc", group = "Documentation"}, - { message = "^perf", group = "Performance"}, - { message = "^refactor", group = "Refactor"}, - { message = "^style", group = "Styling"}, - { message = "^test", group = "Testing"}, - { message = "^chore\\(release\\): update changelog and bump version to", skip = true}, - { message = "^chore", group = "Miscellaneous Tasks"}, - { body = ".*security", group = "Security"}, + { message = "^feat", group = "Features" }, + { message = "^fix", group = "Bug Fixes" }, + { message = "^doc", group = "Documentation" }, + { message = "^perf", group = "Performance" }, + { message = "^refactor", group = "Refactor" }, + { message = "^style", group = "Styling" }, + { message = "^test", group = "Testing" }, + { message = "^chore\\(release\\): update changelog and bump version to", skip = true }, + { message = "^chore", group = "Miscellaneous Tasks" }, + { body = ".*security", group = "Security" }, ] # filter out the commits that are not matched by commit parsers filter_commits = false # glob pattern for matching git tags -tag_pattern = 'v0.[0-9]*' +tag_pattern = 'v[0-9].[0-9]*' # regex for skipping tags # skip_tags = "v0.1.0-beta.1" # regex for ignoring tags diff --git a/scripts/txs/random.sh b/scripts/txs/random.sh old mode 100644 new mode 100755 index 231fabcfea..dae4c42d74 --- a/scripts/txs/random.sh +++ b/scripts/txs/random.sh @@ -2,18 +2,18 @@ set -u function toHex() { - echo -n $1 | hexdump -ve '1/1 "%.2X"' + echo -n "$1" | hexdump -ve '1/1 "%.2X"' } -N=$1 -PORT=$2 +N="$1" +PORT="$2" -for i in `seq 1 $N`; do +for i in $(seq 1 $N); do # store key value pair KEY=$(head -c 10 /dev/urandom) VALUE="$i" - echo $(toHex $KEY=$VALUE) - curl 127.0.0.1:$PORT/broadcast_tx_sync?tx=0x$(toHex $KEY=$VALUE) + KV="$(toHex "$KEY=$VALUE")" + echo "$KV" + curl "127.0.0.1:$PORT/broadcast_tx_sync?tx=0x${KV}" + echo done - - diff --git a/spec/abci++/api.md b/spec/abci++/api.md index eebd8feef0..0eadee789e 100644 --- a/spec/abci++/api.md +++ b/spec/abci++/api.md @@ -142,7 +142,6 @@ ExecTxResult contains results of executing one individual transaction. | data | [bytes](#bytes) | | Result bytes, if any (arbitrary data, not interpreted by Tenderdash). | | log | [string](#string) | | The output of the application's logger. May be non-deterministic. | | info | [string](#string) | | Additional information. May be non-deterministic. | -| gas_wanted | [int64](#int64) | | Amount of gas requested for transaction. | | gas_used | [int64](#int64) | | Amount of gas consumed by transaction. | | events | [Event](#tendermint-abci-Event) | repeated | Type & Key-Value events for indexing transactions (e.g. by account). | | codespace | [string](#string) | | Namespace for the code. | @@ -155,13 +154,24 @@ ExecTxResult contains results of executing one individual transaction. ### ExtendVoteExtension -Provides a vote extension for signing. Each field is mandatory for filling +Provides a vote extension for signing. `type` and `extension` fields are mandatory for filling | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | -| type | [tendermint.types.VoteExtensionType](#tendermint-types-VoteExtensionType) | | Vote extension type can be either DEFAULT or THRESHOLD_RECOVER. The Tenderdash supports only THRESHOLD_RECOVER at this moment. | -| extension | [bytes](#bytes) | | Deterministic or (Non-Deterministic) extension provided by the sending validator's Application. 
| +| type | [tendermint.types.VoteExtensionType](#tendermint-types-VoteExtensionType) | | Vote extension type can be either DEFAULT, THRESHOLD_RECOVER or THRESHOLD_RECOVER_RAW. The Tenderdash supports only THRESHOLD_RECOVER and THRESHOLD_RECOVER_RAW at this moment. | +| extension | [bytes](#bytes) | | Deterministic or (Non-Deterministic) extension provided by the sending validator's Application. + +For THRESHOLD_RECOVER_RAW, it MUST be 32 bytes. + +Sign request ID that will be used to sign the vote extensions. Only applicable for THRESHOLD_RECOVER_RAW vote extension type. + +Tenderdash will use SHA256 checksum of `sign_request_id` when generating quorum signatures of THRESHOLD_RECOVER_RAW vote extensions. It MUST NOT be set for any other vote extension types. | +| sign_request_id | [bytes](#bytes) | optional | If not set, Tenderdash will generate it based on height and round. + +If set, it SHOULD be unique per voting round, and it MUST start with `dpevote` or `\x06plwdtx` prefix. + +Use with caution - it can have severe security consequences. | @@ -381,8 +391,8 @@ Finalize newly decided block. - The application must execute the transactions in full, in the order they appear in `RequestFinalizeBlock.txs`, before returning control to Tenderdash. Alternatively, it can commit the candidate state corresponding to the same block previously executed via `PrepareProposal` or `ProcessProposal`. -- `ResponseFinalizeBlock.tx_results[i].Code == 0` only if the _i_-th transaction is fully valid. -- Application is expected to persist its state at the end of this call, before calling `ResponseFinalizeBlock`. +- If ProcessProposal for the same arguments have succeeded, FinalizeBlock MUST always succeed. +- Application is expected to persist its state at the end of this call, before returning `ResponseFinalizeBlock`. - Later calls to `Query` can return proofs about the application state anchored in this Merkle root hash. - Use `ResponseFinalizeBlock.retain_height` with caution! If all nodes in the network remove historical @@ -433,7 +443,7 @@ Return information about the application state. Used to sync Tenderdash with the application during a handshake that happens on startup. The returned app_version will be included in the Header of every block. -Tenderdsah expects last_block_app_hash and last_block_height to be updated during Commit, +Tenderdash expects last_block_app_hash and last_block_height to be updated during Commit, ensuring that Commit is never called twice for the same block height. @@ -572,7 +582,7 @@ Prepare new block proposal, potentially altering list of transactions. their propose timeout goes off. - As a result of executing the prepared proposal, the Application may produce header events or transaction events. The Application must keep those events until a block is decided and then pass them on to Tenderdash via - `ResponseFinalizeBlock`. + `ResponsePrepareProposal`. - As a sanity check, Tenderdash will check the returned parameters for validity if the Application modified them. In particular, `ResponsePrepareProposal.tx_records` will be deemed invalid if - There is a duplicate transaction in the list. @@ -632,7 +642,7 @@ Note that, if _p_ has a non-`nil` _validValue_, Tenderdash will use it as propos | core_chain_locked_height | [uint32](#uint32) | | Core chain lock height to be used when signing this block. | | proposer_pro_tx_hash | [bytes](#bytes) | | ProTxHash of the original proposer of the block. 
| | proposed_app_version | [uint64](#uint64) | | Proposer's latest available app protocol version. | -| version | [tendermint.version.Consensus](#tendermint-version-Consensus) | | App and block version used to generate the block. | +| version | [tendermint.version.Consensus](#tendermint-version-Consensus) | | App and block version used to generate the block. App version included in the block can be modified by setting ResponsePrepareProposal.app_version. | | quorum_hash | [bytes](#bytes) | | quorum_hash contains hash of validator quorum that will sign the block | @@ -702,7 +712,7 @@ When a validator _p_ enters Tenderdash consensus round _r_, height _h_, in which | core_chain_lock_update | [tendermint.types.CoreChainLock](#tendermint-types-CoreChainLock) | | Next core-chain-lock-update for validation in ABCI. | | proposer_pro_tx_hash | [bytes](#bytes) | | ProTxHash of the original proposer of the block. | | proposed_app_version | [uint64](#uint64) | | Proposer's latest available app protocol version. | -| version | [tendermint.version.Consensus](#tendermint-version-Consensus) | | App and block version used to generate the block. | +| version | [tendermint.version.Consensus](#tendermint-version-Consensus) | | App and block version used to generate the block. App version MUST be verified by the app. | | quorum_hash | [bytes](#bytes) | | quorum_hash contains hash of validator quorum that will sign the block | @@ -902,7 +912,6 @@ nondeterministic | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | -| events | [Event](#tendermint-abci-Event) | repeated | Type & Key-Value events for indexing | | retain_height | [int64](#int64) | | Blocks below this height may be removed. Defaults to `0` (retain all). | @@ -1017,6 +1026,7 @@ nondeterministic | consensus_param_updates | [tendermint.types.ConsensusParams](#tendermint-types-ConsensusParams) | | Changes to consensus-critical gas, size, and other parameters that will be applied at next height. | | core_chain_lock_update | [tendermint.types.CoreChainLock](#tendermint-types-CoreChainLock) | | Core chain lock that will be used for next block. | | validator_set_update | [ValidatorSetUpdate](#tendermint-abci-ValidatorSetUpdate) | | Changes to validator set that will be applied at next height. | +| app_version | [uint64](#uint64) | | Application version that was used to create the current proposal. | @@ -1036,6 +1046,7 @@ nondeterministic | tx_results | [ExecTxResult](#tendermint-abci-ExecTxResult) | repeated | List of structures containing the data resulting from executing the transactions. | | consensus_param_updates | [tendermint.types.ConsensusParams](#tendermint-types-ConsensusParams) | | Changes to consensus-critical gas, size, and other parameters. | | validator_set_update | [ValidatorSetUpdate](#tendermint-abci-ValidatorSetUpdate) | | Changes to validator set (set voting power to 0 to remove). | +| events | [Event](#tendermint-abci-Event) | repeated | Type & Key-Value events for indexing | @@ -1168,7 +1179,21 @@ Validator ### ValidatorSetUpdate +ValidatorSetUpdate represents a change in the validator set. +It can be used to add, remove, or update a validator. + +Validator set update consists of multiple ValidatorUpdate records, +each of them can be used to add, remove, or update a validator, according to the +following rules: +1. If a validator with the same public key already exists in the validator set +and power is greater than 0, the existing validator will be updated with the new power. +2. 
If a validator with the same public key already exists in the validator set +and power is 0, the existing validator will be removed from the validator set. +3. If a validator with the same public key does not exist in the validator set and the power is greater than 0, +a new validator will be added to the validator set. +4. As a special case, if quorum hash has changed, all existing validators will be removed before applying +the new validator set update. | Field | Type | Label | Description | @@ -1313,6 +1338,7 @@ TxAction contains App-provided information on what to do with a transaction that | UNMODIFIED | 1 | The Application did not modify this transaction. | | ADDED | 2 | The Application added this transaction. | | REMOVED | 3 | The Application wants this transaction removed from the proposal and the mempool. | +| DELAYED | 4 | The Application wants this transaction removed from the proposal but not the mempool. | diff --git a/test/e2e/app/app.go b/test/e2e/app/app.go index aa250c4dd9..29121647f4 100644 --- a/test/e2e/app/app.go +++ b/test/e2e/app/app.go @@ -6,7 +6,7 @@ import ( "encoding/binary" "errors" "fmt" - "math/rand" + "math/big" "strconv" "strings" "time" @@ -17,6 +17,7 @@ import ( "github.com/dashpay/tenderdash/abci/example/code" "github.com/dashpay/tenderdash/abci/example/kvstore" abci "github.com/dashpay/tenderdash/abci/types" + "github.com/dashpay/tenderdash/crypto" "github.com/dashpay/tenderdash/libs/log" types1 "github.com/dashpay/tenderdash/proto/tendermint/types" "github.com/dashpay/tenderdash/types" @@ -50,6 +51,7 @@ func NewApplication(cfg kvstore.Config, opts ...kvstore.OptFunc) (*Application, kvstore.WithLogger(logger.With("module", "kvstore")), kvstore.WithVerifyTxFunc(verifyTx), kvstore.WithPrepareTxsFunc(prepareTxs), + kvstore.WithAppVersion(0), }, opts...) app := Application{ logger: logger.With("module", "kvstore"), @@ -85,22 +87,18 @@ func (app *Application) ExtendVote(_ context.Context, req *abci.RequestExtendVot ) return &abci.ResponseExtendVote{}, nil } - ext := make([]byte, binary.MaxVarintLen64) - // We don't care that these values are generated by a weak random number - // generator. It's just for test purposes. 
- //nolint:gosec // G404: Use of weak random number generator - num := rand.Int63n(voteExtensionMaxVal) - extLen := binary.PutVarint(ext, num) + ext := make([]byte, crypto.DefaultHashSize) + copy(ext, big.NewInt(lastHeight+1).Bytes()) + app.logger.Info("generated vote extension", - "num", num, - "ext", fmt.Sprintf("%x", ext[:extLen]), - "state.Height", lastHeight, + "ext", fmt.Sprintf("%x", ext), + "state.Height", lastHeight+1, ) return &abci.ResponseExtendVote{ VoteExtensions: []*abci.ExtendVoteExtension{ { - Type: types1.VoteExtensionType_DEFAULT, - Extension: ext[:extLen], + Type: types1.VoteExtensionType_THRESHOLD_RECOVER_RAW, + Extension: ext, }, { Type: types1.VoteExtensionType_THRESHOLD_RECOVER, @@ -161,6 +159,14 @@ func (app *Application) FinalizeBlock(ctx context.Context, req *abci.RequestFina app.mu.Lock() defer app.mu.Unlock() + for i, ext := range req.Commit.ThresholdVoteExtensions { + if len(ext.Signature) == 0 { + return &abci.ResponseFinalizeBlock{}, fmt.Errorf("vote extension signature is empty: %+v", ext) + } + + app.logger.Debug("vote extension received in FinalizeBlock", "extension", ext, "i", i) + } + prevState := kvstore.NewKvState(db.NewMemDB(), 0) if err := app.LastCommittedState.Copy(prevState); err != nil { return &abci.ResponseFinalizeBlock{}, err @@ -201,15 +207,14 @@ func parseVoteExtension(ext []byte) (int64, error) { } func prepareTxs(req abci.RequestPrepareProposal) ([]*abci.TxRecord, error) { - var ( - totalBytes int64 - txRecords []*abci.TxRecord - ) - + txRecords := kvstore.TxRecords{ + Size: 0, + Limit: req.MaxTxBytes, + Txs: make([]*abci.TxRecord, 0, len(req.Txs)+1), + } txs := req.Txs extCount := len(req.LocalLastCommit.ThresholdVoteExtensions) - txRecords = make([]*abci.TxRecord, 0, len(txs)+1) extTxPrefix := VoteExtensionKey + "=" extTx := []byte(fmt.Sprintf("%s%d", extTxPrefix, extCount)) @@ -218,35 +223,42 @@ func prepareTxs(req abci.RequestPrepareProposal) ([]*abci.TxRecord, error) { for _, tx := range txs { // we only modify transactions if there is at least 1 extension, eg. extCount > 0 if extCount > 0 && strings.HasPrefix(string(tx), extTxPrefix) { - txRecords = append(txRecords, &abci.TxRecord{ + if _, err := txRecords.Add(&abci.TxRecord{ Action: abci.TxRecord_REMOVED, Tx: tx, - }) - totalBytes -= int64(len(tx)) + }); err != nil { + return nil, err + } } else { - txRecords = append(txRecords, &abci.TxRecord{ + if _, err := txRecords.Add(&abci.TxRecord{ Action: abci.TxRecord_UNMODIFIED, Tx: tx, - }) - totalBytes += int64(len(tx)) + }); err != nil { + return nil, err + } } } // we only modify transactions if there is at least 1 extension, eg. 
extCount > 0 if extCount > 0 { - if totalBytes+int64(len(extTx)) < req.MaxTxBytes { - txRecords = append(txRecords, &abci.TxRecord{ - Action: abci.TxRecord_ADDED, - Tx: extTx, - }) + tx := abci.TxRecord{ + Action: abci.TxRecord_ADDED, + Tx: extTx, + } + if _, err := txRecords.Add(&tx); err != nil { + return nil, err } } - return txRecords, nil + return txRecords.Txs, nil } func verifyTx(tx types.Tx, _ abci.CheckTxType) (abci.ResponseCheckTx, error) { split := bytes.SplitN(tx, []byte{'='}, 2) + if len(split) != 2 { + return abci.ResponseCheckTx{Code: code.CodeTypeOK, GasWanted: 1}, nil + } + k, v := split[0], split[1] if string(k) == VoteExtensionKey { @@ -256,5 +268,14 @@ func verifyTx(tx types.Tx, _ abci.CheckTxType) (abci.ResponseCheckTx, error) { fmt.Errorf("malformed vote extension transaction %X=%X: %w", k, v, err) } } - return abci.ResponseCheckTx{Code: code.CodeTypeOK, GasWanted: 1}, nil + // For TestApp_TxTooBig we need to preserve order of transactions + var priority int64 + // in this case, k is defined as fmt.Sprintf("testapp-big-tx-%v-%08x-%d=", node.Name, session, i) + // but in general, we take last digit as inverse priority + split = bytes.Split(k, []byte{'-'}) + if n, err := strconv.ParseInt(string(split[len(split)-1]), 10, 64); err == nil { + priority = 1000000000 - n + } + + return abci.ResponseCheckTx{Code: code.CodeTypeOK, GasWanted: 1, Priority: priority}, nil } diff --git a/test/e2e/app/app_test.go b/test/e2e/app/app_test.go index a238be37b3..c3cb484007 100644 --- a/test/e2e/app/app_test.go +++ b/test/e2e/app/app_test.go @@ -63,7 +63,7 @@ func TestPrepareFinalize(t *testing.T) { txs := make([][]byte, 0, len(respPrep.TxRecords)) bz := &bytes.Buffer{} for _, tx := range respPrep.TxRecords { - if tx.Action != abci.TxRecord_REMOVED { + if tx.Action != abci.TxRecord_REMOVED && tx.Action != abci.TxRecord_DELAYED { txs = append(txs, tx.Tx) n, err := bz.Write(tx.Tx) assert.NoError(t, err) @@ -99,9 +99,12 @@ func TestPrepareFinalize(t *testing.T) { func TestPrepareProposal(t *testing.T) { testCases := []struct { - request abci.RequestPrepareProposal + request abci.RequestPrepareProposal + expectTxCount int }{ + // valid { + expectTxCount: 2, // one added due to vote extensions request: abci.RequestPrepareProposal{ Height: 1, Time: time.Now(), @@ -129,6 +132,25 @@ Ym1saWRueGJDSmpZdXBUTkNNdFpMcUdC`)}, }, }, }, + // too many txs + { + expectTxCount: 5, + request: abci.RequestPrepareProposal{ + Height: 1, + Time: time.Now(), + MaxTxBytes: 32, + Txs: [][]byte{ + {1, 2, 3, 4, 5, 6, 7, 8}, // 8 bytes + {2, 2, 3, 4, 5, 6, 7, 8}, // 8+8=16 bytes + {3, 2, 3, 4, 5, 6, 7, 8}, // 16+8=24 bytes + {4, 2, 3, 4, 5, 6, 7}, // 24+7=31 bytes + {5, 2, 3, 4, 5, 6, 7, 8}, // 31+8=39 bytes - this one does not fit + {6}, // 31+1=32 bytes - this one fits + {7}, // 32+1=33 bytes - this one does not fit + }, + LocalLastCommit: abci.CommitInfo{}, + }, + }, } ctx := context.TODO() @@ -138,6 +160,16 @@ Ym1saWRueGJDSmpZdXBUTkNNdFpMcUdC`)}, respPrep, err := app.PrepareProposal(ctx, &tc.request) require.NoError(t, err) assert.NotEmpty(t, respPrep.AppHash) + + txCount := 0 + for _, tx := range respPrep.TxRecords { + if tx.Action != abci.TxRecord_REMOVED && tx.Action != abci.TxRecord_DELAYED { + txCount++ + } else { + t.Logf("removed or delayed tx: %+v", tx) + } + } + assert.Equal(t, tc.expectTxCount, txCount) }) } diff --git a/test/e2e/docker/Dockerfile b/test/e2e/docker/Dockerfile index 44d2e2c9f1..db5c852c71 100644 --- a/test/e2e/docker/Dockerfile +++ b/test/e2e/docker/Dockerfile @@ -1,6 +1,6 @@ ## Stage 1 
and 2 is copied from /DOCKER/Dockerfile -ARG ALIPNE_VERSION=3.17 -ARG GOLANG_VERSION=1.19 +ARG ALIPNE_VERSION=3.19 +ARG GOLANG_VERSION=1.22 ################################# # STAGE 1: install dependencies # ################################# @@ -8,7 +8,7 @@ FROM golang:${GOLANG_VERSION}-alpine${ALIPNE_VERSION} AS base RUN apk update && \ apk upgrade && \ - apk --no-cache add bash git gmp-dev sudo cmake build-base python3-dev libpcap-dev leveldb-dev && \ + apk --no-cache add bash git gmp-dev sudo cmake build-base libpcap-dev leveldb-dev && \ rm -rf /var/cache/apk/* WORKDIR /src/bls diff --git a/test/e2e/generator/generate.go b/test/e2e/generator/generate.go index 66143562cd..2fbc995cd3 100644 --- a/test/e2e/generator/generate.go +++ b/test/e2e/generator/generate.go @@ -1,14 +1,12 @@ package main import ( - "encoding/json" "fmt" "math/rand" "sort" "strings" "time" - "github.com/dashpay/tenderdash/abci/example/kvstore" e2e "github.com/dashpay/tenderdash/test/e2e/pkg" "github.com/dashpay/tenderdash/types" ) @@ -20,7 +18,11 @@ var ( "topology": {"single", "quad", "large"}, "initialState": { "{}", - `{"items": {"initial01": "a", "initial02": "b", "initial03": "c"}}`, + `{} + {"key":"initial01","value":"a"} + {"key":"initial02","value":"b"} + {"key":"initial03","value":"c"} + `, }, "validators": {"genesis", "initchain"}, @@ -113,13 +115,8 @@ type Options struct { // generateTestnet generates a single testnet with the given options. func generateTestnet(r *rand.Rand, opt map[string]interface{}) (e2e.Manifest, error) { - initialState := kvstore.StateExport{} - if opt["initialState"] != nil { - data := opt["initialState"].(string) - if err := json.Unmarshal([]byte(data), &initialState); err != nil { - return e2e.Manifest{}, fmt.Errorf("unmarshal initialState: %w", err) - } - } + initialState := opt["initialState"].(string) + manifest := e2e.Manifest{ IPv6: ipv6.Choose(r).(bool), InitialState: initialState, @@ -186,7 +183,7 @@ func generateTestnet(r *rand.Rand, opt map[string]interface{}) (e2e.Manifest, er // prepare the list of the validator names validatorNames := generateValidatorNames(numValidators) valPlr := validatorUpdatesPopulator{ - rand: rand.New(rand.NewSource(time.Now().UnixNano())), // nolint:gosec + rand: rand.New(rand.NewSource(time.Now().UnixNano())), //nolint:gosec initialHeight: manifest.InitialHeight, validatorNames: validatorNames, quorumMembers: topology.quorumMembersCount, diff --git a/test/e2e/generator/main.go b/test/e2e/generator/main.go index d1349aa9bc..ab1f7491a0 100644 --- a/test/e2e/generator/main.go +++ b/test/e2e/generator/main.go @@ -86,7 +86,7 @@ func (cli *CLI) generate() error { return err } - // nolint: gosec + //nolint:gosec // G404: Use of weak random number generator (math/rand instead of crypto/rand) manifests, err := Generate(rand.New(rand.NewSource(randomSeed)), cli.opts) if err != nil { diff --git a/test/e2e/networks/dashcore.toml b/test/e2e/networks/dashcore.toml index e403206c16..aa70916364 100644 --- a/test/e2e/networks/dashcore.toml +++ b/test/e2e/networks/dashcore.toml @@ -2,11 +2,15 @@ # functionality with a single network. 
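The generator's `initialState` option (and the matching `initial_state` manifest entries below) is now a stream of concatenated JSON documents rather than a single `StateExport` object. One way to consume such a stream is `encoding/json`'s `Decoder`; this is only an illustration of the format shown above, and the kvstore app's own `Load` method defines the authoritative parsing.

```go
package main

import (
	"encoding/json"
	"fmt"
	"io"
	"strings"
)

// kvItem matches the {"key":...,"value":...} documents used for the
// initial state in this diff.
type kvItem struct {
	Key   string `json:"key"`
	Value string `json:"value"`
}

func main() {
	initialState := `{}
{"key":"initial01","value":"a"}
{"key":"initial02","value":"b"}
{"key":"initial03","value":"c"}`

	dec := json.NewDecoder(strings.NewReader(initialState))
	items := map[string]string{}
	for {
		var it kvItem
		if err := dec.Decode(&it); err == io.EOF {
			break
		} else if err != nil {
			panic(err)
		}
		if it.Key != "" { // the leading {} decodes into an empty item
			items[it.Key] = it.Value
		}
	}
	fmt.Println(items) // map[initial01:a initial02:b initial03:c]
}
```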
initial_height = 1000 -initial_state = {items = {initial01 = "a", initial02 = "b", initial03 = "c"}} +initial_state = '{} { "key":"initial01","value":"a"}{"key":"initial02","value":"b"}{"key":"initial03","value":"c"}' initial_core_chain_locked_height = 3400 queue_type = "priority" log_level = "debug" +# Tune block size for TestApp_TxTooBig +max_block_size = 262144 # 0.25 MB +max_evidence_size = 52428 # 50 kB + [chainlock_updates] 1000 = 3450 1004 = 3451 @@ -76,7 +80,7 @@ privval_protocol = "dashcore" perturb = ["pause"] [node.validator05] -start_at = 1005 # Becomes part of the validator set at 1010 +start_at = 1005 # Becomes part of the validator set at 1010 seeds = ["seed01"] database = "cleveldb" block_sync = "v0" @@ -95,7 +99,7 @@ retain_blocks = 10 perturb = ["restart"] [node.light01] -mode= "light" -start_at= 1010 +mode = "light" +start_at = 1010 privval_protocol = "dashcore" persistent_peers = ["validator01", "validator02", "validator03"] diff --git a/test/e2e/networks/rotate.toml b/test/e2e/networks/rotate.toml index 5d705b5baf..736e56765c 100644 --- a/test/e2e/networks/rotate.toml +++ b/test/e2e/networks/rotate.toml @@ -2,12 +2,16 @@ # functionality with a single network. initial_height = 1000 -initial_state = {items={ initial01 = "a", initial02 = "b", initial03 = "c" }} +initial_state = '{}{ "key": "initial01","value":"a"} {"key":"initial02","value":"b"} {"key":"initial03" ,"value":"c" }' initial_core_chain_locked_height = 3400 -init_app_core_chain_locked_height = 2308 # should override initial_core_chain_locked_height +init_app_core_chain_locked_height = 2308 # should override initial_core_chain_locked_height queue_type = "priority" log_level = "debug" +# Tune block size for TestApp_TxTooBig +max_block_size = 262144 # 0.25 MB +max_evidence_size = 52428 # 50 kB + [chainlock_updates] 1000 = 3450 1004 = 3451 @@ -92,7 +96,7 @@ privval_protocol = "dashcore" perturb = ["disconnect"] [node.validator06] -start_at = 1005 # Becomes part of the validator set at 1030 to ensure there is enough time for state sync +start_at = 1005 # Becomes part of the validator set at 1030 to ensure there is enough time for state sync seeds = ["seed01"] snapshot_interval = 5 block_sync = "v0" @@ -137,13 +141,19 @@ start_at = 1030 mode = "full" block_sync = "v0" #state_sync = "rpc" -persistent_peers = ["validator01", "validator02", "validator03", "validator04", "validator05"] +persistent_peers = [ + "validator01", + "validator02", + "validator03", + "validator04", + "validator05", +] privval_protocol = "dashcore" retain_blocks = 10 perturb = ["restart"] [node.light01] -mode= "light" -start_at= 1035 +mode = "light" +start_at = 1035 privval_protocol = "dashcore" persistent_peers = ["validator01", "validator02", "validator03"] diff --git a/test/e2e/networks/simple.toml b/test/e2e/networks/simple.toml index 96b81f79fe..81fcfa5b0b 100644 --- a/test/e2e/networks/simple.toml +++ b/test/e2e/networks/simple.toml @@ -1,3 +1,6 @@ +max_block_size = 10240 +max_evidence_size = 4096 + [node.validator01] [node.validator02] [node.validator03] diff --git a/test/e2e/node/config.go b/test/e2e/node/config.go index 8eb9ee74a7..edca4e9cb6 100644 --- a/test/e2e/node/config.go +++ b/test/e2e/node/config.go @@ -1,4 +1,3 @@ -// nolint: goconst package main import ( diff --git a/test/e2e/pkg/infra/docker/infra.go b/test/e2e/pkg/infra/docker/infra.go index f0b0902c8c..cf412d8fd3 100644 --- a/test/e2e/pkg/infra/docker/infra.go +++ b/test/e2e/pkg/infra/docker/infra.go @@ -30,12 +30,12 @@ func NewTestnetInfra(logger log.Logger, testnet 
*e2e.Testnet) infra.TestnetInfra } } -func (ti *testnetInfra) Setup(ctx context.Context) error { +func (ti *testnetInfra) Setup(_ctx context.Context) error { compose, err := makeDockerCompose(ti.testnet) if err != nil { return err } - // nolint: gosec + //nolint:gosec // G306: Expect WriteFile permissions to be 0600 or less err = os.WriteFile(filepath.Join(ti.testnet.Dir, "docker-compose.yml"), compose, 0644) if err != nil { diff --git a/test/e2e/pkg/manifest.go b/test/e2e/pkg/manifest.go index 9bd98e0a64..a070b2b398 100644 --- a/test/e2e/pkg/manifest.go +++ b/test/e2e/pkg/manifest.go @@ -7,8 +7,6 @@ import ( "time" "github.com/BurntSushi/toml" - - "github.com/dashpay/tenderdash/abci/example/kvstore" ) // Manifest represents a TOML testnet manifest. @@ -31,7 +29,7 @@ type Manifest struct { // InitialState is an initial set of key/value pairs for the application, // set in genesis. Defaults to nothing. - InitialState kvstore.StateExport `toml:"initial_state"` + InitialState string `toml:"initial_state"` // Validators is the initial validator set in genesis, given as node names // and power (for Dash power must all be set to default power): @@ -109,6 +107,8 @@ type Manifest struct { CheckTxDelayMS uint64 `toml:"check_tx_delay_ms"` VoteExtensionDelayMS uint64 `toml:"vote_extension_delay_ms"` FinalizeBlockDelayMS uint64 `toml:"finalize_block_delay_ms"` + MaxBlockSize uint64 `toml:"max_block_size"` + MaxEvidenceSize uint64 `toml:"max_evidence_size"` } // ManifestNode represents a node in a testnet manifest. diff --git a/test/e2e/pkg/mockcoreserver/core_server.go b/test/e2e/pkg/mockcoreserver/core_server.go index 9a3880f837..4b6025e125 100644 --- a/test/e2e/pkg/mockcoreserver/core_server.go +++ b/test/e2e/pkg/mockcoreserver/core_server.go @@ -9,8 +9,8 @@ import ( "github.com/dashpay/dashd-go/btcjson" "github.com/dashpay/tenderdash/crypto" - tmbytes "github.com/dashpay/tenderdash/libs/bytes" "github.com/dashpay/tenderdash/privval" + "github.com/dashpay/tenderdash/types" ) // CoreServer is an interface of a mock core-server @@ -93,13 +93,8 @@ func (c *MockCoreServer) QuorumSign(ctx context.Context, cmd btcjson.QuorumCmd) panic(err) } quorumHash := crypto.QuorumHash(quorumHashBytes) + signID := types.NewSignItemFromHash(*cmd.LLMQType, quorumHash, reqID, msgHash).SignHash - signID := crypto.SignID( - *cmd.LLMQType, - tmbytes.Reverse(quorumHash), - tmbytes.Reverse(reqID), - tmbytes.Reverse(msgHash), - ) privateKey, err := c.FilePV.GetPrivateKey(ctx, quorumHash) if err != nil { panic(err) @@ -142,13 +137,8 @@ func (c *MockCoreServer) QuorumVerify(ctx context.Context, cmd btcjson.QuorumCmd if err != nil { panic(err) } + signID := types.NewSignItemFromHash(*cmd.LLMQType, quorumHash, reqID, msgHash).SignHash - signID := crypto.SignID( - *cmd.LLMQType, - tmbytes.Reverse(quorumHash), - tmbytes.Reverse(reqID), - tmbytes.Reverse(msgHash), - ) thresholdPublicKey, err := c.FilePV.GetThresholdPublicKey(ctx, quorumHash) if err != nil { panic(err) @@ -219,7 +209,7 @@ func (c *StaticCoreServer) GetNetworkInfo(_ context.Context, _ btcjson.GetNetwor } // Ping ... 
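For reference, the new manifest knobs introduced above (`initial_state` as a plain string, plus `max_block_size` and `max_evidence_size`) decode with the BurntSushi/toml package the manifest loader already uses. A trimmed-down sketch, with a struct that keeps only the fields relevant to this change:

```go
package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

// trimmedManifest keeps only the fields relevant to this change; the real
// e2e Manifest has many more.
type trimmedManifest struct {
	InitialState    string `toml:"initial_state"`
	MaxBlockSize    uint64 `toml:"max_block_size"`
	MaxEvidenceSize uint64 `toml:"max_evidence_size"`
}

func main() {
	doc := `
initial_state = '{} {"key":"initial01","value":"a"}'
max_block_size = 262144
max_evidence_size = 52428
`
	var m trimmedManifest
	if _, err := toml.Decode(doc, &m); err != nil {
		panic(err)
	}
	fmt.Println(m.MaxBlockSize, m.MaxEvidenceSize, m.InitialState)
}
```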
-func (c *StaticCoreServer) Ping(_ context.Context, cmd btcjson.PingCmd) error { +func (c *StaticCoreServer) Ping(_ context.Context, _cmd btcjson.PingCmd) error { return nil } diff --git a/test/e2e/pkg/mockcoreserver/server.go b/test/e2e/pkg/mockcoreserver/server.go index cbe1f2149a..67cb10fbb5 100644 --- a/test/e2e/pkg/mockcoreserver/server.go +++ b/test/e2e/pkg/mockcoreserver/server.go @@ -9,6 +9,7 @@ import ( "log" "net" "net/http" + "time" sync "github.com/sasha-s/go-deadlock" @@ -57,8 +58,9 @@ func (s *HTTPServer) On(pattern string) *Call { func (s *HTTPServer) Start() { s.guard.Lock() s.httpSrv = &http.Server{ - Addr: s.addr, - Handler: s.mux, + Addr: s.addr, + Handler: s.mux, + ReadHeaderTimeout: 5 * time.Second, } s.guard.Unlock() l, err := net.Listen("tcp", s.addr) diff --git a/test/e2e/pkg/testnet.go b/test/e2e/pkg/testnet.go index 6ec8295ea4..44ca245d28 100644 --- a/test/e2e/pkg/testnet.go +++ b/test/e2e/pkg/testnet.go @@ -15,7 +15,6 @@ import ( "github.com/dashpay/dashd-go/btcjson" - "github.com/dashpay/tenderdash/abci/example/kvstore" abci "github.com/dashpay/tenderdash/abci/types" "github.com/dashpay/tenderdash/crypto" "github.com/dashpay/tenderdash/crypto/bls12381" @@ -77,7 +76,7 @@ type Testnet struct { Dir string IP *net.IPNet InitialHeight int64 - InitialState kvstore.StateExport + InitialState string Validators ValidatorsMap ValidatorUpdates map[int64]ValidatorsMap Nodes []*Node @@ -91,6 +90,8 @@ type Testnet struct { CheckTxDelayMS int VoteExtensionDelayMS int FinalizeBlockDelayMS int + MaxBlockSize int64 + MaxEvidenceSize int64 // Tenderdash-specific fields GenesisCoreHeight uint32 // InitialCoreHeight is a core height put into genesis file @@ -208,6 +209,8 @@ func LoadTestnet(file string) (*Testnet, error) { CheckTxDelayMS: int(manifest.CheckTxDelayMS), VoteExtensionDelayMS: int(manifest.VoteExtensionDelayMS), FinalizeBlockDelayMS: int(manifest.FinalizeBlockDelayMS), + MaxBlockSize: int64(manifest.MaxBlockSize), + MaxEvidenceSize: int64(manifest.MaxEvidenceSize), ThresholdPublicKey: ld.ThresholdPubKey, ThresholdPublicKeyUpdates: map[int64]crypto.PubKey{}, QuorumType: btcjson.LLMQType(quorumType), @@ -663,7 +666,7 @@ type keyGenerator struct { } func newKeyGenerator(seed int64) *keyGenerator { - // nolint: gosec + //nolint:gosec // G404: Use of weak random number generator (math/rand instead of crypto/rand) return &keyGenerator{ random: rand.New(rand.NewSource(seed)), diff --git a/test/e2e/runner/main.go b/test/e2e/runner/main.go index e50b1eafd9..c63abf53ae 100644 --- a/test/e2e/runner/main.go +++ b/test/e2e/runner/main.go @@ -89,7 +89,7 @@ func NewCLI(logger log.Logger) *CLI { return err } - r := rand.New(rand.NewSource(randomSeed)) // nolint: gosec + r := rand.New(rand.NewSource(randomSeed)) //nolint:gosec chLoadResult := make(chan error) ctx, cancel := context.WithCancel(cmd.Context()) @@ -241,7 +241,7 @@ func NewCLI(logger log.Logger) *CLI { return Load( cmd.Context(), logger, - rand.New(rand.NewSource(randomSeed)), // nolint: gosec + rand.New(rand.NewSource(randomSeed)), //nolint:gosec cli.testnet, ) }, @@ -264,7 +264,7 @@ func NewCLI(logger log.Logger) *CLI { return InjectEvidence( cmd.Context(), logger, - rand.New(rand.NewSource(randomSeed)), // nolint: gosec + rand.New(rand.NewSource(randomSeed)), //nolint:gosec cli.testnet, amount, ) @@ -350,7 +350,7 @@ Does not run any perbutations. 
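Giving the mock core server a `ReadHeaderTimeout` bounds how long a client may take to send its request headers, which is what linters such as gosec flag on a bare `http.Server` (a Slowloris-style risk). A standalone sketch of the same configuration, unrelated to the mock server's actual handlers:

```go
package main

import (
	"fmt"
	"net/http"
	"time"
)

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/", func(w http.ResponseWriter, _ *http.Request) {
		fmt.Fprintln(w, "ok")
	})

	srv := &http.Server{
		Addr:    "127.0.0.1:0",
		Handler: mux,
		// Bound how long a client may take to send request headers.
		ReadHeaderTimeout: 5 * time.Second,
	}
	_ = srv // srv.ListenAndServe() would block; omitted in this sketch
}
```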
ctx, cancel := context.WithCancel(cmd.Context()) defer cancel() - r := rand.New(rand.NewSource(randomSeed)) // nolint: gosec + r := rand.New(rand.NewSource(randomSeed)) //nolint:gosec lctx, loadCancel := context.WithCancel(ctx) defer loadCancel() diff --git a/test/e2e/runner/setup.go b/test/e2e/runner/setup.go index 7e6a3ae23b..a5395ba8fd 100644 --- a/test/e2e/runner/setup.go +++ b/test/e2e/runner/setup.go @@ -5,7 +5,6 @@ import ( "context" "encoding/base64" "encoding/hex" - "encoding/json" "errors" "fmt" "os" @@ -98,7 +97,7 @@ func Setup(ctx context.Context, logger log.Logger, testnet *e2e.Testnet, ti infr if err != nil { return err } - // nolint: gosec + //nolint:gosec // G306: Expect WriteFile permissions to be 0600 or less err = os.WriteFile(filepath.Join(nodeDir, "config", "app.toml"), appCfg, 0644) if err != nil { @@ -174,6 +173,12 @@ func MakeGenesis(testnet *e2e.Testnet, genesisTime time.Time) (types.GenesisDoc, append(genesis.ConsensusParams.Validator.PubKeyTypes, types.ABCIPubKeyTypeBLS12381) genesis.ConsensusParams.Evidence.MaxAgeNumBlocks = e2e.EvidenceAgeHeight genesis.ConsensusParams.Evidence.MaxAgeDuration = e2e.EvidenceAgeTime + if testnet.MaxEvidenceSize > 0 { + genesis.ConsensusParams.Evidence.MaxBytes = testnet.MaxEvidenceSize + } + if testnet.MaxBlockSize > 0 { + genesis.ConsensusParams.Block.MaxBytes = testnet.MaxBlockSize + } for validator, validatorUpdate := range testnet.Validators { if validatorUpdate.PubKey == nil { @@ -196,13 +201,9 @@ func MakeGenesis(testnet *e2e.Testnet, genesisTime time.Time) (types.GenesisDoc, sort.Slice(genesis.Validators, func(i, j int) bool { return strings.Compare(genesis.Validators[i].Name, genesis.Validators[j].Name) == -1 }) - if len(testnet.InitialState.Items) > 0 { - appState, err := json.Marshal(testnet.InitialState) - if err != nil { - return genesis, err - } - genesis.AppState = appState - } + + genesis.AppState = []byte(testnet.InitialState) + return genesis, genesis.ValidateAndComplete() } @@ -210,13 +211,15 @@ func MakeGenesis(testnet *e2e.Testnet, genesisTime time.Time) (types.GenesisDoc, func MakeConfig(node *e2e.Node) (*config.Config, error) { cfg := config.DefaultConfig() cfg.Moniker = node.Name - cfg.ProxyApp = AppAddressTCP + cfg.Abci.Address = AppAddressTCP cfg.TxIndex = config.TestTxIndexConfig() if node.LogLevel != "" { cfg.LogLevel = node.LogLevel } + cfg.Mempool.TxEnqueueTimeout = 10 * time.Millisecond + cfg.RPC.ListenAddress = "tcp://0.0.0.0:26657" cfg.RPC.PprofListenAddress = ":6060" cfg.P2P.ExternalAddress = fmt.Sprintf("tcp://%v", node.AddressP2P(false)) @@ -229,15 +232,15 @@ func MakeConfig(node *e2e.Node) (*config.Config, error) { switch node.Testnet.ABCIProtocol { case e2e.ProtocolUNIX: - cfg.ProxyApp = AppAddressUNIX + cfg.Abci.Address = AppAddressUNIX case e2e.ProtocolTCP: - cfg.ProxyApp = AppAddressTCP + cfg.Abci.Address = AppAddressTCP case e2e.ProtocolGRPC: - cfg.ProxyApp = AppAddressTCP - cfg.ABCI = ABCIGRPC + cfg.Abci.Address = AppAddressTCP + cfg.Abci.Transport = ABCIGRPC case e2e.ProtocolBuiltin: - cfg.ProxyApp = "" - cfg.ABCI = "" + cfg.Abci.Address = "" + cfg.Abci.Transport = "" default: return nil, fmt.Errorf("unexpected ABCI protocol setting %q", node.Testnet.ABCIProtocol) } diff --git a/test/e2e/runner/test.go b/test/e2e/runner/test.go index c44f58acd4..79886fd3ea 100644 --- a/test/e2e/runner/test.go +++ b/test/e2e/runner/test.go @@ -15,5 +15,5 @@ func Test(ctx context.Context, testnet *e2e.Testnet) error { return err } - return exec.CommandVerbose(ctx, "./build/tests", "-test.count=1", 
"-test.v", "-test.timeout=5m") + return exec.CommandVerbose(ctx, "./build/tests", "-test.count=1", "-test.v", "-test.timeout=10m") } diff --git a/test/e2e/tests/app_test.go b/test/e2e/tests/app_test.go index 7fe54e4f06..77186b95f3 100644 --- a/test/e2e/tests/app_test.go +++ b/test/e2e/tests/app_test.go @@ -4,14 +4,20 @@ import ( "bytes" "context" "fmt" + "math/big" "math/rand" + "os" + "sort" "testing" "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + db "github.com/tendermint/tm-db" "github.com/dashpay/tenderdash/abci/example/code" + "github.com/dashpay/tenderdash/abci/example/kvstore" + tmbytes "github.com/dashpay/tenderdash/libs/bytes" tmrand "github.com/dashpay/tenderdash/libs/rand" "github.com/dashpay/tenderdash/rpc/client/http" e2e "github.com/dashpay/tenderdash/test/e2e/pkg" @@ -25,17 +31,24 @@ const ( // Tests that any initial state given in genesis has made it into the app. func TestApp_InitialState(t *testing.T) { testNode(t, func(ctx context.Context, t *testing.T, node e2e.Node) { - if len(node.Testnet.InitialState.Items) == 0 { - return - } client, err := node.Client() require.NoError(t, err) - for k, v := range node.Testnet.InitialState.Items { - resp, err := client.ABCIQuery(ctx, "", []byte(k)) + state := kvstore.NewKvState(db.NewMemDB(), 0) + err = state.Load(bytes.NewBufferString(node.Testnet.InitialState)) + require.NoError(t, err) + iter, err := state.Iterator(nil, nil) + require.NoError(t, err) + + for iter.Valid() { + k := iter.Key() + v := iter.Value() + resp, err := client.ABCIQuery(ctx, "", k) require.NoError(t, err) - assert.Equal(t, k, string(resp.Response.Key)) - assert.Equal(t, v, string(resp.Response.Value)) + assert.Equal(t, k, resp.Response.Key) + assert.Equal(t, v, resp.Response.Value) + + iter.Next() } }) } @@ -189,3 +202,173 @@ func TestApp_Tx(t *testing.T) { } } + +// Given transactions which take more than the block size, +// when I submit them to the node, +// then the first transaction should be committed before the last one. 
+func TestApp_TxTooBig(t *testing.T) { + // Pair of txs, last must be in block later than first + type txPair struct { + firstTxHash tmbytes.HexBytes + lastTxHash tmbytes.HexBytes + } + + /// timeout for broadcast to single node + const broadcastTimeout = 10 * time.Second + /// Timeout to read response from single node + const readTimeout = 1 * time.Second + /// Time to process whole mempool + const includeInBlockTimeout = 120 * time.Second + + mainCtx, cancel := context.WithCancel(context.Background()) + defer cancel() + + testnet := loadTestnet(t) + nodes := testnet.Nodes + + if name := os.Getenv("E2E_NODE"); name != "" { + node := testnet.LookupNode(name) + require.NotNil(t, node, "node %q not found in testnet %q", name, testnet.Name) + nodes = []*e2e.Node{node} + } else { + sort.Slice(nodes, func(i, j int) bool { + return nodes[i].Name < nodes[j].Name + }) + } + + // we will use last client to check if txs were included in block, so we + // define it outside the loop + var client *http.HTTP + outcome := make([]txPair, 0, len(nodes)) + + start := time.Now() + /// Send to each node more txs than we can fit into block + for _, node := range nodes { + ctx, cancel := context.WithTimeout(mainCtx, broadcastTimeout) + defer cancel() + + if ctx.Err() != nil { + t.Fatalf("context canceled before broadcasting to all nodes") + } + node := *node + + if node.Stateless() { + continue + } + + t.Logf("broadcasting to %s", node.Name) + + session := rand.Int63() + + var err error + client, err = node.Client() + require.NoError(t, err) + + // FIXME: ConsensusParams is broken for last height, this is just workaround + status, err := client.Status(ctx) + assert.NoError(t, err) + cp, err := client.ConsensusParams(ctx, &status.SyncInfo.LatestBlockHeight) + assert.NoError(t, err) + + // ensure we have more txs than fits the block + TxPayloadSize := int(cp.ConsensusParams.Block.MaxBytes / 100) // 1% of block size + numTxs := 101 + + tx := make(types.Tx, TxPayloadSize) // first tx is just zeros + + var firstTxHash []byte + var key string + + for i := 0; i < numTxs; i++ { + key = fmt.Sprintf("testapp-big-tx-%v-%08x-%d=", node.Name, session, i) + copy(tx, key) + + payloadOffset := len(tx) - 8 // where we put the `i` into the payload + assert.Greater(t, payloadOffset, len(key)) + + big.NewInt(int64(i)).FillBytes(tx[payloadOffset:]) + assert.Len(t, tx, TxPayloadSize) + + if i == 0 { + firstTxHash = tx.Hash() + } + + _, err = client.BroadcastTxAsync(ctx, tx) + + assert.NoError(t, err, "failed to broadcast tx %06x", i) + } + + outcome = append(outcome, txPair{ + firstTxHash: firstTxHash, + lastTxHash: tx.Hash(), + }) + } + + t.Logf("submitted txs in %s", time.Since(start).String()) + + successful := 0 + // now we check if these txs were committed within timeout + require.Eventuallyf(t, func() bool { + failed := false + successful = 0 + for _, item := range outcome { + ctx, cancel := context.WithTimeout(mainCtx, readTimeout) + defer cancel() + + firstTxHash := item.firstTxHash + lastTxHash := item.lastTxHash + + // last tx should be committed later than first + lastTxResp, err := client.Tx(ctx, lastTxHash, false) + if err == nil { + assert.Equal(t, lastTxHash, lastTxResp.Tx.Hash()) + + // fetch first tx + firstTxResp, err := client.Tx(ctx, firstTxHash, false) + assert.NoError(t, err, "first tx should be committed before second") + assert.EqualValues(t, firstTxHash, firstTxResp.Tx.Hash()) + + firstTxBlock, err := client.Header(ctx, &firstTxResp.Height) + assert.NoError(t, err) + lastTxBlock, err := client.Header(ctx, 
&lastTxResp.Height) + assert.NoError(t, err) + + t.Logf("first tx in block %d, last tx in block %d, time diff %s", + firstTxResp.Height, + lastTxResp.Height, + lastTxBlock.Header.Time.Sub(firstTxBlock.Header.Time).String(), + ) + + assert.Less(t, firstTxResp.Height, lastTxResp.Height, "first tx should in block before last tx") + successful++ + } else { + failed = true + } + } + + return !failed + }, + includeInBlockTimeout, // timeout + time.Second, // interval + "submitted transactions were not committed after %s", + includeInBlockTimeout.String(), + ) +} + +// Tests that the app version in most recent block is set to height of the block. +// Requires kvstore.WithEnforceVersionToHeight() to be enabled. +func TestApp_AppVersion(t *testing.T) { + testNode(t, func(ctx context.Context, t *testing.T, node e2e.Node) { + client, err := node.Client() + require.NoError(t, err) + info, err := client.ABCIInfo(ctx) + require.NoError(t, err) + require.NotZero(t, info.Response.LastBlockHeight) + + block, err := client.Block(ctx, &info.Response.LastBlockHeight) + require.NoError(t, err) + + require.Equal(t, info.Response.LastBlockHeight, block.Block.Height) + require.EqualValues(t, block.Block.Height, block.Block.Version.App) + }) +} diff --git a/test/e2e/tests/validator_test.go b/test/e2e/tests/validator_test.go index 23c4c2c3da..332f3e5a31 100644 --- a/test/e2e/tests/validator_test.go +++ b/test/e2e/tests/validator_test.go @@ -88,7 +88,7 @@ func TestValidator_Propose(t *testing.T) { defer cancel() blocks := fetchBlockChain(ctx, t) - testNode(t, func(ctx context.Context, t *testing.T, node e2e.Node) { + testNode(t, func(_ctx context.Context, t *testing.T, node e2e.Node) { if node.Mode != e2e.ModeValidator { return } diff --git a/test/fuzz/README.md b/test/fuzz/README.md index c75373cf64..2a16020eba 100644 --- a/test/fuzz/README.md +++ b/test/fuzz/README.md @@ -1,7 +1,7 @@ # fuzz Fuzzing for various packages in Tendermint using the fuzzing infrastructure included in -Go 1.19. +Go 1.22. Inputs: diff --git a/third_party/bls-signatures/build.sh b/third_party/bls-signatures/build.sh index 269ac582aa..8a01299185 100755 --- a/third_party/bls-signatures/build.sh +++ b/third_party/bls-signatures/build.sh @@ -1,19 +1,22 @@ #!/bin/bash -SCRIPT_PATH="$( cd "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )" +SCRIPT_PATH="$(realpath "$(dirname "$0")")" + SRC_PATH="${SCRIPT_PATH}/src" BUILD_PATH="${SCRIPT_PATH}/build" -BLS_SM_PATH="third_party/bls-signatures/src" +BLS_SM_PATH="${SRC_PATH}" BLS_GIT_REPO="https://github.com/dashpay/bls-signatures.git" BLS_GIT_BRANCH=${BLS_GIT_BRANCH:-"1.2.6"} set -e -if ! git submodule update --init "${BLS_SM_PATH}" ; then +pushd "${SCRIPT_PATH}" + +if ! git submodule update --init "${BLS_SM_PATH}"; then echo "It looks like this source code is not tracked by git." echo "As a fallback scenario we will fetch \"${BLS_GIT_BRANCH}\" branch \"${BLS_GIT_REPO}\" library." echo "We would recommend to clone of this project rather than using a release archive." 
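TestApp_TxTooBig above leans on the priority logic added to verifyTx earlier in this diff: the key's trailing index yields priority 1000000000 - i, so earlier transactions outrank later ones in the priority mempool, while big.Int.FillBytes stamps the index into the payload tail so every transaction stays unique and fixed-size. A standalone sketch of that construction, using a hypothetical node name and session value:

```go
package main

import (
	"bytes"
	"fmt"
	"math/big"
	"strconv"
)

func main() {
	const payloadSize = 2621 // 1% of the 0.25 MB block limit, as in the test
	node, session := "validator01", int64(0x1234abcd)

	for i := 0; i < 3; i++ {
		key := fmt.Sprintf("testapp-big-tx-%v-%08x-%d=", node, session, i)
		tx := make([]byte, payloadSize)
		copy(tx, key)
		// Stamp the big-endian index into the last 8 bytes of the payload.
		big.NewInt(int64(i)).FillBytes(tx[len(tx)-8:])

		// verifyTx derives priority from the last '-'-separated token of the key.
		parts := bytes.Split([]byte(key[:len(key)-1]), []byte{'-'})
		n, _ := strconv.ParseInt(string(parts[len(parts)-1]), 10, 64)
		fmt.Printf("tx %d: key=%q priority=%d\n", i, key, 1000000000-n)
	}
}
```

With the 262144-byte block limit set in the manifests above and 101 such transactions per node, the queued payload (roughly 101 × 2621 ≈ 265 kB, ignoring overhead) cannot fit in a single block, which is what forces the last transaction into a later block than the first.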
- rm -r "${BLS_SM_PATH}" || true + rm -r "${BLS_SM_PATH}" || true git clone --single-branch --branch "${BLS_GIT_BRANCH}" "${BLS_GIT_REPO}" "${BLS_SM_PATH}" fi @@ -21,7 +24,11 @@ fi mkdir -p "${BUILD_PATH}" # Configurate the library build -cmake -B "${BUILD_PATH}" -S "${SRC_PATH}" +cmake \ + -D BUILD_BLS_PYTHON_BINDINGS=OFF \ + -D BUILD_BLS_TESTS=OFF \ + -D BUILD_BLS_BENCHMARKS=OFF \ + -B "${BUILD_PATH}" -S "${SRC_PATH}" # Build the library cmake --build "${BUILD_PATH}" -- -j 6 @@ -29,4 +36,6 @@ cmake --build "${BUILD_PATH}" -- -j 6 mkdir -p "${BUILD_PATH}/src/bls-dash" cp -r ${SRC_PATH}/src/* "${BUILD_PATH}/src/bls-dash" +popd + exit 0 diff --git a/third_party/bls-signatures/install.sh b/third_party/bls-signatures/install.sh index cc3d06c548..37b6fa60d5 100755 --- a/third_party/bls-signatures/install.sh +++ b/third_party/bls-signatures/install.sh @@ -2,15 +2,25 @@ set -e -SCRIPT_PATH="$( cd "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )" +SCRIPT_PATH="$(realpath "$(dirname "$0")")" BUILD_PATH="$SCRIPT_PATH/build" -if [ ! -d $BUILD_PATH ]; then +if [ "$UID" -eq "0" ]; then + DESTDIR=${DESTDIR:-"/usr/local"} +else + DESTDIR=${DESTDIR:-"${HOME}/.local"} +fi + +if [ ! -d "$BUILD_PATH" ]; then echo "$BUILD_PATH doesn't exist. Run \"make build-bls\" first." >/dev/stderr exit 1 fi +pushd "${SCRIPT_PATH}" + # Install the library -cmake -P $BUILD_PATH/cmake_install.cmake +cmake -D CMAKE_INSTALL_PREFIX="${DESTDIR}" -P "$BUILD_PATH/cmake_install.cmake" + +popd exit 0 diff --git a/types/block.go b/types/block.go index df65dc7be3..10901ca371 100644 --- a/types/block.go +++ b/types/block.go @@ -13,7 +13,6 @@ import ( sync "github.com/sasha-s/go-deadlock" - "github.com/dashpay/dashd-go/btcjson" "github.com/gogo/protobuf/proto" gogotypes "github.com/gogo/protobuf/types" "github.com/rs/zerolog" @@ -32,7 +31,7 @@ const ( // MaxHeaderBytes is a maximum header size. // NOTE: Because app hash can be of arbitrary size, the header is therefore not // capped in size and thus this number should be seen as a soft max - MaxHeaderBytes int64 = 725 + MaxHeaderBytes int64 = 726 MaxCoreChainLockSize int64 = 132 // MaxOverheadForBlock - maximum overhead to encode a block (up to @@ -294,6 +293,20 @@ func (b *Block) ToProto() (*tmproto.Block, error) { return pb, nil } +func (b *Block) MarshalZerologObject(e *zerolog.Event) { + if b == nil { + e.Bool("nil", true) + return + } + e.Bool("nil", false) + + e.Interface("header", b.Header) + e.Interface("core_chain_lock", b.CoreChainLock) + e.Object("data", &b.Data) + e.Interface("evidence", b.Evidence) + e.Object("last_commit", b.LastCommit) +} + // FromProto sets a protobuf Block to the given pointer. // It returns an error if the block is invalid. func BlockFromProto(bp *tmproto.Block) (*Block, error) { @@ -522,7 +535,7 @@ func (h Header) ValidateBasic() error { // IsTimely defines whether the the proposal time is correct, as per PBTS spec. // NOTE: By definition, at initial height, recvTime MUST be genesis time. 
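The MarshalZerologObject methods added to Block (and below to Data and CoreChainLock) implement zerolog's LogObjectMarshaler, so a block is rendered field by field only when a log line is actually written. A self-contained sketch of the pattern on a toy type; the Event methods used here (Bool, Uint32, Hex, Object) are the same ones this diff relies on:

```go
package main

import (
	"os"

	"github.com/rs/zerolog"
)

// chainLockInfo is a toy type showing the zerolog.LogObjectMarshaler
// pattern used for Block, Data and CoreChainLock in this diff.
type chainLockInfo struct {
	Height    uint32
	BlockHash []byte
}

// MarshalZerologObject implements zerolog.LogObjectMarshaler, so the type
// is serialized field-by-field only when the log line is actually emitted.
func (c *chainLockInfo) MarshalZerologObject(e *zerolog.Event) {
	if c == nil {
		e.Bool("nil", true)
		return
	}
	e.Uint32("height", c.Height)
	e.Hex("block_hash", c.BlockHash)
}

func main() {
	logger := zerolog.New(os.Stdout)
	cl := &chainLockInfo{Height: 3450, BlockHash: []byte{0xde, 0xad, 0xbe, 0xef}}
	// Object accepts any zerolog.LogObjectMarshaler.
	logger.Info().Object("chain_lock", cl).Msg("new chain lock")
	// {"level":"info","chain_lock":{"height":3450,"block_hash":"deadbeef"},"message":"new chain lock"}
}
```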
func (h Header) IsTimely(recvTime time.Time, sp SynchronyParams, round int32) bool { - return isTimely(h.Time, recvTime, sp, round) + return checkTimely(h.Time, recvTime, sp, round) == 0 } // StateID returns a state ID of this block @@ -737,7 +750,7 @@ type Commit struct { QuorumHash crypto.QuorumHash `json:"quorum_hash"` ThresholdBlockSignature []byte `json:"threshold_block_signature"` // ThresholdVoteExtensions keeps the list of recovered threshold signatures for vote-extensions - ThresholdVoteExtensions []ThresholdExtensionSign `json:"threshold_vote_extensions"` + ThresholdVoteExtensions tmproto.VoteExtensions `json:"threshold_vote_extensions"` // Memoized in first call to corresponding method. // NOTE: can't memoize in constructor because constructor isn't used for @@ -746,12 +759,17 @@ type Commit struct { } // NewCommit returns a new Commit. -func NewCommit(height int64, round int32, blockID BlockID, commitSigns *CommitSigns) *Commit { +func NewCommit(height int64, round int32, blockID BlockID, voteExtensions VoteExtensions, commitSigns *CommitSigns) *Commit { commit := &Commit{ Height: height, Round: round, BlockID: blockID, + ThresholdVoteExtensions: voteExtensions.Filter(func(ext VoteExtensionIf) bool { + _, ok := ext.(ThresholdVoteExtensionIf) + return ok + }).ToProto(), } + if commitSigns != nil { commitSigns.CopyToCommit(commit) } @@ -764,17 +782,18 @@ func (commit *Commit) ToCommitInfo() types.CommitInfo { Round: commit.Round, QuorumHash: commit.QuorumHash, BlockSignature: commit.ThresholdBlockSignature, - ThresholdVoteExtensions: ThresholdExtensionSignToProto(commit.ThresholdVoteExtensions), + ThresholdVoteExtensions: commit.ThresholdVoteExtensions, } } // GetCanonicalVote returns the message that is being voted on in the form of a vote without signatures. func (commit *Commit) GetCanonicalVote() *Vote { return &Vote{ - Type: tmproto.PrecommitType, - Height: commit.Height, - Round: commit.Round, - BlockID: commit.BlockID, + Type: tmproto.PrecommitType, + Height: commit.Height, + Round: commit.Round, + BlockID: commit.BlockID, + VoteExtensions: VoteExtensionsFromProto(commit.ThresholdVoteExtensions...), } } @@ -796,24 +815,6 @@ func (commit *Commit) VoteBlockRequestID() []byte { return hash[:] } -// CanonicalVoteVerifySignBytes returns the bytes of the Canonical Vote that is threshold signed. -func (commit *Commit) CanonicalVoteVerifySignBytes(chainID string) []byte { - voteCanonical := commit.GetCanonicalVote() - vCanonical := voteCanonical.ToProto() - bz, err := vCanonical.SignBytes(chainID) - if err != nil { - panic(fmt.Errorf("canonical vote sign bytes: %w", err)) - } - return bz -} - -// CanonicalVoteVerifySignID returns the signID bytes of the Canonical Vote that is threshold signed. -func (commit *Commit) CanonicalVoteVerifySignID(chainID string, quorumType btcjson.LLMQType, quorumHash []byte) []byte { - voteCanonical := commit.GetCanonicalVote() - vCanonical := voteCanonical.ToProto() - return VoteBlockSignID(chainID, vCanonical, quorumType, quorumHash) -} - // Type returns the vote type of the commit, which is always VoteTypePrecommit // Implements VoteSetReader. 
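NewCommit above keeps only the vote extensions that satisfy the threshold-recovery interface before converting them to protobuf. The filter-plus-type-assertion pattern it uses looks roughly like this, with placeholder interfaces standing in for VoteExtensionIf and ThresholdVoteExtensionIf:

```go
package main

import "fmt"

// Placeholder interfaces standing in for VoteExtensionIf and
// ThresholdVoteExtensionIf; only the filtering pattern is the point here.
type voteExtension interface{ Bytes() []byte }

type thresholdRecoverable interface {
	voteExtension
	ThresholdRecover() bool
}

type plainExt struct{ b []byte }

func (e plainExt) Bytes() []byte { return e.b }

type thresholdExt struct{ b []byte }

func (e thresholdExt) Bytes() []byte          { return e.b }
func (e thresholdExt) ThresholdRecover() bool { return true }

// filter returns the extensions for which keep returns true, mirroring
// the VoteExtensions.Filter call in NewCommit.
func filter(exts []voteExtension, keep func(voteExtension) bool) []voteExtension {
	out := make([]voteExtension, 0, len(exts))
	for _, ext := range exts {
		if keep(ext) {
			out = append(out, ext)
		}
	}
	return out
}

func main() {
	exts := []voteExtension{plainExt{[]byte("a")}, thresholdExt{[]byte("b")}}
	thresholdOnly := filter(exts, func(ext voteExtension) bool {
		_, ok := ext.(thresholdRecoverable) // same type assertion as in NewCommit
		return ok
	})
	fmt.Println(len(thresholdOnly)) // 1
}
```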
func (commit *Commit) Type() byte { @@ -943,7 +944,7 @@ func (commit *Commit) ToProto() *tmproto.Commit { c.BlockID = commit.BlockID.ToProto() c.ThresholdBlockSignature = commit.ThresholdBlockSignature - c.ThresholdVoteExtensions = ThresholdExtensionSignToProto(commit.ThresholdVoteExtensions) + c.ThresholdVoteExtensions = commit.ThresholdVoteExtensions c.QuorumHash = commit.QuorumHash return c @@ -967,7 +968,7 @@ func CommitFromProto(cp *tmproto.Commit) (*Commit, error) { commit.QuorumHash = cp.QuorumHash commit.ThresholdBlockSignature = cp.ThresholdBlockSignature - commit.ThresholdVoteExtensions = ThresholdExtensionSignFromProto(cp.ThresholdVoteExtensions) + commit.ThresholdVoteExtensions = cp.ThresholdVoteExtensions commit.Height = cp.Height commit.Round = cp.Round @@ -1034,6 +1035,17 @@ func (data *Data) ToProto() tmproto.Data { return *tp } +func (data *Data) MarshalZerologObject(e *zerolog.Event) { + if data == nil { + e.Bool("nil", true) + return + } + e.Bool("nil", false) + + e.Str("hash", data.Hash().ShortString()) + e.Object("txs", data.Txs) +} + // DataFromProto takes a protobuf representation of Data & // returns the native type. func DataFromProto(dp *tmproto.Data) (Data, error) { diff --git a/types/block_test.go b/types/block_test.go index f9a4af6489..2803f8f1cc 100644 --- a/types/block_test.go +++ b/types/block_test.go @@ -22,6 +22,7 @@ import ( "github.com/dashpay/tenderdash/crypto/bls12381" "github.com/dashpay/tenderdash/crypto/merkle" tmbytes "github.com/dashpay/tenderdash/libs/bytes" + "github.com/dashpay/tenderdash/libs/log" tmrand "github.com/dashpay/tenderdash/libs/rand" tmtime "github.com/dashpay/tenderdash/libs/time" tmproto "github.com/dashpay/tenderdash/proto/tendermint/types" @@ -117,7 +118,7 @@ func TestBlockValidateBasic(t *testing.T) { blk.LastCommit = nil }, true}, {"Invalid LastCommit", func(blk *Block) { - blk.LastCommit = NewCommit(-1, 0, *voteSet.maj23, nil) + blk.LastCommit = NewCommit(-1, 0, *voteSet.maj23, nil, nil) }, true}, {"Invalid Evidence", func(blk *Block) { emptyEv := &DuplicateVoteEvidence{} @@ -213,6 +214,36 @@ func TestBlockSize(t *testing.T) { } } +// Given a block with more than `maxLoggedTxs` transactions, +// when we marshal it for logging, +// then we should see short hashes of the first `maxLoggedTxs` transactions in the log message, ending with "..." 
+func TestBlockMarshalZerolog(t *testing.T) { + ctx := context.Background() + logger := log.NewTestingLogger(t) + + txs := make(Txs, 0, 2*maxLoggedTxs) + expectTxs := make(Txs, 0, maxLoggedTxs) + for i := 0; i < 2*maxLoggedTxs; i++ { + txs = append(txs, Tx(fmt.Sprintf("tx%d", i))) + if i < maxLoggedTxs { + expectTxs = append(expectTxs, txs[i]) + } + } + + block := MakeBlock(1, txs, randCommit(ctx, t, 1, RandStateID()), nil) + + // define assertions + expected := fmt.Sprintf(",\"txs\":{\"num_txs\":%d,\"hashes\":[", 2*maxLoggedTxs) + for i := 0; i < maxLoggedTxs; i++ { + expected += "\"" + expectTxs[i].Hash().ShortString() + "\"," + } + expected += "\"...\"]}" + logger.AssertContains(expected) + + // execute test + logger.Info("test block", "block", block) +} + func TestBlockString(t *testing.T) { assert.Equal(t, "nil-Block", (*Block)(nil).String()) assert.Equal(t, "nil-Block", (*Block)(nil).StringIndented("")) @@ -468,7 +499,7 @@ func TestMaxHeaderBytes(t *testing.T) { timestamp := time.Date(math.MaxInt64, 0, 0, 0, 0, 0, math.MaxInt64, time.UTC) h := Header{ - Version: version.Consensus{Block: math.MaxInt64, App: math.MaxInt64}, + Version: version.Consensus{Block: math.MaxInt64, App: math.MaxUint64}, ChainID: maxChainID, Height: math.MaxInt64, Time: timestamp, @@ -528,7 +559,7 @@ func TestBlockMaxDataBytes(t *testing.T) { require.NotNil(t, commit) // minBlockSize is minimum correct size of a block - const minBlockSize = 1231 + const minBlockSize = 1371 testCases := []struct { maxBytes int64 @@ -565,7 +596,7 @@ func TestBlockMaxDataBytes(t *testing.T) { func TestBlockMaxDataBytesNoEvidence(t *testing.T) { // minBlockSize is minimum correct size of a block - const minBlockSize = 1128 + const minBlockSize = 1129 testCases := []struct { maxBytes int64 diff --git a/types/canonical.go b/types/canonical.go index e4c5d74f51..1bdaf231b1 100644 --- a/types/canonical.go +++ b/types/canonical.go @@ -1,6 +1,7 @@ package types import ( + "fmt" "time" tmtime "github.com/dashpay/tenderdash/libs/time" @@ -31,14 +32,21 @@ func CanonicalizeProposal(chainID string, proposal *tmproto.Proposal) tmproto.Ca // CanonicalizeVoteExtension extracts the vote extension from the given vote // and constructs a CanonicalizeVoteExtension struct, whose representation in // bytes is what is signed in order to produce the vote extension's signature. -func CanonicalizeVoteExtension(chainID string, ext *tmproto.VoteExtension, height int64, round int32) tmproto.CanonicalVoteExtension { - return tmproto.CanonicalVoteExtension{ - Extension: ext.Extension, - Type: ext.Type, - Height: height, - Round: int64(round), - ChainId: chainID, +func CanonicalizeVoteExtension(chainID string, ext *tmproto.VoteExtension, height int64, round int32) (tmproto.CanonicalVoteExtension, error) { + switch ext.Type { + case tmproto.VoteExtensionType_DEFAULT, tmproto.VoteExtensionType_THRESHOLD_RECOVER: + { + canonical := tmproto.CanonicalVoteExtension{ + Extension: ext.Extension, + Type: ext.Type, + Height: height, + Round: int64(round), + ChainId: chainID, + } + return canonical, nil + } } + return tmproto.CanonicalVoteExtension{}, fmt.Errorf("provided vote extension type %s does not have canonical form for signing", ext.Type) } // CanonicalTime can be used to stringify time in a canonical way. 
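TestBlockMarshalZerolog above pins down the log shape for a block's transactions: a num_txs count plus at most maxLoggedTxs short hashes, ending with "..." when the list is truncated. A sketch of a zerolog marshaler that produces that shape, assuming an illustrative cap and a ShortString-like helper; the real marshaler for Txs lives in the types package and may differ in detail:

```go
package main

import (
	"crypto/sha256"
	"fmt"
	"os"

	"github.com/rs/zerolog"
)

const maxLoggedTxs = 3 // illustrative cap; the real constant lives in types

type txs [][]byte

// shortHash is a stand-in for Tx.Hash().ShortString().
func shortHash(tx []byte) string {
	h := sha256.Sum256(tx)
	return fmt.Sprintf("%X", h[:3])
}

// MarshalZerologObject logs the number of txs and at most maxLoggedTxs
// short hashes, ending with "..." when the list was truncated.
func (t txs) MarshalZerologObject(e *zerolog.Event) {
	e.Int("num_txs", len(t))
	arr := zerolog.Arr()
	for i, tx := range t {
		if i >= maxLoggedTxs {
			arr.Str("...")
			break
		}
		arr.Str(shortHash(tx))
	}
	e.Array("hashes", arr)
}

func main() {
	logger := zerolog.New(os.Stdout)
	data := txs{[]byte("tx0"), []byte("tx1"), []byte("tx2"), []byte("tx3"), []byte("tx4")}
	logger.Info().Object("txs", data).Msg("test block")
}
```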
diff --git a/types/core_chainlock.go b/types/core_chainlock.go index b6c9d3b1c2..07643796c9 100644 --- a/types/core_chainlock.go +++ b/types/core_chainlock.go @@ -8,6 +8,7 @@ import ( "github.com/dashpay/tenderdash/crypto" tmproto "github.com/dashpay/tenderdash/proto/tendermint/types" + "github.com/rs/zerolog" ) type CoreChainLock struct { @@ -107,6 +108,16 @@ func (cl *CoreChainLock) IsZero() bool { return cl == nil || (len(cl.CoreBlockHash) == 0 && len(cl.Signature) == 0 && cl.CoreBlockHeight == 0) } +func (cl *CoreChainLock) MarshalZerologObject(e *zerolog.Event) { + if cl == nil { + e.Bool("nil", true) + return + } + e.Hex("core_block_hash", cl.CoreBlockHash) + e.Uint32("core_block_height", cl.CoreBlockHeight) + e.Hex("signature", cl.Signature) +} + // FromProto sets a protobuf Header to the given pointer. // It returns an error if the chain lock is invalid. func CoreChainLockFromProto(clp *tmproto.CoreChainLock) (*CoreChainLock, error) { diff --git a/types/events.go b/types/events.go index 90568bbc92..3be39451ef 100644 --- a/types/events.go +++ b/types/events.go @@ -126,7 +126,7 @@ type EventDataNewBlock struct { Block *Block `json:"block"` BlockID BlockID `json:"block_id"` - ResultFinalizeBlock abci.ResponseFinalizeBlock `json:"result_finalize_block"` + ResultProcessProposal abci.ResponseProcessProposal `json:"result_finalize_block"` } // TypeTag implements the required method of jsontypes.Tagged. @@ -135,7 +135,7 @@ func (EventDataNewBlock) TypeTag() string { return "tendermint/event/NewBlock" } // ABCIEvents implements the eventlog.ABCIEventer interface. func (e EventDataNewBlock) ABCIEvents() []abci.Event { base := []abci.Event{eventWithAttr(BlockHeightKey, fmt.Sprint(e.Block.Header.Height))} - return append(base, e.ResultFinalizeBlock.Events...) + return append(base, e.ResultProcessProposal.Events...) } type EventDataNewBlockHeader struct { @@ -144,7 +144,6 @@ type EventDataNewBlockHeader struct { NumTxs int64 `json:"num_txs,string"` // Number of txs in a block ResultProcessProposal abci.ResponseProcessProposal `json:"result_process_proposal"` - ResultFinalizeBlock abci.ResponseFinalizeBlock `json:"result_finalize_block"` } // TypeTag implements the required method of jsontypes.Tagged. @@ -153,7 +152,7 @@ func (EventDataNewBlockHeader) TypeTag() string { return "tendermint/event/NewBl // ABCIEvents implements the eventlog.ABCIEventer interface. func (e EventDataNewBlockHeader) ABCIEvents() []abci.Event { base := []abci.Event{eventWithAttr(BlockHeightKey, fmt.Sprint(e.Header.Height))} - return append(base, e.ResultFinalizeBlock.Events...) + return append(base, e.ResultProcessProposal.Events...) 
} type EventDataNewEvidence struct { diff --git a/types/genesis.go b/types/genesis.go index c98872fdd9..3af46b108e 100644 --- a/types/genesis.go +++ b/types/genesis.go @@ -75,7 +75,7 @@ type GenesisDoc struct { ConsensusParams *ConsensusParams Validators []GenesisValidator AppHash tmbytes.HexBytes - AppState json.RawMessage + AppState []byte // dash fields InitialCoreChainLockedHeight uint32 `json:"initial_core_chain_locked_height"` @@ -92,7 +92,7 @@ type genesisDocJSON struct { ConsensusParams *ConsensusParams `json:"consensus_params,omitempty"` Validators []GenesisValidator `json:"validators,omitempty"` AppHash tmbytes.HexBytes `json:"app_hash,omitempty"` - AppState json.RawMessage `json:"app_state,omitempty"` + AppState []byte `json:"app_state,omitempty"` // dash fields InitialCoreChainLockedHeight uint32 `json:"initial_core_chain_locked_height,omitempty"` diff --git a/types/genesis_test.go b/types/genesis_test.go index 92a918f01d..344c189fb8 100644 --- a/types/genesis_test.go +++ b/types/genesis_test.go @@ -2,6 +2,7 @@ package types import ( + "encoding/base64" "encoding/json" "fmt" "os" @@ -202,7 +203,8 @@ func TestGenesisCorrect(t *testing.T) { func TestBasicGenesisDoc(t *testing.T) { // test a good one by raw json - genDocBytes := []byte( + appState := base64.StdEncoding.AppendEncode(nil, []byte(`{"account_owner": "Bob"}`)) + genDocBytes := []byte(fmt.Sprintf( `{ "genesis_time": "0001-01-01T00:00:00Z", "chain_id": "test-chain-QDKdJr", @@ -222,7 +224,7 @@ func TestBasicGenesisDoc(t *testing.T) { "validator_quorum_hash":"43FF39CC1F41B9FC63DFA5B1EDF3F0CA3AD5CAFAE4B12B4FE9263B08BB50C4CC", "validator_quorum_type":100, "app_hash":"", - "app_state":{"account_owner": "Bob"}, + "app_state":"%s", "consensus_params": { "synchrony": {"precision": "1", "message_delay": "10"}, "timeout": { @@ -237,8 +239,8 @@ func TestBasicGenesisDoc(t *testing.T) { "block": {"max_bytes": "100"}, "evidence": {"max_age_num_blocks": "100", "max_age_duration": "10"} } - }`, - ) + }`, appState, + )) _, err := GenesisDocFromJSON(genDocBytes) assert.NoError(t, err, "expected no error for good genDoc json") diff --git a/types/mempool.go b/types/mempool.go index fa0f8a2082..4b61eff8f2 100644 --- a/types/mempool.go +++ b/types/mempool.go @@ -2,6 +2,7 @@ package types import ( "crypto/sha256" + "encoding/hex" "errors" "fmt" ) @@ -12,6 +13,10 @@ var ErrTxInCache = errors.New("tx already exists in cache") // TxKey is the fixed length array key used as an index. type TxKey [sha256.Size]byte +func (k TxKey) String() string { + return hex.EncodeToString(k[:]) +} + // ErrTxTooLarge defines an error when a transaction is too big to be sent in a // message to other peers. 
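The new TxKey.String method makes the fixed-length mempool key print as lowercase hex, so it can be dropped straight into log fields. The same idea in isolation:

```go
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

// txKey mirrors the fixed-length mempool key type.
type txKey [sha256.Size]byte

// String renders the key as lowercase hex, as in this diff.
func (k txKey) String() string { return hex.EncodeToString(k[:]) }

func main() {
	key := txKey(sha256.Sum256([]byte("example tx")))
	fmt.Println(key) // 64 hex characters
}
```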
type ErrTxTooLarge struct { diff --git a/types/mocks/block_event_publisher.go b/types/mocks/block_event_publisher.go index 8cd1f3d266..8f5c17f637 100644 --- a/types/mocks/block_event_publisher.go +++ b/types/mocks/block_event_publisher.go @@ -16,6 +16,10 @@ type BlockEventPublisher struct { func (_m *BlockEventPublisher) PublishEventNewBlock(_a0 types.EventDataNewBlock) error { ret := _m.Called(_a0) + if len(ret) == 0 { + panic("no return value specified for PublishEventNewBlock") + } + var r0 error if rf, ok := ret.Get(0).(func(types.EventDataNewBlock) error); ok { r0 = rf(_a0) @@ -30,6 +34,10 @@ func (_m *BlockEventPublisher) PublishEventNewBlock(_a0 types.EventDataNewBlock) func (_m *BlockEventPublisher) PublishEventNewBlockHeader(_a0 types.EventDataNewBlockHeader) error { ret := _m.Called(_a0) + if len(ret) == 0 { + panic("no return value specified for PublishEventNewBlockHeader") + } + var r0 error if rf, ok := ret.Get(0).(func(types.EventDataNewBlockHeader) error); ok { r0 = rf(_a0) @@ -44,6 +52,10 @@ func (_m *BlockEventPublisher) PublishEventNewBlockHeader(_a0 types.EventDataNew func (_m *BlockEventPublisher) PublishEventNewEvidence(_a0 types.EventDataNewEvidence) error { ret := _m.Called(_a0) + if len(ret) == 0 { + panic("no return value specified for PublishEventNewEvidence") + } + var r0 error if rf, ok := ret.Get(0).(func(types.EventDataNewEvidence) error); ok { r0 = rf(_a0) @@ -58,6 +70,10 @@ func (_m *BlockEventPublisher) PublishEventNewEvidence(_a0 types.EventDataNewEvi func (_m *BlockEventPublisher) PublishEventTx(_a0 types.EventDataTx) error { ret := _m.Called(_a0) + if len(ret) == 0 { + panic("no return value specified for PublishEventTx") + } + var r0 error if rf, ok := ret.Get(0).(func(types.EventDataTx) error); ok { r0 = rf(_a0) @@ -72,6 +88,10 @@ func (_m *BlockEventPublisher) PublishEventTx(_a0 types.EventDataTx) error { func (_m *BlockEventPublisher) PublishEventValidatorSetUpdates(_a0 types.EventDataValidatorSetUpdate) error { ret := _m.Called(_a0) + if len(ret) == 0 { + panic("no return value specified for PublishEventValidatorSetUpdates") + } + var r0 error if rf, ok := ret.Get(0).(func(types.EventDataValidatorSetUpdate) error); ok { r0 = rf(_a0) diff --git a/types/mocks/priv_validator.go b/types/mocks/priv_validator.go index 9f75bf3771..4fff072e51 100644 --- a/types/mocks/priv_validator.go +++ b/types/mocks/priv_validator.go @@ -28,6 +28,10 @@ type PrivValidator struct { func (_m *PrivValidator) ExtractIntoValidator(ctx context.Context, quorumHash bytes.HexBytes) *types.Validator { ret := _m.Called(ctx, quorumHash) + if len(ret) == 0 { + panic("no return value specified for ExtractIntoValidator") + } + var r0 *types.Validator if rf, ok := ret.Get(0).(func(context.Context, bytes.HexBytes) *types.Validator); ok { r0 = rf(ctx, quorumHash) @@ -44,6 +48,10 @@ func (_m *PrivValidator) ExtractIntoValidator(ctx context.Context, quorumHash by func (_m *PrivValidator) GetFirstQuorumHash(_a0 context.Context) (bytes.HexBytes, error) { ret := _m.Called(_a0) + if len(ret) == 0 { + panic("no return value specified for GetFirstQuorumHash") + } + var r0 bytes.HexBytes var r1 error if rf, ok := ret.Get(0).(func(context.Context) (bytes.HexBytes, error)); ok { @@ -70,6 +78,10 @@ func (_m *PrivValidator) GetFirstQuorumHash(_a0 context.Context) (bytes.HexBytes func (_m *PrivValidator) GetHeight(ctx context.Context, quorumHash bytes.HexBytes) (int64, error) { ret := _m.Called(ctx, quorumHash) + if len(ret) == 0 { + panic("no return value specified for GetHeight") + } + var r0 int64 
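These regenerated mocks (mockery 2.41) panic with "no return value specified for ..." when a method is invoked without a configured return value, instead of silently handing back zero values. In practice that means tests must declare an expectation for every call they trigger; a hypothetical test using the publisher mock above might look like the following (the test itself is illustrative, only the mock and event types come from this repository):

```go
package mocks_test

import (
	"testing"

	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"

	"github.com/dashpay/tenderdash/types"
	"github.com/dashpay/tenderdash/types/mocks"
)

func TestPublishEventNewBlockExpectation(t *testing.T) {
	publisher := &mocks.BlockEventPublisher{}
	// Without this expectation the regenerated mock now panics with
	// "no return value specified for PublishEventNewBlock".
	publisher.On("PublishEventNewBlock", mock.Anything).Return(nil)

	require.NoError(t, publisher.PublishEventNewBlock(types.EventDataNewBlock{}))
	publisher.AssertExpectations(t)
}
```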
var r1 error if rf, ok := ret.Get(0).(func(context.Context, bytes.HexBytes) (int64, error)); ok { @@ -94,6 +106,10 @@ func (_m *PrivValidator) GetHeight(ctx context.Context, quorumHash bytes.HexByte func (_m *PrivValidator) GetPrivateKey(ctx context.Context, quorumHash bytes.HexBytes) (crypto.PrivKey, error) { ret := _m.Called(ctx, quorumHash) + if len(ret) == 0 { + panic("no return value specified for GetPrivateKey") + } + var r0 crypto.PrivKey var r1 error if rf, ok := ret.Get(0).(func(context.Context, bytes.HexBytes) (crypto.PrivKey, error)); ok { @@ -120,6 +136,10 @@ func (_m *PrivValidator) GetPrivateKey(ctx context.Context, quorumHash bytes.Hex func (_m *PrivValidator) GetProTxHash(_a0 context.Context) (bytes.HexBytes, error) { ret := _m.Called(_a0) + if len(ret) == 0 { + panic("no return value specified for GetProTxHash") + } + var r0 bytes.HexBytes var r1 error if rf, ok := ret.Get(0).(func(context.Context) (bytes.HexBytes, error)); ok { @@ -146,6 +166,10 @@ func (_m *PrivValidator) GetProTxHash(_a0 context.Context) (bytes.HexBytes, erro func (_m *PrivValidator) GetPubKey(ctx context.Context, quorumHash bytes.HexBytes) (crypto.PubKey, error) { ret := _m.Called(ctx, quorumHash) + if len(ret) == 0 { + panic("no return value specified for GetPubKey") + } + var r0 crypto.PubKey var r1 error if rf, ok := ret.Get(0).(func(context.Context, bytes.HexBytes) (crypto.PubKey, error)); ok { @@ -172,6 +196,10 @@ func (_m *PrivValidator) GetPubKey(ctx context.Context, quorumHash bytes.HexByte func (_m *PrivValidator) GetThresholdPublicKey(ctx context.Context, quorumHash bytes.HexBytes) (crypto.PubKey, error) { ret := _m.Called(ctx, quorumHash) + if len(ret) == 0 { + panic("no return value specified for GetThresholdPublicKey") + } + var r0 crypto.PubKey var r1 error if rf, ok := ret.Get(0).(func(context.Context, bytes.HexBytes) (crypto.PubKey, error)); ok { @@ -198,6 +226,10 @@ func (_m *PrivValidator) GetThresholdPublicKey(ctx context.Context, quorumHash b func (_m *PrivValidator) SignProposal(ctx context.Context, chainID string, quorumType btcjson.LLMQType, quorumHash bytes.HexBytes, proposal *tenderminttypes.Proposal) (bytes.HexBytes, error) { ret := _m.Called(ctx, chainID, quorumType, quorumHash, proposal) + if len(ret) == 0 { + panic("no return value specified for SignProposal") + } + var r0 bytes.HexBytes var r1 error if rf, ok := ret.Get(0).(func(context.Context, string, btcjson.LLMQType, bytes.HexBytes, *tenderminttypes.Proposal) (bytes.HexBytes, error)); ok { @@ -224,6 +256,10 @@ func (_m *PrivValidator) SignProposal(ctx context.Context, chainID string, quoru func (_m *PrivValidator) SignVote(ctx context.Context, chainID string, quorumType btcjson.LLMQType, quorumHash bytes.HexBytes, vote *tenderminttypes.Vote, logger log.Logger) error { ret := _m.Called(ctx, chainID, quorumType, quorumHash, vote, logger) + if len(ret) == 0 { + panic("no return value specified for SignVote") + } + var r0 error if rf, ok := ret.Get(0).(func(context.Context, string, btcjson.LLMQType, bytes.HexBytes, *tenderminttypes.Vote, log.Logger) error); ok { r0 = rf(ctx, chainID, quorumType, quorumHash, vote, logger) diff --git a/types/node_info.go b/types/node_info.go index eb3720f890..ffb14e2157 100644 --- a/types/node_info.go +++ b/types/node_info.go @@ -9,7 +9,7 @@ import ( "github.com/dashpay/tenderdash/crypto" tmstrings "github.com/dashpay/tenderdash/internal/libs/strings" - tmbytes "github.com/dashpay/tenderdash/libs/bytes" + tmsync "github.com/dashpay/tenderdash/internal/libs/sync" tmp2p 
"github.com/dashpay/tenderdash/proto/tendermint/p2p" ) @@ -48,8 +48,8 @@ type NodeInfo struct { // Channels are HexBytes so easier to read as JSON Network string `json:"network"` // network/chain ID Version string `json:"version"` // major.minor.revision - // FIXME: This should be changed to uint16 to be consistent with the updated channel type - Channels tmbytes.HexBytes `json:"channels"` // channels this node knows about + // Channels supported by this node. Use GetChannels() as a getter. + Channels *tmsync.ConcurrentSlice[uint16] `json:"channels"` // channels this node knows about // ASCIIText fields Moniker string `json:"moniker"` // arbitrary moniker @@ -97,11 +97,15 @@ func (info NodeInfo) Validate() error { } // Validate Channels - ensure max and check for duplicates. - if len(info.Channels) > maxNumChannels { - return fmt.Errorf("info.Channels is too long (%v). Max is %v", len(info.Channels), maxNumChannels) + if info.Channels == nil { + return fmt.Errorf("info.Channels is nil") } - channels := make(map[byte]struct{}) - for _, ch := range info.Channels { + + if info.Channels.Len() > maxNumChannels { + return fmt.Errorf("info.Channels is too long (%v). Max is %v", info.Channels.Len(), maxNumChannels) + } + channels := make(map[uint16]struct{}) + for _, ch := range info.Channels.ToSlice() { _, ok := channels[ch] if ok { return fmt.Errorf("info.Channels contains duplicate channel id %v", ch) @@ -147,15 +151,15 @@ func (info NodeInfo) CompatibleWith(other NodeInfo) error { } // if we have no channels, we're just testing - if len(info.Channels) == 0 { + if info.Channels.Len() == 0 { return nil } // for each of our channels, check if they have it found := false OUTER_LOOP: - for _, ch1 := range info.Channels { - for _, ch2 := range other.Channels { + for _, ch1 := range info.Channels.ToSlice() { + for _, ch2 := range other.Channels.ToSlice() { if ch1 == ch2 { found = true break OUTER_LOOP // only need one @@ -171,23 +175,24 @@ OUTER_LOOP: // AddChannel is used by the router when a channel is opened to add it to the node info func (info *NodeInfo) AddChannel(channel uint16) { // check that the channel doesn't already exist - for _, ch := range info.Channels { - if ch == byte(channel) { + for _, ch := range info.Channels.ToSlice() { + if ch == channel { return } } - info.Channels = append(info.Channels, byte(channel)) + info.Channels.Append(channel) } func (info NodeInfo) Copy() NodeInfo { + chans := info.Channels.Copy() return NodeInfo{ ProtocolVersion: info.ProtocolVersion, NodeID: info.NodeID, ListenAddr: info.ListenAddr, Network: info.Network, Version: info.Version, - Channels: info.Channels, + Channels: &chans, Moniker: info.Moniker, Other: info.Other, ProTxHash: info.ProTxHash.Copy(), @@ -203,11 +208,14 @@ func (info NodeInfo) ToProto() *tmp2p.NodeInfo { App: info.ProtocolVersion.App, } + for _, ch := range info.Channels.ToSlice() { + dni.Channels = append(dni.Channels, uint32(ch)) + } + dni.NodeID = string(info.NodeID) dni.ListenAddr = info.ListenAddr dni.Network = info.Network dni.Version = info.Version - dni.Channels = info.Channels dni.Moniker = info.Moniker dni.ProTxHash = info.ProTxHash.Copy() dni.Other = tmp2p.NodeInfoOther{ @@ -232,7 +240,7 @@ func NodeInfoFromProto(pb *tmp2p.NodeInfo) (NodeInfo, error) { ListenAddr: pb.ListenAddr, Network: pb.Network, Version: pb.Version, - Channels: pb.Channels, + Channels: tmsync.NewConcurrentSlice[uint16](), Moniker: pb.Moniker, Other: NodeInfoOther{ TxIndex: pb.Other.TxIndex, @@ -240,6 +248,11 @@ func NodeInfoFromProto(pb 
*tmp2p.NodeInfo) (NodeInfo, error) { }, ProTxHash: pb.ProTxHash, } + + for _, ch := range pb.Channels { + dni.Channels.Append(uint16(ch)) + } + return dni, nil } diff --git a/types/node_info_test.go b/types/node_info_test.go index b600df5820..d16b1e9850 100644 --- a/types/node_info_test.go +++ b/types/node_info_test.go @@ -8,6 +8,7 @@ import ( "github.com/stretchr/testify/require" "github.com/dashpay/tenderdash/crypto/ed25519" + tmsync "github.com/dashpay/tenderdash/internal/libs/sync" tmnet "github.com/dashpay/tenderdash/libs/net" "github.com/dashpay/tenderdash/version" ) @@ -20,13 +21,13 @@ func TestNodeInfoValidate(t *testing.T) { ni := NodeInfo{} assert.Error(t, ni.Validate()) - channels := make([]byte, maxNumChannels) - for i := 0; i < maxNumChannels; i++ { - channels[i] = byte(i) + channels := tmsync.NewConcurrentSlice[uint16]() + for i := uint16(0); i < maxNumChannels; i++ { + channels.Append(i) } - dupChannels := make([]byte, 5) - copy(dupChannels, channels[:5]) - dupChannels = append(dupChannels, testCh) + + dupChannels := tmsync.NewConcurrentSlice[uint16](channels.ToSlice()[:5]...) + dupChannels.Append(testCh) nonASCII := "¢§µ" emptyTab := "\t" @@ -39,11 +40,14 @@ func TestNodeInfoValidate(t *testing.T) { }{ { "Too Many Channels", - func(ni *NodeInfo) { ni.Channels = append(channels, byte(maxNumChannels)) }, + func(ni *NodeInfo) { + ni.Channels = ref(channels.Copy()) + ni.Channels.Append(maxNumChannels) + }, true, }, {"Duplicate Channel", func(ni *NodeInfo) { ni.Channels = dupChannels }, true}, - {"Good Channels", func(ni *NodeInfo) { ni.Channels = ni.Channels[:5] }, false}, + {"Good Channels", func(ni *NodeInfo) { ni.Channels = tmsync.NewConcurrentSlice(ni.Channels.ToSlice()[:5]...) }, false}, {"Invalid NetAddress", func(ni *NodeInfo) { ni.ListenAddr = "not-an-address" }, true}, {"Good NetAddress", func(ni *NodeInfo) { ni.ListenAddr = "0.0.0.0:26656" }, false}, @@ -97,6 +101,10 @@ func TestNodeInfoValidate(t *testing.T) { } +func ref[T any](t T) *T { + return &t +} + func testNodeID() NodeID { return NodeIDFromPubKey(ed25519.GenPrivKey().PubKey()) } @@ -117,7 +125,7 @@ func testNodeInfoWithNetwork(t *testing.T, id NodeID, name, network string) Node ListenAddr: fmt.Sprintf("127.0.0.1:%d", getFreePort(t)), Network: network, Version: "1.2.3-rc0-deadbeef", - Channels: []byte{testCh}, + Channels: tmsync.NewConcurrentSlice[uint16](testCh), Moniker: name, Other: NodeInfoOther{ TxIndex: "on", @@ -146,7 +154,7 @@ func TestNodeInfoCompatible(t *testing.T) { assert.NoError(t, ni1.CompatibleWith(ni2)) // add another channel; still compatible - ni2.Channels = []byte{newTestChannel, testCh} + ni2.Channels = tmsync.NewConcurrentSlice[uint16](testCh) assert.NoError(t, ni1.CompatibleWith(ni2)) testCases := []struct { @@ -155,7 +163,7 @@ func TestNodeInfoCompatible(t *testing.T) { }{ {"Wrong block version", func(ni *NodeInfo) { ni.ProtocolVersion.Block++ }}, {"Wrong network", func(ni *NodeInfo) { ni.Network += "-wrong" }}, - {"No common channels", func(ni *NodeInfo) { ni.Channels = []byte{newTestChannel} }}, + {"No common channels", func(ni *NodeInfo) { ni.Channels = tmsync.NewConcurrentSlice[uint16](uint16(newTestChannel)) }}, } for _, tc := range testCases { @@ -167,15 +175,15 @@ func TestNodeInfoCompatible(t *testing.T) { func TestNodeInfoAddChannel(t *testing.T) { nodeInfo := testNodeInfo(t, testNodeID(), "testing") - nodeInfo.Channels = []byte{} + nodeInfo.Channels = tmsync.NewConcurrentSlice[uint16]() require.Empty(t, nodeInfo.Channels) nodeInfo.AddChannel(2) - require.Contains(t, 
nodeInfo.Channels, byte(0x02)) + require.Contains(t, nodeInfo.Channels.ToSlice(), uint16(2)) // adding the same channel again shouldn't be a problem nodeInfo.AddChannel(2) - require.Contains(t, nodeInfo.Channels, byte(0x02)) + require.Contains(t, nodeInfo.Channels.ToSlice(), uint16(2)) } func TestParseAddressString(t *testing.T) { diff --git a/types/params.go b/types/params.go index 04ceda4693..3e2b7535bc 100644 --- a/types/params.go +++ b/types/params.go @@ -4,6 +4,7 @@ import ( "crypto/sha256" "errors" "fmt" + "os" "time" "github.com/dashpay/tenderdash/crypto/bls12381" @@ -80,18 +81,23 @@ type VersionParams struct { // block validity, see the Proposer-Based Timestamps specification: // https://github.com/tendermint/tendermint/blob/master/spec/consensus/proposer-based-timestamp/README.md type SynchronyParams struct { - Precision time.Duration `json:"precision,string"` + // Precision is the maximum amount of time by which node clocks can differ. + Precision time.Duration `json:"precision,string"` + // MessageDelay is the maximum amount of time a message spend in transit. MessageDelay time.Duration `json:"message_delay,string"` } // TimeoutParams configure the timings of the steps of the Tendermint consensus algorithm. type TimeoutParams struct { - Propose time.Duration `json:"propose,string"` - ProposeDelta time.Duration `json:"propose_delta,string"` - Vote time.Duration `json:"vote,string"` - VoteDelta time.Duration `json:"vote_delta,string"` - Commit time.Duration `json:"commit,string"` - BypassCommitTimeout bool `json:"bypass_commit_timeout"` + Propose time.Duration `json:"propose,string"` + ProposeDelta time.Duration `json:"propose_delta,string"` + Vote time.Duration `json:"vote,string"` + VoteDelta time.Duration `json:"vote_delta,string"` + + // Unused, TODO: Remove in 0.15 + Commit time.Duration `json:"commit,string"` + // Unused, TODO: Remove in 0.15 + BypassCommitTimeout bool `json:"bypass_commit_timeout"` } // ABCIParams configure ABCI functionality specific to the Application Blockchain @@ -172,12 +178,10 @@ func (s SynchronyParams) SynchronyParamsOrDefaults() SynchronyParams { func DefaultTimeoutParams() TimeoutParams { return TimeoutParams{ - Propose: 3000 * time.Millisecond, - ProposeDelta: 500 * time.Millisecond, - Vote: 1000 * time.Millisecond, - VoteDelta: 500 * time.Millisecond, - Commit: 1000 * time.Millisecond, - BypassCommitTimeout: false, + Propose: 3000 * time.Millisecond, + ProposeDelta: 500 * time.Millisecond, + Vote: 1000 * time.Millisecond, + VoteDelta: 500 * time.Millisecond, } } @@ -207,9 +211,6 @@ func (t TimeoutParams) TimeoutParamsOrDefaults() TimeoutParams { if t.VoteDelta == 0 { t.VoteDelta = defaults.VoteDelta } - if t.Commit == 0 { - t.Commit = defaults.Commit - } return t } @@ -227,13 +228,6 @@ func (t TimeoutParams) VoteTimeout(round int32) time.Duration { ) * time.Nanosecond } -// CommitTime accepts ti, the time at which the consensus engine received +2/3 -// precommits for a block and returns the point in time at which the consensus -// engine should begin consensus on the next block. -func (t TimeoutParams) CommitTime(ti time.Time) time.Time { - return ti.Add(t.Commit) -} - func (val *ValidatorParams) IsValidPubkeyType(pubkeyType string) bool { for i := 0; i < len(val.PubKeyTypes); i++ { if val.PubKeyTypes[i] == pubkeyType { @@ -319,14 +313,19 @@ func (params ConsensusParams) ValidateConsensusParams() error { return fmt.Errorf("timeout.VoteDelta must be greater than 0. 
Got: %d", params.Timeout.VoteDelta) } - if params.Timeout.Commit <= 0 { - return fmt.Errorf("timeout.Commit must be greater than 0. Got: %d", params.Timeout.Commit) - } - if len(params.Validator.PubKeyTypes) == 0 { return errors.New("len(Validator.PubKeyTypes) must be greater than 0") } + // TODO: Remove in v0.15 + if params.Timeout.Commit != 0 { + fmt.Fprintln(os.Stderr, "WARNING: ConsensusParams.Timeout.Commit is not used and will be removed in v0.15") + } + // TODO: Remove in v0.15 + if params.Timeout.BypassCommitTimeout { + fmt.Fprintln(os.Stderr, "WARNING: ConsensusParams.Timeout.BypassCommitTimeout is not used and will be removed in v0.15") + } + // Check if keyType is a known ABCIPubKeyType for i := 0; i < len(params.Validator.PubKeyTypes); i++ { keyType := params.Validator.PubKeyTypes[i] @@ -418,10 +417,6 @@ func (params ConsensusParams) UpdateConsensusParams(params2 *tmproto.ConsensusPa if params2.Timeout.VoteDelta != nil { res.Timeout.VoteDelta = *params2.Timeout.GetVoteDelta() } - if params2.Timeout.Commit != nil { - res.Timeout.Commit = *params2.Timeout.GetCommit() - } - res.Timeout.BypassCommitTimeout = params2.Timeout.GetBypassCommitTimeout() } if params2.Abci != nil { res.ABCI.RecheckTx = params2.Abci.GetRecheckTx() @@ -451,12 +446,10 @@ func (params *ConsensusParams) ToProto() tmproto.ConsensusParams { Precision: ¶ms.Synchrony.Precision, }, Timeout: &tmproto.TimeoutParams{ - Propose: ¶ms.Timeout.Propose, - ProposeDelta: ¶ms.Timeout.ProposeDelta, - Vote: ¶ms.Timeout.Vote, - VoteDelta: ¶ms.Timeout.VoteDelta, - Commit: ¶ms.Timeout.Commit, - BypassCommitTimeout: params.Timeout.BypassCommitTimeout, + Propose: ¶ms.Timeout.Propose, + ProposeDelta: ¶ms.Timeout.ProposeDelta, + Vote: ¶ms.Timeout.Vote, + VoteDelta: ¶ms.Timeout.VoteDelta, }, Abci: &tmproto.ABCIParams{ RecheckTx: params.ABCI.RecheckTx, @@ -503,10 +496,6 @@ func ConsensusParamsFromProto(pbParams tmproto.ConsensusParams) ConsensusParams if pbParams.Timeout.VoteDelta != nil { c.Timeout.VoteDelta = *pbParams.Timeout.GetVoteDelta() } - if pbParams.Timeout.Commit != nil { - c.Timeout.Commit = *pbParams.Timeout.GetCommit() - } - c.Timeout.BypassCommitTimeout = pbParams.Timeout.BypassCommitTimeout } if pbParams.Abci != nil { c.ABCI.RecheckTx = pbParams.Abci.GetRecheckTx() diff --git a/types/params_test.go b/types/params_test.go index 7e74c21315..a5abcfa6a4 100644 --- a/types/params_test.go +++ b/types/params_test.go @@ -173,21 +173,19 @@ func TestConsensusParamsValidation(t *testing.T) { } type makeParamsArgs struct { - blockBytes int64 - blockGas int64 - recheck bool - evidenceAge int64 - maxEvidenceBytes int64 - pubkeyTypes []string - precision time.Duration - messageDelay time.Duration - bypassCommitTimeout bool + blockBytes int64 + blockGas int64 + recheck bool + evidenceAge int64 + maxEvidenceBytes int64 + pubkeyTypes []string + precision time.Duration + messageDelay time.Duration propose *time.Duration proposeDelta *time.Duration vote *time.Duration voteDelta *time.Duration - commit *time.Duration } func makeParams(args makeParamsArgs) ConsensusParams { @@ -206,9 +204,7 @@ func makeParams(args makeParamsArgs) ConsensusParams { if args.voteDelta == nil { args.voteDelta = durationPtr(1) } - if args.commit == nil { - args.commit = durationPtr(1) - } + return ConsensusParams{ Block: BlockParams{ MaxBytes: args.blockBytes, @@ -227,12 +223,10 @@ func makeParams(args makeParamsArgs) ConsensusParams { MessageDelay: args.messageDelay, }, Timeout: TimeoutParams{ - Propose: *args.propose, - ProposeDelta: *args.proposeDelta, - Vote: 
*args.vote, - VoteDelta: *args.voteDelta, - Commit: *args.commit, - BypassCommitTimeout: args.bypassCommitTimeout, + Propose: *args.propose, + ProposeDelta: *args.proposeDelta, + Vote: *args.vote, + VoteDelta: *args.voteDelta, }, ABCI: ABCIParams{ RecheckTx: args.recheck, @@ -293,30 +287,24 @@ func TestConsensusParamsUpdate(t *testing.T) { { // update timeout params initialParams: makeParams(makeParamsArgs{ - propose: durationPtr(3 * time.Second), - proposeDelta: durationPtr(500 * time.Millisecond), - vote: durationPtr(time.Second), - voteDelta: durationPtr(500 * time.Millisecond), - commit: durationPtr(time.Second), - bypassCommitTimeout: false, + propose: durationPtr(3 * time.Second), + proposeDelta: durationPtr(500 * time.Millisecond), + vote: durationPtr(time.Second), + voteDelta: durationPtr(500 * time.Millisecond), }), updates: &tmproto.ConsensusParams{ Timeout: &tmproto.TimeoutParams{ - Propose: durationPtr(2 * time.Second), - ProposeDelta: durationPtr(400 * time.Millisecond), - Vote: durationPtr(5 * time.Second), - VoteDelta: durationPtr(400 * time.Millisecond), - Commit: durationPtr(time.Minute), - BypassCommitTimeout: true, + Propose: durationPtr(2 * time.Second), + ProposeDelta: durationPtr(400 * time.Millisecond), + Vote: durationPtr(5 * time.Second), + VoteDelta: durationPtr(400 * time.Millisecond), }, }, updatedParams: makeParams(makeParamsArgs{ - propose: durationPtr(2 * time.Second), - proposeDelta: durationPtr(400 * time.Millisecond), - vote: durationPtr(5 * time.Second), - voteDelta: durationPtr(400 * time.Millisecond), - commit: durationPtr(time.Minute), - bypassCommitTimeout: true, + propose: durationPtr(2 * time.Second), + proposeDelta: durationPtr(400 * time.Millisecond), + vote: durationPtr(5 * time.Second), + voteDelta: durationPtr(400 * time.Millisecond), }), }, // fine updates @@ -396,12 +384,10 @@ func TestProto(t *testing.T) { makeParams(makeParamsArgs{precision: time.Second, messageDelay: time.Minute}), makeParams(makeParamsArgs{precision: time.Nanosecond, messageDelay: time.Millisecond}), makeParams(makeParamsArgs{ - propose: durationPtr(2 * time.Second), - proposeDelta: durationPtr(400 * time.Millisecond), - vote: durationPtr(5 * time.Second), - voteDelta: durationPtr(400 * time.Millisecond), - commit: durationPtr(time.Minute), - bypassCommitTimeout: true, + propose: durationPtr(2 * time.Second), + proposeDelta: durationPtr(400 * time.Millisecond), + vote: durationPtr(5 * time.Second), + voteDelta: durationPtr(400 * time.Millisecond), }), } diff --git a/types/priv_validator.go b/types/priv_validator.go index 723d21af69..7d3faabfcb 100644 --- a/types/priv_validator.go +++ b/types/priv_validator.go @@ -180,7 +180,7 @@ func NewMockPVWithParams( } // GetPubKey implements PrivValidator. -func (pv *MockPV) GetPubKey(ctx context.Context, quorumHash crypto.QuorumHash) (crypto.PubKey, error) { +func (pv *MockPV) GetPubKey(_ctx context.Context, quorumHash crypto.QuorumHash) (crypto.PubKey, error) { pv.mtx.RLock() defer pv.mtx.RUnlock() if keys, ok := pv.PrivateKeys[quorumHash.String()]; ok { @@ -190,7 +190,7 @@ func (pv *MockPV) GetPubKey(ctx context.Context, quorumHash crypto.QuorumHash) ( } // GetProTxHash implements PrivValidator. 
-func (pv *MockPV) GetProTxHash(ctx context.Context) (crypto.ProTxHash, error) { +func (pv *MockPV) GetProTxHash(_ctx context.Context) (crypto.ProTxHash, error) { pv.mtx.RLock() defer pv.mtx.RUnlock() @@ -200,7 +200,7 @@ func (pv *MockPV) GetProTxHash(ctx context.Context) (crypto.ProTxHash, error) { return pv.ProTxHash, nil } -func (pv *MockPV) GetFirstQuorumHash(ctx context.Context) (crypto.QuorumHash, error) { +func (pv *MockPV) GetFirstQuorumHash(_ctx context.Context) (crypto.QuorumHash, error) { pv.mtx.RLock() defer pv.mtx.RUnlock() for quorumHashString := range pv.PrivateKeys { @@ -210,14 +210,14 @@ func (pv *MockPV) GetFirstQuorumHash(ctx context.Context) (crypto.QuorumHash, er } // GetThresholdPublicKey ... -func (pv *MockPV) GetThresholdPublicKey(ctx context.Context, quorumHash crypto.QuorumHash) (crypto.PubKey, error) { +func (pv *MockPV) GetThresholdPublicKey(_ctx context.Context, quorumHash crypto.QuorumHash) (crypto.PubKey, error) { pv.mtx.RLock() defer pv.mtx.RUnlock() return pv.PrivateKeys[quorumHash.String()].ThresholdPublicKey, nil } // GetPrivateKey ... -func (pv *MockPV) GetPrivateKey(ctx context.Context, quorumHash crypto.QuorumHash) (crypto.PrivKey, error) { +func (pv *MockPV) GetPrivateKey(_ctx context.Context, quorumHash crypto.QuorumHash) (crypto.PrivKey, error) { pv.mtx.RLock() defer pv.mtx.RUnlock() return pv.PrivateKeys[quorumHash.String()].PrivKey, nil @@ -228,14 +228,14 @@ func (pv *MockPV) getPrivateKey(quorumHash crypto.QuorumHash) crypto.PrivKey { } // ThresholdPublicKeyForQuorumHash ... -func (pv *MockPV) ThresholdPublicKeyForQuorumHash(ctx context.Context, quorumHash crypto.QuorumHash) (crypto.PubKey, error) { +func (pv *MockPV) ThresholdPublicKeyForQuorumHash(_ctx context.Context, quorumHash crypto.QuorumHash) (crypto.PubKey, error) { pv.mtx.RLock() defer pv.mtx.RUnlock() return pv.PrivateKeys[quorumHash.String()].ThresholdPublicKey, nil } // GetHeight ... -func (pv *MockPV) GetHeight(ctx context.Context, quorumHash crypto.QuorumHash) (int64, error) { +func (pv *MockPV) GetHeight(_ctx context.Context, quorumHash crypto.QuorumHash) (int64, error) { pv.mtx.RLock() defer pv.mtx.RUnlock() if intString, ok := pv.FirstHeightOfQuorums[quorumHash.String()]; ok { @@ -246,12 +246,12 @@ func (pv *MockPV) GetHeight(ctx context.Context, quorumHash crypto.QuorumHash) ( // SignVote implements PrivValidator. func (pv *MockPV) SignVote( - ctx context.Context, + _ctx context.Context, chainID string, quorumType btcjson.LLMQType, quorumHash crypto.QuorumHash, vote *tmproto.Vote, - logger log.Logger) error { + _logger log.Logger) error { pv.mtx.Lock() defer pv.mtx.Unlock() useChainID := chainID @@ -273,6 +273,7 @@ func (pv *MockPV) SignVote( } vote.BlockSignature = blockSignature + // We only sign vote extensions for precommits if vote.Type != tmproto.PrecommitType { if len(vote.VoteExtensions) > 0 { return errors.New("unexpected vote extension - vote extensions are only allowed in precommits") @@ -280,28 +281,26 @@ func (pv *MockPV) SignVote( return nil } - // We only sign vote extensions for precommits - extSigns, err := MakeVoteExtensionSignItems(useChainID, vote, quorumType, quorumHash) + extensions := VoteExtensionsFromProto(vote.VoteExtensions...) 
+ signItems, err := extensions.SignItems(useChainID, quorumType, quorumHash, vote.Height, vote.Round) if err != nil { return err } - protoExtensionsMap := vote.VoteExtensionsToMap() - for et, signs := range extSigns { - extensions := protoExtensionsMap[et] - for i, sign := range signs { - sign, err := privKey.SignDigest(sign.ID) - if err != nil { - return err - } - extensions[i].Signature = sign + + for i, sign := range signItems { + sig, err := privKey.SignDigest(sign.SignHash) + if err != nil { + return err } + vote.VoteExtensions[i].Signature = sig } + return nil } // SignProposal Implements PrivValidator. func (pv *MockPV) SignProposal( - ctx context.Context, + _ctx context.Context, chainID string, quorumType btcjson.LLMQType, quorumHash crypto.QuorumHash, @@ -331,7 +330,7 @@ func (pv *MockPV) SignProposal( } func (pv *MockPV) UpdatePrivateKey( - ctx context.Context, + _ctx context.Context, privateKey crypto.PrivKey, quorumHash crypto.QuorumHash, thresholdPublicKey crypto.PubKey, @@ -383,23 +382,23 @@ type ErroringMockPV struct { var ErroringMockPVErr = errors.New("erroringMockPV always returns an error") // GetPubKey Implements PrivValidator. -func (pv *ErroringMockPV) GetPubKey(ctx context.Context, quorumHash crypto.QuorumHash) (crypto.PubKey, error) { +func (pv *ErroringMockPV) GetPubKey(_ctx context.Context, _quorumHash crypto.QuorumHash) (crypto.PubKey, error) { return nil, ErroringMockPVErr } // SignVote Implements PrivValidator. func (pv *ErroringMockPV) SignVote( - ctx context.Context, chainID string, quorumType btcjson.LLMQType, quorumHash crypto.QuorumHash, - vote *tmproto.Vote, logger log.Logger) error { + _ctx context.Context, _chainID string, _quorumType btcjson.LLMQType, _quorumHash crypto.QuorumHash, + _vote *tmproto.Vote, _logger log.Logger) error { return ErroringMockPVErr } // SignProposal Implements PrivValidator. func (pv *ErroringMockPV) SignProposal( - ctx context.Context, chainID string, - quorumType btcjson.LLMQType, - quorumHash crypto.QuorumHash, - proposal *tmproto.Proposal, + _ctx context.Context, _chainID string, + _quorumType btcjson.LLMQType, + _quorumHash crypto.QuorumHash, + _proposal *tmproto.Proposal, ) (tmbytes.HexBytes, error) { return nil, ErroringMockPVErr } diff --git a/types/proposal.go b/types/proposal.go index 9525556a0b..8d7aefd38f 100644 --- a/types/proposal.go +++ b/types/proposal.go @@ -98,7 +98,7 @@ func (p *Proposal) ValidateBasic() error { return nil } -// IsTimely validates that the block timestamp is 'timely' according to the proposer-based timestamp algorithm. +// CheckTimely validates that the block timestamp is 'timely' according to the proposer-based timestamp algorithm. // To evaluate if a block is timely, its timestamp is compared to the local time of the validator along with the // configured Precision and MsgDelay parameters. // Specifically, a proposed block timestamp is considered timely if it is satisfies the following inequalities: @@ -110,8 +110,14 @@ func (p *Proposal) ValidateBasic() error { // https://github.com/dashpay/tenderdash/tree/master/spec/consensus/proposer-based-timestamp // // NOTE: by definition, at initial height, recvTime MUST be genesis time. 
-func (p *Proposal) IsTimely(recvTime time.Time, sp SynchronyParams, round int32) bool { - return isTimely(p.Timestamp, recvTime, sp, round) +// +// # Returns +// +// 0: timely +// -1: too early +// 1: too late +func (p *Proposal) CheckTimely(recvTime time.Time, sp SynchronyParams, round int32) int { + return checkTimely(p.Timestamp, recvTime, sp, round) } // String returns a string representation of the Proposal. @@ -182,12 +188,7 @@ func ProposalBlockSignID( proposalRequestID := ProposalRequestIDProto(p) - signID := crypto.SignID( - quorumType, - tmbytes.Reverse(quorumHash), - tmbytes.Reverse(proposalRequestID), - tmbytes.Reverse(proposalMessageHash[:]), - ) + signID := NewSignItemFromHash(quorumType, quorumHash, proposalRequestID, proposalMessageHash[:]).SignHash return signID } diff --git a/types/proposal_test.go b/types/proposal_test.go index a7f9d5ab9a..49ea38cff2 100644 --- a/types/proposal_test.go +++ b/types/proposal_test.go @@ -171,7 +171,7 @@ func TestProposalValidateBasic(t *testing.T) { malleateProposal func(*Proposal) expectErr bool }{ - {"Good Proposal", func(p *Proposal) {}, false}, + {"Good Proposal", func(_ *Proposal) {}, false}, {"Invalid Type", func(p *Proposal) { p.Type = tmproto.PrecommitType }, true}, {"Invalid Height", func(p *Proposal) { p.Height = -1 }, true}, {"Invalid Round", func(p *Proposal) { p.Round = -1 }, true}, @@ -238,7 +238,7 @@ func TestProposalProtoBuf(t *testing.T) { expPass bool }{ {"success", proposal, true}, - {"success", proposal2, false}, // blcokID cannot be empty + {"success", proposal2, false}, // blockID cannot be empty {"empty proposal failure validatebasic", &Proposal{}, false}, {"nil proposal", nil, false}, } @@ -333,8 +333,8 @@ func TestIsTimely(t *testing.T) { MessageDelay: testCase.msgDelay, } - ti := p.IsTimely(testCase.recvTime, sp, testCase.round) - assert.Equal(t, testCase.expectTimely, ti) + ti := p.CheckTimely(testCase.recvTime, sp, testCase.round) + assert.Equal(t, testCase.expectTimely, ti == 0) }) } } diff --git a/types/protobuf.go b/types/protobuf.go index 2bfa083b94..671e21321c 100644 --- a/types/protobuf.go +++ b/types/protobuf.go @@ -164,12 +164,21 @@ func (pb2tm) ValidatorSetFromProtoUpdate( valSetUpdate *abci.ValidatorSetUpdate, ) (*ValidatorSet, error) { hasPublicKeys := true - for _, v := range valSetUpdate.ValidatorUpdates { + for i, v := range valSetUpdate.ValidatorUpdates { if v.PubKey == nil { hasPublicKeys = false break } + + pubkey, err := cryptoenc.PubKeyFromProto(*v.PubKey) + if err != nil { + return nil, fmt.Errorf("invalid pubkey of validator %d (%x) in valset update: %w", i, v.ProTxHash, err) + } + if len(pubkey.Bytes()) == 0 { + return nil, fmt.Errorf("pubkey of validator %d (%x) in valset update has zero length", i, v.ProTxHash) + } } + tmVals, pub, quorumHash, err := PB2TM.ValidatorUpdatesFromValidatorSet(valSetUpdate) if err != nil { return nil, err diff --git a/types/protobuf_test.go b/types/protobuf_test.go index 4069c59d81..d1530e134b 100644 --- a/types/protobuf_test.go +++ b/types/protobuf_test.go @@ -58,20 +58,20 @@ func TestABCIValidators(t *testing.T) { type pubKeyBLS struct{} -func (pubKeyBLS) Address() Address { return []byte{} } -func (pubKeyBLS) Bytes() []byte { return []byte{} } -func (pubKeyBLS) VerifySignature(msg []byte, sig []byte) bool { return false } -func (pubKeyBLS) VerifySignatureDigest(msg []byte, sig []byte) bool { return false } -func (pubKeyBLS) AggregateSignatures(sigSharesData [][]byte, messages [][]byte) ([]byte, error) { +func (pubKeyBLS) Address() Address { return []byte{} 
} +func (pubKeyBLS) Bytes() []byte { return []byte{} } +func (pubKeyBLS) VerifySignature(_ []byte, _ []byte) bool { return false } +func (pubKeyBLS) VerifySignatureDigest(_ []byte, _ []byte) bool { return false } +func (pubKeyBLS) AggregateSignatures(_sigSharesData [][]byte, _messages [][]byte) ([]byte, error) { return []byte{}, nil } -func (pubKeyBLS) VerifyAggregateSignature(msgs [][]byte, sig []byte) bool { return false } -func (pubKeyBLS) Equals(crypto.PubKey) bool { return false } -func (pubKeyBLS) String() string { return "" } -func (pubKeyBLS) HexString() string { return "" } -func (pubKeyBLS) Type() string { return bls12381.KeyType } -func (pubKeyBLS) TypeValue() crypto.KeyType { return crypto.BLS12381 } -func (pubKeyBLS) TypeTag() string { return bls12381.PubKeyName } +func (pubKeyBLS) VerifyAggregateSignature(_ [][]byte, _ []byte) bool { return false } +func (pubKeyBLS) Equals(crypto.PubKey) bool { return false } +func (pubKeyBLS) String() string { return "" } +func (pubKeyBLS) HexString() string { return "" } +func (pubKeyBLS) Type() string { return bls12381.KeyType } +func (pubKeyBLS) TypeValue() crypto.KeyType { return crypto.BLS12381 } +func (pubKeyBLS) TypeTag() string { return bls12381.PubKeyName } func TestABCIValidatorFromPubKeyAndPower(t *testing.T) { pubkey := bls12381.GenPrivKey().PubKey() diff --git a/types/quorum.go b/types/quorum.go index 5e0a5c7e67..527c19fb34 100644 --- a/types/quorum.go +++ b/types/quorum.go @@ -1,11 +1,8 @@ package types import ( + "bytes" "fmt" - - "github.com/dashpay/tenderdash/crypto" - "github.com/dashpay/tenderdash/libs/log" - tmproto "github.com/dashpay/tenderdash/proto/tendermint/types" ) // CommitSigns is used to combine threshold signatures and quorum-hash that were used @@ -15,194 +12,39 @@ type CommitSigns struct { } // CopyToCommit copies threshold signature to commit +// +// commit.ThresholdVoteExtensions must be initialized func (c *CommitSigns) CopyToCommit(commit *Commit) { commit.QuorumHash = c.QuorumHash commit.ThresholdBlockSignature = c.BlockSign - commit.ThresholdVoteExtensions = c.ExtensionSigns + if len(c.VoteExtensionSignatures) != len(commit.ThresholdVoteExtensions) { + panic(fmt.Sprintf("count of threshold vote extension signatures (%d) doesn't match vote extensions in commit (%d)", + len(commit.ThresholdVoteExtensions), len(c.VoteExtensionSignatures), + )) + } + for i, ext := range c.VoteExtensionSignatures { + commit.ThresholdVoteExtensions[i].Signature = bytes.Clone(ext) + } } // QuorumSigns holds all created signatures, block, state and for each recovered vote-extensions type QuorumSigns struct { - BlockSign []byte - ExtensionSigns []ThresholdExtensionSign + BlockSign []byte + // List of vote extensions signatures. Order matters. + VoteExtensionSignatures [][]byte } -// NewQuorumSignsFromCommit creates and returns QuorumSigns using threshold signatures from a commit +// NewQuorumSignsFromCommit creates and returns QuorumSigns using threshold signatures from a commit. 
+// +// Note it only uses threshold-revoverable vote extension signatures, as non-threshold signatures are not included in the commit func NewQuorumSignsFromCommit(commit *Commit) QuorumSigns { - return QuorumSigns{ - BlockSign: commit.ThresholdBlockSignature, - ExtensionSigns: commit.ThresholdVoteExtensions, - } -} - -// ThresholdExtensionSign is used for keeping extension and recovered threshold signature -type ThresholdExtensionSign struct { - Extension []byte - ThresholdSignature []byte -} - -// MakeThresholdExtensionSigns creates and returns the list of ThresholdExtensionSign for given VoteExtensions container -func MakeThresholdExtensionSigns(voteExtensions VoteExtensions) []ThresholdExtensionSign { - if voteExtensions == nil { - return nil - } - extensions := voteExtensions[tmproto.VoteExtensionType_THRESHOLD_RECOVER] - if len(extensions) == 0 { - return nil - } - thresholdSigns := make([]ThresholdExtensionSign, len(extensions)) - for i, ext := range extensions { - thresholdSigns[i] = ThresholdExtensionSign{ - Extension: ext.Extension, - ThresholdSignature: ext.Signature, - } - } - return thresholdSigns -} - -// ThresholdExtensionSignFromProto transforms a list of protobuf ThresholdVoteExtension -// into the list of domain ThresholdExtensionSign -func ThresholdExtensionSignFromProto(protoExtensions []*tmproto.VoteExtension) []ThresholdExtensionSign { - if len(protoExtensions) == 0 { - return nil - } - extensions := make([]ThresholdExtensionSign, len(protoExtensions)) - for i, ext := range protoExtensions { - extensions[i] = ThresholdExtensionSign{ - Extension: ext.Extension, - ThresholdSignature: ext.Signature, - } - } - return extensions -} - -// ThresholdExtensionSignToProto transforms a list of domain ThresholdExtensionSign -// into the list of protobuf VoteExtension -func ThresholdExtensionSignToProto(extensions []ThresholdExtensionSign) []*tmproto.VoteExtension { - if len(extensions) == 0 { - return nil - } - protoExtensions := make([]*tmproto.VoteExtension, len(extensions)) - for i, ext := range extensions { - protoExtensions[i] = &tmproto.VoteExtension{ - Extension: ext.Extension, - Signature: ext.ThresholdSignature, - } - } - return protoExtensions -} - -// MakeThresholdVoteExtensions creates a list of ThresholdExtensionSign from the list of VoteExtension -// and recovered threshold signatures. The lengths of vote-extensions and threshold signatures must be the same -func MakeThresholdVoteExtensions(extensions []VoteExtension, thresholdSigs [][]byte) []ThresholdExtensionSign { - thresholdExtensions := make([]ThresholdExtensionSign, len(extensions)) - for i, ext := range extensions { - thresholdExtensions[i] = ThresholdExtensionSign{ - Extension: ext.Extension, - ThresholdSignature: thresholdSigs[i], - } - } - return thresholdExtensions -} - -// QuorumSingsVerifier ... 
-type QuorumSingsVerifier struct { - QuorumSignData - shouldVerifyBlock bool - shouldVerifyVoteExtensions bool - logger log.Logger -} - -// WithVerifyExtensions sets a flag that tells QuorumSingsVerifier to verify vote-extension signatures or not -func WithVerifyExtensions(shouldVerify bool) func(*QuorumSingsVerifier) { - return func(verifier *QuorumSingsVerifier) { - verifier.shouldVerifyVoteExtensions = shouldVerify - } -} - -// WithVerifyBlock sets a flag that tells QuorumSingsVerifier to verify block signature or not -func WithVerifyBlock(shouldVerify bool) func(*QuorumSingsVerifier) { - return func(verifier *QuorumSingsVerifier) { - verifier.shouldVerifyBlock = shouldVerify - } -} - -// WithVerifyReachedQuorum sets a flag that tells QuorumSingsVerifier to verify -// vote-extension and stateID signatures or not -func WithVerifyReachedQuorum(quorumReached bool) func(*QuorumSingsVerifier) { - return func(verifier *QuorumSingsVerifier) { - verifier.shouldVerifyVoteExtensions = quorumReached - } -} - -// WithLogger sets a logger -func WithLogger(logger log.Logger) func(*QuorumSingsVerifier) { - return func(verifier *QuorumSingsVerifier) { - verifier.logger = logger - } -} - -// NewQuorumSignsVerifier creates and returns an instance of QuorumSingsVerifier that is used for verification -// quorum signatures -func NewQuorumSignsVerifier(quorumData QuorumSignData, opts ...func(*QuorumSingsVerifier)) *QuorumSingsVerifier { - verifier := &QuorumSingsVerifier{ - QuorumSignData: quorumData, - shouldVerifyBlock: true, - shouldVerifyVoteExtensions: true, - logger: log.NewNopLogger(), + sigs := make([][]byte, 0, len(commit.ThresholdVoteExtensions)) + for _, ext := range commit.ThresholdVoteExtensions { + sigs = append(sigs, ext.Signature) } - for _, opt := range opts { - opt(verifier) - } - return verifier -} -// Verify verifies quorum data using public key and passed signatures -func (q *QuorumSingsVerifier) Verify(pubKey crypto.PubKey, signs QuorumSigns) error { - err := q.verifyBlock(pubKey, signs) - if err != nil { - return err - } - return q.verifyVoteExtensions(pubKey, signs) -} - -func (q *QuorumSingsVerifier) verifyBlock(pubKey crypto.PubKey, signs QuorumSigns) error { - if !q.shouldVerifyBlock { - return nil - } - if !pubKey.VerifySignatureDigest(q.Block.ID, signs.BlockSign) { - return fmt.Errorf( - "threshold block signature is invalid: (%X) signID=%X: %w", - q.Block.Raw, - q.Block.ID, - ErrVoteInvalidBlockSignature, - ) - } - return nil -} - -func (q *QuorumSingsVerifier) verifyVoteExtensions( - pubKey crypto.PubKey, - signs QuorumSigns, -) error { - if !q.shouldVerifyVoteExtensions { - return nil - } - sings := signs.ExtensionSigns - signItems := q.Extensions[tmproto.VoteExtensionType_THRESHOLD_RECOVER] - if len(signItems) == 0 { - return nil - } - if len(signItems) != len(sings) { - return fmt.Errorf("count of threshold vote extension signatures (%d) doesn't match with recoverable vote extensions (%d)", - len(sings), len(signItems), - ) - } - for i, ext := range sings { - if !pubKey.VerifySignatureDigest(signItems[i].ID, ext.ThresholdSignature) { - return fmt.Errorf("threshold vote-extension signature is invalid (%d) %X", - i, signItems[i].Raw) - } + return QuorumSigns{ + BlockSign: commit.ThresholdBlockSignature, + VoteExtensionSignatures: sigs, } - return nil } diff --git a/types/quorum_sign_data.go b/types/quorum_sign_data.go index 9607131d6e..1bd51c6224 100644 --- a/types/quorum_sign_data.go +++ b/types/quorum_sign_data.go @@ -1,10 +1,12 @@ package types import ( - "errors" + "bytes" 
"fmt" + bls "github.com/dashpay/bls-signatures/go-bindings" "github.com/dashpay/dashd-go/btcjson" + "github.com/hashicorp/go-multierror" "github.com/rs/zerolog" "github.com/dashpay/tenderdash/crypto" @@ -12,42 +14,80 @@ import ( "github.com/dashpay/tenderdash/proto/tendermint/types" ) -var ( - errUnexpectedVoteType = errors.New("unexpected vote extension - vote extensions are only allowed in precommits") -) - // QuorumSignData holds data which is necessary for signing and verification block, state, and each vote-extension in a list type QuorumSignData struct { - Block SignItem - Extensions map[types.VoteExtensionType][]SignItem + Block SignItem + VoteExtensionSignItems []SignItem } -// Verify verifies a quorum signatures: block, state and vote-extensions -func (q QuorumSignData) Verify(pubKey crypto.PubKey, signs QuorumSigns) error { - return NewQuorumSignsVerifier(q).Verify(pubKey, signs) +// Signs items inside QuorumSignData using a given private key. +// +// Mainly for testing. +func (q QuorumSignData) SignWithPrivkey(key crypto.PrivKey) (QuorumSigns, error) { + var err error + var signs QuorumSigns + if signs.BlockSign, err = key.SignDigest(q.Block.SignHash); err != nil { + return signs, err + } + + signs.VoteExtensionSignatures = make([][]byte, 0, len(q.VoteExtensionSignItems)) + for _, item := range q.VoteExtensionSignItems { + var sign []byte + if sign, err = key.SignDigest(item.SignHash); err != nil { + return signs, err + } + signs.VoteExtensionSignatures = append(signs.VoteExtensionSignatures, sign) + } + + return signs, nil } -// SignItem represents quorum sign data, like a request id, message bytes, sha256 hash of message and signID -type SignItem struct { - ReqID []byte // Request ID for quorum signing - ID []byte // Signature ID - Raw []byte // Raw data to be signed - Hash []byte // Checksum of Raw +// Verify verifies a block and threshold vote extensions quorum signatures. +// It needs quorum to be reached so that we have enough signatures to verify. 
+func (q QuorumSignData) Verify(pubKey crypto.PubKey, signatures QuorumSigns) error { + var err error + if err1 := q.VerifyBlock(pubKey, signatures); err1 != nil { + err = multierror.Append(err, err1) + } + + if err1 := q.VerifyVoteExtensions(pubKey, signatures); err1 != nil { + err = multierror.Append(err, err1) + } + + return err } -// Validate validates prepared data for signing -func (i *SignItem) Validate() error { - if len(i.ReqID) != crypto.DefaultHashSize { - return fmt.Errorf("invalid request ID size: %X", i.ReqID) +// VerifyBlock verifies block signature +func (q QuorumSignData) VerifyBlock(pubKey crypto.PubKey, signatures QuorumSigns) error { + if !q.Block.VerifySignature(pubKey, signatures.BlockSign) { + return ErrVoteInvalidBlockSignature } + return nil } -func (i SignItem) MarshalZerologObject(e *zerolog.Event) { - e.Hex("signBytes", i.Raw) - e.Hex("signRequestID", i.ReqID) - e.Hex("signID", i.ID) - e.Hex("signHash", i.Hash) +// VerifyVoteExtensions verifies threshold vote extensions signatures +func (q QuorumSignData) VerifyVoteExtensions(pubKey crypto.PubKey, signatures QuorumSigns) error { + if len(q.VoteExtensionSignItems) != len(signatures.VoteExtensionSignatures) { + return fmt.Errorf("count of vote extension signatures (%d) doesn't match recoverable vote extensions (%d)", + len(signatures.VoteExtensionSignatures), len(q.VoteExtensionSignItems), + ) + } + + var err error + for i, signItem := range q.VoteExtensionSignItems { + if !signItem.VerifySignature(pubKey, signatures.VoteExtensionSignatures[i]) { + err = multierror.Append(err, fmt.Errorf("vote-extension %d signature is invalid: pubkey %X, raw msg: %X, sigHash: %X, signature %X", + i, + pubKey.Bytes(), + signItem.Msg, + signItem.MsgHash, + signatures.VoteExtensionSignatures[i], + )) + } + } + + return err } // MakeQuorumSignsWithVoteSet creates and returns QuorumSignData struct built with a vote-set and an added vote @@ -72,7 +112,12 @@ func MakeQuorumSigns( Block: MakeBlockSignItem(chainID, protoVote, quorumType, quorumHash), } var err error - quorumSign.Extensions, err = MakeVoteExtensionSignItems(chainID, protoVote, quorumType, quorumHash) + quorumSign.VoteExtensionSignItems, err = + VoteExtensionsFromProto(protoVote.VoteExtensions...). + Filter(func(ext VoteExtensionIf) bool { + return ext.IsThresholdRecoverable() + }). 
+ SignItems(chainID, quorumType, quorumHash, protoVote.Height, protoVote.Round) if err != nil { return QuorumSignData{}, err } @@ -94,52 +139,123 @@ func BlockRequestID(height int64, round int32) []byte { return heightRoundRequestID("dpbvote", height, round) } -// MakeVoteExtensionSignItems creates a list SignItem structs for a vote extensions -func MakeVoteExtensionSignItems( - chainID string, - protoVote *types.Vote, - quorumType btcjson.LLMQType, - quorumHash []byte, -) (map[types.VoteExtensionType][]SignItem, error) { - // We only sign vote extensions for precommits - if protoVote.Type != types.PrecommitType { - if len(protoVote.VoteExtensions) > 0 { - return nil, errUnexpectedVoteType - } - return nil, nil - } - items := make(map[types.VoteExtensionType][]SignItem) - reqID := VoteExtensionRequestID(protoVote.Height, protoVote.Round) - protoExtensionsMap := protoVote.VoteExtensionsToMap() - for t, exts := range protoExtensionsMap { - if items[t] == nil && len(exts) > 0 { - items[t] = make([]SignItem, len(exts)) - } - for i, ext := range exts { - raw := VoteExtensionSignBytes(chainID, protoVote.Height, protoVote.Round, ext) - items[t][i] = NewSignItem(quorumType, quorumHash, reqID, raw) +// SignItem represents signing session data (in field SignItem.ID) that will be signed to get threshold signature share. +// Field names are the same as in Dash Core, but the meaning is different. +// See DIP-0007 +type SignItem struct { + LlmqType btcjson.LLMQType // Quorum type for which this sign item is created + ID []byte // Request ID for quorum signing + MsgHash []byte // Checksum of Raw + QuorumHash []byte // Quorum hash for which this sign item is created + + SignHash []byte // Hash of llmqType, quorumHash, id, and msgHash - as provided to crypto sign/verify functions + + Msg []byte // Raw data to be signed, before any transformations; optional +} + +// Validate validates prepared data for signing +func (i *SignItem) Validate() error { + if len(i.ID) != crypto.DefaultHashSize { + return fmt.Errorf("invalid request ID size: %X", i.ID) + } + if len(i.MsgHash) != crypto.DefaultHashSize { + return fmt.Errorf("invalid hash size %d: %X", len(i.MsgHash), i.MsgHash) + } + if len(i.QuorumHash) != crypto.DefaultHashSize { + return fmt.Errorf("invalid quorum hash size %d: %X", len(i.QuorumHash), i.QuorumHash) + } + // Msg is optional + if len(i.Msg) > 0 { + if !bytes.Equal(crypto.Checksum(i.Msg), i.MsgHash) { + return fmt.Errorf("invalid hash %X for raw data: %X", i.MsgHash, i.Msg) } } - return items, nil + return nil +} + +func (i SignItem) MarshalZerologObject(e *zerolog.Event) { + e.Hex("msg", i.Msg) + e.Hex("signRequestID", i.ID) + e.Hex("signID", i.SignHash) + e.Hex("msgHash", i.MsgHash) + e.Hex("quorumHash", i.QuorumHash) + e.Uint8("llmqType", uint8(i.LlmqType)) + } // NewSignItem creates a new instance of SignItem with calculating a hash for a raw and creating signID -func NewSignItem(quorumType btcjson.LLMQType, quorumHash, reqID, raw []byte) SignItem { - msgHash := crypto.Checksum(raw) - return SignItem{ - ReqID: reqID, - ID: MakeSignID(msgHash, reqID, quorumType, quorumHash), - Raw: raw, - Hash: msgHash, - } -} - -// MakeSignID creates singID -func MakeSignID(msgHash, reqID []byte, quorumType btcjson.LLMQType, quorumHash []byte) []byte { - return crypto.SignID( - quorumType, - tmbytes.Reverse(quorumHash), - tmbytes.Reverse(reqID), - tmbytes.Reverse(msgHash), - ) +// +// Arguments: +// - quorumType: quorum type +// - quorumHash: quorum hash +// - reqID: sign request ID +// - msg: raw data to be 
signed; it will be hashed with crypto.Checksum() +func NewSignItem(quorumType btcjson.LLMQType, quorumHash, reqID, msg []byte) SignItem { + msgHash := crypto.Checksum(msg) // FIXME: shouldn't we use sha256(sha256(raw)) here? + item := NewSignItemFromHash(quorumType, quorumHash, reqID, msgHash) + item.Msg = msg + + return item +} + +// Create a new sign item without raw value, using provided hash. +func NewSignItemFromHash(quorumType btcjson.LLMQType, quorumHash, reqID, msgHash []byte) SignItem { + item := SignItem{ + ID: reqID, + MsgHash: msgHash, + LlmqType: quorumType, + QuorumHash: quorumHash, + Msg: nil, // Raw is empty, as we don't have it + } + + // By default, reverse fields when calculating SignHash + item.UpdateSignHash(true) + + return item +} + +// UpdateSignHash recalculates signHash field +// If reverse is true, then all []byte elements will be reversed before +// calculating signID +func (i *SignItem) UpdateSignHash(reverse bool) { + if err := i.Validate(); err != nil { + panic("invalid sign item: " + err.Error()) + } + llmqType := i.LlmqType + + quorumHash := i.QuorumHash + requestID := i.ID + messageHash := i.MsgHash + + if reverse { + quorumHash = tmbytes.Reverse(quorumHash) + requestID = tmbytes.Reverse(requestID) + messageHash = tmbytes.Reverse(messageHash) + } + + var blsQuorumHash bls.Hash + copy(blsQuorumHash[:], quorumHash) + + var blsRequestID bls.Hash + copy(blsRequestID[:], requestID) + + var blsMessageHash bls.Hash + copy(blsMessageHash[:], messageHash) + + // fmt.Printf("LlmqType: %x + ", llmqType) + // fmt.Printf("QuorumHash: %x + ", blsQuorumHash) + // fmt.Printf("RequestID: %x + ", blsRequestID) + // fmt.Printf("MsgHash: %x\n", blsMessageHash) + + blsSignHash := bls.BuildSignHash(uint8(llmqType), blsQuorumHash, blsRequestID, blsMessageHash) + + signHash := make([]byte, 32) + copy(signHash, blsSignHash[:]) + + i.SignHash = signHash +} + +// VerifySignature verifies signature for a sign item +func (i *SignItem) VerifySignature(pubkey crypto.PubKey, sig []byte) bool { + return pubkey.VerifySignatureDigest(i.SignHash, sig) } diff --git a/types/quorum_sign_data_test.go b/types/quorum_sign_data_test.go index 54467ab895..422c3d96a5 100644 --- a/types/quorum_sign_data_test.go +++ b/types/quorum_sign_data_test.go @@ -1,6 +1,7 @@ package types import ( + "encoding/hex" "fmt" "testing" @@ -8,9 +9,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/dashpay/tenderdash/crypto" tmbytes "github.com/dashpay/tenderdash/libs/bytes" - "github.com/dashpay/tenderdash/libs/log" "github.com/dashpay/tenderdash/proto/tendermint/types" ) @@ -20,8 +19,10 @@ func TestBlockRequestID(t *testing.T) { assert.EqualValues(t, expected, got) } -func TestMakeBlockSignID(t *testing.T) { +func TestMakeBlockSignItem(t *testing.T) { const chainID = "dash-platform" + const quorumType = btcjson.LLMQType_5_60 + testCases := []struct { vote Vote quorumHash []byte @@ -40,88 +41,47 @@ func TestMakeBlockSignID(t *testing.T) { "DA25B746781DDF47B5D736F30B1D9D0CC86981EEC67CBE255265C4361DEF8C2E", "02000000E9030000000000000000000000000000E3B0C44298FC1C149AFBF4C8996FB92427AE41E4649B934CA495991B"+ "7852B855E3B0C44298FC1C149AFBF4C8996FB92427AE41E4649B934CA495991B7852B855646173682D706C6174666F726D", + "6A12D9CF7091D69072E254B297AEF15997093E480FDE295E09A7DE73B31CEEDD", + quorumType, ), wantHash: tmbytes.MustHexDecode("0CA3D5F42BDFED0C4FDE7E6DE0F046CC76CDA6CEE734D65E8B2EE0E375D4C57D"), }, } for i, tc := range testCases { t.Run(fmt.Sprintf("test-case %d", i), func(t 
*testing.T) { - signItem := MakeBlockSignItem(chainID, tc.vote.ToProto(), btcjson.LLMQType_5_60, tc.quorumHash) - t.Logf("hash %X id %X raw %X reqID %X", signItem.Hash, signItem.ID, signItem.Raw, signItem.ReqID) - require.Equal(t, tc.want, signItem) - require.Equal(t, tc.wantHash, signItem.Hash) + signItem := MakeBlockSignItem(chainID, tc.vote.ToProto(), quorumType, tc.quorumHash) + t.Logf("hash %X id %X raw %X reqID %X", signItem.MsgHash, signItem.SignHash, signItem.Msg, signItem.ID) + require.Equal(t, tc.want, signItem, "Got ID: %X", signItem.SignHash) + require.Equal(t, tc.wantHash, signItem.MsgHash) }) } } -func TestMakeVoteExtensionSignsData(t *testing.T) { - const chainID = "dash-platform" - logger := log.NewTestingLogger(t) - testCases := []struct { - vote Vote - quorumHash []byte - want map[types.VoteExtensionType][]SignItem - wantHash map[types.VoteExtensionType][][]byte - }{ - { - vote: Vote{ - Type: types.PrecommitType, - Height: 1001, - ValidatorProTxHash: tmbytes.MustHexDecode("9CC13F685BC3EA0FCA99B87F42ABCC934C6305AA47F62A32266A2B9D55306B7B"), - VoteExtensions: VoteExtensions{ - types.VoteExtensionType_DEFAULT: []VoteExtension{{Extension: []byte("default")}}, - types.VoteExtensionType_THRESHOLD_RECOVER: []VoteExtension{{Extension: []byte("threshold")}}, - }, - }, - quorumHash: tmbytes.MustHexDecode("6A12D9CF7091D69072E254B297AEF15997093E480FDE295E09A7DE73B31CEEDD"), - want: map[types.VoteExtensionType][]SignItem{ - types.VoteExtensionType_DEFAULT: { - newSignItem( - "FB95F2CA6530F02AC623589D7938643FF22AE79A75DD79AEA1C8871162DE675E", - "533524404D3A905F5AC9A30FCEB5A922EAD96F30DA02F979EE41C4342F540467", - "210A0764656661756C7411E903000000000000220D646173682D706C6174666F726D", - ), - }, - types.VoteExtensionType_THRESHOLD_RECOVER: { - newSignItem( - "fb95f2ca6530f02ac623589d7938643ff22ae79a75dd79aea1c8871162de675e", - "d3b7d53a0f9ca8072d47d6c18e782ee3155ef8dcddb010087030b6cbc63978bc", - "250a097468726573686f6c6411e903000000000000220d646173682d706c6174666f726d2801", - ), - }, - }, - wantHash: map[types.VoteExtensionType][][]byte{ - types.VoteExtensionType_DEFAULT: { - tmbytes.MustHexDecode("61519D79DE4C4D5AC5DD210C1BCE81AA24F76DD5581A24970E60112890C68FB7"), - }, - types.VoteExtensionType_THRESHOLD_RECOVER: { - tmbytes.MustHexDecode("46C72C423B74034E1AF574A99091B017C0698FEAA55C8B188BFD512FCADD3143"), - }, - }, - }, - } - for i, tc := range testCases { - t.Run(fmt.Sprintf("test-case #%d", i), func(t *testing.T) { - signItems, err := MakeVoteExtensionSignItems(chainID, tc.vote.ToProto(), btcjson.LLMQType_5_60, tc.quorumHash) - require.NoError(t, err) - for et, signs := range signItems { - for i, sign := range signs { - assert.Equal(t, tc.wantHash[et][i], sign.Hash, "want %X, actual %X", tc.wantHash[et][i], sign.Hash) - if !assert.Equal(t, tc.want[et][i], sign) { - logger.Error("invalid sign", "sign", sign, "type", et, "i", i) - } - } - } - }) +func newSignItem(reqID, signHash, raw, quorumHash string, quorumType btcjson.LLMQType) SignItem { + item := NewSignItem(quorumType, tmbytes.MustHexDecode(quorumHash), tmbytes.MustHexDecode(reqID), tmbytes.MustHexDecode(raw)) + item.SignHash = tmbytes.MustHexDecode(signHash) + return item +} + +func TestQuorumSignItem(t *testing.T) { + + si := SignItem{ + ID: mustHexDecode("87cda9461081793e7e31ab1def8ffbd453775a0f9987304598398d42a78d68d4"), + MsgHash: mustHexDecode("5ef9b9eecc4df7c5aee677c0a72816f4515999a539003cf4bbb6c15c39634c31"), + LlmqType: 106, + QuorumHash: mustHexDecode("366f07c9b80a2661563a33c09f02156720159b911186b4438ff281e537674771"), 
} + si.UpdateSignHash(true) + + expectID := tmbytes.Reverse(mustHexDecode("94635358f4c75a1d0b38314619d1c5d9a16f12961b5314d857e04f2eb61d78d2")) + + assert.EqualValues(t, expectID, si.SignHash) } -func newSignItem(reqID, ID, raw string) SignItem { - item := SignItem{ - ReqID: tmbytes.MustHexDecode(reqID), - ID: tmbytes.MustHexDecode(ID), - Raw: tmbytes.MustHexDecode(raw), +func mustHexDecode(s string) []byte { + b, err := hex.DecodeString(s) + if err != nil { + panic(err) } - item.Hash = crypto.Checksum(item.Raw) - return item + return b } diff --git a/types/signs_recoverer.go b/types/signs_recoverer.go index d9e7afdfa9..f84e9e1edc 100644 --- a/types/signs_recoverer.go +++ b/types/signs_recoverer.go @@ -4,7 +4,7 @@ import ( "fmt" "github.com/dashpay/tenderdash/crypto/bls12381" - tmproto "github.com/dashpay/tenderdash/proto/tendermint/types" + "github.com/dashpay/tenderdash/proto/tendermint/types" ) // SignsRecoverer is used to recover threshold block, state, and vote-extension signatures @@ -13,8 +13,11 @@ type SignsRecoverer struct { blockSigs [][]byte stateSigs [][]byte validatorProTxHashes [][]byte - voteExts [][]byte - voteExtSigs [][][]byte + // List of all vote extensions. Order matters. + voteExtensions VoteExtensions + + // true when the recovery of vote extensions was already executed + voteExtensionsRecovered bool quorumReached bool } @@ -55,11 +58,32 @@ func (v *SignsRecoverer) Recover() (*QuorumSigns, error) { return thresholdSigns, nil } +// Helper function that returns deep copy of recovered vote extensions with signatures from QuorumSigns. +// +// Note that this method doesn't recover threshold signatures. +// It requires to call Recover() method first. +// +// ## Panics +// +// Panics when the count of threshold vote extension signatures in QuorumSigns doesn't match recoverable vote extensions +func (v *SignsRecoverer) GetVoteExtensions(qs QuorumSigns) VoteExtensions { + if len(qs.VoteExtensionSignatures) != len(v.voteExtensions) { + panic(fmt.Sprintf("count of threshold vote extension signatures (%d) doesn't match recoverable vote extensions (%d)", + len(qs.VoteExtensionSignatures), len(v.voteExtensions))) + } + exts := v.voteExtensions.Copy() + for i, ext := range exts { + ext.SetSignature(qs.VoteExtensionSignatures[i]) + } + + return exts +} + func (v *SignsRecoverer) init(votes []*Vote) { v.blockSigs = nil v.stateSigs = nil v.validatorProTxHashes = nil - v.voteExtSigs = nil + for _, vote := range votes { v.addVoteSigs(vote) } @@ -69,19 +93,41 @@ func (v *SignsRecoverer) addVoteSigs(vote *Vote) { if vote == nil { return } + v.blockSigs = append(v.blockSigs, vote.BlockSignature) v.validatorProTxHashes = append(v.validatorProTxHashes, vote.ValidatorProTxHash) - v.addVoteExtensions(vote.VoteExtensions) + v.addVoteExtensionSigs(vote) } -func (v *SignsRecoverer) addVoteExtensions(voteExtensions VoteExtensions) { - extensions := voteExtensions[tmproto.VoteExtensionType_THRESHOLD_RECOVER] - for i, ext := range extensions { - if len(extensions) > len(v.voteExtSigs) { - v.voteExts = append(v.voteExts, ext.Extension) - v.voteExtSigs = append(v.voteExtSigs, nil) +// Add threshold-recovered vote extensions +func (v *SignsRecoverer) addVoteExtensionSigs(vote *Vote) { + if len(vote.VoteExtensions) == 0 { + return + } + + // initialize vote extensions + if v.voteExtensions.IsEmpty() { + v.voteExtensions = vote.VoteExtensions.Copy() + } + + // sanity check; this should be detected on higher layers + if vote.Type != types.PrecommitType || vote.BlockID.IsNil() { + panic(fmt.Sprintf("only 
non-nil precommits can have vote extensions, got: %s", vote.String())) + } + + if len(vote.VoteExtensions) != len(v.voteExtensions) { + panic(fmt.Sprintf("received vote extensions with different length: current %d, received %d", + len(v.voteExtensions), len(vote.VoteExtensions))) + } + + // append signatures from this vote to each extension + for i, ext := range vote.VoteExtensions { + if recoverable, ok := (v.voteExtensions[i]).(ThresholdVoteExtensionIf); ok { + if err := recoverable.AddThresholdSignature(vote.ValidatorProTxHash, ext.GetSignature()); err != nil { + panic(fmt.Errorf("failed to add vote %s to recover vote extension threshold sig: %w", vote.String(), err)) + } + v.voteExtensions[i] = recoverable } - v.voteExtSigs[i] = append(v.voteExtSigs[i], ext.Signature) } } @@ -94,20 +140,32 @@ func (v *SignsRecoverer) recoverBlockSig(thresholdSigns *QuorumSigns) error { return nil } -func (v *SignsRecoverer) recoverVoteExtensionSigs(thresholdSigns *QuorumSigns) error { +// recoverVoteExtensionSigs recovers threshold signatures for vote-extensions +func (v *SignsRecoverer) recoverVoteExtensionSigs(quorumSigs *QuorumSigns) error { if !v.quorumReached { return nil } - var err error - thresholdSigns.ExtensionSigns = make([]ThresholdExtensionSign, len(v.voteExtSigs)) - for i, sigs := range v.voteExtSigs { - if len(sigs) > 0 { - thresholdSigns.ExtensionSigns[i].Extension = v.voteExts[i] - thresholdSigns.ExtensionSigns[i].ThresholdSignature, err = bls12381.RecoverThresholdSignatureFromShares(sigs, v.validatorProTxHashes) + + if quorumSigs.VoteExtensionSignatures == nil { + quorumSigs.VoteExtensionSignatures = make([][]byte, len(v.voteExtensions)) + } + + if len(v.voteExtensions) != len(quorumSigs.VoteExtensionSignatures) { + return fmt.Errorf("count of threshold vote extension signatures (%d) doesn't match recoverable vote extensions (%d)", + len(quorumSigs.VoteExtensionSignatures), len(v.voteExtensions)) + } + + for i, ext := range v.voteExtensions { + if extension, ok := ext.(ThresholdVoteExtensionIf); ok { + sig, err := extension.ThresholdRecover() if err != nil { - return fmt.Errorf("error recovering threshold vote-extension sig: %w", err) + return fmt.Errorf("error recovering threshold signature for vote extension %d: %w", i, err) } + quorumSigs.VoteExtensionSignatures[i] = sig } } + + v.voteExtensionsRecovered = true + return nil } diff --git a/types/signs_recoverer_test.go b/types/signs_recoverer_test.go index e805df56f8..940197fe79 100644 --- a/types/signs_recoverer_test.go +++ b/types/signs_recoverer_test.go @@ -23,7 +23,8 @@ func TestSigsRecoverer(t *testing.T) { quorumType := crypto.SmallQuorumType() quorumHash := crypto.RandQuorumHash() testCases := []struct { - votes []*Vote + expectInvalidSig bool + votes []*Vote }{ { votes: []*Vote{ @@ -32,8 +33,8 @@ func TestSigsRecoverer(t *testing.T) { Type: tmproto.PrecommitType, BlockID: blockID, VoteExtensions: mockVoteExtensions(t, - tmproto.VoteExtensionType_DEFAULT, "default", tmproto.VoteExtensionType_THRESHOLD_RECOVER, "threshold", + tmproto.VoteExtensionType_THRESHOLD_RECOVER_RAW, crypto.Checksum([]byte("threshold-raw")), ), }, { @@ -41,7 +42,75 @@ func TestSigsRecoverer(t *testing.T) { Type: tmproto.PrecommitType, BlockID: blockID, VoteExtensions: mockVoteExtensions(t, - tmproto.VoteExtensionType_DEFAULT, "default", + tmproto.VoteExtensionType_THRESHOLD_RECOVER, "threshold", + tmproto.VoteExtensionType_THRESHOLD_RECOVER_RAW, crypto.Checksum([]byte("threshold-raw")), + ), + }, + }, + }, + { + votes: []*Vote{ + { + ValidatorProTxHash: 
crypto.RandProTxHash(), + Type: tmproto.PrecommitType, + BlockID: blockID, + VoteExtensions: mockVoteExtensions(t, + tmproto.VoteExtensionType_THRESHOLD_RECOVER_RAW, crypto.Checksum([]byte("threshold-raw")), + tmproto.VoteExtensionType_THRESHOLD_RECOVER, "threshold", + ), + }, + { + ValidatorProTxHash: crypto.RandProTxHash(), + Type: tmproto.PrecommitType, + BlockID: blockID, + VoteExtensions: mockVoteExtensions(t, + tmproto.VoteExtensionType_THRESHOLD_RECOVER_RAW, crypto.Checksum([]byte("threshold-raw")), + tmproto.VoteExtensionType_THRESHOLD_RECOVER, "threshold", + ), + }, + }, + }, + { + expectInvalidSig: true, + votes: []*Vote{ + { + ValidatorProTxHash: crypto.RandProTxHash(), + Type: tmproto.PrecommitType, + BlockID: blockID, + VoteExtensions: mockVoteExtensions(t, + tmproto.VoteExtensionType_THRESHOLD_RECOVER_RAW, crypto.Checksum([]byte("threshold-raw")), + tmproto.VoteExtensionType_THRESHOLD_RECOVER, "threshold", + ), + }, + { + ValidatorProTxHash: crypto.RandProTxHash(), + Type: tmproto.PrecommitType, + BlockID: blockID, + VoteExtensions: mockVoteExtensions(t, + tmproto.VoteExtensionType_THRESHOLD_RECOVER_RAW, crypto.Checksum([]byte("threshold-raw")), + tmproto.VoteExtensionType_THRESHOLD_RECOVER, "threshold1", + ), + }, + }, + }, + { + expectInvalidSig: true, + votes: []*Vote{ + { + ValidatorProTxHash: crypto.RandProTxHash(), + Type: tmproto.PrecommitType, + BlockID: blockID, + VoteExtensions: mockVoteExtensions(t, + tmproto.VoteExtensionType_THRESHOLD_RECOVER_RAW, crypto.Checksum([]byte("threshold-raw1")), + tmproto.VoteExtensionType_THRESHOLD_RECOVER, "threshold", + ), + }, + { + ValidatorProTxHash: crypto.RandProTxHash(), + Type: tmproto.PrecommitType, + BlockID: blockID, + VoteExtensions: mockVoteExtensions(t, + tmproto.VoteExtensionType_THRESHOLD_RECOVER_RAW, crypto.Checksum([]byte("threshold-raw")), tmproto.VoteExtensionType_THRESHOLD_RECOVER, "threshold", ), }, @@ -78,7 +147,11 @@ func TestSigsRecoverer(t *testing.T) { thresholdVoteSigns, err := sr.Recover() require.NoError(t, err) err = quorumSigns.Verify(thresholdPubKey, *thresholdVoteSigns) - require.NoError(t, err) + if tc.expectInvalidSig { + require.Error(t, err) + } else { + require.NoError(t, err) + } }) } } @@ -108,7 +181,7 @@ func TestSigsRecoverer_UsingVoteSet(t *testing.T) { Type: tmproto.PrecommitType, BlockID: blockID, VoteExtensions: mockVoteExtensions(t, - tmproto.VoteExtensionType_DEFAULT, "default", + tmproto.VoteExtensionType_THRESHOLD_RECOVER, "default", tmproto.VoteExtensionType_THRESHOLD_RECOVER, "threshold", ), } @@ -130,18 +203,20 @@ func TestSigsRecoverer_UsingVoteSet(t *testing.T) { // the format of pairs is // 1. the length of pairs must be even // 2. 
each pair consist of 2 elements: type and extension value -// example: types.VoteExtensionType_DEFAULT, "defailt", types.VoteExtensionType_THRESHOLD_RECOVER, "threshold" +// example: types.VoteExtensionType_THRESHOLD_RECOVER, "defailt", types.VoteExtensionType_THRESHOLD_RECOVER, "threshold" func mockVoteExtensions(t *testing.T, pairs ...interface{}) VoteExtensions { if len(pairs)%2 != 0 { t.Fatalf("the pairs length must be even") } - ve := make(VoteExtensions) + ve := make(VoteExtensions, 0) for i := 0; i < len(pairs); i += 2 { - et, ok := pairs[i].(tmproto.VoteExtensionType) + extensionType, ok := pairs[i].(tmproto.VoteExtensionType) if !ok { t.Fatalf("given unsupported type %T", pairs[i]) } - ext := VoteExtension{} + ext := tmproto.VoteExtension{ + Type: extensionType, + } switch v := pairs[i+1].(type) { case string: ext.Extension = []byte(v) @@ -150,7 +225,8 @@ func mockVoteExtensions(t *testing.T, pairs ...interface{}) VoteExtensions { default: t.Fatalf("given unsupported type %T", pairs[i+1]) } - ve[et] = append(ve[et], ext) + ve.Add(ext) + } return ve } diff --git a/types/test_util.go b/types/test_util.go index 1f0ca2b7f7..0418d1f082 100644 --- a/types/test_util.go +++ b/types/test_util.go @@ -45,10 +45,14 @@ func makeCommit( Round: round, Type: tmproto.PrecommitType, BlockID: blockID, - VoteExtensions: VoteExtensions{ - tmproto.VoteExtensionType_DEFAULT: []VoteExtension{{Extension: []byte("default")}}, - tmproto.VoteExtensionType_THRESHOLD_RECOVER: []VoteExtension{{Extension: []byte("threshold")}}, - }, + VoteExtensions: VoteExtensionsFromProto( + &tmproto.VoteExtension{ + Type: tmproto.VoteExtensionType_THRESHOLD_RECOVER_RAW, + Extension: crypto.Checksum([]byte("raw"))}, + &tmproto.VoteExtension{ + Type: tmproto.VoteExtensionType_THRESHOLD_RECOVER, + Extension: []byte("threshold")}, + ), } _, err = signAddVote(ctx, validators[i], vote, voteSet) @@ -68,9 +72,11 @@ func signAddVote(ctx context.Context, privVal PrivValidator, vote *Vote, voteSet if err != nil { return false, err } + err = vote.PopulateSignsFromProto(v) if err != nil { return false, err } + return voteSet.AddVote(vote) } diff --git a/types/tx.go b/types/tx.go index 6c5abb898a..27688ea9f5 100644 --- a/types/tx.go +++ b/types/tx.go @@ -7,6 +7,8 @@ import ( "fmt" "sort" + "github.com/rs/zerolog" + abci "github.com/dashpay/tenderdash/abci/types" "github.com/dashpay/tenderdash/crypto" "github.com/dashpay/tenderdash/crypto/merkle" @@ -14,6 +16,9 @@ import ( tmproto "github.com/dashpay/tenderdash/proto/tendermint/types" ) +// maxLoggedTxs is the maximum number of transactions to log in a Txs object. +const maxLoggedTxs = 20 + // Tx is an arbitrary byte array. // NOTE: Tx has no types at this level, so when wire encoded it's just length-prefixed. // Might we want types here ? @@ -103,6 +108,26 @@ func (txs Txs) ToSliceOfBytes() [][]byte { return txBzs } +func (txs Txs) MarshalZerologArray(e *zerolog.Array) { + if txs == nil { + return + } + + for i, tx := range txs { + if i >= maxLoggedTxs { + e.Str("...") + return + } + + e.Str(tx.Hash().ShortString()) + } +} + +func (txs Txs) MarshalZerologObject(e *zerolog.Event) { + e.Int("num_txs", len(txs)) + e.Array("hashes", txs) +} + // TxRecordSet contains indexes into an underlying set of transactions. // These indexes are useful for validating and working with a list of TxRecords // from the PrepareProposal response. @@ -120,7 +145,7 @@ type TxRecordSet struct { // in the list of TxRecords. 
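Aside: the new Txs zerolog helpers above (MarshalZerologArray / MarshalZerologObject) cap output at maxLoggedTxs hashes. A minimal usage sketch, assuming a plain zerolog logger writing to stdout; the logger wiring is illustrative and not part of this patch:

package main

import (
	"os"

	"github.com/rs/zerolog"

	"github.com/dashpay/tenderdash/types"
)

func main() {
	logger := zerolog.New(os.Stdout)
	txs := types.Txs{types.Tx("first tx"), types.Tx("second tx")}

	// Txs implements zerolog.LogObjectMarshaler: this emits num_txs plus a
	// "hashes" array of short hashes, truncated with "..." after maxLoggedTxs entries.
	logger.Info().Object("txs", txs).Msg("proposal transactions")
}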
included Txs - // added, unmodified, removed, and unknown are indexes for each of the actions + // added, unmodified, delayed, removed, and unknown are indexes for each of the actions // that may be supplied with a transaction. // // Because each transaction only has one action, it can be referenced by @@ -129,6 +154,7 @@ type TxRecordSet struct { added Txs unmodified Txs removed Txs + delayed Txs unknown Txs } @@ -156,6 +182,8 @@ func NewTxRecordSet(trs []*abci.TxRecord) TxRecordSet { txrSet.included = append(txrSet.included, txrSet.all[i]) case abci.TxRecord_REMOVED: txrSet.removed = append(txrSet.removed, txrSet.all[i]) + case abci.TxRecord_DELAYED: + txrSet.delayed = append(txrSet.delayed, txrSet.all[i]) } } return txrSet @@ -173,6 +201,11 @@ func (t TxRecordSet) RemovedTxs() []Tx { return t.removed } +// DelayedTxs returns the transactions that should be delivered in future blocks. +func (t TxRecordSet) DelayedTxs() []Tx { + return t.delayed +} + // Validate checks that the record set was correctly constructed from the original // list of transactions. func (t TxRecordSet) Validate(maxSizeBytes int64, otxs Txs) error { @@ -208,6 +241,7 @@ func (t TxRecordSet) Validate(maxSizeBytes int64, otxs Txs) error { // indexes can be preserved. addedCopy := sortedCopy(t.added) removedCopy := sortedCopy(t.removed) + delayedCopy := sortedCopy(t.delayed) unmodifiedCopy := sortedCopy(t.unmodified) var size int64 @@ -229,6 +263,9 @@ func (t TxRecordSet) Validate(maxSizeBytes int64, otxs Txs) error { if ix, ok := containsAll(otxsCopy, removedCopy); !ok { return fmt.Errorf("new transaction incorrectly marked as removed, transaction hash: %x", removedCopy[ix].Hash()) } + if ix, ok := containsAll(otxsCopy, delayedCopy); !ok { + return fmt.Errorf("new transaction incorrectly marked as delayed, transaction hash: %x", delayedCopy[ix].Hash()) + } if ix, ok := containsAny(otxsCopy, addedCopy); ok { return fmt.Errorf("existing transaction incorrectly marked as added, transaction hash: %x", addedCopy[ix].Hash()) } diff --git a/types/tx_test.go b/types/tx_test.go index 19bcdf18a6..7e52d50901 100644 --- a/types/tx_test.go +++ b/types/tx_test.go @@ -80,7 +80,7 @@ func TestValidateTxRecordSet(t *testing.T) { }, } txrSet := NewTxRecordSet(trs) - err := txrSet.Validate(9, []Tx{[]byte{10}}) + err := txrSet.Validate(9, []Tx{[]byte{10}, []byte{11}}) require.NoError(t, err) }) t.Run("should error on duplicate transactions with the same action", func(t *testing.T) { @@ -151,6 +151,17 @@ func TestValidateTxRecordSet(t *testing.T) { err := txrSet.Validate(100, []Tx{}) require.Error(t, err) }) + t.Run("should error on new transactions marked DELAYED", func(t *testing.T) { + trs := []*abci.TxRecord{ + { + Action: abci.TxRecord_DELAYED, + Tx: Tx([]byte{1, 2, 3, 4, 5}), + }, + } + txrSet := NewTxRecordSet(trs) + err := txrSet.Validate(100, []Tx{}) + require.Error(t, err) + }) t.Run("should error on existing transaction marked as ADDED", func(t *testing.T) { trs := []*abci.TxRecord{ { diff --git a/types/validation.go b/types/validation.go index 0dfd8aceba..4e4f39f695 100644 --- a/types/validation.go +++ b/types/validation.go @@ -51,7 +51,8 @@ func ValidateSignatureSize(keyType crypto.KeyType, h []byte) error { return nil } -func isTimely(timestamp time.Time, recvTime time.Time, sp SynchronyParams, round int32) bool { +// checkTimely returns 0 when message is timely, -1 when received too early, 1 when received too late. 
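A rough sketch of how the three-way result described in the comment above might be consumed. checkTimely is unexported, so this would have to sit in package types; the SynchronyParams values are invented for illustration and the helper name is hypothetical:

// Hypothetical helper inside package types (needs the "time" import).
func describeTimeliness(proposalTime, recvTime time.Time, round int32) string {
	sp := SynchronyParams{Precision: 500 * time.Millisecond, MessageDelay: 2 * time.Second}
	switch checkTimely(proposalTime, recvTime, sp, round) {
	case -1:
		return "too early" // received before proposalTime - Precision
	case 1:
		return "too late" // received after proposalTime + scaled MessageDelay + Precision
	default:
		return "timely"
	}
}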
+func checkTimely(timestamp time.Time, recvTime time.Time, sp SynchronyParams, round int32) int { // The message delay values are scaled as rounds progress. // Every 10 rounds, the message delay is doubled to allow consensus to // proceed in the case that the chosen value was too small for the given network conditions. @@ -72,8 +73,11 @@ func isTimely(timestamp time.Time, recvTime time.Time, sp SynchronyParams, round // rhs is `proposedBlockTime + MsgDelay + Precision` in the second inequality rhs := timestamp.Add(msgDelay).Add(sp.Precision) - if recvTime.Before(lhs) || recvTime.After(rhs) { - return false + if recvTime.Before(lhs) { + return -1 } - return true + if recvTime.After(rhs) { + return 1 + } + return 0 } diff --git a/types/validation_test.go b/types/validation_test.go index 8f5809cd44..f518d1f7ed 100644 --- a/types/validation_test.go +++ b/types/validation_test.go @@ -1,6 +1,7 @@ package types import ( + "bytes" "context" "testing" @@ -31,21 +32,24 @@ func TestValidatorSet_VerifyCommit_All(t *testing.T) { vote.ValidatorProTxHash = proTxHash v := vote.ToProto() - quorumSigns, err := MakeQuorumSigns(chainID, btcjson.LLMQType_5_60, quorumHash, v) + dataToSign, err := MakeQuorumSigns(chainID, btcjson.LLMQType_5_60, quorumHash, v) require.NoError(t, err) - blockSig, err := privKey.SignDigest(quorumSigns.Block.ID) + sig, err := dataToSign.SignWithPrivkey(privKey) + require.NoError(t, err) + + vote.BlockSignature = sig.BlockSign + err = vote.VoteExtensions.SetSignatures(sig.VoteExtensionSignatures) require.NoError(t, err) - vote.BlockSignature = blockSig commit := NewCommit(vote.Height, vote.Round, vote.BlockID, + vote.VoteExtensions, + &CommitSigns{ - QuorumSigns: QuorumSigns{ - BlockSign: blockSig, - }, - QuorumHash: quorumHash, + QuorumSigns: sig, + QuorumHash: quorumHash, }, ) @@ -65,8 +69,7 @@ func TestValidatorSet_VerifyCommit_All(t *testing.T) { expErr bool }{ {"good", chainID, vote.BlockID, vote.Height, commit, false}, - - {"threshold block signature is invalid", "EpsilonEridani", vote.BlockID, vote.Height, commit, true}, + {"invalid block signature", "EpsilonEridani", vote.BlockID, vote.Height, commit, true}, {"wrong block ID", chainID, makeBlockIDRandom(), vote.Height, commit, true}, { description: "invalid commit -- wrong block ID", @@ -81,15 +84,31 @@ func TestValidatorSet_VerifyCommit_All(t *testing.T) { expErr: true, }, {"wrong height", chainID, vote.BlockID, vote.Height - 1, commit, true}, - - {"threshold block signature is invalid", chainID, vote.BlockID, vote.Height, - NewCommit(vote.Height, vote.Round, vote.BlockID, &CommitSigns{QuorumHash: quorumHash}), true}, - - {"threshold block signature is invalid", chainID, vote.BlockID, vote.Height, + // block sign malformed + {"invalid block signature", chainID, vote.BlockID, vote.Height, + NewCommit(vote.Height, vote.Round, vote.BlockID, vote.VoteExtensions, + &CommitSigns{ + QuorumHash: quorumHash, + QuorumSigns: QuorumSigns{ + BlockSign: []byte("invalid block signature"), + VoteExtensionSignatures: sig.VoteExtensionSignatures, + }}), true}, + // quorum signs are replaced with vote2 non-threshold signature + {"invalid block signature", chainID, vote.BlockID, vote.Height, NewCommit(vote.Height, vote.Round, vote.BlockID, + vote.VoteExtensions, &CommitSigns{ QuorumHash: quorumHash, - QuorumSigns: QuorumSigns{BlockSign: vote2.BlockSignature}, + QuorumSigns: QuorumSigns{BlockSign: vote2.BlockSignature, VoteExtensionSignatures: vote2.VoteExtensions.GetSignatures()}, + }, + ), true}, + // quorum hash mismatch + {"wrong quorum 
hash", chainID, vote.BlockID, vote.Height, + NewCommit(vote.Height, vote.Round, vote.BlockID, + vote.VoteExtensions, + &CommitSigns{ + QuorumHash: bytes.Repeat([]byte{0xaa}, crypto.QuorumHashSize), + QuorumSigns: QuorumSigns{BlockSign: vote2.BlockSignature, VoteExtensionSignatures: vote2.VoteExtensions.GetSignatures()}, }, ), true}, } @@ -133,14 +152,19 @@ func TestValidatorSet_VerifyCommit_CheckThresholdSignatures(t *testing.T) { commit.ThresholdBlockSignature = v.BlockSignature err = valSet.VerifyCommit(chainID, blockID, h, commit) if assert.Error(t, err) { - assert.Contains(t, err.Error(), "threshold block signature is invalid") + assert.Contains(t, err.Error(), "invalid block signature") } + goodVote := voteSet.GetByIndex(0) recoverer := NewSignsRecoverer(voteSet.votes) thresholdSigns, err := recoverer.Recover() require.NoError(t, err) commit.ThresholdBlockSignature = thresholdSigns.BlockSign - commit.ThresholdVoteExtensions = thresholdSigns.ExtensionSigns + exts := goodVote.VoteExtensions.Copy() + for i, ext := range exts { + ext.SetSignature(thresholdSigns.VoteExtensionSignatures[i]) + } + commit.ThresholdVoteExtensions = exts.ToProto() err = valSet.VerifyCommit(chainID, blockID, h, commit) require.NoError(t, err) } diff --git a/types/validator_set.go b/types/validator_set.go index 01e0cf2925..94dae79198 100644 --- a/types/validator_set.go +++ b/types/validator_set.go @@ -100,21 +100,21 @@ func NewValidatorSet(valz []*Validator, newThresholdPublicKey crypto.PubKey, quo return vals } -// NewValidatorSetWithLocalNodeProTxHash initializes a ValidatorSet the same way as NewValidatorSet does, -// however it does allows to set the localNodeProTxHash to more easily identify if the validator set should have public -// keys. If the local node is part of the validator set the public keys must be present -func NewValidatorSetWithLocalNodeProTxHash( +// NewValidatorSetCheckPublicKeys initializes a ValidatorSet the same way as NewValidatorSet does, +// but determines if the public keys are present. 
+func NewValidatorSetCheckPublicKeys( valz []*Validator, newThresholdPublicKey crypto.PubKey, quorumType btcjson.LLMQType, quorumHash crypto.QuorumHash, - localNodeProTxHash crypto.ProTxHash, ) *ValidatorSet { - vals := NewValidatorSet(valz, newThresholdPublicKey, quorumType, quorumHash, false) - if vals.HasProTxHash(localNodeProTxHash) { - vals.HasPublicKeys = true + hasPublicKeys := true + for _, val := range valz { + if val.PubKey == nil || len(val.PubKey.Bytes()) == 0 { + hasPublicKeys = false + } } - return vals + return NewValidatorSet(valz, newThresholdPublicKey, quorumType, quorumHash, hasPublicKeys) } // NewEmptyValidatorSet initializes a ValidatorSet with no validators @@ -199,6 +199,9 @@ func (vals *ValidatorSet) ThresholdPublicKeyValid() error { return errors.New("threshold public key is wrong size") } if len(vals.Validators) == 1 && vals.HasPublicKeys { + if vals.Validators[0].PubKey == nil { + return errors.New("validator public key is not set") + } if !vals.Validators[0].PubKey.Equals(vals.ThresholdPublicKey) { return errors.New("incorrect threshold public key") } @@ -911,6 +914,11 @@ func (vals *ValidatorSet) VerifyCommit(chainID string, blockID BlockID, if err != nil { return err } + if !vals.QuorumHash.Equal(commit.QuorumHash) { + return fmt.Errorf("invalid commit -- wrong quorum hash: validator set uses %X, commit has %X", + vals.QuorumHash, commit.QuorumHash) + + } err = quorumSigns.Verify(vals.ThresholdPublicKey, NewQuorumSignsFromCommit(commit)) if err != nil { return fmt.Errorf("invalid commit signatures for quorum(type=%v, hash=%X), thresholdPubKey=%X: %w", @@ -988,7 +996,7 @@ func (vals *ValidatorSet) StringIndented(indent string) string { return "nil-ValidatorSet" } var valStrings []string - vals.Iterate(func(index int, val *Validator) bool { + vals.Iterate(func(_index int, val *Validator) bool { valStrings = append(valStrings, val.String()) return false }) @@ -1018,7 +1026,7 @@ func (vals *ValidatorSet) StringIndentedBasic(indent string) string { return "nil-ValidatorSet" } var valStrings []string - vals.Iterate(func(index int, val *Validator) bool { + vals.Iterate(func(_index int, val *Validator) bool { valStrings = append(valStrings, val.ShortStringBasic()) return false }) diff --git a/types/vote.go b/types/vote.go index bf6b19c205..ea844a1a46 100644 --- a/types/vote.go +++ b/types/vote.go @@ -11,7 +11,6 @@ import ( "github.com/dashpay/tenderdash/crypto" "github.com/dashpay/tenderdash/crypto/bls12381" - "github.com/dashpay/tenderdash/internal/libs/protoio" tmbytes "github.com/dashpay/tenderdash/libs/bytes" tmcons "github.com/dashpay/tenderdash/proto/tendermint/consensus" tmproto "github.com/dashpay/tenderdash/proto/tendermint/types" @@ -25,12 +24,6 @@ const ( MaxVoteBytesEd25519 int64 = 209 ) -// VoteExtensionTypes is a list of all possible vote-extension types -var VoteExtensionTypes = []tmproto.VoteExtensionType{ - tmproto.VoteExtensionType_DEFAULT, - tmproto.VoteExtensionType_THRESHOLD_RECOVER, -} - func MaxVoteBytesForKeyType(keyType crypto.KeyType) int64 { switch keyType { case crypto.Ed25519: @@ -42,19 +35,21 @@ func MaxVoteBytesForKeyType(keyType crypto.KeyType) int64 { } var ( - ErrVoteUnexpectedStep = errors.New("unexpected step") - ErrVoteInvalidValidatorIndex = errors.New("invalid validator index") - ErrVoteInvalidValidatorAddress = errors.New("invalid validator address") - ErrVoteInvalidSignature = errors.New("invalid signature") - ErrVoteInvalidBlockHash = errors.New("invalid block hash") - ErrVoteNonDeterministicSignature = 
errors.New("non-deterministic signature") - ErrVoteNil = errors.New("nil vote") - ErrVoteInvalidExtension = errors.New("invalid vote extension") - ErrVoteInvalidValidatorProTxHash = errors.New("invalid validator pro_tx_hash") - ErrVoteInvalidValidatorPubKeySize = errors.New("invalid validator public key size") - ErrVoteInvalidBlockSignature = errors.New("invalid block signature") - ErrVoteInvalidStateSignature = errors.New("invalid state signature") - ErrVoteStateSignatureShouldBeNil = errors.New("state signature when voting for nil block") + ErrVoteUnexpectedStep = errors.New("unexpected step") + ErrVoteInvalidValidatorIndex = errors.New("invalid validator index") + ErrVoteInvalidValidatorAddress = errors.New("invalid validator address") + ErrVoteInvalidSignature = errors.New("invalid signature") + ErrVoteInvalidBlockHash = errors.New("invalid block hash") + ErrVoteNonDeterministicSignature = errors.New("non-deterministic signature") + ErrVoteNil = errors.New("nil vote") + ErrVoteInvalidExtension = errors.New("invalid vote extension") + ErrVoteExtensionTypeWrongForRequestID = errors.New("provided vote extension type does not support sign request ID") + ErrVoteInvalidValidatorProTxHash = errors.New("invalid validator pro_tx_hash") + ErrVoteInvalidValidatorPubKeySize = errors.New("invalid validator public key size") + ErrVoteMissingValidatorPubKey = errors.New("missing validator public key") + ErrVoteInvalidBlockSignature = errors.New("invalid block signature") + ErrVoteInvalidStateSignature = errors.New("invalid state signature") + ErrVoteStateSignatureShouldBeNil = errors.New("state signature when voting for nil block") ) type ErrVoteConflictingVotes struct { @@ -108,33 +103,14 @@ func VoteFromProto(pv *tmproto.Vote) (*Vote, error) { ValidatorProTxHash: pv.ValidatorProTxHash, ValidatorIndex: pv.ValidatorIndex, BlockSignature: pv.BlockSignature, - VoteExtensions: VoteExtensionsFromProto(pv.VoteExtensions), + VoteExtensions: VoteExtensionsFromProto(pv.VoteExtensions...), }, nil } -// VoteExtensionSignBytes returns the proto-encoding of the canonicalized vote -// extension for signing. Panics if the marshaling fails. -// -// Similar to VoteSignBytes, the encoded Protobuf message is varint -// length-prefixed for backwards-compatibility with the Amino encoding. -func VoteExtensionSignBytes(chainID string, height int64, round int32, ext *tmproto.VoteExtension) []byte { - pb := CanonicalizeVoteExtension(chainID, ext, height, round) - bz, err := protoio.MarshalDelimited(&pb) - if err != nil { - panic(err) - } - return bz -} - -// VoteExtensionRequestID returns vote extension request ID -func VoteExtensionRequestID(height int64, round int32) []byte { - return heightRoundRequestID("dpevote", height, round) -} - // VoteBlockSignID returns signID that should be signed for the block func VoteBlockSignID(chainID string, vote *tmproto.Vote, quorumType btcjson.LLMQType, quorumHash []byte) []byte { signID := MakeBlockSignItem(chainID, vote, quorumType, quorumHash) - return signID.ID + return signID.SignHash } // Copy creates a deep copy of the vote @@ -197,11 +173,9 @@ func (vote *Vote) String() string { ) } -// VerifyVoteAndExtension performs the same verification as Verify, but -// additionally checks whether the vote extension signature corresponds to the -// given chain ID and public key. We only verify vote extension signatures for -// precommits. -func (vote *Vote) VerifyWithExtension( +// Verify performs vote signature verification. 
It checks whether the block signature +// and vote extensions signatures correspond to the given chain ID and public key. +func (vote *Vote) Verify( chainID string, quorumType btcjson.LLMQType, quorumHash crypto.QuorumHash, @@ -216,37 +190,27 @@ func (vote *Vote) VerifyWithExtension( if err != nil { return err } - return vote.verifySign(pubKey, quorumSignData, WithVerifyExtensions(vote.Type == tmproto.PrecommitType)) -} - -func (vote *Vote) Verify( - chainID string, - quorumType btcjson.LLMQType, - quorumHash []byte, - pubKey crypto.PubKey, - proTxHash crypto.ProTxHash, - stateID tmproto.StateID, -) error { - err := vote.verifyBasic(proTxHash, pubKey) - if err != nil { - return err - } - quorumSignData, err := MakeQuorumSigns(chainID, quorumType, quorumHash, vote.ToProto()) - if err != nil { - return err - } - return vote.verifySign(pubKey, quorumSignData, WithVerifyExtensions(false)) + return quorumSignData.Verify(pubKey, vote.makeQuorumSigns()) } func (vote *Vote) verifyBasic(proTxHash ProTxHash, pubKey crypto.PubKey) error { if !bytes.Equal(proTxHash, vote.ValidatorProTxHash) { return ErrVoteInvalidValidatorProTxHash } + + if pubKey == nil { + return ErrVoteMissingValidatorPubKey + } + if len(pubKey.Bytes()) != bls12381.PubKeySize { return ErrVoteInvalidValidatorPubKeySize } + if vote.Type != tmproto.PrecommitType && vote.VoteExtensions.Len() > 0 { + return ErrVoteInvalidExtension + } + return nil } @@ -260,29 +224,16 @@ func (vote *Vote) VerifyExtensionSign(chainID string, pubKey crypto.PubKey, quor if err != nil { return err } - verifier := NewQuorumSignsVerifier( - quorumSignData, - WithVerifyBlock(false), - ) - return verifier.Verify(pubKey, vote.makeQuorumSigns()) -} -func (vote *Vote) verifySign( - pubKey crypto.PubKey, - quorumSignData QuorumSignData, - opts ...func(verifier *QuorumSingsVerifier), -) error { - verifier := NewQuorumSignsVerifier( - quorumSignData, - opts..., - ) - return verifier.Verify(pubKey, vote.makeQuorumSigns()) + return quorumSignData.VerifyVoteExtensions(pubKey, vote.makeQuorumSigns()) } func (vote *Vote) makeQuorumSigns() QuorumSigns { return QuorumSigns{ - BlockSign: vote.BlockSignature, - ExtensionSigns: MakeThresholdExtensionSigns(vote.VoteExtensions), + BlockSign: vote.BlockSignature, + VoteExtensionSignatures: vote.VoteExtensions.Filter(func(ext VoteExtensionIf) bool { + return ext.IsThresholdRecoverable() + }).GetSignatures(), } } @@ -340,8 +291,7 @@ func (vote *Vote) ValidateBasic() error { } if vote.Type == tmproto.PrecommitType && !vote.BlockID.IsNil() { - err := vote.VoteExtensions.Validate() - if err != nil { + if err := vote.VoteExtensions.Validate(); err != nil { return err } } @@ -403,6 +353,7 @@ func (vote *Vote) MarshalZerologObject(e *zerolog.Event) { e.Str("val_proTxHash", vote.ValidatorProTxHash.ShortString()) e.Int32("val_index", vote.ValidatorIndex) e.Bool("nil", vote.BlockID.IsNil()) + e.Array("extensions", vote.VoteExtensions) } func (vote *Vote) HasVoteMessage() *tmcons.HasVote { diff --git a/types/vote_dash.go b/types/vote_dash.go index aae8f430b8..f2a5b8fb3d 100644 --- a/types/vote_dash.go +++ b/types/vote_dash.go @@ -5,23 +5,11 @@ import tmproto "github.com/dashpay/tenderdash/proto/tendermint/types" // PopulateSignsFromProto updates the signatures of the current Vote with values are taken from the Vote's protobuf func (vote *Vote) PopulateSignsFromProto(pv *tmproto.Vote) error { vote.BlockSignature = pv.BlockSignature - return vote.VoteExtensions.CopySignsFromProto(pv.VoteExtensionsToMap()) + return 
vote.VoteExtensions.CopySignsFromProto(pv.VoteExtensions) } // PopulateSignsToProto updates the signatures of the given protobuf Vote entity with values are taken from the current Vote's func (vote *Vote) PopulateSignsToProto(pv *tmproto.Vote) error { pv.BlockSignature = vote.BlockSignature - return vote.VoteExtensions.CopySignsToProto(pv.VoteExtensionsToMap()) -} - -// GetVoteExtensionsSigns returns the list of signatures for given vote-extension type -func (vote *Vote) GetVoteExtensionsSigns(extType tmproto.VoteExtensionType) [][]byte { - if vote.VoteExtensions == nil { - return nil - } - sigs := make([][]byte, len(vote.VoteExtensions[extType])) - for i, ext := range vote.VoteExtensions[extType] { - sigs[i] = ext.Signature - } - return sigs + return vote.VoteExtensions.CopySignsToProto(pv.VoteExtensions) } diff --git a/types/vote_extension.go b/types/vote_extension.go index dcce659a33..22e7a49bb8 100644 --- a/types/vote_extension.go +++ b/types/vote_extension.go @@ -2,206 +2,494 @@ package types import ( "bytes" + "crypto/sha256" "errors" "fmt" + "math/big" + + "github.com/dashpay/dashd-go/btcjson" + "github.com/hashicorp/go-multierror" + "github.com/rs/zerolog" abci "github.com/dashpay/tenderdash/abci/types" + "github.com/dashpay/tenderdash/crypto" + "github.com/dashpay/tenderdash/crypto/bls12381" + "github.com/dashpay/tenderdash/internal/libs/protoio" tmbytes "github.com/dashpay/tenderdash/libs/bytes" tmproto "github.com/dashpay/tenderdash/proto/tendermint/types" ) var ( - errExtensionSignEmpty = errors.New("vote extension signature is missing") - errExtensionSignTooBig = fmt.Errorf("vote extension signature is too big (max: %d)", SignatureSize) - errUnableCopySigns = errors.New("unable copy signatures the sizes of extensions are not equal") + errUnableCopySigns = errors.New("unable to copy signatures: the sizes of extensions are not equal") ) // VoteExtensions is a container where the key is vote-extension type and value is a list of VoteExtension -type VoteExtensions map[tmproto.VoteExtensionType][]VoteExtension +type VoteExtensions []VoteExtensionIf // NewVoteExtensionsFromABCIExtended returns vote-extensions container for given ExtendVoteExtension func NewVoteExtensionsFromABCIExtended(exts []*abci.ExtendVoteExtension) VoteExtensions { - voteExtensions := make(VoteExtensions) + voteExtensions := make(VoteExtensions, 0, len(exts)) + for _, ext := range exts { - voteExtensions.Add(ext.Type, ext.Extension) + ve := ext.ToVoteExtension() + voteExtensions.Add(ve) } return voteExtensions } -// Add creates and adds VoteExtension into a container by vote-extension type -func (e VoteExtensions) Add(t tmproto.VoteExtensionType, ext []byte) { - e[t] = append(e[t], VoteExtension{Extension: ext}) +// Add creates and adds protobuf VoteExtension into a container by vote-extension type +func (e *VoteExtensions) Add(ext tmproto.VoteExtension) { + *e = append(*e, VoteExtensionFromProto(ext)) +} + +// MakeVoteExtensionSignItems creates a list SignItem structs for a vote extensions +func (e VoteExtensions) SignItems( + chainID string, + quorumType btcjson.LLMQType, + quorumHash []byte, + height int64, + round int32, +) ([]SignItem, error) { + + items := make([]SignItem, 0, e.Len()) + + for _, ext := range e { + item, err := ext.SignItem(chainID, height, round, quorumType, quorumHash) + if err != nil { + return nil, err + } + + items = append(items, item) + } + + return items, nil +} + +func (e VoteExtensions) GetSignatures() [][]byte { + signatures := make([][]byte, 0, e.Len()) + + for _, ext := range 
e { + signatures = append(signatures, ext.GetSignature()) + } + + return signatures +} + +func (e VoteExtensions) GetExtensions() [][]byte { + exts := make([][]byte, 0, e.Len()) + + for _, ext := range e { + exts = append(exts, ext.GetExtension()) + } + + return exts } // Validate returns error if an added vote-extension is invalid func (e VoteExtensions) Validate() error { - for _, et := range VoteExtensionTypes { - for _, ext := range e[et] { - err := ext.Validate() - if err != nil { - return err - } + var errs error + + for i, ext := range e { + if err := ext.Validate(); err != nil { + errs = multierror.Append(errs, fmt.Errorf("invalid %s vote extension %d: %w", ext.ToProto().Type, i, err)) } } - return nil + + return errs } // IsEmpty returns true if a vote-extension container is empty, otherwise false func (e VoteExtensions) IsEmpty() bool { - for _, exts := range e { - if len(exts) > 0 { - return false - } - } - return true + return len(e) == 0 } // ToProto transforms the current state of vote-extension container into VoteExtensions's protobuf func (e VoteExtensions) ToProto() []*tmproto.VoteExtension { - extensions := make([]*tmproto.VoteExtension, 0, e.totalCount()) - for _, t := range VoteExtensionTypes { - for _, ext := range e[t] { - extensions = append(extensions, &tmproto.VoteExtension{ - Type: t, - Extension: ext.Extension, - Signature: ext.Signature, - }) - } + extensions := make([]*tmproto.VoteExtension, 0, e.Len()) + for _, ext := range e { + pbExt := ext.ToProto() + extensions = append(extensions, &pbExt) } + return extensions } // ToExtendProto transforms the current state of vote-extension container into ExtendVoteExtension's protobuf func (e VoteExtensions) ToExtendProto() []*abci.ExtendVoteExtension { - proto := make([]*abci.ExtendVoteExtension, 0, e.totalCount()) - for _, et := range VoteExtensionTypes { - for _, ext := range e[et] { - proto = append(proto, &abci.ExtendVoteExtension{ - Type: et, - Extension: ext.Extension, - }) + proto := make([]*abci.ExtendVoteExtension, 0, e.Len()) + + for _, ext := range e { + pb := ext.ToProto() + eve := &abci.ExtendVoteExtension{ + Type: pb.Type, + Extension: pb.Extension, } + + if pb.XSignRequestId != nil { + if src := pb.GetSignRequestId(); len(src) > 0 { + eve.XSignRequestId = &abci.ExtendVoteExtension_SignRequestId{ + SignRequestId: bytes.Clone(src), + } + } + } + + proto = append(proto, eve) } + return proto } // Fingerprint returns a fingerprint of all vote-extensions in a state of this container func (e VoteExtensions) Fingerprint() []byte { - cnt := 0 - for _, v := range e { - cnt += len(v) - } - l := make([][]byte, 0, cnt) - for _, et := range VoteExtensionTypes { - for _, ext := range e[et] { - l = append(l, ext.Extension) + if e.IsEmpty() { + return tmbytes.Fingerprint(nil) + } + sha := sha256.New() + for _, ext := range e { + pb := ext.ToProto() + // type + extension + if _, err := sha.Write(big.NewInt(int64(pb.Type)).Bytes()); err != nil { + panic(err) + } + if _, err := sha.Write(pb.Extension); err != nil { + panic(err) } } - return tmbytes.Fingerprint(bytes.Join(l, nil)) + return tmbytes.Fingerprint(sha.Sum(nil)) } // IsSameWithProto compares the current state of the vote-extension with the same in VoteExtensions's protobuf // checks only the value of extensions -func (e VoteExtensions) IsSameWithProto(proto tmproto.VoteExtensions) bool { - for t, extensions := range e { - if len(proto[t]) != len(extensions) { +func (e VoteExtensions) IsSameWithProto(right tmproto.VoteExtensions) bool { + if len(e) != len(right) { 
+ return false + } + + for t, ext := range e { + pb := ext.ToProto() + other := right[t] + if !pb.Equal(other) { return false } - for i, ext := range extensions { - if !bytes.Equal(ext.Extension, proto[t][i].Extension) { - return false - } - } } return true } -func (e VoteExtensions) totalCount() int { - cnt := 0 - for _, exts := range e { - cnt += len(exts) +func (e VoteExtensions) Len() int { + return len(e) +} + +// VoteExtensionsFromProto creates VoteExtensions container from VoteExtensions's protobuf +func VoteExtensionsFromProto(pve ...*tmproto.VoteExtension) VoteExtensions { + if len(pve) == 0 { + return nil + } + voteExtensions := make(VoteExtensions, 0, len(pve)) + for _, ext := range pve { + voteExtensions = append(voteExtensions, VoteExtensionFromProto(*ext)) } - return cnt + + return voteExtensions } -// VoteExtension represents a vote extension data, with possible types: default or threshold recover -type VoteExtension struct { - Extension []byte `json:"extension"` - Signature tmbytes.HexBytes `json:"signature"` +// Copy creates a deep copy of VoteExtensions +func (e VoteExtensions) Copy() VoteExtensions { + if e == nil || e.IsEmpty() { + return nil + } + + copied := make(VoteExtensions, 0, len(e)) + for _, ext := range e { + copied = append(copied, ext.Copy()) + } + + return copied } -// Validate ... -func (v *VoteExtension) Validate() error { - if len(v.Extension) > 0 && len(v.Signature) == 0 { - return errExtensionSignEmpty +// Filter returns a new VoteExtensions container with vote-extensions filtered by provided function. +// It does not copy data, just creates a new container with references to the same data +func (e VoteExtensions) Filter(fn func(ext VoteExtensionIf) bool) VoteExtensions { + result := make(VoteExtensions, 0, len(e)) + for _, ext := range e { + if fn(ext) { + result = append(result, ext) + } } - if len(v.Signature) > SignatureSize { - return errExtensionSignTooBig + + return result[:] +} + +// CopySignsFromProto copies the signatures from VoteExtensions's protobuf into the current VoteExtension state +func (e VoteExtensions) CopySignsFromProto(src tmproto.VoteExtensions) error { + if len(e) != len(src) { + return errUnableCopySigns + } + + for i, ext := range e { + ext.SetSignature(src[i].Signature) } + return nil } -// Clone returns a copy of current vote-extension -func (v *VoteExtension) Clone() VoteExtension { - return VoteExtension{ - Extension: v.Extension, - Signature: v.Signature, +func (e VoteExtensions) SetSignatures(src [][]byte) error { + if len(e) != len(src) { + return errUnableCopySigns } + + for i, ext := range e { + ext.SetSignature(src[i]) + } + + return nil } -// VoteExtensionsFromProto creates VoteExtensions container from VoteExtensions's protobuf -func VoteExtensionsFromProto(pve []*tmproto.VoteExtension) VoteExtensions { - if pve == nil { +// CopySignsToProto copies the signatures from the current VoteExtensions into VoteExtension's protobuf +func (e VoteExtensions) CopySignsToProto(dest tmproto.VoteExtensions) error { + if len(e) != len(dest) { + return errUnableCopySigns + } + for i, ext := range e { + pb := ext.ToProto() + dest[i].Signature = pb.Signature + } + + return nil +} + +// Marshal VoteExtensions as zerolog array +func (e VoteExtensions) MarshalZerologArray(a *zerolog.Array) { + for _, ext := range e { + a.Object(ext) + } +} + +type VoteExtensionIf interface { + // Return type of this vote extension + GetType() tmproto.VoteExtensionType + // Return extension bytes + GetExtension() []byte + // Return signature bytes + 
GetSignature() []byte + // Copy creates a deep copy of VoteExtension + Copy() VoteExtensionIf + // ToProto transforms the current state of vote-extension into VoteExtension's proto-generated object. + // It should prioritize performance and can do a shallow copy of the vote-extension, + // so the returned object should not be modified. + ToProto() tmproto.VoteExtension + SignItem(chainID string, height int64, round int32, quorumType btcjson.LLMQType, quorumHash []byte) (SignItem, error) + IsThresholdRecoverable() bool + // Validate returns error if a vote-extension is invalid. + // It should not modify the state of the vote-extension. + Validate() error + + SetSignature(sig []byte) + + zerolog.LogObjectMarshaler +} + +type ThresholdVoteExtensionIf interface { + VoteExtensionIf + + AddThresholdSignature(validator ProTxHash, sig []byte) error + // Recover threshold signature from collected signatures + // + // Returns recovered signature or error. VoteExtension, including any signature already set, is not modified. + ThresholdRecover() ([]byte, error) +} + +func VoteExtensionFromProto(ve tmproto.VoteExtension) VoteExtensionIf { + switch ve.Type { + case tmproto.VoteExtensionType_DEFAULT: + return &GenericVoteExtension{VoteExtension: ve} + case tmproto.VoteExtensionType_THRESHOLD_RECOVER: + ext := newThresholdVoteExtension(ve) + return &ext + case tmproto.VoteExtensionType_THRESHOLD_RECOVER_RAW: + ext := newThresholdVoteExtension(ve) + return &ThresholdRawVoteExtension{ThresholdVoteExtension: ext} + default: + panic(fmt.Errorf("unknown vote extension type: %s", ve.Type.String())) + } +} + +func newThresholdVoteExtension(ve tmproto.VoteExtension) ThresholdVoteExtension { + return ThresholdVoteExtension{GenericVoteExtension: GenericVoteExtension{VoteExtension: ve}} +} + +// VOTE EXTENSION TYPES + +// GenericVoteExtension is a default type of VoteExtension +type GenericVoteExtension struct { + tmproto.VoteExtension +} + +func (e GenericVoteExtension) Copy() VoteExtensionIf { + return &GenericVoteExtension{VoteExtension: e.VoteExtension.Copy()} +} + +func (e GenericVoteExtension) ToProto() tmproto.VoteExtension { + return e.VoteExtension.Clone() +} + +func (e GenericVoteExtension) SignItem(chainID string, height int64, round int32, quorumType btcjson.LLMQType, quorumHash []byte) (SignItem, error) { + requestID, err := voteExtensionRequestID(height, round) + if err != nil { + return SignItem{}, err + } + canonical, err := CanonicalizeVoteExtension(chainID, &e.VoteExtension, height, round) + if err != nil { + panic(err) + } + + signBytes, err := protoio.MarshalDelimited(&canonical) + if err != nil { + panic(err) + } + + si := NewSignItem(quorumType, quorumHash, requestID, signBytes) + // we do not reverse fields when calculating SignHash for vote extensions + // si.UpdateSignHash(false) + return si, nil +} + +func (e GenericVoteExtension) IsThresholdRecoverable() bool { + return false +} + +func (e *GenericVoteExtension) SetSignature(sig []byte) { + e.Signature = sig +} + +func (e GenericVoteExtension) MarshalZerologObject(o *zerolog.Event) { + o.Str("type", e.GetType().String()) + o.Hex("extension", e.GetExtension()) + o.Hex("signature", e.GetSignature()) + o.Hex("sign_request_id", e.GetSignRequestId()) +} + +//nolint:stylecheck // name is the same as in protobuf-generated code +func (e GenericVoteExtension) GetSignRequestId() []byte { + if e.XSignRequestId == nil { return nil } - voteExtensions := make(VoteExtensions) - for _, ext := range pve { - voteExtensions[ext.Type] = 
append(voteExtensions[ext.Type], VoteExtension{ - Extension: ext.Extension, - Signature: ext.Signature, - }) + id, ok := e.XSignRequestId.(*tmproto.VoteExtension_SignRequestId) + if !ok || id == nil { + return nil } - return voteExtensions + + return id.SignRequestId } -// Copy creates a deep copy of VoteExtensions -func (e VoteExtensions) Copy() VoteExtensions { - copied := make(VoteExtensions, len(e)) - for extType, extensions := range e { - copied[extType] = make([]VoteExtension, len(extensions)) - for k, v := range extensions { - copied[extType][k] = v.Clone() - } +type ThresholdSignature [bls12381.SignatureSize]byte + +// ThresholdVoteExtension is a threshold type of VoteExtension +type ThresholdVoteExtension struct { + GenericVoteExtension + // threshold signatures for this vote extension, collected from validators + thresholdSignatures map[[crypto.ProTxHashSize]byte]ThresholdSignature +} + +func (e ThresholdVoteExtension) Copy() VoteExtensionIf { + return &ThresholdVoteExtension{GenericVoteExtension: GenericVoteExtension{ + VoteExtension: e.VoteExtension.Copy(), + }, } +} - return copied +func (e ThresholdVoteExtension) IsThresholdRecoverable() bool { + return true } -// CopySignsFromProto copies the signatures from VoteExtensions's protobuf into the current VoteExtension state -func (e VoteExtensions) CopySignsFromProto(src tmproto.VoteExtensions) error { - return e.copySigns(src, func(a *tmproto.VoteExtension, b *VoteExtension) { - b.Signature = a.Signature - }) +func (e *ThresholdVoteExtension) AddThresholdSignature(validator ProTxHash, sig []byte) error { + if e.thresholdSignatures == nil { + e.thresholdSignatures = make(map[[crypto.ProTxHashSize]byte]ThresholdSignature) + } + + proTxHash := [crypto.ProTxHashSize]byte(validator) + e.thresholdSignatures[proTxHash] = ThresholdSignature(sig) + + return nil } -// CopySignsToProto copies the signatures from the current VoteExtensions into VoteExtension's protobuf -func (e VoteExtensions) CopySignsToProto(dist tmproto.VoteExtensions) error { - return e.copySigns(dist, func(a *tmproto.VoteExtension, b *VoteExtension) { - a.Signature = b.Signature - }) -} - -func (e VoteExtensions) copySigns( - protoMap tmproto.VoteExtensions, - modifier func(a *tmproto.VoteExtension, b *VoteExtension), -) error { - for t, exts := range e { - if len(exts) != len(protoMap[t]) { - return errUnableCopySigns +// ThresholdRecover recovers threshold signature from collected signatures +func (e *ThresholdVoteExtension) ThresholdRecover() ([]byte, error) { + proTxHashes := make([][]byte, 0, len(e.thresholdSignatures)) + signatures := make([][]byte, 0, len(e.thresholdSignatures)) + + // collect signatures and proTxHashes + for proTxHash, signature := range e.thresholdSignatures { + if len(signature) != bls12381.SignatureSize { + return nil, fmt.Errorf("invalid vote extension signature len from validator %s: got %d, expected %d", + proTxHash, len(signature), bls12381.SignatureSize) } - for i := range exts { - modifier(protoMap[t][i], &exts[i]) + + proTxHashes = append(proTxHashes, bytes.Clone(proTxHash[:])) + signatures = append(signatures, bytes.Clone(signature[:])) + } + + if len(signatures) > 0 { + thresholdSignature, err := bls12381.RecoverThresholdSignatureFromShares(signatures, proTxHashes) + if err != nil { + return nil, fmt.Errorf("error recovering vote extension %s %X threshold signature: %w", + e.GetType().String(), e.GetExtension(), err) } + + return thresholdSignature, nil } - return nil + + return nil, fmt.Errorf("vote extension %s of type %X does 
not have any signatures for threshold-recovering", + e.GetType().String(), e.GetExtension()) +} + +// ThresholdRawVoteExtension is a threshold raw type of VoteExtension +type ThresholdRawVoteExtension struct { + ThresholdVoteExtension +} + +func (e ThresholdRawVoteExtension) Copy() VoteExtensionIf { + inner := e.ThresholdVoteExtension.Copy().(*ThresholdVoteExtension) + return &ThresholdRawVoteExtension{ThresholdVoteExtension: *inner} +} + +// SignItem creates a SignItem for a threshold raw vote extension +// +// Note: signItem.Msg left empty by purpose, as we don't want hash to be checked in Verify() +func (e ThresholdRawVoteExtension) SignItem(_ string, height int64, round int32, quorumType btcjson.LLMQType, quorumHash []byte) (SignItem, error) { + var signRequestID []byte + var err error + + ext := &e.VoteExtension + + if ext.XSignRequestId != nil && ext.XSignRequestId.Size() > 0 { + receivedReqID := ext.GetSignRequestId() + signRequestID = crypto.Checksum(crypto.Checksum(receivedReqID)) // reverse ext.GetSignRequestId()? + signRequestID = tmbytes.Reverse(signRequestID) + } else { + if signRequestID, err = voteExtensionRequestID(height, round); err != nil { + return SignItem{}, err + } + } + + // ensure Extension is 32 bytes long + if len(ext.Extension) != crypto.DefaultHashSize { + return SignItem{}, fmt.Errorf("invalid vote extension %s %X: extension must be %d bytes long", + ext.Type.String(), ext.Extension, crypto.DefaultHashSize) + } + + // We sign extension as it is, without any hashing, etc. + // However, as it is reversed in SignItem.UpdateSignHash, we need to reverse it also here to undo + // that reversal. + msgHash := tmbytes.Reverse(ext.Extension) + + signItem := NewSignItemFromHash(quorumType, quorumHash, signRequestID, msgHash) + // signItem.Msg left empty by purpose, as we don't want hash to be checked in Verify() + + return signItem, nil +} + +// voteExtensionRequestID returns vote extension sign request ID used to generate +// threshold signatures +func voteExtensionRequestID(height int64, round int32) ([]byte, error) { + return heightRoundRequestID("dpevote", height, round), nil } diff --git a/types/vote_extension_test.go b/types/vote_extension_test.go new file mode 100644 index 0000000000..0bf6cdb779 --- /dev/null +++ b/types/vote_extension_test.go @@ -0,0 +1,201 @@ +package types + +import ( + "bytes" + "encoding/binary" + "encoding/hex" + "fmt" + "testing" + + "github.com/dashpay/dashd-go/btcjson" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/dashpay/tenderdash/crypto/bls12381" + tmbytes "github.com/dashpay/tenderdash/libs/bytes" + "github.com/dashpay/tenderdash/libs/log" + tmproto "github.com/dashpay/tenderdash/proto/tendermint/types" +) + +func TestVoteExtensionCopySignsFromProto(t *testing.T) { + src := tmproto.VoteExtensions{ + &tmproto.VoteExtension{ + Type: tmproto.VoteExtensionType_THRESHOLD_RECOVER, + Extension: []byte("threshold"), + Signature: []byte("signature"), + }, + } + + dst := VoteExtensions{&ThresholdVoteExtension{}} + err := dst.CopySignsFromProto(src) + require.NoError(t, err) + assert.EqualValues(t, src[0].GetSignature(), dst[0].GetSignature()) +} + +func TestMakeVoteExtensionsSignItems(t *testing.T) { + const chainID = "dash-platform" + const quorumType = btcjson.LLMQType_5_60 + + logger := log.NewTestingLogger(t) + testCases := []struct { + vote Vote + quorumHash []byte + want []SignItem + wantHash [][]byte + }{ + { + vote: Vote{ + Type: tmproto.PrecommitType, + Height: 1001, + 
ValidatorProTxHash: tmbytes.MustHexDecode("9CC13F685BC3EA0FCA99B87F42ABCC934C6305AA47F62A32266A2B9D55306B7B"), + VoteExtensions: VoteExtensionsFromProto(&tmproto.VoteExtension{ + Type: tmproto.VoteExtensionType_DEFAULT, + Extension: []byte("default")}, + &tmproto.VoteExtension{ + Type: tmproto.VoteExtensionType_THRESHOLD_RECOVER, + Extension: []byte("threshold")}, + ), + }, + quorumHash: tmbytes.MustHexDecode("6A12D9CF7091D69072E254B297AEF15997093E480FDE295E09A7DE73B31CEEDD"), + want: []SignItem{ + newSignItem( + "FB95F2CA6530F02AC623589D7938643FF22AE79A75DD79AEA1C8871162DE675E", + "533524404D3A905F5AC9A30FCEB5A922EAD96F30DA02F979EE41C4342F540467", + "210A0764656661756C7411E903000000000000220D646173682D706C6174666F726D", + "6A12D9CF7091D69072E254B297AEF15997093E480FDE295E09A7DE73B31CEEDD", + quorumType, + ), + newSignItem( + "fb95f2ca6530f02ac623589d7938643ff22ae79a75dd79aea1c8871162de675e", + "D3B7D53A0F9CA8072D47D6C18E782EE3155EF8DCDDB010087030B6CBC63978BC", + "250a097468726573686f6c6411e903000000000000220d646173682d706c6174666f726d2801", + "6A12D9CF7091D69072E254B297AEF15997093E480FDE295E09A7DE73B31CEEDD", + quorumType, + ), + }, + wantHash: [][]byte{ + tmbytes.MustHexDecode("61519D79DE4C4D5AC5DD210C1BCE81AA24F76DD5581A24970E60112890C68FB7"), + tmbytes.MustHexDecode("46C72C423B74034E1AF574A99091B017C0698FEAA55C8B188BFD512FCADD3143"), + }, + }, + } + for i, tc := range testCases { + t.Run(fmt.Sprintf("test-case #%d", i), func(t *testing.T) { + signItems, err := tc.vote.VoteExtensions.SignItems(chainID, quorumType, tc.quorumHash, tc.vote.Height, tc.vote.Round) + + require.NoError(t, err) + + for i, sign := range signItems { + assert.Equal(t, tc.wantHash[i], sign.MsgHash, "want %X, actual %X", tc.wantHash[i], sign.MsgHash) + if !assert.Equal(t, tc.want[i], sign, "Got ID(%d): %X", i, sign.SignHash) { + logger.Error("invalid sign", "sign", sign, "i", i) + } + } + }) + } +} + +// Test vectors of THRESHOLD_RECOVER_RAW vote extensions hashing, as used by Dash Platform withdrawals mechanism. +// +// Given some vote extension, llmq type, quorum hash and sign request id, sign data should match predefined test vector. 
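For orientation, a sketch of how the withdrawal sign request IDs in these vectors are assembled, and how the THRESHOLD_RECOVER_RAW SignItem implementation earlier in this diff appears to derive the final request ID from them (double Checksum, then reversed). The helper name is made up; imports follow the test file above:

func withdrawalRequestID(index uint64) []byte {
	// "\x06plwdtx" is the length-prefixed "plwdtx" tag followed by the
	// little-endian withdrawal index, matching the requestID fields below.
	raw := binary.LittleEndian.AppendUint64([]byte("\x06plwdtx"), index)

	// ThresholdRawVoteExtension.SignItem double-hashes the raw request ID with
	// crypto.Checksum and reverses the result before signing.
	return tmbytes.Reverse(crypto.Checksum(crypto.Checksum(raw)))
}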
+func TestVoteExtensionsRaw_SignDataRawVector_Withdrawals(t *testing.T) { + const chainID = "some-chain" // unused but required for VoteExtension.SignItem + const llmqType = btcjson.LLMQType_TEST_PLATFORM + + testCases := []struct { + llmqType btcjson.LLMQType + quorumHash []byte + requestID []byte + extension []byte + + // optional + quorumSig []byte + quorumPubKey []byte + + expectedMsgHash []byte + expectedSignHash []byte + expectedRequestID []byte + }{ + { // llmqType:106 + // quorumHash:53c006055af6d0ae9aa9627df8615a71c312421a28c4712c8add83c8e1bfdadd + // requestID:922a8fc39b6e265ca761eaaf863387a5e2019f4795a42260805f5562699fd9fa + // messageHash:2a3b788b83a8a3877d618874c0987ce62b43762ea18362cd336f4a79402d25c0 + // ==== signHash:9753911839e0a8304626b95ada276b55a3785bca657294a153bd5d66301756b7 + llmqType: btcjson.LLMQType_TEST_PLATFORM, + + quorumHash: tmbytes.MustHexDecode("53c006055af6d0ae9aa9627df8615a71c312421a28c4712c8add83c8e1bfdadd"), + requestID: binary.LittleEndian.AppendUint64([]byte("\x06plwdtx"), 0), + extension: []byte{192, 37, 45, 64, 121, 74, 111, 51, 205, 98, 131, 161, 46, 118, 67, 43, 230, 124, 152, 192, 116, 136, 97, 125, 135, 163, 168, 131, 139, 120, 59, 42}, + expectedSignHash: tmbytes.Reverse(tmbytes.MustHexDecode("9753911839e0a8304626b95ada276b55a3785bca657294a153bd5d66301756b7")), + expectedRequestID: tmbytes.MustHexDecode("922a8fc39b6e265ca761eaaf863387a5e2019f4795a42260805f5562699fd9fa"), + }, + { // test that requestID is correct for index 102 + quorumHash: tmbytes.MustHexDecode("53c006055af6d0ae9aa9627df8615a71c312421a28c4712c8add83c8e1bfdadd"), + requestID: binary.LittleEndian.AppendUint64([]byte("\x06plwdtx"), 102), + extension: []byte{192, 37, 45, 64, 121, 74, 111, 51, 205, 98, 131, 161, 46, 118, 67, 43, 230, 124, 152, 192, 116, 136, 97, 125, 135, 163, 168, 131, 139, 120, 59, 42}, + expectedRequestID: tmbytes.MustHexDecode("7a1b17a4542f4748c6b91bd46c7daa4f26f77f67cd2d9d405c8d956c77a44764"), + }, + { + // tx = 03000900000190cff4050000000023210375aae0756e8115ea064b46705c7b0a8ffad3d79688d910ef0337239fc + // 1b3760dac000000009101650000000000000070110100db04000031707c372e1a75dab8455659a2c7757842aa80 + // 998eae146847f1372447a5d02585fe5c9e8f7985ac27d41b1ca654e783bd7eaab484882ceae3cb3511acefac0e8 + // 875b691813ec26101c3384a6e506be9133ba977ae12be89ffa1a1105968fc01c7de4e02ac8c689989a5677ceb2e + // 284a57d57f7885c40658096d7c6294f9fa7a + + llmqType: btcjson.LLMQType_TEST, + quorumHash: tmbytes.MustHexDecode("25d0a5472437f1476814ae8e9980aa427875c7a2595645b8da751a2e377c7031"), + requestID: binary.LittleEndian.AppendUint64([]byte("\x06plwdtx"), 101), + extension: tmbytes.MustHexDecode("68CA7F464880F4040AF87DBE79F725C74398C3A2001700D7A0FEDB9417FD622F"), + + // TODO: Uncomment when we have public key + // quorumSig: tmbytes.MustHexDecode("85fe5c9e8f7985ac27d41b1ca654e783bd7eaab484882ceae3cb3511acefac0e8875b691813ec26101c3384a6e506be9133ba977ae12be89ffa1a1105968fc01c7de4e02ac8c689989a5677ceb2e284a57d57f7885c40658096d7c6294f9fa7a"), + // quorumPubKey: tmbytes.MustHexDecode("210375aae0756e8115ea064b46705c7b0a8ffad3d79688d910ef0337239fc1b3760dac"), + + // expectedSignHash: tmbytes.Reverse(tmbytes.MustHexDecode("9753911839e0a8304626b95ada276b55a3785bca657294a153bd5d66301756b7")), + expectedMsgHash: tmbytes.Reverse(tmbytes.MustHexDecode("68ca7f464880f4040af87dbe79f725c74398c3a2001700d7a0fedb9417fd622f")), + expectedRequestID: tmbytes.MustHexDecode("fcc76a643c5c668244fdcef09833955d6f4b803fa6c459f7732983c2332389fd"), + }, + } + + for _, tc := range testCases { + t.Run("", func(t 
*testing.T) { + + ve := tmproto.VoteExtension{ + Extension: tc.extension, + Signature: []byte{}, + Type: tmproto.VoteExtensionType_THRESHOLD_RECOVER_RAW, + XSignRequestId: &tmproto.VoteExtension_SignRequestId{ + SignRequestId: bytes.Clone(tc.requestID), + }, + } + voteExtension := VoteExtensionFromProto(ve) + signItem, err := voteExtension.SignItem(chainID, 1, 0, llmqType, tc.quorumHash) + require.NoError(t, err) + + // t.Logf("LLMQ type: %s (%d)\n", llmqType.Name(), llmqType) + // t.Logf("extension: %X\n", extension) + // t.Logf("sign requestID: %X\n", requestID) + // t.Logf("quorum hash: %X\n", quorumHash) + + t.Logf("RESULT: sign hash: %X", signItem.SignHash) + if len(tc.expectedSignHash) > 0 { + assert.EqualValues(t, tc.expectedSignHash, signItem.SignHash, "sign hash mismatch") + } + if len(tc.expectedRequestID) > 0 { + t.Logf("requestID: %s", hex.EncodeToString(tc.requestID)) + assert.EqualValues(t, tc.expectedRequestID, signItem.ID, "sign request id mismatch") + } + if len(tc.expectedMsgHash) > 0 { + assert.EqualValues(t, tc.expectedMsgHash, signItem.MsgHash, "msg hash mismatch") + } + + if len(tc.quorumSig) > 0 { + require.Len(t, tc.quorumPubKey, bls12381.PubKeySize) + pubKey := bls12381.PubKey(tc.quorumPubKey) + require.NoError(t, pubKey.Validate(), "invalid public key") + assert.True(t, pubKey.VerifySignatureDigest(signItem.SignHash, tc.quorumSig), "signature verification failed") + } + }) + } + +} diff --git a/types/vote_set.go b/types/vote_set.go index 103e86dcab..bf4520cb0d 100644 --- a/types/vote_set.go +++ b/types/vote_set.go @@ -70,8 +70,8 @@ type VoteSet struct { peerMaj23s map[string]maj23Info // Maj23 for each peer // dash fields - thresholdBlockSig []byte // If a 2/3 majority is seen, recover the block sig - thresholdVoteExtSigs []ThresholdExtensionSign // If a 2/3 majority is seen, recover the vote extension sigs + thresholdBlockSig []byte // If a 2/3 majority is seen, recover the block sig + thresholdVoteExtSigs VoteExtensions // If a 2/3 majority is seen, recover the vote extension sigs } type maj23Info struct { @@ -216,7 +216,7 @@ func (voteSet *VoteSet) addVote(vote *Vote) (added bool, err error) { } // Check signature. - err = vote.VerifyWithExtension( + err = vote.Verify( voteSet.chainID, voteSet.valSet.QuorumType, voteSet.valSet.QuorumHash, @@ -341,6 +341,8 @@ func (voteSet *VoteSet) addVerifiedVote( return true, conflicting } +// recoverThresholdSignsAndVerify recovers threshold signatures and verifies them. +// precondition: quorum reached func (voteSet *VoteSet) recoverThresholdSignsAndVerify(blockVotes *blockVotes, quorumDataSigns QuorumSignData) error { if len(blockVotes.votes) == 0 { return nil @@ -349,21 +351,20 @@ func (voteSet *VoteSet) recoverThresholdSignsAndVerify(blockVotes *blockVotes, q // there is only 1 validator vote := blockVotes.votes[0] voteSet.thresholdBlockSig = vote.BlockSignature - voteSet.thresholdVoteExtSigs = MakeThresholdVoteExtensions( - vote.VoteExtensions[tmproto.VoteExtensionType_THRESHOLD_RECOVER], - vote.GetVoteExtensionsSigns(tmproto.VoteExtensionType_THRESHOLD_RECOVER), - ) + voteSet.thresholdVoteExtSigs = vote.VoteExtensions. 
+ Filter(func(ext VoteExtensionIf) bool { + return ext.IsThresholdRecoverable() + }).Copy() return nil } err := voteSet.recoverThresholdSigns(blockVotes) if err != nil { return err } - verifier := NewQuorumSignsVerifier( - quorumDataSigns, - WithVerifyReachedQuorum(voteSet.IsQuorumReached()), - ) - return verifier.Verify(voteSet.valSet.ThresholdPublicKey, voteSet.makeQuorumSigns()) + + sigs := voteSet.makeQuorumSigns() + // we assume quorum is reached + return quorumDataSigns.Verify(voteSet.valSet.ThresholdPublicKey, sigs) } func (voteSet *VoteSet) recoverThresholdSigns(blockVotes *blockVotes) error { @@ -376,8 +377,9 @@ func (voteSet *VoteSet) recoverThresholdSigns(blockVotes *blockVotes) error { if err != nil { return err } + voteSet.thresholdBlockSig = thresholdSigns.BlockSign - voteSet.thresholdVoteExtSigs = thresholdSigns.ExtensionSigns + voteSet.thresholdVoteExtSigs = signsRecoverer.GetVoteExtensions(*thresholdSigns) return nil } @@ -735,6 +737,7 @@ func (voteSet *VoteSet) MakeCommit() *Commit { voteSet.GetHeight(), voteSet.GetRound(), *voteSet.maj23, + voteSet.thresholdVoteExtSigs, voteSet.makeCommitSigns(), ) } @@ -742,8 +745,8 @@ func (voteSet *VoteSet) MakeCommit() *Commit { func (voteSet *VoteSet) makeCommitSigns() *CommitSigns { return &CommitSigns{ QuorumSigns: QuorumSigns{ - BlockSign: voteSet.thresholdBlockSig, - ExtensionSigns: voteSet.thresholdVoteExtSigs, + BlockSign: voteSet.thresholdBlockSig, + VoteExtensionSignatures: voteSet.thresholdVoteExtSigs.GetSignatures(), }, QuorumHash: voteSet.valSet.QuorumHash, } @@ -751,8 +754,8 @@ func (voteSet *VoteSet) makeCommitSigns() *CommitSigns { func (voteSet *VoteSet) makeQuorumSigns() QuorumSigns { return QuorumSigns{ - BlockSign: voteSet.thresholdBlockSig, - ExtensionSigns: voteSet.thresholdVoteExtSigs, + BlockSign: voteSet.thresholdBlockSig, + VoteExtensionSignatures: voteSet.thresholdVoteExtSigs.GetSignatures(), } } diff --git a/types/vote_set_test.go b/types/vote_set_test.go index 6be1275fde..f3a47aaa54 100644 --- a/types/vote_set_test.go +++ b/types/vote_set_test.go @@ -598,7 +598,7 @@ func castVote( // NOTE: privValidators are in order func randVoteSet( - ctx context.Context, + _ctx context.Context, t testing.TB, height int64, round int32, diff --git a/types/vote_test.go b/types/vote_test.go index 0c05e9d502..b7d413a373 100644 --- a/types/vote_test.go +++ b/types/vote_test.go @@ -21,7 +21,7 @@ import ( const ( //nolint: lll - preCommitTestStr = `Vote{56789:959A8F5EF2BE 12345/02/Precommit(8B01023386C3) 000000000000 000000000000}` + preCommitTestStr = `Vote{56789:959A8F5EF2BE 12345/02/Precommit(8B01023386C3) 000000000000 03962B14DA9F}` //nolint: lll preVoteTestStr = `Vote{56789:959A8F5EF2BE 12345/02/Prevote(8B01023386C3) 000000000000 000000000000}` ) @@ -39,9 +39,11 @@ func examplePrevote(t *testing.T) *Vote { func examplePrecommit(t testing.TB) *Vote { t.Helper() vote := exampleVote(t, byte(tmproto.PrecommitType)) - vote.VoteExtensions = VoteExtensions{ - tmproto.VoteExtensionType_DEFAULT: []VoteExtension{{Signature: []byte("signature")}}, - } + vote.VoteExtensions = VoteExtensionsFromProto(&tmproto.VoteExtension{ + Type: tmproto.VoteExtensionType_THRESHOLD_RECOVER, + Extension: []byte("extension"), + Signature: make([]byte, SignatureSize), + }) return vote } @@ -179,11 +181,9 @@ func TestVoteSignBytesTestVectors(t *testing.T) { // containing vote extension 5: { "test_chain_id", &Vote{ - Height: 1, - Round: 1, - VoteExtensions: VoteExtensions{ - tmproto.VoteExtensionType_DEFAULT: []VoteExtension{{Extension: 
[]byte("extension")}}, - }, + Height: 1, + Round: 1, + VoteExtensions: VoteExtensionsFromProto(&tmproto.VoteExtension{Extension: []byte("extension")}), }, []byte{ 0x0, 0x0, 0x0, 0x0, //type @@ -331,18 +331,40 @@ func TestVoteExtension(t *testing.T) { expectError bool }{ { - name: "all fields present", - extensions: VoteExtensions{ - tmproto.VoteExtensionType_THRESHOLD_RECOVER: []VoteExtension{{Extension: []byte("extension")}}, - }, + name: "valid THRESHOLD_RECOVER", + extensions: VoteExtensionsFromProto(&tmproto.VoteExtension{ + Type: tmproto.VoteExtensionType_THRESHOLD_RECOVER, + Extension: []byte("extension")}), + includeSignature: true, + expectError: false, + }, + { + name: "valid THRESHOLD_RECOVER_RAW plwdtx", + extensions: VoteExtensionsFromProto(&tmproto.VoteExtension{ + Type: tmproto.VoteExtensionType_THRESHOLD_RECOVER_RAW, + XSignRequestId: &tmproto.VoteExtension_SignRequestId{ + SignRequestId: []byte("\x06plwdtx"), + }, + Extension: bytes.Repeat([]byte("extensio"), 4)}), // must be 32 bytes + includeSignature: true, + expectError: false, + }, + { + name: "valid THRESHOLD_RECOVER_RAW dpevote", + extensions: VoteExtensionsFromProto(&tmproto.VoteExtension{ + Type: tmproto.VoteExtensionType_THRESHOLD_RECOVER_RAW, + XSignRequestId: &tmproto.VoteExtension_SignRequestId{ + SignRequestId: []byte("dpevote"), + }, + Extension: bytes.Repeat([]byte("extensio"), 4)}), // must be 32 bytes includeSignature: true, expectError: false, }, { name: "no extension signature", - extensions: VoteExtensions{ - tmproto.VoteExtensionType_THRESHOLD_RECOVER: []VoteExtension{{Extension: []byte("extension")}}, - }, + extensions: VoteExtensionsFromProto(&tmproto.VoteExtension{ + Type: tmproto.VoteExtensionType_THRESHOLD_RECOVER, + Extension: []byte("extension")}), includeSignature: false, expectError: true, }, @@ -380,14 +402,12 @@ func TestVoteExtension(t *testing.T) { require.NoError(t, err) vote.BlockSignature = v.BlockSignature if tc.includeSignature { - protoExtensionsMap := v.VoteExtensionsToMap() - for et, extensions := range protoExtensionsMap { - for i, ext := range extensions { - vote.VoteExtensions[et][i].Signature = ext.Signature - } + for i, ext := range v.VoteExtensions { + vote.VoteExtensions[i].SetSignature(ext.Signature) } } - err = vote.VerifyWithExtension("test_chain_id", btcjson.LLMQType_5_60, quorumHash, pk, proTxHash) + + err = vote.Verify("test_chain_id", btcjson.LLMQType_5_60, quorumHash, pk, proTxHash) if tc.expectError { require.Error(t, err) } else { @@ -438,13 +458,13 @@ func TestVoteVerify(t *testing.T) { stateID := RandStateID() stateID.Height = uint64(vote.Height - 1) pubKey := bls12381.GenPrivKey().PubKey() - err = vote.Verify("test_chain_id", quorumType, quorumHash, pubKey, crypto.RandProTxHash(), stateID) + err = vote.Verify("test_chain_id", quorumType, quorumHash, pubKey, crypto.RandProTxHash()) if assert.Error(t, err) { assert.Equal(t, ErrVoteInvalidValidatorProTxHash, err) } - err = vote.Verify("test_chain_id", quorumType, quorumHash, pubkey, proTxHash, stateID) + err = vote.Verify("test_chain_id", quorumType, quorumHash, pubkey, proTxHash) if assert.Error(t, err) { assert.ErrorIs(t, err, ErrVoteInvalidBlockSignature) // since block signatures are verified first } @@ -470,6 +490,7 @@ func TestVoteString(t *testing.T) { vote: func() *Vote { v := examplePrecommit(t) v.BlockID.Hash = nil + v.VoteExtensions = nil return v }(), expectedResult: nilVoteTestStr, @@ -515,7 +536,11 @@ func TestValidVotes(t *testing.T) { { "good precommit with vote extension", examplePrecommit(t), 
 			func(v *Vote) {
-				v.VoteExtensions[tmproto.VoteExtensionType_DEFAULT][0].Extension = []byte("extension")
+				v.VoteExtensions[0] = VoteExtensionFromProto(tmproto.VoteExtension{
+					Type:      tmproto.VoteExtensionType_THRESHOLD_RECOVER,
+					Extension: []byte("extension"),
+					Signature: make([]byte, SignatureSize),
+				})
 			},
 		},
 	}
@@ -579,13 +604,13 @@ func TestInvalidPrevotes(t *testing.T) {
 		{
 			"vote extension present",
 			func(v *Vote) {
-				v.VoteExtensions = VoteExtensions{tmproto.VoteExtensionType_DEFAULT: []VoteExtension{{Extension: []byte("extension")}}}
+				v.VoteExtensions = VoteExtensionsFromProto(&tmproto.VoteExtension{Extension: []byte("extension")})
 			},
 		},
 		{
 			"vote extension signature present",
 			func(v *Vote) {
-				v.VoteExtensions = VoteExtensions{tmproto.VoteExtensionType_DEFAULT: []VoteExtension{{Signature: []byte("signature")}}}
+				v.VoteExtensions = VoteExtensionsFromProto(&tmproto.VoteExtension{Signature: []byte("signature")})
 			},
 		},
 	}
@@ -610,9 +635,7 @@ func TestInvalidPrecommitExtensions(t *testing.T) {
 	}{
 		{
 			"vote extension present without signature", func(v *Vote) {
-				v.VoteExtensions = VoteExtensions{
-					tmproto.VoteExtensionType_THRESHOLD_RECOVER: {{Extension: []byte("extension")}},
-				}
+				v.VoteExtensions = VoteExtensionsFromProto(&tmproto.VoteExtension{Extension: []byte("extension")})
 			},
 		},
 		// TODO(thane): Re-enable once https://github.com/tendermint/tendermint/issues/8272 is resolved
@@ -620,9 +643,9 @@ func TestInvalidPrecommitExtensions(t *testing.T) {
 		{
 			"oversized vote extension signature", func(v *Vote) {
-				v.VoteExtensions = VoteExtensions{
-					tmproto.VoteExtensionType_THRESHOLD_RECOVER: []VoteExtension{{Signature: make([]byte, SignatureSize+1)}},
-				}
+				v.VoteExtensions = VoteExtensionsFromProto(&tmproto.VoteExtension{
+					Type:      tmproto.VoteExtensionType_THRESHOLD_RECOVER,
+					Signature: make([]byte, SignatureSize+1)})
 			},
 		},
 	}
@@ -648,11 +671,44 @@ func TestVoteExtensionsSignBytes(t *testing.T) {
 		Signature: []byte{},
 		Type:      tmproto.VoteExtensionType_THRESHOLD_RECOVER,
 	}
-	actual := VoteExtensionSignBytes("some-chain", 1, 2, &ve)
+	signItem, err := VoteExtensionFromProto(ve).SignItem("some-chain", 1, 2, btcjson.LLMQType_TEST_PLATFORM, crypto.RandQuorumHash())
+	assert.NoError(t, err)
+
+	actual := signItem.Msg
+	t.Logf("sign bytes: %x", actual)
 	assert.EqualValues(t, expect, actual)
 }
 
+// TestVoteExtensionsSignBytesRaw checks vote extension sign bytes for a raw vote extension type.
+//
+// Given some vote extension, SignBytes of THRESHOLD_RECOVER_RAW returns that extension.
+func TestVoteExtensionsSignBytesRaw(t *testing.T) {
+	extension := bytes.Repeat([]byte{1, 2, 3, 4, 5, 6, 7, 8}, 4)
+	quorumHash := bytes.Repeat([]byte{8, 7, 6, 5, 4, 3, 2, 1}, 4)
+	expectedSignHash := []byte{0xe, 0x88, 0x8d, 0xa8, 0x97, 0xf1, 0xc0, 0xfd, 0x6a, 0xe8, 0x3b, 0x77, 0x9b, 0x5, 0xdd,
+		0x28, 0xc, 0xe2, 0x58, 0xf6, 0x4c, 0x86, 0x1, 0x34, 0xfa, 0x4, 0x27, 0xe1, 0xaa, 0xab, 0x1a, 0xde}
+
+	assert.Len(t, extension, 32)
+
+	ve := tmproto.VoteExtension{
+		Extension: extension,
+		Signature: []byte{},
+		Type:      tmproto.VoteExtensionType_THRESHOLD_RECOVER_RAW,
+		XSignRequestId: &tmproto.VoteExtension_SignRequestId{
+			SignRequestId: []byte("dpevote-someSignRequestID"),
+		},
+	}
+
+	signItem, err := VoteExtensionFromProto(ve).SignItem("some-chain", 1, 2, btcjson.LLMQType_TEST_PLATFORM, quorumHash)
+	assert.NoError(t, err)
+
+	actual := signItem.SignHash
+
+	t.Logf("sign hash: %x", actual)
+	assert.EqualValues(t, expectedSignHash, actual)
+}
+
 func TestVoteProtobuf(t *testing.T) {
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
@@ -675,22 +731,24 @@ func TestVoteProtobuf(t *testing.T) {
 		{"fail vote validate basic", &Vote{}, true, false},
 	}
 	for _, tc := range testCases {
-		protoProposal := tc.vote.ToProto()
+		t.Run(tc.msg, func(t *testing.T) {
+			protoProposal := tc.vote.ToProto()
 
-		v, err := VoteFromProto(protoProposal)
-		if tc.convertsOk {
-			require.NoError(t, err)
-		} else {
-			require.Error(t, err)
-		}
+			v, err := VoteFromProto(protoProposal)
+			if tc.convertsOk {
+				require.NoError(t, err)
+			} else {
+				require.Error(t, err)
+			}
 
-		err = v.ValidateBasic()
-		if tc.passesValidateBasic {
-			require.NoError(t, err)
-			require.Equal(t, tc.vote, v, tc.msg)
-		} else {
-			require.Error(t, err)
-		}
+			err = v.ValidateBasic()
+			if tc.passesValidateBasic {
+				require.NoError(t, err)
+				require.Equal(t, tc.vote, v, tc.msg)
+			} else {
+				require.Error(t, err)
+			}
+		})
 	}
 }
diff --git a/version/version.go b/version/version.go
index 58b28384fb..07e14f7bd6 100644
--- a/version/version.go
+++ b/version/version.go
@@ -9,9 +9,9 @@ var (
 const (
 	// TMVersionDefault is the used as the fallback version for Tenderdash
 	// when not using git describe. It is formatted with semantic versioning.
-	TMVersionDefault = "0.13.4"
+	TMVersionDefault = "1.0.0"
 	// ABCISemVer is the semantic version of the ABCI library
-	ABCISemVer  = "0.23.0"
+	ABCISemVer  = "1.0.0"
 	ABCIVersion = ABCISemVer
 )
@@ -19,11 +19,11 @@ const (
 var (
 	// P2PProtocol versions all p2p behavior and msgs.
 	// This includes proposer selection.
-	P2PProtocol uint64 = 8
+	P2PProtocol uint64 = 10
 	// BlockProtocol versions all block data structures and processing.
 	// This includes validity of blocks and state updates.
-	BlockProtocol uint64 = 13
+	BlockProtocol uint64 = 14
 )
 
 type Consensus struct {
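
For reference, the vote-extension hunks above replace the old VoteExtensionSignBytes/VerifyWithExtension flow with a SignItem-based API. The sketch below shows, in a minimal and illustrative way, how a caller outside the types package might derive the sign hash of a THRESHOLD_RECOVER_RAW extension, mirroring TestVoteExtensionsSignBytesRaw. It reuses only identifiers that appear in this diff (tmproto.VoteExtension, VoteExtensionFromProto, SignItem, the Msg and SignHash fields, btcjson.LLMQType_TEST_PLATFORM); the import paths and the standalone main wrapper are assumptions about the module layout, not part of this change.

package main

import (
	"bytes"
	"fmt"

	"github.com/dashpay/dashd-go/btcjson" // assumed import path

	tmproto "github.com/dashpay/tenderdash/proto/tendermint/types" // assumed import path
	"github.com/dashpay/tenderdash/types"                          // assumed import path
)

func main() {
	// A THRESHOLD_RECOVER_RAW extension carries a 32-byte payload and an
	// optional sign-request ID, as in TestVoteExtensionsSignBytesRaw above.
	ve := tmproto.VoteExtension{
		Type:      tmproto.VoteExtensionType_THRESHOLD_RECOVER_RAW,
		Extension: bytes.Repeat([]byte{1, 2, 3, 4, 5, 6, 7, 8}, 4), // must be 32 bytes
		XSignRequestId: &tmproto.VoteExtension_SignRequestId{
			SignRequestId: []byte("dpevote-someSignRequestID"),
		},
	}

	quorumHash := bytes.Repeat([]byte{8, 7, 6, 5, 4, 3, 2, 1}, 4)

	// SignItem bundles the raw sign bytes (Msg) and the resulting sign hash
	// (SignHash) for the given chain ID, height, round, LLMQ type and quorum hash.
	signItem, err := types.VoteExtensionFromProto(ve).SignItem(
		"some-chain", 1, 2, btcjson.LLMQType_TEST_PLATFORM, quorumHash,
	)
	if err != nil {
		panic(err)
	}

	fmt.Printf("sign bytes: %x\n", signItem.Msg)
	fmt.Printf("sign hash:  %x\n", signItem.SignHash)
}

As the test above documents, for THRESHOLD_RECOVER_RAW the sign bytes are the 32-byte extension itself; the tests exercise both the "dpevote" and "\x06plwdtx" sign-request IDs.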