diff --git a/.github/dependabot.yml b/.github/dependabot.yml
index b444581e62d0..3570d9fcf2d6 100644
--- a/.github/dependabot.yml
+++ b/.github/dependabot.yml
@@ -9,3 +9,4 @@ updates:
     directory: "/" # Location of package manifests
     schedule:
       interval: "daily"
+    open-pull-requests-limit: 0 # Disable non-security version updates
diff --git a/.github/workflows/build-deb-pkg.sh b/.github/workflows/build-deb-pkg.sh
index fc2f06ed6d36..e01fe6017dcd 100755
--- a/.github/workflows/build-deb-pkg.sh
+++ b/.github/workflows/build-deb-pkg.sh
@@ -4,34 +4,34 @@ set -euo pipefail
 DEBIAN_BASE_DIR=$PKG_ROOT/debian
 AVALANCHE_BUILD_BIN_DIR=$DEBIAN_BASE_DIR/usr/local/bin
-TEMPLATE=.github/workflows/debian/template 
+TEMPLATE=.github/workflows/debian/template
 DEBIAN_CONF=$DEBIAN_BASE_DIR/DEBIAN
 
-mkdir -p $DEBIAN_BASE_DIR
-mkdir -p $DEBIAN_CONF
-mkdir -p $AVALANCHE_BUILD_BIN_DIR
+mkdir -p "$DEBIAN_BASE_DIR"
+mkdir -p "$DEBIAN_CONF"
+mkdir -p "$AVALANCHE_BUILD_BIN_DIR"
 
 # Assume binaries are at default locations
-OK=`cp ./build/avalanchego $AVALANCHE_BUILD_BIN_DIR`
+OK=$(cp ./build/avalanchego "$AVALANCHE_BUILD_BIN_DIR")
 if [[ $OK -ne 0 ]]; then
-  exit $OK;
+  exit "$OK";
 fi
-OK=`cp $TEMPLATE/control $DEBIAN_CONF/control`
+OK=$(cp $TEMPLATE/control "$DEBIAN_CONF"/control)
 if [[ $OK -ne 0 ]]; then
-  exit $OK;
+  exit "$OK";
 fi
 
 echo "Build debian package..."
-cd $PKG_ROOT
+cd "$PKG_ROOT"
 echo "Tag: $TAG"
 VER=$TAG
 if [[ $TAG =~ ^v ]]; then
-  VER=$(echo $TAG | tr -d 'v')
+  VER=$(echo "$TAG" | tr -d 'v')
 fi
 NEW_VERSION_STRING="Version: $VER"
 NEW_ARCH_STRING="Architecture: $ARCH"
 sed -i "s/Version.*/$NEW_VERSION_STRING/g" debian/DEBIAN/control
 sed -i "s/Architecture.*/$NEW_ARCH_STRING/g" debian/DEBIAN/control
-dpkg-deb --build debian avalanchego-$TAG-$ARCH.deb
-aws s3 cp avalanchego-$TAG-$ARCH.deb s3://${BUCKET}/linux/debs/ubuntu/$RELEASE/$ARCH/
+dpkg-deb --build debian "avalanchego-$TAG-$ARCH.deb"
+aws s3 cp "avalanchego-$TAG-$ARCH.deb" "s3://${BUCKET}/linux/debs/ubuntu/$RELEASE/$ARCH/"
diff --git a/.github/workflows/build-tgz-pkg.sh b/.github/workflows/build-tgz-pkg.sh
index c90dfdc9dcab..4d6e11bfbd42 100755
--- a/.github/workflows/build-tgz-pkg.sh
+++ b/.github/workflows/build-tgz-pkg.sh
@@ -4,16 +4,16 @@ set -euo pipefail
 AVALANCHE_ROOT=$PKG_ROOT/avalanchego-$TAG
 
-mkdir -p $AVALANCHE_ROOT
+mkdir -p "$AVALANCHE_ROOT"
 
-OK=`cp ./build/avalanchego $AVALANCHE_ROOT`
+OK=$(cp ./build/avalanchego "$AVALANCHE_ROOT")
 if [[ $OK -ne 0 ]]; then
-  exit $OK;
+  exit "$OK";
 fi
 
 echo "Build tgz package..."
-cd $PKG_ROOT
+cd "$PKG_ROOT"
 echo "Tag: $TAG"
-tar -czvf "avalanchego-linux-$ARCH-$TAG.tar.gz" avalanchego-$TAG
-aws s3 cp avalanchego-linux-$ARCH-$TAG.tar.gz s3://$BUCKET/linux/binaries/ubuntu/$RELEASE/$ARCH/
+tar -czvf "avalanchego-linux-$ARCH-$TAG.tar.gz" "avalanchego-$TAG"
+aws s3 cp "avalanchego-linux-$ARCH-$TAG.tar.gz" "s3://$BUCKET/linux/binaries/ubuntu/$RELEASE/$ARCH/"
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 7961d7d33280..0641c2907912 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -111,7 +111,7 @@ jobs:
       #   with:
       #     go-version: ${{ env.go_version }}
      #     check-latest: true
-      # - name: Build caminogo Binary
+      # - name: Build caminogo binaries
       #   shell: bash
       #   run: ./scripts/build.sh
       # - name: Run e2e tests
@@ -135,6 +135,9 @@ jobs:
       - name: Run static analysis tests
         shell: bash
         run: scripts/lint.sh
+      - name: Run shellcheck
+        shell: bash
+        run: scripts/shellcheck.sh
   buf-lint:
     name: Protobuf Lint
     runs-on: ubuntu-latest
diff --git a/.github/workflows/publish_image.sh b/.github/workflows/publish_image.sh
index 34298f53b8e6..0aef0feb5cec 100755
--- a/.github/workflows/publish_image.sh
+++ b/.github/workflows/publish_image.sh
@@ -18,7 +18,7 @@ source "$AVALANCHE_PATH"/scripts/build_image.sh -r
 if [[ $current_branch == "master" ]]; then
   echo "Tagging current avalanchego image as $avalanchego_dockerhub_repo:latest"
-  docker tag $avalanchego_dockerhub_repo:$current_branch $avalanchego_dockerhub_repo:latest
+  docker tag "$avalanchego_dockerhub_repo:$current_branch" "$avalanchego_dockerhub_repo:latest"
 fi
 
 echo "Pushing: $avalanchego_dockerhub_repo:$current_branch"
@@ -26,4 +26,4 @@ echo "Pushing: $avalanchego_dockerhub_repo:$current_branch"
 echo "$DOCKER_PASS" | docker login --username "$DOCKER_USERNAME" --password-stdin
 
 ## pushing image with tags
-docker image push -a $avalanchego_dockerhub_repo
+docker image push -a "$avalanchego_dockerhub_repo"
diff --git a/.github/workflows/scripts/cleanup-net-outage-sim.sh b/.github/workflows/scripts/cleanup-net-outage-sim.sh
index ee4a7c07e022..b073f336a7c9 100644
--- a/.github/workflows/scripts/cleanup-net-outage-sim.sh
+++ b/.github/workflows/scripts/cleanup-net-outage-sim.sh
@@ -5,8 +5,9 @@ set -euo pipefail
 ###
 # cleanup removes the docker instance and the network
 echo "Cleaning up..."
+# shellcheck disable=SC2046
 docker rm $(sudo docker stop $(sudo docker ps -a -q --filter ancestor=c4tplatform/camino-node:latest --format="{{.ID}}")) #if the filter returns nothing the command fails, so ignore errors
-docker network rm controlled-net 
-rm /opt/mainnet-db-daily* 2>/dev/null 
-rm -rf /var/lib/caminogo 2>/dev/null 
+docker network rm controlled-net
+rm /opt/mainnet-db-daily* 2>/dev/null
+rm -rf /var/lib/caminogo 2>/dev/null
 echo "Done cleaning up"
diff --git a/.github/workflows/scripts/run-net-outage-sim.sh b/.github/workflows/scripts/run-net-outage-sim.sh
index 40b78fb444ab..7d1adf3ada1f 100644
--- a/.github/workflows/scripts/run-net-outage-sim.sh
+++ b/.github/workflows/scripts/run-net-outage-sim.sh
@@ -14,7 +14,7 @@ wait_until_healthy () {
   # store the response code here
   response=0
   # while the endpoint doesn't return 200
-  while [ $response -ne 200 ]
+  while [ "$response" -ne 200 ]
   do
     echo "Checking if local node is healthy..."
     # Ignore error in case of ephemeral failure to hit node's API
@@ -22,12 +22,12 @@ wait_until_healthy () {
     echo "got status code $response from health endpoint"
     # check that 3 hours haven't passed
     now=$(date +%s)
-    if [ $now -ge $stop ];
-    then 
+    if [ "$now" -ge "$stop" ];
+    then
       # timeout: exit
       SUCCESS=1
       return
-    fi 
+    fi
     # no timeout yet, wait 30s until retry
     sleep 30
   done
@@ -44,20 +44,20 @@ echo "done existing database files"
 
 #download latest mainnet DB backup
 FILENAME="mainnet-db-daily-"
-DATE=`date +'%m-%d-%Y'`
+DATE=$(date +'%m-%d-%Y')
 DB_FILE="$FILENAME$DATE"
 echo "Copying database file $DB_FILE from S3 to local..."
-aws s3 cp s3://camino-db-daily/ /opt/ --no-progress --recursive --exclude "*" --include "$DB_FILE*" 
+aws s3 cp s3://camino-db-daily/ /opt/ --no-progress --recursive --exclude "*" --include "$DB_FILE*"
 echo "Done downloading database"
 
 # extract DB
 echo "Extracting database..."
-mkdir -p /var/lib/caminogo/db 
-tar -zxf /opt/$DB_FILE*-tar.gz -C /var/lib/caminogo/db 
+mkdir -p /var/lib/caminogo/db
+tar -zxf /opt/"$DB_FILE"*-tar.gz -C /var/lib/caminogo/db
 echo "Done extracting database"
 
 echo "Creating Docker network..."
-docker network create controlled-net 
+docker network create controlled-net
 
 echo "Starting Docker container..."
 containerID=$(docker run --name="net_outage_simulation" --memory="12g" --memory-reservation="11g" --cpus="6.0" --net=controlled-net -p 9650:9650 -p 9651:9651 -v /var/lib/caminogo/db:/db -d c4tplatform/camino-node:latest /caminogo/build/caminogo --db-dir /db --http-host=0.0.0.0)
@@ -69,16 +69,16 @@ wait_until_healthy
 if [ $SUCCESS -eq 1 ];
 then
   echo "Timed out waiting for node to become healthy; exiting."
-  exit 1 
+  exit 1
 fi
 
-# To simulate internet outage, we will disable the docker network connection 
+# To simulate internet outage, we will disable the docker network connection
 echo "Disconnecting node from internet..."
-docker network disconnect controlled-net $containerID
+docker network disconnect controlled-net "$containerID"
 echo "Sleeping 60 minutes..."
-sleep 3600 
+sleep 3600
 echo "Reconnecting node to internet..."
-docker network connect controlled-net $containerID
+docker network connect controlled-net "$containerID"
 echo "Reconnected to internet. Waiting until healthy..."
 
 # now repeatedly check the node's health until it returns healthy
@@ -88,12 +88,12 @@ wait_until_healthy
 if [ $SUCCESS -eq 1 ];
 then
   echo "Timed out waiting for node to become healthy after outage; exiting."
-  exit 1 
+  exit 1
 fi
 
 # The node returned healthy, print how long it took
 end=$(date +%s)
-DELAY=$(($end - $start))
+DELAY=$((end - start))
 echo "Node became healthy again after complete outage after $DELAY seconds."
 
 echo "Test completed"
diff --git a/.kurtosis/kurtosis.sh b/.kurtosis/kurtosis.sh
deleted file mode 100644
index a3d1cd85e3bb..000000000000
--- a/.kurtosis/kurtosis.sh
+++ /dev/null
@@ -1,226 +0,0 @@
-# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! WARNING !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
-#
-# Do not modify this file! It will get overwritten when you upgrade Kurtosis!
-#
-# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! WARNING !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
-
-set -euo pipefail
-
-
-
-# ============================================================================================
-#                                         Constants
-# ============================================================================================
-# The directory where Kurtosis will store files it uses in between executions, e.g. access tokens
-# Can make this configurable if needed
-KURTOSIS_DIRPATH="${HOME}/.kurtosis"
-
-KURTOSIS_CORE_TAG="1.8"
-KURTOSIS_DOCKERHUB_ORG="kurtosistech"
-INITIALIZER_IMAGE="${KURTOSIS_DOCKERHUB_ORG}/kurtosis-core_initializer:${KURTOSIS_CORE_TAG}"
-API_IMAGE="${KURTOSIS_DOCKERHUB_ORG}/kurtosis-core_api:${KURTOSIS_CORE_TAG}"
-
-POSITIONAL_ARG_DEFINITION_FRAGMENTS=2
-
-
-
-# ============================================================================================
-#                                       Arg Parsing
-# ============================================================================================
-function print_help_and_exit() {
-    echo ""
-    echo "$(basename "${0}") [--custom-params custom_params_json] [--client-id client_id] [--client-secret client_secret] [--help] [--kurtosis-log-level kurtosis_log_level] [--list] [--parallelism parallelism] [--tests test_names] [--test-suite-log-level test_suite_log_level] test_suite_image"
-    echo ""
-    echo "  --custom-params custom_params_json          JSON string containing arbitrary data that will be passed as-is to your testsuite, so it can modify its behaviour based on input (default: {})"
-    echo "  --client-id client_id                        An OAuth client ID which is needed for running Kurtosis in CI, and should be left empty when running Kurtosis on a local machine"
-    echo "  --client-secret client_secret                An OAuth client secret which is needed for running Kurtosis in CI, and should be left empty when running Kurtosis on a local machine"
-    echo "  --help                                       Display this message"
-    echo "  --kurtosis-log-level kurtosis_log_level      The log level that all output generated by the Kurtosis framework itself should log at (panic|fatal|error|warning|info|debug|trace) (default: info)"
-    echo "  --list                                       Rather than running the tests, lists the tests available to run"
-    echo "  --parallelism parallelism                    The number of texts to execute in parallel (default: 4)"
-    echo "  --tests test_names                           List of test names to run, separated by ',' (default or empty: run all tests)"
-    echo "  --test-suite-log-level test_suite_log_level  A string that will be passed as-is to the test suite container to indicate what log level the test suite container should output at; this string should be meaningful to the test suite container because Kurtosis won't know what logging framework the testsuite uses (default: info)"
-    echo "  test_suite_image                             The Docker image containing the testsuite to execute"
-    echo ""
-    exit 1  # Exit with an error code, so that if it gets accidentally called in parent scripts/CI it fails loudly
-}
-
-
-
-# ============================================================================================
-#                                       Arg Parsing
-# ============================================================================================
-client_id=""
-client_secret=""
-custom_params_json="{}"
-do_list="false"
-kurtosis_log_level="info"
-parallelism="4"
-show_help="false"
-test_names=""
-test_suite_image=""
-test_suite_log_level="info"
-
-
-
-POSITIONAL=()
-while [ ${#} -gt 0 ]; do
-    key="${1}"
-    case "${key}" in
-        --custom-params)
-            custom_params_json="${2}"
-            shift   # Shift to clear out the flag
-            shift   # Shift again to clear out the value
-            ;;
-        --client-id)
-            client_id="${2}"
-            shift   # Shift to clear out the flag
-            shift   # Shift again to clear out the value
-            ;;
-        --client-secret)
-            client_secret="${2}"
-            shift   # Shift to clear out the flag
-            shift   # Shift again to clear out the value
-            ;;
-        --help)
-            show_help="true"
-            shift   # Shift to clear out the flag
-            ;;
-        --kurtosis-log-level)
-            kurtosis_log_level="${2}"
-            shift   # Shift to clear out the flag
-            shift   # Shift again to clear out the value
-            ;;
-        --list)
-            do_list="true"
-            shift   # Shift to clear out the flag
-            ;;
-        --parallelism)
-            parallelism="${2}"
-            shift   # Shift to clear out the flag
-            shift   # Shift again to clear out the value
-            ;;
-        --tests)
-            test_names="${2}"
-            shift   # Shift to clear out the flag
-            shift   # Shift again to clear out the value
-            ;;
-        --test-suite-log-level)
-            test_suite_log_level="${2}"
-            shift   # Shift to clear out the flag
-            shift   # Shift again to clear out the value
-            ;;
-        -*)
-            echo "ERROR: Unrecognized flag '${key}'" >&2
-            exit 1
-            ;;
-        *)
-            POSITIONAL+=("${1}")
-            shift
-            ;;
-    esac
-done
-
-if "${show_help}"; then
-    print_help_and_exit
-fi
-
-# Restore positional parameters and assign them to variables
-set -- "${POSITIONAL[@]}"
-test_suite_image="${1:-}"
-
-
-
-
-
-# ============================================================================================
-#                                     Arg Validation
-# ============================================================================================
-if [ "${#}" -ne 1 ]; then
-    echo "ERROR: Expected 1 positional variables but got ${#}" >&2
-    print_help_and_exit
-fi
-
-if [ -z "$test_suite_image" ]; then
-    echo "ERROR: Variable 'test_suite_image' cannot be empty" >&2
-    exit 1
-fi
-
-
-
-# ============================================================================================
-#                                       Main Logic
-# ============================================================================================
-# Because Kurtosis X.Y.Z tags are normalized to X.Y so that minor patch updates are transparently
-# used, we need to pull the latest API & initializer images
-echo "Pulling latest versions of API & initializer image..."
-if ! docker pull "${INITIALIZER_IMAGE}"; then
-    echo "WARN: An error occurred pulling the latest version of the initializer image (${INITIALIZER_IMAGE}); you may be running an out-of-date version" >&2
-else
-    echo "Successfully pulled latest version of initializer image"
-fi
-if ! docker pull "${API_IMAGE}"; then
-    echo "WARN: An error occurred pulling the latest version of the API image (${API_IMAGE}); you may be running an out-of-date version" >&2
-else
-    echo "Successfully pulled latest version of API image"
-fi
-
-# Kurtosis needs a Docker volume to store its execution data in
-# To learn more about volumes, see: https://docs.docker.com/storage/volumes/
-sanitized_image="$(echo "${test_suite_image}" | sed 's/[^a-zA-Z0-9_.-]/_/g')"
-suite_execution_volume="$(date +%Y-%m-%dT%H.%M.%S)_${sanitized_image}"
-if ! docker volume create "${suite_execution_volume}" > /dev/null; then
-    echo "ERROR: Failed to create a Docker volume to store the execution files in" >&2
-    exit 1
-fi
-
-if ! mkdir -p "${KURTOSIS_DIRPATH}"; then
-    echo "ERROR: Failed to create the Kurtosis directory at '${KURTOSIS_DIRPATH}'" >&2
-    exit 1
-fi
-
-docker run \
-    `# The Kurtosis initializer runs inside a Docker container, but needs to access to the Docker engine; this is how to do it` \
-    `# For more info, see the bottom of: http://jpetazzo.github.io/2015/09/03/do-not-use-docker-in-docker-for-ci/` \
-    --mount "type=bind,source=/var/run/docker.sock,target=/var/run/docker.sock" \
-    \
-    `# Because the Kurtosis initializer runs inside Docker but needs to persist & read files on the host filesystem between execution,` \
-    `# the container expects the Kurtosis directory to be bind-mounted at the special "/kurtosis" path` \
-    --mount "type=bind,source=${KURTOSIS_DIRPATH},target=/kurtosis" \
-    \
-    `# The Kurtosis initializer image requires the volume for storing suite execution data to be mounted at the special "/suite-execution" path` \
-    --mount "type=volume,source=${suite_execution_volume},target=/suite-execution" \
-    \
-    `# Keep these sorted alphabetically` \
-    --env CLIENT_ID="${client_id}" \
-    --env CLIENT_SECRET="${client_secret}" \
-    --env CUSTOM_PARAMS_JSON="${custom_params_json}" \
-    --env DO_LIST="${do_list}" \
-    --env KURTOSIS_API_IMAGE="${API_IMAGE}" \
-    --env KURTOSIS_LOG_LEVEL="${kurtosis_log_level}" \
-    --env PARALLELISM="${parallelism}" \
-    --env SUITE_EXECUTION_VOLUME="${suite_execution_volume}" \
-    --env TEST_NAMES="${test_names}" \
-    --env TEST_SUITE_IMAGE="${test_suite_image}" \
-    --env TEST_SUITE_LOG_LEVEL="${test_suite_log_level}" \
-    \
-    "${INITIALIZER_IMAGE}"
diff --git a/api/admin/client.go b/api/admin/client.go
index ad97fbe12534..262c4352168b 100644
--- a/api/admin/client.go
+++ b/api/admin/client.go
@@ -17,7 +17,9 @@ import (
 	"context"
 
 	"github.com/ava-labs/avalanchego/api"
+	"github.com/ava-labs/avalanchego/database/rpcdb"
 	"github.com/ava-labs/avalanchego/ids"
+	"github.com/ava-labs/avalanchego/utils/formatting"
 	"github.com/ava-labs/avalanchego/utils/logging"
 	"github.com/ava-labs/avalanchego/utils/rpc"
 )
@@ -38,6 +40,7 @@ type Client interface {
 	SetLoggerLevel(ctx context.Context, loggerName, logLevel, displayLevel string, options ...rpc.Option) (map[string]LogAndDisplayLevels, error)
 	GetLoggerLevel(ctx context.Context, loggerName string, options ...rpc.Option) (map[string]LogAndDisplayLevels, error)
 	GetConfig(ctx context.Context, options ...rpc.Option) (interface{}, error)
+	DBGet(ctx context.Context, key []byte, options ...rpc.Option) ([]byte, error)
 	GetNodeSigner(ctx context.Context, _ string, options ...rpc.Option) (*GetNodeSignerReply, error)
 }
@@ -163,3 +166,23 @@ func (c *client) GetNodeSigner(ctx context.Context, _ string, options ...rpc.Opt
 	err := c.requester.SendRequest(ctx, "getNodeSigner", Secret{c.secret}, res, options...)
 	return res, err
 }
+
+func (c *client) DBGet(ctx context.Context, key []byte, options ...rpc.Option) ([]byte, error) {
+	keyStr, err := formatting.Encode(formatting.HexNC, key)
+	if err != nil {
+		return nil, err
+	}
+
+	res := &DBGetReply{}
+	err = c.requester.SendRequest(ctx, "admin.dbGet", &DBGetArgs{
+		Key: keyStr,
+	}, res, options...)
+	if err != nil {
+		return nil, err
+	}
+
+	if err := rpcdb.ErrEnumToError[res.ErrorCode]; err != nil {
+		return nil, err
+	}
+	return formatting.Decode(formatting.HexNC, res.Value)
+}
diff --git a/api/admin/client_test.go b/api/admin/client_test.go
index 486a0a716564..ed352e1b2b35 100644
--- a/api/admin/client_test.go
+++ b/api/admin/client_test.go
@@ -16,24 +16,23 @@ import (
 	"github.com/ava-labs/avalanchego/utils/rpc"
 )
 
-var errTest = errors.New("non-nil error")
+var (
+	errTest = errors.New("non-nil error")
 
-// SuccessResponseTest defines the expected result of an API call that returns SuccessResponse
-type SuccessResponseTest struct {
-	Err error
-}
-
-// GetSuccessResponseTests returns a list of possible SuccessResponseTests
-func GetSuccessResponseTests() []SuccessResponseTest {
-	return []SuccessResponseTest{
+	SuccessResponseTests = []struct {
+		name        string
+		expectedErr error
+	}{
 		{
-			Err: nil,
+			name:        "no error",
+			expectedErr: nil,
 		},
 		{
-			Err: errTest,
+			name:        "error",
+			expectedErr: errTest,
 		},
 	}
-}
+)
 
 type mockClient struct {
 	response interface{}
@@ -76,74 +75,62 @@ func (mc *mockClient) SendRequest(_ context.Context, _ string, _ interface{}, re
 }
 
 func TestStartCPUProfiler(t *testing.T) {
-	require := require.New(t)
-
-	tests := GetSuccessResponseTests()
-
-	for _, test := range tests {
-		mockClient := client{requester: NewMockClient(&api.EmptyReply{}, test.Err)}
-		err := mockClient.StartCPUProfiler(context.Background())
-		require.ErrorIs(err, test.Err)
+	for _, test := range SuccessResponseTests {
+		t.Run(test.name, func(t *testing.T) {
+			mockClient := client{requester: NewMockClient(&api.EmptyReply{}, test.expectedErr)}
+			err := mockClient.StartCPUProfiler(context.Background())
+			require.ErrorIs(t, err, test.expectedErr)
+		})
 	}
 }
 
 func TestStopCPUProfiler(t *testing.T) {
-	require := require.New(t)
-
-	tests := GetSuccessResponseTests()
-
-	for _, test := range tests {
-		mockClient := client{requester: NewMockClient(&api.EmptyReply{}, test.Err)}
-		err := mockClient.StopCPUProfiler(context.Background())
-		require.ErrorIs(err, test.Err)
+	for _, test := range SuccessResponseTests {
+		t.Run(test.name, func(t *testing.T) {
+			mockClient := client{requester: NewMockClient(&api.EmptyReply{}, test.expectedErr)}
+			err := mockClient.StopCPUProfiler(context.Background())
+			require.ErrorIs(t, err, test.expectedErr)
+		})
 	}
 }
 
 func TestMemoryProfile(t *testing.T) {
-	require := require.New(t)
-
-	tests := GetSuccessResponseTests()
-
-	for _, test := range tests {
-		mockClient := client{requester: NewMockClient(&api.EmptyReply{}, test.Err)}
-		err := mockClient.MemoryProfile(context.Background())
-		require.ErrorIs(err, test.Err)
+	for _, test := range SuccessResponseTests {
+		t.Run(test.name, func(t *testing.T) {
+			mockClient := client{requester: NewMockClient(&api.EmptyReply{}, test.expectedErr)}
+			err := mockClient.MemoryProfile(context.Background())
+			require.ErrorIs(t, err, test.expectedErr)
+		})
 	}
 }
 
 func TestLockProfile(t *testing.T) {
-	require := require.New(t)
-
-	tests := GetSuccessResponseTests()
-
-	for _, test := range tests {
-		mockClient := client{requester: NewMockClient(&api.EmptyReply{}, test.Err)}
-		err := mockClient.LockProfile(context.Background())
-		require.ErrorIs(err, test.Err)
+	for _, test := range SuccessResponseTests {
+		t.Run(test.name, func(t *testing.T) {
+			mockClient := client{requester: NewMockClient(&api.EmptyReply{}, test.expectedErr)}
+			err := mockClient.LockProfile(context.Background())
+			require.ErrorIs(t, err, test.expectedErr)
+		})
 	}
 }
 
 func TestAlias(t *testing.T) {
-	require := require.New(t)
-
-	tests := GetSuccessResponseTests()
-
-	for _, test := range tests {
-		mockClient := client{requester: NewMockClient(&api.EmptyReply{}, test.Err)}
-		err := mockClient.Alias(context.Background(), "alias", "alias2")
-		require.ErrorIs(err, test.Err)
+	for _, test := range SuccessResponseTests {
+		t.Run(test.name, func(t *testing.T) {
+			mockClient := client{requester: NewMockClient(&api.EmptyReply{}, test.expectedErr)}
+			err := mockClient.Alias(context.Background(), "alias", "alias2")
+			require.ErrorIs(t, err, test.expectedErr)
+		})
 	}
 }
 
 func TestAliasChain(t *testing.T) {
-	require := require.New(t)
-
-	tests := GetSuccessResponseTests()
-
-	for _, test := range tests {
-		mockClient := client{requester: NewMockClient(&api.EmptyReply{}, test.Err)}
-		err := mockClient.AliasChain(context.Background(), "chain", "chain-alias")
-		require.ErrorIs(err, test.Err)
+	for _, test := range SuccessResponseTests {
+		t.Run(test.name, func(t *testing.T) {
+			mockClient := client{requester: NewMockClient(&api.EmptyReply{}, test.expectedErr)}
+			err := mockClient.AliasChain(context.Background(), "chain", "chain-alias")
+			require.ErrorIs(t, err, test.expectedErr)
+		})
 	}
 }
 
@@ -169,14 +156,12 @@ func TestGetChainAliases(t *testing.T) {
 }
 
 func TestStacktrace(t *testing.T) {
-	require := require.New(t)
-
-	tests := GetSuccessResponseTests()
-
-	for _, test := range tests {
-		mockClient := client{requester: NewMockClient(&api.EmptyReply{}, test.Err)}
-		err := mockClient.Stacktrace(context.Background())
-		require.ErrorIs(err, test.Err)
+	for _, test := range SuccessResponseTests {
+		t.Run(test.name, func(t *testing.T) {
+			mockClient := client{requester: NewMockClient(&api.EmptyReply{}, test.expectedErr)}
+			err := mockClient.Stacktrace(context.Background())
+			require.ErrorIs(t, err, test.expectedErr)
+		})
	}
 }
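The new DBGet client method round-trips keys and values as checksummed hex strings (formatting.HexNC) and surfaces database errors through the shared rpcdb error enum, so callers get back ordinary Go errors such as database.ErrNotFound. A minimal sketch of an external caller, assuming the package's existing NewClient constructor and a local node with the admin API enabled (endpoint and constructor shape are illustrative, not part of this patch):

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/ava-labs/avalanchego/api/admin"
)

func main() {
	// Assumes a node listening locally with the admin API enabled;
	// the exact constructor signature may differ in this fork.
	client := admin.NewClient("http://127.0.0.1:9650")

	// The raw key bytes are hex-encoded by DBGet before being sent
	// over JSON-RPC, and the reply value is decoded back to bytes.
	value, err := client.DBGet(context.Background(), []byte("hello"))
	if err != nil {
		log.Fatal(err) // database.ErrNotFound if the key is absent
	}
	fmt.Printf("value: %x\n", value)
}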
diff --git a/api/admin/key_value_reader.go b/api/admin/key_value_reader.go
new file mode 100644
index 000000000000..bfc7b2cced06
--- /dev/null
+++ b/api/admin/key_value_reader.go
@@ -0,0 +1,34 @@
+// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved.
+// See the file LICENSE for licensing terms.
+
+package admin
+
+import (
+	"context"
+
+	"github.com/ava-labs/avalanchego/database"
+)
+
+var _ database.KeyValueReader = (*KeyValueReader)(nil)
+
+type KeyValueReader struct {
+	client Client
+}
+
+func NewKeyValueReader(client Client) *KeyValueReader {
+	return &KeyValueReader{
+		client: client,
+	}
+}
+
+func (r *KeyValueReader) Has(key []byte) (bool, error) {
+	_, err := r.client.DBGet(context.Background(), key)
+	if err == database.ErrNotFound {
+		return false, nil
+	}
+	return err == nil, err
+}
+
+func (r *KeyValueReader) Get(key []byte) ([]byte, error) {
+	return r.client.DBGet(context.Background(), key)
+}
diff --git a/api/admin/service.go b/api/admin/service.go
index ff140e6188ac..0549e630fe22 100644
--- a/api/admin/service.go
+++ b/api/admin/service.go
@@ -27,11 +27,14 @@ import (
 	"github.com/ava-labs/avalanchego/api"
 	"github.com/ava-labs/avalanchego/api/server"
 	"github.com/ava-labs/avalanchego/chains"
+	"github.com/ava-labs/avalanchego/database"
+	"github.com/ava-labs/avalanchego/database/rpcdb"
 	"github.com/ava-labs/avalanchego/ids"
 	"github.com/ava-labs/avalanchego/utils"
 	"github.com/ava-labs/avalanchego/utils/cb58"
 	"github.com/ava-labs/avalanchego/utils/constants"
 	"github.com/ava-labs/avalanchego/utils/crypto/secp256k1"
+	"github.com/ava-labs/avalanchego/utils/formatting"
 	"github.com/ava-labs/avalanchego/utils/hashing"
 	"github.com/ava-labs/avalanchego/utils/json"
 	"github.com/ava-labs/avalanchego/utils/logging"
@@ -39,6 +42,8 @@ import (
 	"github.com/ava-labs/avalanchego/utils/profiler"
 	"github.com/ava-labs/avalanchego/vms"
 	"github.com/ava-labs/avalanchego/vms/registry"
+
+	rpcdbpb "github.com/ava-labs/avalanchego/proto/pb/rpcdb"
 )
 
 const (
@@ -59,6 +64,7 @@ type Config struct {
 	ProfileDir   string
 	LogFactory   logging.Factory
 	NodeConfig   interface{}
+	DB           database.Database
 	ChainManager chains.Manager
 	HTTPServer   server.PathAdderWithReadLock
 	VMRegistry   registry.VMRegistry
@@ -420,6 +426,38 @@ func (a *Admin) getLogLevels(loggerNames []string) (map[string]LogAndDisplayLeve
 	return loggerLevels, nil
 }
 
+type DBGetArgs struct {
+	Key string `json:"key"`
+}
+
+type DBGetReply struct {
+	Value     string        `json:"value"`
+	ErrorCode rpcdbpb.Error `json:"errorCode"`
+}
+
+//nolint:stylecheck // renaming this method to DBGet would change the API method from "dbGet" to "dBGet"
+func (a *Admin) DbGet(_ *http.Request, args *DBGetArgs, reply *DBGetReply) error {
+	a.Log.Debug("API called",
+		zap.String("service", "admin"),
+		zap.String("method", "dbGet"),
+		logging.UserString("key", args.Key),
+	)
+
+	key, err := formatting.Decode(formatting.HexNC, args.Key)
+	if err != nil {
+		return err
+	}
+
+	value, err := a.DB.Get(key)
+	if err != nil {
+		reply.ErrorCode = rpcdb.ErrorToErrEnum[err]
+		return rpcdb.ErrorToRPCError(err)
+	}
+
+	reply.Value, err = formatting.Encode(formatting.HexNC, value)
+	return err
+}
+
 // See GetNodeSigner
 type GetNodeSignerReply struct {
 	PrivateKey string `json:"privateKey"`
diff --git a/api/admin/service_test.go b/api/admin/service_test.go
index ea159c655c63..a1309a213f60 100644
--- a/api/admin/service_test.go
+++ b/api/admin/service_test.go
@@ -11,10 +11,14 @@ import (
 
 	"go.uber.org/mock/gomock"
 
+	"github.com/ava-labs/avalanchego/database/memdb"
 	"github.com/ava-labs/avalanchego/ids"
+	"github.com/ava-labs/avalanchego/utils/formatting"
 	"github.com/ava-labs/avalanchego/utils/logging"
 	"github.com/ava-labs/avalanchego/vms"
 	"github.com/ava-labs/avalanchego/vms/registry"
+
+	rpcdbpb "github.com/ava-labs/avalanchego/proto/pb/rpcdb"
 )
 
 type loadVMsTest struct {
@@ -111,3 +115,56 @@ func TestLoadVMsGetAliasesFails(t *testing.T) {
 	err := resources.admin.LoadVMs(&http.Request{}, nil, &reply)
 	require.ErrorIs(err, errTest)
 }
+
+func TestServiceDBGet(t *testing.T) {
+	a := &Admin{Config: Config{
+		Log: logging.NoLog{},
+		DB:  memdb.New(),
+	}}
+
+	helloBytes := []byte("hello")
+	helloHex, err := formatting.Encode(formatting.HexNC, helloBytes)
+	require.NoError(t, err)
+
+	worldBytes := []byte("world")
+	worldHex, err := formatting.Encode(formatting.HexNC, worldBytes)
+	require.NoError(t, err)
+
+	require.NoError(t, a.DB.Put(helloBytes, worldBytes))
+
+	tests := []struct {
+		name              string
+		key               string
+		expectedValue     string
+		expectedErrorCode rpcdbpb.Error
+	}{
+		{
+			name:              "key exists",
+			key:               helloHex,
+			expectedValue:     worldHex,
+			expectedErrorCode: rpcdbpb.Error_ERROR_UNSPECIFIED,
+		},
+		{
+			name:              "key doesn't exist",
+			key:               "",
+			expectedValue:     "",
+			expectedErrorCode: rpcdbpb.Error_ERROR_NOT_FOUND,
+		},
+	}
+	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
+			require := require.New(t)
+
+			reply := &DBGetReply{}
+			require.NoError(a.DbGet(
+				nil,
+				&DBGetArgs{
+					Key: test.key,
+				},
+				reply,
+			))
+			require.Equal(test.expectedValue, reply.Value)
+			require.Equal(test.expectedErrorCode, reply.ErrorCode)
+		})
+	}
+}
diff --git a/api/info/service.go b/api/info/service.go
index 06450fe074ae..81056f59e309 100644
--- a/api/info/service.go
+++ b/api/info/service.go
@@ -35,7 +35,10 @@ import (
 	"github.com/ava-labs/avalanchego/utils/set"
 	"github.com/ava-labs/avalanchego/version"
 	"github.com/ava-labs/avalanchego/vms"
+	"github.com/ava-labs/avalanchego/vms/nftfx"
 	"github.com/ava-labs/avalanchego/vms/platformvm/signer"
+	"github.com/ava-labs/avalanchego/vms/propertyfx"
+	"github.com/ava-labs/avalanchego/vms/secp256k1fx"
 )
 
 var errNoChainProvided = errors.New("argument 'chain' not given")
@@ -431,6 +434,7 @@ func (i *Info) GetTxFee(_ *http.Request, _ *struct{}, reply *GetTxFeeResponse) e
 // GetVMsReply contains the response metadata for GetVMs
 type GetVMsReply struct {
 	VMs map[ids.ID][]string `json:"vms"`
+	Fxs map[ids.ID]string   `json:"fxs"`
 }
 
 // GetVMs lists the virtual machines installed on the node
@@ -447,5 +451,10 @@ func (i *Info) GetVMs(_ *http.Request, _ *struct{}, reply *GetVMsReply) error {
 	}
 
 	reply.VMs, err = ids.GetRelevantAliases(i.VMManager, vmIDs)
+	reply.Fxs = map[ids.ID]string{
+		secp256k1fx.ID: secp256k1fx.Name,
+		nftfx.ID:       nftfx.Name,
+		propertyfx.ID:  propertyfx.Name,
+	}
 	return err
 }
diff --git a/api/metrics/multi_gatherer.go b/api/metrics/multi_gatherer.go
index 79affd4b7b2e..45d4439622b4 100644
--- a/api/metrics/multi_gatherer.go
+++ b/api/metrics/multi_gatherer.go
@@ -15,6 +15,7 @@ import (
 	"golang.org/x/exp/slices"
 
 	"github.com/ava-labs/avalanchego/utils"
+	"github.com/ava-labs/avalanchego/utils/metric"
 )
 
 var (
@@ -50,23 +51,19 @@ func (g *multiGatherer) Gather() ([]*dto.MetricFamily, error) {
 
 	var results []*dto.MetricFamily
 	for namespace, gatherer := range g.gatherers {
-		metrics, err := gatherer.Gather()
+		gatheredMetrics, err := gatherer.Gather()
 		if err != nil {
 			return nil, err
 		}
-		for _, metric := range metrics {
+		for _, gatheredMetric := range gatheredMetrics {
 			var name string
-			if metric.Name != nil {
-				if len(namespace) > 0 {
-					name = fmt.Sprintf("%s_%s", namespace, *metric.Name)
-				} else {
-					name = *metric.Name
-				}
+			if gatheredMetric.Name != nil {
+				name = metric.AppendNamespace(namespace, *gatheredMetric.Name)
 			} else {
 				name = namespace
 			}
-			metric.Name = &name
-			results = append(results, metric)
+			gatheredMetric.Name = &name
+			results = append(results, gatheredMetric)
 		}
 	}
 	// Because we overwrite every metric's name, we are guaranteed that there
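Both the gatherer above and the chain manager below delegate namespace joining to metric.AppendNamespace. Its implementation is not part of this patch; judging from the branchy fmt.Sprintf logic it replaces, it presumably behaves along these lines (a sketch, not the actual source):

// AppendNamespace joins a metric namespace and a metric name with an
// underscore, degrading gracefully when either side is empty — mirroring
// the conditional fmt.Sprintf logic removed from multiGatherer.Gather.
func AppendNamespace(prefix, suffix string) string {
	switch {
	case len(prefix) == 0:
		return suffix
	case len(suffix) == 0:
		return prefix
	default:
		return prefix + "_" + suffix
	}
}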
diff --git a/chains/manager.go b/chains/manager.go
index 6603520b58dd..8edab5351159 100644
--- a/chains/manager.go
+++ b/chains/manager.go
@@ -50,13 +50,18 @@ import (
 	"github.com/ava-labs/avalanchego/utils/constants"
 	"github.com/ava-labs/avalanchego/utils/crypto/bls"
 	"github.com/ava-labs/avalanchego/utils/logging"
+	"github.com/ava-labs/avalanchego/utils/metric"
 	"github.com/ava-labs/avalanchego/utils/perms"
 	"github.com/ava-labs/avalanchego/utils/set"
 	"github.com/ava-labs/avalanchego/version"
 	"github.com/ava-labs/avalanchego/vms"
+	"github.com/ava-labs/avalanchego/vms/fx"
 	"github.com/ava-labs/avalanchego/vms/metervm"
+	"github.com/ava-labs/avalanchego/vms/nftfx"
 	"github.com/ava-labs/avalanchego/vms/platformvm/warp"
+	"github.com/ava-labs/avalanchego/vms/propertyfx"
 	"github.com/ava-labs/avalanchego/vms/proposervm"
+	"github.com/ava-labs/avalanchego/vms/secp256k1fx"
 	"github.com/ava-labs/avalanchego/vms/tracedvm"
 
 	timetracker "github.com/ava-labs/avalanchego/snow/networking/tracker"
@@ -78,16 +83,16 @@ const (
 
 var (
 	// Commonly shared VM DB prefix
-	vmDBPrefix = []byte("vm")
+	VMDBPrefix = []byte("vm")
 
 	// Bootstrapping prefixes for LinearizableVMs
-	vertexDBPrefix              = []byte("vertex")
-	vertexBootstrappingDBPrefix = []byte("vertex_bs")
-	txBootstrappingDBPrefix     = []byte("tx_bs")
-	blockBootstrappingDBPrefix  = []byte("block_bs")
+	VertexDBPrefix              = []byte("vertex")
+	VertexBootstrappingDBPrefix = []byte("vertex_bs")
+	TxBootstrappingDBPrefix     = []byte("tx_bs")
+	BlockBootstrappingDBPrefix  = []byte("block_bs")
 
 	// Bootstrapping prefixes for ChainVMs
-	bootstrappingDB = []byte("bs")
+	ChainBootstrappingDBPrefix = []byte("bs")
 
 	errUnknownVMType           = errors.New("the vm should have type avalanche.DAGVM or snowman.ChainVM")
 	errCreatePlatformVM        = errors.New("attempted to create a chain running the PlatformVM")
@@ -95,6 +100,12 @@ var (
 	errNoPrimaryNetworkConfig  = errors.New("no subnet config for primary network found")
 	errPartialSyncAsAValidator = errors.New("partial sync should not be configured for a validator")
 
+	fxs = map[ids.ID]fx.Factory{
+		secp256k1fx.ID: &secp256k1fx.Factory{},
+		nftfx.ID:       &nftfx.Factory{},
+		propertyfx.ID:  &propertyfx.Factory{},
+	}
+
 	_ Manager = (*manager)(nil)
 )
 
@@ -107,9 +118,6 @@ var (
 type Manager interface {
 	ids.Aliaser
 
-	// Return the router this Manager is using to route consensus messages to chains
-	Router() router.Router
-
 	// Queues a chain to be created in the future after chain creator is unblocked.
 	// This is only called from the P-chain thread to create other chains
 	// Queued chains are created only after P-chain is bootstrapped.
@@ -282,11 +290,6 @@ func New(config *ManagerConfig) (Manager, error) {
 	}, nil
 }
 
-// Router that this chain manager is using to route consensus messages to chains
-func (m *manager) Router() router.Router {
-	return m.ManagerConfig.Router
-}
-
 // QueueChainCreation queues a chain creation request
 // Invariant: Tracked Subnet must be checked before calling this function
 func (m *manager) QueueChainCreation(chainParams ChainParameters) {
@@ -451,7 +454,7 @@ func (m *manager) buildChain(chainParams ChainParameters, sb subnets.Subnet) (*c
 	}
 
 	consensusMetrics := prometheus.NewRegistry()
-	chainNamespace := fmt.Sprintf("%s_%s", constants.PlatformName, primaryAlias)
+	chainNamespace := metric.AppendNamespace(constants.PlatformName, primaryAlias)
 	if err := m.Metrics.Register(chainNamespace, consensusMetrics); err != nil {
 		return nil, fmt.Errorf("error while registering chain's metrics %w", err)
 	}
@@ -460,13 +463,13 @@ func (m *manager) buildChain(chainParams ChainParameters, sb subnets.Subnet) (*c
 	// `avalanche_{chainID}_` into `avalanche_{chainID}_avalanche_` so that
 	// there are no conflicts when registering the Snowman consensus metrics.
 	avalancheConsensusMetrics := prometheus.NewRegistry()
-	avalancheDAGNamespace := fmt.Sprintf("%s_avalanche", chainNamespace)
+	avalancheDAGNamespace := metric.AppendNamespace(chainNamespace, "avalanche")
 	if err := m.Metrics.Register(avalancheDAGNamespace, avalancheConsensusMetrics); err != nil {
 		return nil, fmt.Errorf("error while registering DAG metrics %w", err)
 	}
 
 	vmMetrics := metrics.NewOptionalGatherer()
-	vmNamespace := fmt.Sprintf("%s_vm", chainNamespace)
+	vmNamespace := metric.AppendNamespace(chainNamespace, "vm")
 	if err := m.Metrics.Register(vmNamespace, vmMetrics); err != nil {
 		return nil, fmt.Errorf("error while registering vm's metrics %w", err)
 	}
@@ -514,23 +517,16 @@ func (m *manager) buildChain(chainParams ChainParameters, sb subnets.Subnet) (*c
 	}
 
 	// TODO: Shutdown VM if an error occurs
-	fxs := make([]*common.Fx, len(chainParams.FxIDs))
+	chainFxs := make([]*common.Fx, len(chainParams.FxIDs))
 	for i, fxID := range chainParams.FxIDs {
-		// Get a factory for the fx we want to use on our chain
-		fxFactory, err := m.VMManager.GetFactory(fxID)
-		if err != nil {
-			return nil, fmt.Errorf("error while getting fxFactory: %w", err)
-		}
-
-		fx, err := fxFactory.New(chainLog)
-		if err != nil {
-			return nil, fmt.Errorf("error while creating fx: %w", err)
+		fxFactory, ok := fxs[fxID]
+		if !ok {
+			return nil, fmt.Errorf("fx %s not found", fxID)
 		}
 
-		// Create the fx
-		fxs[i] = &common.Fx{
+		chainFxs[i] = &common.Fx{
 			ID: fxID,
-			Fx: fx,
+			Fx: fxFactory.New(),
 		}
 	}
@@ -542,7 +538,7 @@ func (m *manager) buildChain(chainParams ChainParameters, sb subnets.Subnet) (*c
 			chainParams.GenesisData,
 			m.Validators,
 			vm,
-			fxs,
+			chainFxs,
 			sb,
 		)
 		if err != nil {
@@ -560,7 +556,7 @@ func (m *manager) buildChain(chainParams ChainParameters, sb subnets.Subnet) (*c
 			m.Validators,
 			beacons,
 			vm,
-			fxs,
+			chainFxs,
 			sb,
 		)
 		if err != nil {
@@ -604,11 +600,11 @@ func (m *manager) createAvalancheChain(
 		return nil, err
 	}
 	prefixDB := prefixdb.New(ctx.ChainID[:], meterDB)
-	vmDB := prefixdb.New(vmDBPrefix, prefixDB)
-	vertexDB := prefixdb.New(vertexDBPrefix, prefixDB)
-	vertexBootstrappingDB := prefixdb.New(vertexBootstrappingDBPrefix, prefixDB)
-	txBootstrappingDB := prefixdb.New(txBootstrappingDBPrefix, prefixDB)
-	blockBootstrappingDB := prefixdb.New(blockBootstrappingDBPrefix, prefixDB)
+	vmDB := prefixdb.New(VMDBPrefix, prefixDB)
+	vertexDB := prefixdb.New(VertexDBPrefix, prefixDB)
+	vertexBootstrappingDB := prefixdb.New(VertexBootstrappingDBPrefix, prefixDB)
+	txBootstrappingDB := prefixdb.New(TxBootstrappingDBPrefix, prefixDB)
+	blockBootstrappingDB := prefixdb.New(BlockBootstrappingDBPrefix, prefixDB)
 
 	vtxBlocker, err := queue.NewWithMissing(vertexBootstrappingDB, "vtx", ctx.AvalancheRegisterer)
 	if err != nil {
@@ -1007,8 +1003,8 @@ func (m *manager) createSnowmanChain(
 		return nil, err
 	}
 	prefixDB := prefixdb.New(ctx.ChainID[:], meterDB)
-	vmDB := prefixdb.New(vmDBPrefix, prefixDB)
-	bootstrappingDB := prefixdb.New(bootstrappingDB, prefixDB)
+	vmDB := prefixdb.New(VMDBPrefix, prefixDB)
+	bootstrappingDB := prefixdb.New(ChainBootstrappingDBPrefix, prefixDB)
 
 	blocked, err := queue.NewWithMissing(bootstrappingDB, "block", ctx.Registerer)
 	if err != nil {
diff --git a/chains/test_manager.go b/chains/test_manager.go
index d142035b422c..f7b98b29b587 100644
--- a/chains/test_manager.go
+++ b/chains/test_manager.go
@@ -3,10 +3,7 @@
 
 package chains
 
-import (
-	"github.com/ava-labs/avalanchego/ids"
-	"github.com/ava-labs/avalanchego/snow/networking/router"
-)
+import "github.com/ava-labs/avalanchego/ids"
 
 // TestManager implements Manager but does nothing. Always returns nil error.
 // To be used only in tests
@@ -14,10 +11,6 @@ var TestManager Manager = testManager{}
 
 type testManager struct{}
 
-func (testManager) Router() router.Router {
-	return nil
-}
-
 func (testManager) QueueChainCreation(ChainParameters) {}
 
 func (testManager) ForceCreateChain(ChainParameters) {}
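Exporting VMDBPrefix and the bootstrapping prefixes lets code outside the chains package reconstruct where a chain keeps its data. A sketch of how a chain's VM database view is derived, directly mirroring the createSnowmanChain lines above; the meterdb wrapper used there is skipped because it does not change key layout, and direct access to the node's base database is assumed:

import (
	"github.com/ava-labs/avalanchego/chains"
	"github.com/ava-labs/avalanchego/database"
	"github.com/ava-labs/avalanchego/database/prefixdb"
	"github.com/ava-labs/avalanchego/ids"
)

// vmDB rebuilds the database view a chain's VM sees: the base database,
// prefixed by the chain ID, then by the shared VM prefix.
func vmDB(base database.Database, chainID ids.ID) database.Database {
	chainDB := prefixdb.New(chainID[:], base)
	return prefixdb.New(chains.VMDBPrefix, chainDB)
}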
diff --git a/database/benchmark_database.go b/database/benchmark_database.go
index b27eec902caa..43af10db1c2b 100644
--- a/database/benchmark_database.go
+++ b/database/benchmark_database.go
@@ -4,7 +4,6 @@
 package database
 
 import (
-	"fmt"
 	"math/rand"
 	"testing"
 
@@ -15,16 +14,16 @@ import (
 
 var (
 	// Benchmarks is a list of all database benchmarks
-	Benchmarks = []func(b *testing.B, db Database, name string, keys, values [][]byte){
-		BenchmarkGet,
-		BenchmarkPut,
-		BenchmarkDelete,
-		BenchmarkBatchPut,
-		BenchmarkBatchDelete,
-		BenchmarkBatchWrite,
-		BenchmarkParallelGet,
-		BenchmarkParallelPut,
-		BenchmarkParallelDelete,
+	Benchmarks = map[string]func(b *testing.B, db Database, keys, values [][]byte){
+		"Get":            BenchmarkGet,
+		"Put":            BenchmarkPut,
+		"Delete":         BenchmarkDelete,
+		"BatchPut":       BenchmarkBatchPut,
+		"BatchDelete":    BenchmarkBatchDelete,
+		"BatchWrite":     BenchmarkBatchWrite,
+		"ParallelGet":    BenchmarkParallelGet,
+		"ParallelPut":    BenchmarkParallelPut,
+		"ParallelDelete": BenchmarkParallelDelete,
 	}
 	// BenchmarkSizes to use with each benchmark
 	BenchmarkSizes = [][]int{
@@ -56,169 +55,150 @@ func SetupBenchmark(b *testing.B, count int, keySize, valueSize int) ([][]byte,
 }
 
 // BenchmarkGet measures the time it takes to get an operation from a database.
-func BenchmarkGet(b *testing.B, db Database, name string, keys, values [][]byte) {
+func BenchmarkGet(b *testing.B, db Database, keys, values [][]byte) {
 	require.NotEmpty(b, keys)
 	count := len(keys)
 
-	b.Run(fmt.Sprintf("%s_%d_pairs_%d_keys_%d_values_db.get", name, count, len(keys[0]), len(values[0])), func(b *testing.B) {
-		require := require.New(b)
+	require := require.New(b)
 
-		for i, key := range keys {
-			value := values[i]
-			require.NoError(db.Put(key, value))
-		}
+	for i, key := range keys {
+		value := values[i]
+		require.NoError(db.Put(key, value))
+	}
 
-		b.ResetTimer()
+	b.ResetTimer()
 
-		// Reads b.N values from the db
-		for i := 0; i < b.N; i++ {
-			_, err := db.Get(keys[i%count])
-			require.NoError(err)
-		}
-	})
+	// Reads b.N values from the db
+	for i := 0; i < b.N; i++ {
+		_, err := db.Get(keys[i%count])
+		require.NoError(err)
+	}
 }
 
 // BenchmarkPut measures the time it takes to write an operation to a database.
-func BenchmarkPut(b *testing.B, db Database, name string, keys, values [][]byte) {
+func BenchmarkPut(b *testing.B, db Database, keys, values [][]byte) {
 	require.NotEmpty(b, keys)
 	count := len(keys)
 
-	b.Run(fmt.Sprintf("%s_%d_pairs_%d_keys_%d_values_db.put", name, count, len(keys[0]), len(values[0])), func(b *testing.B) {
-		// Writes b.N values to the db
-		for i := 0; i < b.N; i++ {
-			require.NoError(b, db.Put(keys[i%count], values[i%count]))
-		}
-	})
+	// Writes b.N values to the db
+	for i := 0; i < b.N; i++ {
+		require.NoError(b, db.Put(keys[i%count], values[i%count]))
+	}
 }
 
 // BenchmarkDelete measures the time it takes to delete a (k, v) from a database.
-func BenchmarkDelete(b *testing.B, db Database, name string, keys, values [][]byte) {
+func BenchmarkDelete(b *testing.B, db Database, keys, values [][]byte) {
 	require.NotEmpty(b, keys)
 	count := len(keys)
 
-	b.Run(fmt.Sprintf("%s_%d_pairs_%d_keys_%d_values_db.delete", name, count, len(keys[0]), len(values[0])), func(b *testing.B) {
-		require := require.New(b)
+	require := require.New(b)
 
-		// Writes random values of size _size_ to the database
-		for i, key := range keys {
-			value := values[i]
-			require.NoError(db.Put(key, value))
-		}
+	// Writes random values of size _size_ to the database
+	for i, key := range keys {
+		value := values[i]
+		require.NoError(db.Put(key, value))
+	}
 
-		b.ResetTimer()
+	b.ResetTimer()
 
-		// Deletes b.N values from the db
-		for i := 0; i < b.N; i++ {
-			require.NoError(db.Delete(keys[i%count]))
-		}
-	})
+	// Deletes b.N values from the db
+	for i := 0; i < b.N; i++ {
+		require.NoError(db.Delete(keys[i%count]))
+	}
 }
 
 // BenchmarkBatchPut measures the time it takes to batch put.
-func BenchmarkBatchPut(b *testing.B, db Database, name string, keys, values [][]byte) {
+func BenchmarkBatchPut(b *testing.B, db Database, keys, values [][]byte) {
 	require.NotEmpty(b, keys)
 	count := len(keys)
 
-	b.Run(fmt.Sprintf("%s_%d_pairs_%d_keys_%d_values_batch.put", name, count, len(keys[0]), len(values[0])), func(b *testing.B) {
-		batch := db.NewBatch()
-		for i := 0; i < b.N; i++ {
-			require.NoError(b, batch.Put(keys[i%count], values[i%count]))
-		}
-	})
+	batch := db.NewBatch()
+	for i := 0; i < b.N; i++ {
+		require.NoError(b, batch.Put(keys[i%count], values[i%count]))
+	}
 }
 
 // BenchmarkBatchDelete measures the time it takes to batch delete.
-func BenchmarkBatchDelete(b *testing.B, db Database, name string, keys, values [][]byte) {
+func BenchmarkBatchDelete(b *testing.B, db Database, keys, _ [][]byte) {
 	require.NotEmpty(b, keys)
 	count := len(keys)
 
-	b.Run(fmt.Sprintf("%s_%d_pairs_%d_keys_%d_values_batch.delete", name, count, len(keys[0]), len(values[0])), func(b *testing.B) {
-		batch := db.NewBatch()
-		for i := 0; i < b.N; i++ {
-			require.NoError(b, batch.Delete(keys[i%count]))
-		}
-	})
+	batch := db.NewBatch()
+	for i := 0; i < b.N; i++ {
+		require.NoError(b, batch.Delete(keys[i%count]))
+	}
 }
 
 // BenchmarkBatchWrite measures the time it takes to batch write.
-func BenchmarkBatchWrite(b *testing.B, db Database, name string, keys, values [][]byte) {
+func BenchmarkBatchWrite(b *testing.B, db Database, keys, values [][]byte) {
 	require.NotEmpty(b, keys)
-	count := len(keys)
 
-	b.Run(fmt.Sprintf("%s_%d_pairs_%d_keys_%d_values_batch.write", name, count, len(keys[0]), len(values[0])), func(b *testing.B) {
-		require := require.New(b)
+	require := require.New(b)
 
-		batch := db.NewBatch()
-		for i, key := range keys {
-			value := values[i]
-			require.NoError(batch.Put(key, value))
-		}
+	batch := db.NewBatch()
+	for i, key := range keys {
+		value := values[i]
+		require.NoError(batch.Put(key, value))
+	}
 
-		b.ResetTimer()
+	b.ResetTimer()
 
-		for i := 0; i < b.N; i++ {
-			require.NoError(batch.Write())
-		}
-	})
+	for i := 0; i < b.N; i++ {
+		require.NoError(batch.Write())
+	}
 }
 
 // BenchmarkParallelGet measures the time it takes to read in parallel.
-func BenchmarkParallelGet(b *testing.B, db Database, name string, keys, values [][]byte) {
+func BenchmarkParallelGet(b *testing.B, db Database, keys, values [][]byte) {
 	require.NotEmpty(b, keys)
 	count := len(keys)
 
-	b.Run(fmt.Sprintf("%s_%d_pairs_%d_keys_%d_values_db.get_parallel", name, count, len(keys[0]), len(values[0])), func(b *testing.B) {
-		require := require.New(b)
+	require := require.New(b)
 
-		for i, key := range keys {
-			value := values[i]
-			require.NoError(db.Put(key, value))
-		}
+	for i, key := range keys {
+		value := values[i]
+		require.NoError(db.Put(key, value))
+	}
 
-		b.ResetTimer()
+	b.ResetTimer()
 
-		b.RunParallel(func(pb *testing.PB) {
-			for i := 0; pb.Next(); i++ {
-				_, err := db.Get(keys[i%count])
-				require.NoError(err)
-			}
-		})
+	b.RunParallel(func(pb *testing.PB) {
+		for i := 0; pb.Next(); i++ {
+			_, err := db.Get(keys[i%count])
+			require.NoError(err)
+		}
 	})
 }
 
 // BenchmarkParallelPut measures the time it takes to write to the db in parallel.
-func BenchmarkParallelPut(b *testing.B, db Database, name string, keys, values [][]byte) {
+func BenchmarkParallelPut(b *testing.B, db Database, keys, values [][]byte) {
 	require.NotEmpty(b, keys)
 	count := len(keys)
 
-	b.Run(fmt.Sprintf("%s_%d_pairs_%d_keys_%d_values_db.put_parallel", name, count, len(keys[0]), len(values[0])), func(b *testing.B) {
-		b.RunParallel(func(pb *testing.PB) {
-			// Write N values to the db
-			for i := 0; pb.Next(); i++ {
-				require.NoError(b, db.Put(keys[i%count], values[i%count]))
-			}
-		})
+	b.RunParallel(func(pb *testing.PB) {
+		// Write N values to the db
+		for i := 0; pb.Next(); i++ {
+			require.NoError(b, db.Put(keys[i%count], values[i%count]))
+		}
 	})
 }
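With the registry keyed by name, each driver now names its sub-benchmarks itself instead of threading a label down into every benchmark; the per-database test files later in this patch all follow the same shape. A representative driver, mirroring that pattern for the in-memory database:

func BenchmarkInterface(b *testing.B) {
	for _, size := range database.BenchmarkSizes {
		keys, values := database.SetupBenchmark(b, size[0], size[1], size[2])
		for name, bench := range database.Benchmarks {
			// The sub-benchmark name carries what the removed fmt.Sprintf
			// inside each benchmark used to encode: db flavor, pair count,
			// key/value sizes, and the operation.
			b.Run(fmt.Sprintf("memdb_%d_pairs_%d_keys_%d_values_%s", size[0], size[1], size[2], name), func(b *testing.B) {
				bench(b, memdb.New(), keys, values)
			})
		}
	}
}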
 // BenchmarkParallelDelete measures the time it takes to delete a (k, v) from the db.
-func BenchmarkParallelDelete(b *testing.B, db Database, name string, keys, values [][]byte) {
+func BenchmarkParallelDelete(b *testing.B, db Database, keys, values [][]byte) {
 	require.NotEmpty(b, keys)
 	count := len(keys)
 
-	b.Run(fmt.Sprintf("%s_%d_pairs_%d_keys_%d_values_db.delete_parallel", name, count, len(keys[0]), len(values[0])), func(b *testing.B) {
-		require := require.New(b)
-		for i, key := range keys {
-			value := values[i]
-			require.NoError(db.Put(key, value))
+	require := require.New(b)
+	for i, key := range keys {
+		value := values[i]
+		require.NoError(db.Put(key, value))
+	}
+	b.ResetTimer()
+
+	b.RunParallel(func(pb *testing.PB) {
+		// Deletes b.N values from the db
+		for i := 0; pb.Next(); i++ {
+			require.NoError(db.Delete(keys[i%count]))
 		}
-		b.ResetTimer()
-
-		b.RunParallel(func(pb *testing.PB) {
-			// Deletes b.N values from the db
-			for i := 0; pb.Next(); i++ {
-				require.NoError(db.Delete(keys[i%count]))
-			}
-		})
 	})
 }
diff --git a/database/corruptabledb/db_test.go b/database/corruptabledb/db_test.go
index 566a6b084e19..5c7a48a64c4d 100644
--- a/database/corruptabledb/db_test.go
+++ b/database/corruptabledb/db_test.go
@@ -18,19 +18,19 @@ import (
 
 var errTest = errors.New("non-nil error")
 
-func TestInterface(t *testing.T) {
-	for _, test := range database.Tests {
-		baseDB := memdb.New()
-		db := New(baseDB)
-		test(t, db)
-	}
-}
-
 func newDB() *Database {
 	baseDB := memdb.New()
 	return New(baseDB)
 }
 
+func TestInterface(t *testing.T) {
+	for name, test := range database.Tests {
+		t.Run(name, func(t *testing.T) {
+			test(t, newDB())
+		})
+	}
+}
+
 func FuzzKeyValue(f *testing.F) {
 	database.FuzzKeyValue(f, newDB())
 }
diff --git a/database/encdb/db_test.go b/database/encdb/db_test.go
index 871460a90a41..b3dfdfed68e7 100644
--- a/database/encdb/db_test.go
+++ b/database/encdb/db_test.go
@@ -4,6 +4,7 @@
 package encdb
 
 import (
+	"fmt"
 	"testing"
 
 	"github.com/stretchr/testify/require"
@@ -15,12 +16,14 @@ import (
 const testPassword = "lol totally a secure password" //nolint:gosec
 
 func TestInterface(t *testing.T) {
-	for _, test := range database.Tests {
-		unencryptedDB := memdb.New()
-		db, err := New([]byte(testPassword), unencryptedDB)
-		require.NoError(t, err)
-
-		test(t, db)
+	for name, test := range database.Tests {
+		t.Run(name, func(t *testing.T) {
+			unencryptedDB := memdb.New()
+			db, err := New([]byte(testPassword), unencryptedDB)
+			require.NoError(t, err)
+
+			test(t, db)
+		})
 	}
 }
 
@@ -46,8 +49,10 @@ func FuzzNewIteratorWithStartAndPrefix(f *testing.F) {
 func BenchmarkInterface(b *testing.B) {
 	for _, size := range database.BenchmarkSizes {
 		keys, values := database.SetupBenchmark(b, size[0], size[1], size[2])
-		for _, bench := range database.Benchmarks {
-			bench(b, newDB(b), "encdb", keys, values)
+		for name, bench := range database.Benchmarks {
+			b.Run(fmt.Sprintf("encdb_%d_pairs_%d_keys_%d_values_%s", size[0], size[1], size[2], name), func(b *testing.B) {
+				bench(b, newDB(b), keys, values)
+			})
 		}
 	}
 }
diff --git a/database/leveldb/db_test.go b/database/leveldb/db_test.go
index bf70739ce222..ad8d60cbbc4f 100644
--- a/database/leveldb/db_test.go
+++ b/database/leveldb/db_test.go
@@ -4,6 +4,7 @@
 package leveldb
 
 import (
+	"fmt"
 	"testing"
 
 	"github.com/prometheus/client_golang/prometheus"
@@ -15,14 +16,16 @@ import (
 )
 
 func TestInterface(t *testing.T) {
-	for _, test := range database.Tests {
-		folder := t.TempDir()
-		db, err := New(folder, nil, logging.NoLog{}, "", prometheus.NewRegistry())
-		require.NoError(t, err)
+	for name, test := range database.Tests {
+		t.Run(name, func(t *testing.T) {
+			folder := t.TempDir()
+			db, err := New(folder, nil, logging.NoLog{}, "", prometheus.NewRegistry())
+			require.NoError(t, err)
 
-		test(t, db)
+			test(t, db)
 
-		_ = db.Close()
+			_ = db.Close()
+		})
 	}
 }
 
@@ -57,14 +60,16 @@ func FuzzNewIteratorWithStartAndPrefix(f *testing.F) {
 func BenchmarkInterface(b *testing.B) {
 	for _, size := range database.BenchmarkSizes {
 		keys, values := database.SetupBenchmark(b, size[0], size[1], size[2])
-		for _, bench := range database.Benchmarks {
-			db := newDB(b)
+		for name, bench := range database.Benchmarks {
+			b.Run(fmt.Sprintf("leveldb_%d_pairs_%d_keys_%d_values_%s", size[0], size[1], size[2], name), func(b *testing.B) {
+				db := newDB(b)
 
-			bench(b, db, "leveldb", keys, values)
+				bench(b, db, keys, values)
 
-			// The database may have been closed by the test, so we don't care if it
-			// errors here.
-			_ = db.Close()
+				// The database may have been closed by the test, so we don't care if it
+				// errors here.
+				_ = db.Close()
+			})
 		}
 	}
 }
diff --git a/database/memdb/db_test.go b/database/memdb/db_test.go
index 21c97909c7fb..90dc459f3602 100644
--- a/database/memdb/db_test.go
+++ b/database/memdb/db_test.go
@@ -4,14 +4,17 @@
 package memdb
 
 import (
+	"fmt"
 	"testing"
 
 	"github.com/ava-labs/avalanchego/database"
 )
 
 func TestInterface(t *testing.T) {
-	for _, test := range database.Tests {
-		test(t, New())
+	for name, test := range database.Tests {
+		t.Run(name, func(t *testing.T) {
+			test(t, New())
+		})
 	}
 }
 
@@ -30,9 +33,11 @@ func FuzzNewIteratorWithStartAndPrefix(f *testing.F) {
 func BenchmarkInterface(b *testing.B) {
 	for _, size := range database.BenchmarkSizes {
 		keys, values := database.SetupBenchmark(b, size[0], size[1], size[2])
-		for _, bench := range database.Benchmarks {
-			db := New()
-			bench(b, db, "memdb", keys, values)
+		for name, bench := range database.Benchmarks {
+			b.Run(fmt.Sprintf("memdb_%d_pairs_%d_keys_%d_values_%s", size[0], size[1], size[2], name), func(b *testing.B) {
+				db := New()
+				bench(b, db, keys, values)
+			})
 		}
 	}
 }
diff --git a/database/meterdb/db_test.go b/database/meterdb/db_test.go
index cd36a6fb34f2..eee3f1c23c01 100644
--- a/database/meterdb/db_test.go
+++ b/database/meterdb/db_test.go
@@ -4,6 +4,7 @@
 package meterdb
 
 import (
+	"fmt"
 	"testing"
 
 	"github.com/prometheus/client_golang/prometheus"
@@ -15,12 +16,14 @@ import (
 )
 
 func TestInterface(t *testing.T) {
-	for _, test := range database.Tests {
-		baseDB := memdb.New()
-		db, err := New("", prometheus.NewRegistry(), baseDB)
-		require.NoError(t, err)
-
-		test(t, db)
+	for name, test := range database.Tests {
+		t.Run(name, func(t *testing.T) {
+			baseDB := memdb.New()
+			db, err := New("", prometheus.NewRegistry(), baseDB)
+			require.NoError(t, err)
+
+			test(t, db)
+		})
 	}
 }
 
@@ -46,8 +49,10 @@ func FuzzNewIteratorWithStartAndPrefix(f *testing.F) {
 func BenchmarkInterface(b *testing.B) {
 	for _, size := range database.BenchmarkSizes {
 		keys, values := database.SetupBenchmark(b, size[0], size[1], size[2])
-		for _, bench := range database.Benchmarks {
-			bench(b, newDB(b), "meterdb", keys, values)
+		for name, bench := range database.Benchmarks {
+			b.Run(fmt.Sprintf("meterdb_%d_pairs_%d_keys_%d_values_%s", size[0], size[1], size[2], name), func(b *testing.B) {
+				bench(b, newDB(b), keys, values)
+			})
 		}
 	}
 }
diff --git a/database/pebble/db_test.go b/database/pebble/db_test.go
index 2ea3c354f3d8..5c8650dbf183 100644
--- a/database/pebble/db_test.go
+++ b/database/pebble/db_test.go
@@ -4,6 +4,7 @@
 package pebble
 
 import (
+	"fmt"
 	"testing"
 
 	"github.com/prometheus/client_golang/prometheus"
@@ -22,10 +23,12 @@ func newDB(t testing.TB) *Database {
 }
 
 func TestInterface(t *testing.T) {
-	for _, test := range database.Tests {
-		db := newDB(t)
-		test(t, db)
-		_ = db.Close()
+	for name, test := range database.Tests {
+		t.Run(name, func(t *testing.T) {
+			db := newDB(t)
+			test(t, db)
+			_ = db.Close()
+		})
 	}
 }
 
@@ -50,10 +53,12 @@ func FuzzNewIteratorWithStartAndPrefix(f *testing.F) {
 func BenchmarkInterface(b *testing.B) {
 	for _, size := range database.BenchmarkSizes {
 		keys, values := database.SetupBenchmark(b, size[0], size[1], size[2])
-		for _, bench := range database.Benchmarks {
-			db := newDB(b)
-			bench(b, db, "pebble", keys, values)
-			_ = db.Close()
+		for name, bench := range database.Benchmarks {
+			b.Run(fmt.Sprintf("pebble_%d_pairs_%d_keys_%d_values_%s", size[0], size[1], size[2], name), func(b *testing.B) {
+				db := newDB(b)
+				bench(b, db, keys, values)
+				_ = db.Close()
+			})
 		}
 	}
 }
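All of these TestInterface loops assume database.Tests is now a map from test name to test function (the dropped positional iteration suggests it was previously a slice). The shape these call sites rely on, sketched here — the actual declaration lives elsewhere in the database package, not in this patch, and the entry shown is illustrative:

// Tests as these call sites consume it: named interface-conformance
// checks that each driver runs as t.Run subtests, so a single test can
// be selected with `go test -run 'TestInterface/<name>'`.
var Tests = map[string]func(t *testing.T, db Database){
	"SimpleKeyValue": TestSimpleKeyValue,
	// ...one entry per conformance test
}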
func NewNested(prefix []byte, db database.Database) *Database { - return &Database{ - dbPrefix: hashing.ComputeHash256(prefix), - db: db, - bufferPool: sync.Pool{ - New: func() interface{} { - return make([]byte, 0, defaultBufCap) - }, - }, - } + return newDB( + MakePrefix(prefix), + db, + ) +} + +func MakePrefix(prefix []byte) []byte { + return hashing.ComputeHash256(prefix) +} + +func JoinPrefixes(firstPrefix, secondPrefix []byte) []byte { + simplePrefix := make([]byte, len(firstPrefix)+len(secondPrefix)) + copy(simplePrefix, firstPrefix) + copy(simplePrefix[len(firstPrefix):], secondPrefix) + return MakePrefix(simplePrefix) +} + +func PrefixKey(prefix, key []byte) []byte { + prefixedKey := make([]byte, len(prefix)+len(key)) + copy(prefixedKey, prefix) + copy(prefixedKey[len(prefix):], key) + return prefixedKey } // Assumes that it is OK for the argument to db.db.Has diff --git a/database/prefixdb/db_test.go b/database/prefixdb/db_test.go index 109d8c4c9aba..f928d2f635a4 100644 --- a/database/prefixdb/db_test.go +++ b/database/prefixdb/db_test.go @@ -4,6 +4,7 @@ package prefixdb import ( + "fmt" "testing" "github.com/ava-labs/avalanchego/database" @@ -11,14 +12,16 @@ import ( ) func TestInterface(t *testing.T) { - for _, test := range database.Tests { - db := memdb.New() - test(t, New([]byte("hello"), db)) - test(t, New([]byte("world"), db)) - test(t, New([]byte("wor"), New([]byte("ld"), db))) - test(t, New([]byte("ld"), New([]byte("wor"), db))) - test(t, NewNested([]byte("wor"), New([]byte("ld"), db))) - test(t, NewNested([]byte("ld"), New([]byte("wor"), db))) + for name, test := range database.Tests { + t.Run(name, func(t *testing.T) { + db := memdb.New() + test(t, New([]byte("hello"), db)) + test(t, New([]byte("world"), db)) + test(t, New([]byte("wor"), New([]byte("ld"), db))) + test(t, New([]byte("ld"), New([]byte("wor"), db))) + test(t, NewNested([]byte("wor"), New([]byte("ld"), db))) + test(t, NewNested([]byte("ld"), New([]byte("wor"), db))) + }) } } @@ -37,9 +40,11 @@ func FuzzNewIteratorWithStartAndPrefix(f *testing.F) { func BenchmarkInterface(b *testing.B) { for _, size := range database.BenchmarkSizes { keys, values := database.SetupBenchmark(b, size[0], size[1], size[2]) - for _, bench := range database.Benchmarks { - db := New([]byte("hello"), memdb.New()) - bench(b, db, "prefixdb", keys, values) + for name, bench := range database.Benchmarks { + b.Run(fmt.Sprintf("prefixdb_%d_pairs_%d_keys_%d_values_%s", size[0], size[1], size[2], name), func(b *testing.B) { + db := New([]byte("hello"), memdb.New()) + bench(b, db, keys, values) + }) } } } diff --git a/database/rpcdb/db_client.go b/database/rpcdb/db_client.go index 9f91667b41b6..c71ccd0603e6 100644 --- a/database/rpcdb/db_client.go +++ b/database/rpcdb/db_client.go @@ -43,7 +43,7 @@ func (db *DatabaseClient) Has(key []byte) (bool, error) { if err != nil { return false, err } - return resp.Has, errEnumToError[resp.Err] + return resp.Has, ErrEnumToError[resp.Err] } // Get attempts to return the value that was mapped to the key that was provided @@ -54,7 +54,7 @@ func (db *DatabaseClient) Get(key []byte) ([]byte, error) { if err != nil { return nil, err } - return resp.Value, errEnumToError[resp.Err] + return resp.Value, ErrEnumToError[resp.Err] } // Put attempts to set the value this key maps to @@ -66,7 +66,7 @@ func (db *DatabaseClient) Put(key, value []byte) error { if err != nil { return err } - return errEnumToError[resp.Err] + return ErrEnumToError[resp.Err] } // Delete attempts to remove any mapping from the key @@ 
-77,7 +77,7 @@ func (db *DatabaseClient) Delete(key []byte) error { if err != nil { return err } - return errEnumToError[resp.Err] + return ErrEnumToError[resp.Err] } // NewBatch returns a new batch @@ -120,7 +120,7 @@ func (db *DatabaseClient) Compact(start, limit []byte) error { if err != nil { return err } - return errEnumToError[resp.Err] + return ErrEnumToError[resp.Err] } // Close attempts to close the database @@ -130,7 +130,7 @@ func (db *DatabaseClient) Close() error { if err != nil { return err } - return errEnumToError[resp.Err] + return ErrEnumToError[resp.Err] } func (db *DatabaseClient) HealthCheck(ctx context.Context) (interface{}, error) { @@ -175,7 +175,7 @@ func (b *batch) Write() error { if err != nil { return err } - return errEnumToError[resp.Err] + return ErrEnumToError[resp.Err] } func (b *batch) Inner() database.Batch { @@ -224,7 +224,7 @@ func (it *iterator) fetch() { if err != nil { it.setError(err) } else { - it.setError(errEnumToError[resp.Err]) + it.setError(ErrEnumToError[resp.Err]) } close(it.fetchedData) @@ -324,7 +324,7 @@ func (it *iterator) updateError() { if err != nil { it.setError(err) } else { - it.setError(errEnumToError[resp.Err]) + it.setError(ErrEnumToError[resp.Err]) } } diff --git a/database/rpcdb/db_server.go b/database/rpcdb/db_server.go index 6bcbd4e0276b..8a07a672d070 100644 --- a/database/rpcdb/db_server.go +++ b/database/rpcdb/db_server.go @@ -50,8 +50,8 @@ func (db *DatabaseServer) Has(_ context.Context, req *rpcdbpb.HasRequest) (*rpcd has, err := db.db.Has(req.Key) return &rpcdbpb.HasResponse{ Has: has, - Err: errorToErrEnum[err], - }, errorToRPCError(err) + Err: ErrorToErrEnum[err], + }, ErrorToRPCError(err) } // Get delegates the Get call to the managed database and returns the result @@ -59,34 +59,34 @@ func (db *DatabaseServer) Get(_ context.Context, req *rpcdbpb.GetRequest) (*rpcd value, err := db.db.Get(req.Key) return &rpcdbpb.GetResponse{ Value: value, - Err: errorToErrEnum[err], - }, errorToRPCError(err) + Err: ErrorToErrEnum[err], + }, ErrorToRPCError(err) } // Put delegates the Put call to the managed database and returns the result func (db *DatabaseServer) Put(_ context.Context, req *rpcdbpb.PutRequest) (*rpcdbpb.PutResponse, error) { err := db.db.Put(req.Key, req.Value) - return &rpcdbpb.PutResponse{Err: errorToErrEnum[err]}, errorToRPCError(err) + return &rpcdbpb.PutResponse{Err: ErrorToErrEnum[err]}, ErrorToRPCError(err) } // Delete delegates the Delete call to the managed database and returns the // result func (db *DatabaseServer) Delete(_ context.Context, req *rpcdbpb.DeleteRequest) (*rpcdbpb.DeleteResponse, error) { err := db.db.Delete(req.Key) - return &rpcdbpb.DeleteResponse{Err: errorToErrEnum[err]}, errorToRPCError(err) + return &rpcdbpb.DeleteResponse{Err: ErrorToErrEnum[err]}, ErrorToRPCError(err) } // Compact delegates the Compact call to the managed database and returns the // result func (db *DatabaseServer) Compact(_ context.Context, req *rpcdbpb.CompactRequest) (*rpcdbpb.CompactResponse, error) { err := db.db.Compact(req.Start, req.Limit) - return &rpcdbpb.CompactResponse{Err: errorToErrEnum[err]}, errorToRPCError(err) + return &rpcdbpb.CompactResponse{Err: ErrorToErrEnum[err]}, ErrorToRPCError(err) } // Close delegates the Close call to the managed database and returns the result func (db *DatabaseServer) Close(context.Context, *rpcdbpb.CloseRequest) (*rpcdbpb.CloseResponse, error) { err := db.db.Close() - return &rpcdbpb.CloseResponse{Err: errorToErrEnum[err]}, errorToRPCError(err) + return 
&rpcdbpb.CloseResponse{Err: ErrorToErrEnum[err]}, ErrorToRPCError(err) } // HealthCheck performs a health check against the underlying database. @@ -109,22 +109,22 @@ func (db *DatabaseServer) WriteBatch(_ context.Context, req *rpcdbpb.WriteBatchR for _, put := range req.Puts { if err := batch.Put(put.Key, put.Value); err != nil { return &rpcdbpb.WriteBatchResponse{ - Err: errorToErrEnum[err], - }, errorToRPCError(err) + Err: ErrorToErrEnum[err], + }, ErrorToRPCError(err) } } for _, del := range req.Deletes { if err := batch.Delete(del.Key); err != nil { return &rpcdbpb.WriteBatchResponse{ - Err: errorToErrEnum[err], - }, errorToRPCError(err) + Err: ErrorToErrEnum[err], + }, ErrorToRPCError(err) } } err := batch.Write() return &rpcdbpb.WriteBatchResponse{ - Err: errorToErrEnum[err], - }, errorToRPCError(err) + Err: ErrorToErrEnum[err], + }, ErrorToRPCError(err) } // NewIteratorWithStartAndPrefix allocates an iterator and returns the iterator @@ -177,7 +177,7 @@ func (db *DatabaseServer) IteratorError(_ context.Context, req *rpcdbpb.Iterator return nil, errUnknownIterator } err := it.Error() - return &rpcdbpb.IteratorErrorResponse{Err: errorToErrEnum[err]}, errorToRPCError(err) + return &rpcdbpb.IteratorErrorResponse{Err: ErrorToErrEnum[err]}, ErrorToRPCError(err) } // IteratorRelease attempts to release the resources allocated to an iterator @@ -193,5 +193,5 @@ func (db *DatabaseServer) IteratorRelease(_ context.Context, req *rpcdbpb.Iterat err := it.Error() it.Release() - return &rpcdbpb.IteratorReleaseResponse{Err: errorToErrEnum[err]}, errorToRPCError(err) + return &rpcdbpb.IteratorReleaseResponse{Err: ErrorToErrEnum[err]}, ErrorToRPCError(err) } diff --git a/database/rpcdb/db_test.go b/database/rpcdb/db_test.go index 99323f8bd8c1..cc0cca3694b0 100644 --- a/database/rpcdb/db_test.go +++ b/database/rpcdb/db_test.go @@ -5,6 +5,7 @@ package rpcdb import ( "context" + "fmt" "testing" "github.com/stretchr/testify/require" @@ -18,9 +19,8 @@ import ( ) type testDatabase struct { - client *DatabaseClient - server *memdb.Database - closeFn func() + client *DatabaseClient + server *memdb.Database } func setupDB(t testing.TB) *testDatabase { @@ -44,51 +44,48 @@ func setupDB(t testing.TB) *testDatabase { require.NoError(err) db.client = NewClient(rpcdbpb.NewDatabaseClient(conn)) - db.closeFn = func() { + + t.Cleanup(func() { serverCloser.Stop() _ = conn.Close() _ = listener.Close() - } + }) + return db } func TestInterface(t *testing.T) { - for _, test := range database.Tests { - db := setupDB(t) - test(t, db.client) - - db.closeFn() + for name, test := range database.Tests { + t.Run(name, func(t *testing.T) { + db := setupDB(t) + test(t, db.client) + }) } } func FuzzKeyValue(f *testing.F) { db := setupDB(f) database.FuzzKeyValue(f, db.client) - - db.closeFn() } func FuzzNewIteratorWithPrefix(f *testing.F) { db := setupDB(f) database.FuzzNewIteratorWithPrefix(f, db.client) - - db.closeFn() } func FuzzNewIteratorWithStartAndPrefix(f *testing.F) { db := setupDB(f) database.FuzzNewIteratorWithStartAndPrefix(f, db.client) - - db.closeFn() } func BenchmarkInterface(b *testing.B) { for _, size := range database.BenchmarkSizes { keys, values := database.SetupBenchmark(b, size[0], size[1], size[2]) - for _, bench := range database.Benchmarks { - db := setupDB(b) - bench(b, db.client, "rpcdb", keys, values) - db.closeFn() + for name, bench := range database.Benchmarks { + b.Run(fmt.Sprintf("rpcdb_%d_pairs_%d_keys_%d_values_%s", size[0], size[1], size[2], name), func(b *testing.B) { + db := setupDB(b) + 
bench(b, db.client, keys, values) + }) } } } diff --git a/database/rpcdb/errors.go b/database/rpcdb/errors.go index 2cd759b6d612..52788cc0a42a 100644 --- a/database/rpcdb/errors.go +++ b/database/rpcdb/errors.go @@ -10,18 +10,18 @@ import ( ) var ( - errEnumToError = map[rpcdbpb.Error]error{ + ErrEnumToError = map[rpcdbpb.Error]error{ rpcdbpb.Error_ERROR_CLOSED: database.ErrClosed, rpcdbpb.Error_ERROR_NOT_FOUND: database.ErrNotFound, } - errorToErrEnum = map[error]rpcdbpb.Error{ + ErrorToErrEnum = map[error]rpcdbpb.Error{ database.ErrClosed: rpcdbpb.Error_ERROR_CLOSED, database.ErrNotFound: rpcdbpb.Error_ERROR_NOT_FOUND, } ) -func errorToRPCError(err error) error { - if _, ok := errorToErrEnum[err]; ok { +func ErrorToRPCError(err error) error { + if _, ok := ErrorToErrEnum[err]; ok { return nil } return err diff --git a/database/test_database.go b/database/test_database.go index 1ce32bb54257..792b038012cb 100644 --- a/database/test_database.go +++ b/database/test_database.go @@ -24,44 +24,44 @@ import ( ) // Tests is a list of all database tests -var Tests = []func(t *testing.T, db Database){ - TestSimpleKeyValue, - TestOverwriteKeyValue, - TestEmptyKey, - TestKeyEmptyValue, - TestSimpleKeyValueClosed, - TestNewBatchClosed, - TestBatchPut, - TestBatchDelete, - TestBatchReset, - TestBatchReuse, - TestBatchRewrite, - TestBatchReplay, - TestBatchReplayPropagateError, - TestBatchInner, - TestBatchLargeSize, - TestIteratorSnapshot, - TestIterator, - TestIteratorStart, - TestIteratorPrefix, - TestIteratorStartPrefix, - TestIteratorMemorySafety, - TestIteratorClosed, - TestIteratorError, - TestIteratorErrorAfterRelease, - TestCompactNoPanic, - TestMemorySafetyDatabase, - TestMemorySafetyBatch, - TestAtomicClear, - TestClear, - TestAtomicClearPrefix, - TestClearPrefix, - TestModifyValueAfterPut, - TestModifyValueAfterBatchPut, - TestModifyValueAfterBatchPutReplay, - TestConcurrentBatches, - TestManySmallConcurrentKVPairBatches, - TestPutGetEmpty, +var Tests = map[string]func(t *testing.T, db Database){ + "SimpleKeyValue": TestSimpleKeyValue, + "OverwriteKeyValue": TestOverwriteKeyValue, + "EmptyKey": TestEmptyKey, + "KeyEmptyValue": TestKeyEmptyValue, + "SimpleKeyValueClosed": TestSimpleKeyValueClosed, + "NewBatchClosed": TestNewBatchClosed, + "BatchPut": TestBatchPut, + "BatchDelete": TestBatchDelete, + "BatchReset": TestBatchReset, + "BatchReuse": TestBatchReuse, + "BatchRewrite": TestBatchRewrite, + "BatchReplay": TestBatchReplay, + "BatchReplayPropagateError": TestBatchReplayPropagateError, + "BatchInner": TestBatchInner, + "BatchLargeSize": TestBatchLargeSize, + "IteratorSnapshot": TestIteratorSnapshot, + "Iterator": TestIterator, + "IteratorStart": TestIteratorStart, + "IteratorPrefix": TestIteratorPrefix, + "IteratorStartPrefix": TestIteratorStartPrefix, + "IteratorMemorySafety": TestIteratorMemorySafety, + "IteratorClosed": TestIteratorClosed, + "IteratorError": TestIteratorError, + "IteratorErrorAfterRelease": TestIteratorErrorAfterRelease, + "CompactNoPanic": TestCompactNoPanic, + "MemorySafetyDatabase": TestMemorySafetyDatabase, + "MemorySafetyBatch": TestMemorySafetyBatch, + "AtomicClear": TestAtomicClear, + "Clear": TestClear, + "AtomicClearPrefix": TestAtomicClearPrefix, + "ClearPrefix": TestClearPrefix, + "ModifyValueAfterPut": TestModifyValueAfterPut, + "ModifyValueAfterBatchPut": TestModifyValueAfterBatchPut, + "ModifyValueAfterBatchPutReplay": TestModifyValueAfterBatchPutReplay, + "ConcurrentBatches": TestConcurrentBatches, + "ManySmallConcurrentKVPairBatches": 
TestManySmallConcurrentKVPairBatches, + "PutGetEmpty": TestPutGetEmpty, } // TestSimpleKeyValue tests to make sure that simple Put + Get + Delete + Has diff --git a/database/versiondb/db_test.go b/database/versiondb/db_test.go index c3093a9ee843..0ff801dfe0dd 100644 --- a/database/versiondb/db_test.go +++ b/database/versiondb/db_test.go @@ -4,6 +4,7 @@ package versiondb import ( + "fmt" "testing" "github.com/stretchr/testify/require" @@ -13,9 +14,11 @@ import ( ) func TestInterface(t *testing.T) { - for _, test := range database.Tests { - baseDB := memdb.New() - test(t, New(baseDB)) + for name, test := range database.Tests { + t.Run(name, func(t *testing.T) { + baseDB := memdb.New() + test(t, New(baseDB)) + }) } } @@ -299,11 +302,13 @@ func TestSetDatabaseClosed(t *testing.T) { func BenchmarkInterface(b *testing.B) { for _, size := range database.BenchmarkSizes { keys, values := database.SetupBenchmark(b, size[0], size[1], size[2]) - for _, bench := range database.Benchmarks { - baseDB := memdb.New() - db := New(baseDB) - bench(b, db, "versiondb", keys, values) - _ = db.Close() + for name, bench := range database.Benchmarks { + b.Run(fmt.Sprintf("versiondb_%d_pairs_%d_keys_%d_values_%s", size[0], size[1], size[2], name), func(b *testing.B) { + baseDB := memdb.New() + db := New(baseDB) + bench(b, db, keys, values) + _ = db.Close() + }) } } } diff --git a/go.mod b/go.mod index c4b4a6d248d8..fa94da112281 100644 --- a/go.mod +++ b/go.mod @@ -11,7 +11,7 @@ require ( github.com/DataDog/zstd v1.5.2 github.com/Microsoft/go-winio v0.5.2 github.com/NYTimes/gziphandler v1.1.1 - github.com/ava-labs/coreth v0.12.10-rc.5 + github.com/ava-labs/coreth v0.12.11-rc.2 github.com/ava-labs/ledger-avalanche/go v0.0.0-20231102202641-ae2ebdaeac34 github.com/btcsuite/btcd/btcutil v1.1.3 github.com/cockroachdb/pebble v0.0.0-20230209160836-829675f94811 @@ -162,4 +162,4 @@ require ( replace github.com/ava-labs/avalanche-ledger-go => github.com/chain4travel/camino-ledger-go v0.0.13-c4t -replace github.com/ava-labs/coreth => github.com/chain4travel/caminoethvm v1.1.18-rc0 +replace github.com/ava-labs/coreth => github.com/chain4travel/caminoethvm v1.1.19-rc0 diff --git a/go.sum b/go.sum index 60f05f305b94..540f2ca9fcfd 100644 --- a/go.sum +++ b/go.sum @@ -106,8 +106,8 @@ github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XL github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/chain4travel/caminoethvm v1.1.18-rc0 h1:pzYPYEv40IwbR8yNbGCn+Vms4Rh8iR5Y1I8VaeiZiAQ= -github.com/chain4travel/caminoethvm v1.1.18-rc0/go.mod h1:4lQHHwvaN5o1QzbcpqTI3qbVBulByo112welHnu2aEs= +github.com/chain4travel/caminoethvm v1.1.19-rc0 h1:HfthNcZLyL9HS2f2Sv529lCvdmlzY6hThRnCtzcXRN0= +github.com/chain4travel/caminoethvm v1.1.19-rc0/go.mod h1:F2be/crCphktEOCKfR4P7r0rV0fppOsFMj0mR3kvVqQ= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/logex v1.2.0/go.mod h1:9+9sk7u7pGNWYMkh0hdiL++6OeibzJccyQU4p4MedaY= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= diff --git a/message/creator.go b/message/creator.go index a5711375c331..8040bccb1861 100644 --- a/message/creator.go +++ b/message/creator.go @@ -4,13 +4,13 @@ package message import ( - "fmt" "time" 
"github.com/prometheus/client_golang/prometheus" "github.com/ava-labs/avalanchego/utils/compression" "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/utils/metric" ) var _ Creator = (*creator)(nil) @@ -32,7 +32,7 @@ func NewCreator( compressionType compression.Type, maxMessageTimeout time.Duration, ) (Creator, error) { - namespace := fmt.Sprintf("%s_codec", parentNamespace) + namespace := metric.AppendNamespace(parentNamespace, "codec") builder, err := newMsgBuilder( log, namespace, diff --git a/network/ip_tracker.go b/network/ip_tracker.go index 60502dbbf4df..758c53494580 100644 --- a/network/ip_tracker.go +++ b/network/ip_tracker.go @@ -21,6 +21,7 @@ import ( "github.com/ava-labs/avalanchego/utils/ips" "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/utils/math" + "github.com/ava-labs/avalanchego/utils/metric" "github.com/ava-labs/avalanchego/utils/sampler" "github.com/ava-labs/avalanchego/utils/set" ) @@ -42,6 +43,11 @@ func newIPTracker( namespace string, registerer prometheus.Registerer, ) (*ipTracker, error) { + bloomNamespace := metric.AppendNamespace(namespace, "ip_bloom") + bloomMetrics, err := bloom.NewMetrics(bloomNamespace, registerer) + if err != nil { + return nil, err + } tracker := &ipTracker{ log: log, numValidatorIPs: prometheus.NewGauge(prometheus.GaugeOpts{ @@ -54,44 +60,15 @@ func newIPTracker( Name: "gossipable_ips", Help: "Number of IPs this node is willing to gossip", }), - bloomCount: prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: "ip_bloom_count", - Help: "Number of IP entries added to the bloom", - }), - bloomNumHashes: prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: "ip_bloom_hashes", - Help: "Number of hashes in the IP bloom", - }), - bloomNumEntries: prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: "ip_bloom_entries", - Help: "Number of entry slots in the IP bloom", - }), - bloomMaxCount: prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: namespace, - Name: "ip_bloom_max_count", - Help: "Maximum number of IP entries that can be added to the bloom before resetting", - }), - bloomResetCount: prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Name: "ip_bloom_reset_count", - Help: "Number times the IP bloom has been reset", - }), + bloomMetrics: bloomMetrics, connected: make(map[ids.NodeID]*ips.ClaimedIPPort), mostRecentValidatorIPs: make(map[ids.NodeID]*ips.ClaimedIPPort), gossipableIndicies: make(map[ids.NodeID]int), bloomAdditions: make(map[ids.NodeID]int), } - err := utils.Err( + err = utils.Err( registerer.Register(tracker.numValidatorIPs), registerer.Register(tracker.numGossipable), - registerer.Register(tracker.bloomCount), - registerer.Register(tracker.bloomNumHashes), - registerer.Register(tracker.bloomNumEntries), - registerer.Register(tracker.bloomMaxCount), - registerer.Register(tracker.bloomResetCount), ) if err != nil { return nil, err @@ -103,11 +80,7 @@ type ipTracker struct { log logging.Logger numValidatorIPs prometheus.Gauge numGossipable prometheus.Gauge - bloomCount prometheus.Gauge - bloomNumHashes prometheus.Gauge - bloomNumEntries prometheus.Gauge - bloomMaxCount prometheus.Gauge - bloomResetCount prometheus.Counter + bloomMetrics *bloom.Metrics lock sync.RWMutex // Manually tracked nodes are always treated like validators @@ -315,7 +288,7 @@ func (i *ipTracker) updateMostRecentValidatorIP(ip *ips.ClaimedIPPort) { i.bloomAdditions[ip.NodeID] = oldCount + 1 bloom.Add(i.bloom, 
ip.GossipID[:], i.bloomSalt) - i.bloomCount.Inc() + i.bloomMetrics.Count.Inc() } func (i *ipTracker) addGossipableIP(ip *ips.ClaimedIPPort) { @@ -428,10 +401,6 @@ func (i *ipTracker) resetBloom() error { bloom.Add(newFilter, ip.GossipID[:], newSalt) i.bloomAdditions[nodeID] = 1 } - i.bloomCount.Set(float64(len(i.mostRecentValidatorIPs))) - i.bloomNumHashes.Set(float64(numHashes)) - i.bloomNumEntries.Set(float64(numEntries)) - i.bloomMaxCount.Set(float64(i.maxBloomCount)) - i.bloomResetCount.Inc() + i.bloomMetrics.Reset(newFilter, i.maxBloomCount) return nil } diff --git a/network/ip_tracker_test.go b/network/ip_tracker_test.go index 921f2f33d655..052797d749dd 100644 --- a/network/ip_tracker_test.go +++ b/network/ip_tracker_test.go @@ -48,8 +48,8 @@ func requireMetricsConsistent(t *testing.T, tracker *ipTracker) { require := require.New(t) require.Equal(float64(len(tracker.mostRecentValidatorIPs)), testutil.ToFloat64(tracker.numValidatorIPs)) require.Equal(float64(len(tracker.gossipableIPs)), testutil.ToFloat64(tracker.numGossipable)) - require.Equal(float64(tracker.bloom.Count()), testutil.ToFloat64(tracker.bloomCount)) - require.Equal(float64(tracker.maxBloomCount), testutil.ToFloat64(tracker.bloomMaxCount)) + require.Equal(float64(tracker.bloom.Count()), testutil.ToFloat64(tracker.bloomMetrics.Count)) + require.Equal(float64(tracker.maxBloomCount), testutil.ToFloat64(tracker.bloomMetrics.MaxCount)) } func TestIPTracker_ManuallyTrack(t *testing.T) { diff --git a/network/network.go b/network/network.go index e4dca4b17825..cf8f0b3ac8cb 100644 --- a/network/network.go +++ b/network/network.go @@ -635,7 +635,8 @@ func (n *network) track(ip *ips.ClaimedIPPort) error { }, Signature: ip.Signature, } - if err := signedIP.Verify(ip.Cert); err != nil { + maxTimestamp := n.peerConfig.Clock.Time().Add(n.peerConfig.MaxClockDifference) + if err := signedIP.Verify(ip.Cert, maxTimestamp); err != nil { return err } diff --git a/network/p2p/client.go b/network/p2p/client.go index 6107f8abb7d9..b506baf9c630 100644 --- a/network/p2p/client.go +++ b/network/p2p/client.go @@ -75,7 +75,7 @@ func (c *Client) AppRequest( c.router.lock.Lock() defer c.router.lock.Unlock() - appRequestBytes = c.prefixMessage(appRequestBytes) + appRequestBytes = PrefixMessage(c.handlerPrefix, appRequestBytes) for nodeID := range nodeIDs { requestID := c.router.requestID if _, ok := c.router.pendingAppRequests[requestID]; ok { @@ -112,7 +112,7 @@ func (c *Client) AppGossip( ) error { return c.sender.SendAppGossip( ctx, - c.prefixMessage(appGossipBytes), + PrefixMessage(c.handlerPrefix, appGossipBytes), ) } @@ -125,7 +125,7 @@ func (c *Client) AppGossipSpecific( return c.sender.SendAppGossipSpecific( ctx, nodeIDs, - c.prefixMessage(appGossipBytes), + PrefixMessage(c.handlerPrefix, appGossipBytes), ) } @@ -153,7 +153,7 @@ func (c *Client) CrossChainAppRequest( ctx, chainID, requestID, - c.prefixMessage(appRequestBytes), + PrefixMessage(c.handlerPrefix, appRequestBytes), ); err != nil { return err } @@ -167,15 +167,14 @@ func (c *Client) CrossChainAppRequest( return nil } -// prefixMessage prefixes the original message with the handler identifier -// corresponding to this client. +// PrefixMessage prefixes the original message with the protocol identifier. // // Only gossip and request messages need to be prefixed. // Response messages don't need to be prefixed because request ids are tracked // which map to the expected response handler. 
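(editor's aside, not part of the patch) PrefixMessage, defined just below, and ParseMessage, added to router.go later in this diff, form a simple uvarint framing pair. A minimal round-trip sketch using only the standard library, with the handler ID 42 chosen purely for illustration:

package main

import (
	"encoding/binary"
	"fmt"
)

// prefixMessage mirrors p2p.PrefixMessage: prepend the protocol prefix to
// the payload in a freshly allocated slice.
func prefixMessage(prefix, msg []byte) []byte {
	out := make([]byte, len(prefix)+len(msg))
	copy(out, prefix)
	copy(out[len(prefix):], msg)
	return out
}

// parseMessage mirrors p2p.ParseMessage: split the leading uvarint handler
// ID from the unprefixed payload.
func parseMessage(msg []byte) (uint64, []byte, bool) {
	handlerID, bytesRead := binary.Uvarint(msg)
	if bytesRead <= 0 {
		return 0, nil, false
	}
	return handlerID, msg[bytesRead:], true
}

func main() {
	prefix := binary.AppendUvarint(nil, 42) // like p2p.ProtocolPrefix(42)
	framed := prefixMessage(prefix, []byte("ping"))
	id, payload, ok := parseMessage(framed)
	fmt.Println(id, string(payload), ok) // 42 ping true
}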
-func (c *Client) prefixMessage(src []byte) []byte { - messageBytes := make([]byte, len(c.handlerPrefix)+len(src)) - copy(messageBytes, c.handlerPrefix) - copy(messageBytes[len(c.handlerPrefix):], src) +func PrefixMessage(prefix, msg []byte) []byte { + messageBytes := make([]byte, len(prefix)+len(msg)) + copy(messageBytes, prefix) + copy(messageBytes[len(prefix):], msg) return messageBytes } diff --git a/network/p2p/gossip/bloom.go b/network/p2p/gossip/bloom.go index 5c477826c9b6..14d7ece6db85 100644 --- a/network/p2p/gossip/bloom.go +++ b/network/p2p/gossip/bloom.go @@ -6,6 +6,8 @@ package gossip import ( "crypto/rand" + "github.com/prometheus/client_golang/prometheus" + "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/bloom" "github.com/ava-labs/avalanchego/utils/math" @@ -18,29 +20,30 @@ import ( // Invariant: The returned bloom filter is not safe to reset concurrently with // other operations. However, it is otherwise safe to access concurrently. func NewBloomFilter( + registerer prometheus.Registerer, + namespace string, minTargetElements int, targetFalsePositiveProbability, resetFalsePositiveProbability float64, ) (*BloomFilter, error) { - numHashes, numEntries := bloom.OptimalParameters( - minTargetElements, - targetFalsePositiveProbability, - ) - b, err := bloom.New(numHashes, numEntries) + metrics, err := bloom.NewMetrics(namespace, registerer) if err != nil { return nil, err } - - salt, err := randomSalt() - return &BloomFilter{ + filter := &BloomFilter{ minTargetElements: minTargetElements, targetFalsePositiveProbability: targetFalsePositiveProbability, resetFalsePositiveProbability: resetFalsePositiveProbability, - maxCount: bloom.EstimateCount(numHashes, numEntries, resetFalsePositiveProbability), - bloom: b, - salt: salt, - }, err + metrics: metrics, + } + err = resetBloomFilter( + filter, + minTargetElements, + targetFalsePositiveProbability, + resetFalsePositiveProbability, + ) + return filter, err } type BloomFilter struct { @@ -48,6 +51,8 @@ type BloomFilter struct { targetFalsePositiveProbability float64 resetFalsePositiveProbability float64 + metrics *bloom.Metrics + maxCount int bloom *bloom.Filter // salt is provided to eventually unblock collisions in Bloom. 
It's possible @@ -59,6 +64,7 @@ type BloomFilter struct { func (b *BloomFilter) Add(gossipable Gossipable) { h := gossipable.GossipID() bloom.Add(b.bloom, h[:], b.salt[:]) + b.metrics.Count.Inc() } func (b *BloomFilter) Has(gossipable Gossipable) bool { @@ -88,26 +94,39 @@ func ResetBloomFilterIfNeeded( return false, nil } - numHashes, numEntries := bloom.OptimalParameters( - math.Max(bloomFilter.minTargetElements, targetElements), + targetElements = math.Max(bloomFilter.minTargetElements, targetElements) + err := resetBloomFilter( + bloomFilter, + targetElements, bloomFilter.targetFalsePositiveProbability, + bloomFilter.resetFalsePositiveProbability, + ) + return err == nil, err +} + +func resetBloomFilter( + bloomFilter *BloomFilter, + targetElements int, + targetFalsePositiveProbability, + resetFalsePositiveProbability float64, +) error { + numHashes, numEntries := bloom.OptimalParameters( + targetElements, + targetFalsePositiveProbability, ) newBloom, err := bloom.New(numHashes, numEntries) if err != nil { - return false, err + return err } - salt, err := randomSalt() - if err != nil { - return false, err + var newSalt ids.ID + if _, err := rand.Read(newSalt[:]); err != nil { + return err } - bloomFilter.maxCount = bloom.EstimateCount(numHashes, numEntries, bloomFilter.resetFalsePositiveProbability) + + bloomFilter.maxCount = bloom.EstimateCount(numHashes, numEntries, resetFalsePositiveProbability) bloomFilter.bloom = newBloom - bloomFilter.salt = salt - return true, nil -} + bloomFilter.salt = newSalt -func randomSalt() (ids.ID, error) { - salt := ids.ID{} - _, err := rand.Read(salt[:]) - return salt, err + bloomFilter.metrics.Reset(newBloom, bloomFilter.maxCount) + return nil } diff --git a/network/p2p/gossip/bloom_test.go b/network/p2p/gossip/bloom_test.go index 128ea97824c0..00f75165b467 100644 --- a/network/p2p/gossip/bloom_test.go +++ b/network/p2p/gossip/bloom_test.go @@ -10,8 +10,10 @@ import ( "golang.org/x/exp/slices" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/testutil" + "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/bloom" ) func TestBloomFilterRefresh(t *testing.T) { @@ -20,7 +22,7 @@ func TestBloomFilterRefresh(t *testing.T) { minTargetElements int targetFalsePositiveProbability float64 resetFalsePositiveProbability float64 - reset bool + resetCount uint64 add []*testTx expected []*testTx }{ @@ -29,7 +31,7 @@ func TestBloomFilterRefresh(t *testing.T) { minTargetElements: 1, targetFalsePositiveProbability: 0.01, resetFalsePositiveProbability: 1, - reset: false, // maxCount = 9223372036854775807 + resetCount: 0, // maxCount = 9223372036854775807 add: []*testTx{ {id: ids.ID{0}}, {id: ids.ID{1}}, @@ -46,7 +48,7 @@ func TestBloomFilterRefresh(t *testing.T) { minTargetElements: 1, targetFalsePositiveProbability: 0.01, resetFalsePositiveProbability: 0.0000000000000001, // maxCount = 1 - reset: true, + resetCount: 1, add: []*testTx{ {id: ids.ID{0}}, {id: ids.ID{1}}, @@ -56,35 +58,41 @@ func TestBloomFilterRefresh(t *testing.T) { {id: ids.ID{2}}, }, }, + { + name: "multiple refresh", + minTargetElements: 1, + targetFalsePositiveProbability: 0.01, + resetFalsePositiveProbability: 0.0000000000000001, // maxCount = 1 + resetCount: 2, + add: []*testTx{ + {id: ids.ID{0}}, + {id: ids.ID{1}}, + {id: ids.ID{2}}, + {id: ids.ID{3}}, + {id: ids.ID{4}}, + }, + expected: []*testTx{ + {id: ids.ID{4}}, + }, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { require := require.New(t) - 
numHashes, numEntries := bloom.OptimalParameters( - tt.minTargetElements, - tt.targetFalsePositiveProbability, - ) - b, err := bloom.New(numHashes, numEntries) + bloom, err := NewBloomFilter(prometheus.NewRegistry(), "", tt.minTargetElements, tt.targetFalsePositiveProbability, tt.resetFalsePositiveProbability) require.NoError(err) - bloom := BloomFilter{ - bloom: b, - maxCount: bloom.EstimateCount(numHashes, numEntries, tt.resetFalsePositiveProbability), - minTargetElements: tt.minTargetElements, - targetFalsePositiveProbability: tt.targetFalsePositiveProbability, - resetFalsePositiveProbability: tt.resetFalsePositiveProbability, - } - var didReset bool + var resetCount uint64 for _, item := range tt.add { bloomBytes, saltBytes := bloom.Marshal() initialBloomBytes := slices.Clone(bloomBytes) initialSaltBytes := slices.Clone(saltBytes) - reset, err := ResetBloomFilterIfNeeded(&bloom, len(tt.add)) + reset, err := ResetBloomFilterIfNeeded(bloom, len(tt.add)) require.NoError(err) if reset { - didReset = reset + resetCount++ } bloom.Add(item) @@ -92,7 +100,8 @@ func TestBloomFilterRefresh(t *testing.T) { require.Equal(initialSaltBytes, saltBytes) } - require.Equal(tt.reset, didReset) + require.Equal(tt.resetCount, resetCount) + require.Equal(float64(tt.resetCount+1), testutil.ToFloat64(bloom.metrics.ResetCount)) for _, expected := range tt.expected { require.True(bloom.Has(expected)) } diff --git a/network/p2p/gossip/gossip.go b/network/p2p/gossip/gossip.go index 97e90e6e99af..ab90e593b5a3 100644 --- a/network/p2p/gossip/gossip.go +++ b/network/p2p/gossip/gossip.go @@ -14,11 +14,8 @@ import ( "go.uber.org/zap" - "google.golang.org/protobuf/proto" - "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/network/p2p" - "github.com/ava-labs/avalanchego/proto/pb/sdk" "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/buffer" "github.com/ava-labs/avalanchego/utils/logging" @@ -151,12 +148,7 @@ type PullGossiper[T Gossipable] struct { } func (p *PullGossiper[_]) Gossip(ctx context.Context) error { - bloom, salt := p.set.GetFilter() - request := &sdk.PullGossipRequest{ - Filter: bloom, - Salt: salt, - } - msgBytes, err := proto.Marshal(request) + msgBytes, err := MarshalAppRequest(p.set.GetFilter()) if err != nil { return err } @@ -186,14 +178,14 @@ func (p *PullGossiper[_]) handleResponse( return } - response := &sdk.PullGossipResponse{} - if err := proto.Unmarshal(responseBytes, response); err != nil { + gossip, err := ParseAppResponse(responseBytes) + if err != nil { p.log.Debug("failed to unmarshal gossip response", zap.Error(err)) return } receivedBytes := 0 - for _, bytes := range response.Gossip { + for _, bytes := range gossip { receivedBytes += len(bytes) gossipable, err := p.marshaller.UnmarshalGossip(bytes) @@ -235,7 +227,7 @@ func (p *PullGossiper[_]) handleResponse( return } - receivedCountMetric.Add(float64(len(response.Gossip))) + receivedCountMetric.Add(float64(len(gossip))) receivedBytesMetric.Add(float64(receivedBytes)) } @@ -270,11 +262,8 @@ func (p *PushGossiper[T]) Gossip(ctx context.Context) error { return nil } - msg := &sdk.PushGossip{ - Gossip: make([][]byte, 0, p.pending.Len()), - } - sentBytes := 0 + gossip := make([][]byte, 0, p.pending.Len()) for sentBytes < p.targetGossipSize { gossipable, ok := p.pending.PeekLeft() if !ok { @@ -288,12 +277,12 @@ func (p *PushGossiper[T]) Gossip(ctx context.Context) error { return err } - msg.Gossip = append(msg.Gossip, bytes) + gossip = append(gossip, bytes) sentBytes += len(bytes) 
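// (editor's note) The loop above drains p.pending into the gossip batch
// until targetGossipSize bytes have been queued; MarshalAppGossip just below
// wraps that batch in the sdk.PushGossip protobuf, which Handler.AppGossip
// parses back out on the receiving side via ParseAppGossip.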
p.pending.PopLeft() } - msgBytes, err := proto.Marshal(msg) + msgBytes, err := MarshalAppGossip(gossip) if err != nil { return err } @@ -308,7 +297,7 @@ func (p *PushGossiper[T]) Gossip(ctx context.Context) error { return fmt.Errorf("failed to get sent bytes metric: %w", err) } - sentCountMetric.Add(float64(len(msg.Gossip))) + sentCountMetric.Add(float64(len(gossip))) sentBytesMetric.Add(float64(sentBytes)) return p.client.AppGossip(ctx, msgBytes) diff --git a/network/p2p/gossip/gossip_test.go b/network/p2p/gossip/gossip_test.go index 1c0941bd7eba..a58d98f8fe58 100644 --- a/network/p2p/gossip/gossip_test.go +++ b/network/p2p/gossip/gossip_test.go @@ -111,7 +111,7 @@ func TestGossiperGossip(t *testing.T) { responseNetwork, err := p2p.NewNetwork(logging.NoLog{}, responseSender, prometheus.NewRegistry(), "") require.NoError(err) - responseBloom, err := NewBloomFilter(1000, 0.01, 0.05) + responseBloom, err := NewBloomFilter(prometheus.NewRegistry(), "", 1000, 0.01, 0.05) require.NoError(err) responseSet := &testSet{ txs: make(map[ids.ID]*testTx), @@ -143,7 +143,7 @@ func TestGossiperGossip(t *testing.T) { require.NoError(err) require.NoError(requestNetwork.Connected(context.Background(), ids.EmptyNodeID, nil)) - bloom, err := NewBloomFilter(1000, 0.01, 0.05) + bloom, err := NewBloomFilter(prometheus.NewRegistry(), "", 1000, 0.01, 0.05) require.NoError(err) requestSet := &testSet{ txs: make(map[ids.ID]*testTx), @@ -365,7 +365,7 @@ func TestPushGossipE2E(t *testing.T) { knownTx := &testTx{id: ids.GenerateTestID()} log := logging.NoLog{} - bloom, err := NewBloomFilter(100, 0.01, 0.05) + bloom, err := NewBloomFilter(prometheus.NewRegistry(), "", 100, 0.01, 0.05) require.NoError(err) set := &testSet{ txs: make(map[ids.ID]*testTx), diff --git a/network/p2p/gossip/handler.go b/network/p2p/gossip/handler.go index 15ef1fe16684..38e883926366 100644 --- a/network/p2p/gossip/handler.go +++ b/network/p2p/gossip/handler.go @@ -10,11 +10,8 @@ import ( "go.uber.org/zap" - "google.golang.org/protobuf/proto" - "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/network/p2p" - "github.com/ava-labs/avalanchego/proto/pb/sdk" "github.com/ava-labs/avalanchego/utils/bloom" "github.com/ava-labs/avalanchego/utils/logging" ) @@ -51,17 +48,7 @@ type Handler[T Gossipable] struct { } func (h Handler[T]) AppRequest(_ context.Context, _ ids.NodeID, _ time.Time, requestBytes []byte) ([]byte, error) { - request := &sdk.PullGossipRequest{} - if err := proto.Unmarshal(requestBytes, request); err != nil { - return nil, err - } - - salt, err := ids.ToID(request.Salt) - if err != nil { - return nil, err - } - - filter, err := bloom.Parse(request.Filter) + filter, salt, err := ParseAppRequest(requestBytes) if err != nil { return nil, err } @@ -94,10 +81,6 @@ func (h Handler[T]) AppRequest(_ context.Context, _ ids.NodeID, _ time.Time, req return nil, err } - response := &sdk.PullGossipResponse{ - Gossip: gossipBytes, - } - sentCountMetric, err := h.metrics.sentCount.GetMetricWith(pullLabels) if err != nil { return nil, fmt.Errorf("failed to get sent count metric: %w", err) @@ -108,21 +91,21 @@ func (h Handler[T]) AppRequest(_ context.Context, _ ids.NodeID, _ time.Time, req return nil, fmt.Errorf("failed to get sent bytes metric: %w", err) } - sentCountMetric.Add(float64(len(response.Gossip))) + sentCountMetric.Add(float64(len(gossipBytes))) sentBytesMetric.Add(float64(responseSize)) - return proto.Marshal(response) + return MarshalAppResponse(gossipBytes) } func (h Handler[_]) AppGossip(ctx context.Context, nodeID 
ids.NodeID, gossipBytes []byte) { - msg := &sdk.PushGossip{} - if err := proto.Unmarshal(gossipBytes, msg); err != nil { + gossip, err := ParseAppGossip(gossipBytes) + if err != nil { h.log.Debug("failed to unmarshal gossip", zap.Error(err)) return } receivedBytes := 0 - for _, bytes := range msg.Gossip { + for _, bytes := range gossip { receivedBytes += len(bytes) gossipable, err := h.marshaller.UnmarshalGossip(bytes) if err != nil { @@ -164,6 +147,6 @@ func (h Handler[_]) AppGossip(ctx context.Context, nodeID ids.NodeID, gossipByte return } - receivedCountMetric.Add(float64(len(msg.Gossip))) + receivedCountMetric.Add(float64(len(gossip))) receivedBytesMetric.Add(float64(receivedBytes)) } diff --git a/network/p2p/gossip/message.go b/network/p2p/gossip/message.go new file mode 100644 index 000000000000..47e6784e43d8 --- /dev/null +++ b/network/p2p/gossip/message.go @@ -0,0 +1,59 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package gossip + +import ( + "google.golang.org/protobuf/proto" + + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/proto/pb/sdk" + "github.com/ava-labs/avalanchego/utils/bloom" +) + +func MarshalAppRequest(filter, salt []byte) ([]byte, error) { + request := &sdk.PullGossipRequest{ + Filter: filter, + Salt: salt, + } + return proto.Marshal(request) +} + +func ParseAppRequest(bytes []byte) (*bloom.ReadFilter, ids.ID, error) { + request := &sdk.PullGossipRequest{} + if err := proto.Unmarshal(bytes, request); err != nil { + return nil, ids.Empty, err + } + + salt, err := ids.ToID(request.Salt) + if err != nil { + return nil, ids.Empty, err + } + + filter, err := bloom.Parse(request.Filter) + return filter, salt, err +} + +func MarshalAppResponse(gossip [][]byte) ([]byte, error) { + return proto.Marshal(&sdk.PullGossipResponse{ + Gossip: gossip, + }) +} + +func ParseAppResponse(bytes []byte) ([][]byte, error) { + response := &sdk.PullGossipResponse{} + err := proto.Unmarshal(bytes, response) + return response.Gossip, err +} + +func MarshalAppGossip(gossip [][]byte) ([]byte, error) { + return proto.Marshal(&sdk.PushGossip{ + Gossip: gossip, + }) +} + +func ParseAppGossip(bytes []byte) ([][]byte, error) { + msg := &sdk.PushGossip{} + err := proto.Unmarshal(bytes, msg) + return msg.Gossip, err +} diff --git a/network/p2p/network.go b/network/p2p/network.go index 604d06db617f..a98579c44183 100644 --- a/network/p2p/network.go +++ b/network/p2p/network.go @@ -217,7 +217,7 @@ func (n *Network) NewClient(handlerID uint64, options ...ClientOption) *Client { client := &Client{ handlerID: handlerID, handlerIDStr: strconv.FormatUint(handlerID, 10), - handlerPrefix: binary.AppendUvarint(nil, handlerID), + handlerPrefix: ProtocolPrefix(handlerID), sender: n.sender, router: n.router, options: &clientOptions{ @@ -281,3 +281,7 @@ type peerSampler struct { func (p peerSampler) Sample(_ context.Context, limit int) []ids.NodeID { return p.peers.Sample(limit) } + +func ProtocolPrefix(handlerID uint64) []byte { + return binary.AppendUvarint(nil, handlerID) +} diff --git a/network/p2p/network_test.go b/network/p2p/network_test.go index 3bf902c38035..40dc0ba54056 100644 --- a/network/p2p/network_test.go +++ b/network/p2p/network_test.go @@ -614,7 +614,7 @@ func TestNodeSamplerClientOption(t *testing.T) { close(done) } - require.ErrorIs(tt.expectedErr, err) + require.ErrorIs(err, tt.expectedErr) <-done }) } diff --git a/network/p2p/router.go b/network/p2p/router.go index 82fdbf24fbc3..13a38abc56c5 100644 
--- a/network/p2p/router.go +++ b/network/p2p/router.go @@ -399,18 +399,19 @@ func (r *router) CrossChainAppResponse(ctx context.Context, chainID ids.ID, requ // - A boolean indicating that parsing succeeded. // // Invariant: Assumes [r.lock] isn't held. -func (r *router) parse(msg []byte) ([]byte, *meteredHandler, string, bool) { - handlerID, bytesRead := binary.Uvarint(msg) - if bytesRead <= 0 { +func (r *router) parse(prefixedMsg []byte) ([]byte, *meteredHandler, string, bool) { + handlerID, msg, ok := ParseMessage(prefixedMsg) + if !ok { return nil, nil, "", false } + handlerStr := strconv.FormatUint(handlerID, 10) + r.lock.RLock() defer r.lock.RUnlock() - handlerStr := strconv.FormatUint(handlerID, 10) handler, ok := r.handlers[handlerID] - return msg[bytesRead:], handler, handlerStr, ok + return msg, handler, handlerStr, ok } // Invariant: Assumes [r.lock] isn't held. @@ -432,3 +433,17 @@ func (r *router) clearCrossChainAppRequest(requestID uint32) (pendingCrossChainA delete(r.pendingCrossChainAppRequests, requestID) return callback, ok } + +// Parse a gossip or request message. +// +// Returns: +// - The protocol ID. +// - The unprefixed protocol message. +// - A boolean indicating that parsing succeeded. +func ParseMessage(msg []byte) (uint64, []byte, bool) { + handlerID, bytesRead := binary.Uvarint(msg) + if bytesRead <= 0 { + return 0, nil, false + } + return handlerID, msg[bytesRead:], true +} diff --git a/network/peer/ip.go b/network/peer/ip.go index 0112374b9b80..590003c850d8 100644 --- a/network/peer/ip.go +++ b/network/peer/ip.go @@ -6,6 +6,9 @@ package peer import ( "crypto" "crypto/rand" + "errors" + "fmt" + "time" "github.com/ava-labs/avalanchego/staking" "github.com/ava-labs/avalanchego/utils/hashing" @@ -13,6 +16,11 @@ import ( "github.com/ava-labs/avalanchego/utils/wrappers" ) +var ( + errTimestampTooFarInFuture = errors.New("timestamp too far in the future") + errInvalidSignature = errors.New("invalid signature") +) + // UnsignedIP is used for a validator to claim an IP. The [Timestamp] is used to // ensure that the most updated IP claim is tracked by peers for a given // validator. @@ -49,10 +57,24 @@ type SignedIP struct { Signature []byte } -func (ip *SignedIP) Verify(cert *staking.Certificate) error { - return staking.CheckSignature( +// Returns nil if: +// * [ip.Timestamp] is not after [maxTimestamp]. +// * [ip.Signature] is a valid signature over [ip.UnsignedIP] from [cert]. +func (ip *SignedIP) Verify( + cert *staking.Certificate, + maxTimestamp time.Time, +) error { + maxUnixTimestamp := uint64(maxTimestamp.Unix()) + if ip.Timestamp > maxUnixTimestamp { + return fmt.Errorf("%w: timestamp %d > maxTimestamp %d", errTimestampTooFarInFuture, ip.Timestamp, maxUnixTimestamp) + } + + if err := staking.CheckSignature( cert, ip.UnsignedIP.bytes(), ip.Signature, - ) + ); err != nil { + return fmt.Errorf("%w: %w", errInvalidSignature, err) + } + return nil } diff --git a/network/peer/ip_test.go b/network/peer/ip_test.go new file mode 100644 index 000000000000..3b4854562ec5 --- /dev/null +++ b/network/peer/ip_test.go @@ -0,0 +1,110 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
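// (editor's note) This new test file exercises the max-timestamp bound added
// to SignedIP.Verify above: signing times at or before the bound verify,
// future-dated ones fail with errTimestampTooFarInFuture, and a signature
// checked against the wrong certificate fails with errInvalidSignature.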
+ +package peer + +import ( + "crypto" + "net" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/staking" + "github.com/ava-labs/avalanchego/utils/ips" +) + +func TestSignedIpVerify(t *testing.T) { + tlsCert1, err := staking.NewTLSCert() + require.NoError(t, err) + cert1, err := staking.CertificateFromX509(tlsCert1.Leaf) + require.NoError(t, err) + require.NoError(t, staking.ValidateCertificate(cert1)) + + tlsCert2, err := staking.NewTLSCert() + require.NoError(t, err) + cert2, err := staking.CertificateFromX509(tlsCert2.Leaf) + require.NoError(t, err) + require.NoError(t, staking.ValidateCertificate(cert2)) + + now := time.Now() + + type test struct { + name string + signer crypto.Signer + expectedCert *staking.Certificate + ip UnsignedIP + maxTimestamp time.Time + expectedErr error + } + + tests := []test{ + { + name: "valid (before max time)", + signer: tlsCert1.PrivateKey.(crypto.Signer), + expectedCert: cert1, + ip: UnsignedIP{ + IPPort: ips.IPPort{ + IP: net.IPv4(1, 2, 3, 4), + Port: 1, + }, + Timestamp: uint64(now.Unix()) - 1, + }, + maxTimestamp: now, + expectedErr: nil, + }, + { + name: "valid (at max time)", + signer: tlsCert1.PrivateKey.(crypto.Signer), + expectedCert: cert1, + ip: UnsignedIP{ + IPPort: ips.IPPort{ + IP: net.IPv4(1, 2, 3, 4), + Port: 1, + }, + Timestamp: uint64(now.Unix()), + }, + maxTimestamp: now, + expectedErr: nil, + }, + { + name: "timestamp too far ahead", + signer: tlsCert1.PrivateKey.(crypto.Signer), + expectedCert: cert1, + ip: UnsignedIP{ + IPPort: ips.IPPort{ + IP: net.IPv4(1, 2, 3, 4), + Port: 1, + }, + Timestamp: uint64(now.Unix()) + 1, + }, + maxTimestamp: now, + expectedErr: errTimestampTooFarInFuture, + }, + { + name: "sig from wrong cert", + signer: tlsCert1.PrivateKey.(crypto.Signer), + expectedCert: cert2, // note this isn't cert1 + ip: UnsignedIP{ + IPPort: ips.IPPort{ + IP: net.IPv4(1, 2, 3, 4), + Port: 1, + }, + Timestamp: uint64(now.Unix()), + }, + maxTimestamp: now, + expectedErr: errInvalidSignature, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + signedIP, err := tt.ip.Sign(tt.signer) + require.NoError(t, err) + + err = signedIP.Verify(tt.expectedCert, tt.maxTimestamp) + require.ErrorIs(t, err, tt.expectedErr) + }) + } +} diff --git a/network/peer/peer.go b/network/peer/peer.go index 255f13821f23..f5cebb613e00 100644 --- a/network/peer/peer.go +++ b/network/peer/peer.go @@ -906,8 +906,9 @@ func (p *peer) handleHandshake(msg *p2p.Handshake) { return } - myTime := p.Clock.Unix() - clockDifference := math.Abs(float64(msg.MyTime) - float64(myTime)) + myTime := p.Clock.Time() + myTimeUnix := uint64(myTime.Unix()) + clockDifference := math.Abs(float64(msg.MyTime) - float64(myTimeUnix)) p.Metrics.ClockSkew.Observe(clockDifference) @@ -916,13 +917,13 @@ func (p *peer) handleHandshake(msg *p2p.Handshake) { p.Log.Warn("beacon reports out of sync time", zap.Stringer("nodeID", p.id), zap.Uint64("peerTime", msg.MyTime), - zap.Uint64("myTime", myTime), + zap.Uint64("myTime", myTimeUnix), ) } else { p.Log.Debug("peer reports out of sync time", zap.Stringer("nodeID", p.id), zap.Uint64("peerTime", msg.MyTime), - zap.Uint64("myTime", myTime), + zap.Uint64("myTime", myTimeUnix), ) } p.StartClose() @@ -974,18 +975,6 @@ func (p *peer) handleHandshake(msg *p2p.Handshake) { return } - // Note that it is expected that the [ipSigningTime] can be in the past. We - // are just verifying that the claimed signing time isn't too far in the - // future here. 
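(editor's aside, not part of the patch) The inline check being deleted here moves into SignedIP.Verify, shown in ip.go above. Distilled to its core, the bound is one-sided: the claimed signing time may be arbitrarily old, but not ahead of local time plus the allowed clock skew. A hypothetical standalone sketch of just that bound, assuming only the standard library:

package main

import (
	"errors"
	"fmt"
	"time"
)

var errTimestampTooFarInFuture = errors.New("timestamp too far in the future")

// checkIPTimestamp mirrors the bound enforced by the new SignedIP.Verify:
// reject only timestamps beyond now + maxClockDifference.
func checkIPTimestamp(ipTimestamp uint64, now time.Time, maxClockDifference time.Duration) error {
	maxUnixTimestamp := uint64(now.Add(maxClockDifference).Unix())
	if ipTimestamp > maxUnixTimestamp {
		return fmt.Errorf("%w: timestamp %d > maxTimestamp %d", errTimestampTooFarInFuture, ipTimestamp, maxUnixTimestamp)
	}
	return nil
}

func main() {
	now := time.Now()
	// An old (or current) signing time is fine.
	fmt.Println(checkIPTimestamp(uint64(now.Unix()), now, time.Minute)) // <nil>
	// A signing time beyond the skew window is rejected.
	fmt.Println(checkIPTimestamp(uint64(now.Add(2*time.Minute).Unix()), now, time.Minute)) // error
}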
- if float64(msg.IpSigningTime)-float64(myTime) > p.MaxClockDifference.Seconds() { - p.Log.Debug("peer attempting to connect with version timestamp too far in the future", - zap.Stringer("nodeID", p.id), - zap.Uint64("ipSigningTime", msg.IpSigningTime), - ) - p.StartClose() - return - } - // handle subnet IDs for _, subnetIDBytes := range msg.TrackedSubnets { subnetID, err := ids.ToID(subnetIDBytes) @@ -1089,11 +1078,24 @@ func (p *peer) handleHandshake(msg *p2p.Handshake) { }, Signature: msg.Sig, } - if err := p.ip.Verify(p.cert); err != nil { - p.Log.Debug("signature verification failed", - zap.Stringer("nodeID", p.id), - zap.Error(err), - ) + maxTimestamp := myTime.Add(p.MaxClockDifference) + if err := p.ip.Verify(p.cert, maxTimestamp); err != nil { + if _, ok := p.Beacons.GetValidator(constants.PrimaryNetworkID, p.id); ok { + p.Log.Warn("beacon has invalid signature or is out of sync", + zap.Stringer("nodeID", p.id), + zap.Uint64("peerTime", msg.MyTime), + zap.Uint64("myTime", myTimeUnix), + zap.Error(err), + ) + } else { + p.Log.Debug("peer has invalid signature or is out of sync", + zap.Stringer("nodeID", p.id), + zap.Uint64("peerTime", msg.MyTime), + zap.Uint64("myTime", myTimeUnix), + zap.Error(err), + ) + } + p.StartClose() return } diff --git a/network/throttling/inbound_msg_throttler.go b/network/throttling/inbound_msg_throttler.go index 86e20085466c..ea9167deca15 100644 --- a/network/throttling/inbound_msg_throttler.go +++ b/network/throttling/inbound_msg_throttler.go @@ -5,7 +5,6 @@ package throttling import ( "context" - "fmt" "github.com/prometheus/client_golang/prometheus" @@ -13,6 +12,7 @@ import ( "github.com/ava-labs/avalanchego/snow/networking/tracker" "github.com/ava-labs/avalanchego/snow/validators" "github.com/ava-labs/avalanchego/utils/logging" + "github.com/ava-labs/avalanchego/utils/metric" ) var _ InboundMsgThrottler = (*inboundMsgThrottler)(nil) @@ -90,7 +90,7 @@ func NewInboundMsgThrottler( return nil, err } cpuThrottler, err := NewSystemThrottler( - fmt.Sprintf("%s_cpu", namespace), + metric.AppendNamespace(namespace, "cpu"), registerer, throttlerConfig.CPUThrottlerConfig, resourceTracker.CPUTracker(), @@ -100,7 +100,7 @@ func NewInboundMsgThrottler( return nil, err } diskThrottler, err := NewSystemThrottler( - fmt.Sprintf("%s_disk", namespace), + metric.AppendNamespace(namespace, "disk"), registerer, throttlerConfig.DiskThrottlerConfig, resourceTracker.DiskTracker(), diff --git a/node/node.go b/node/node.go index a9fb7f9ed22d..36fae00687ab 100644 --- a/node/node.go +++ b/node/node.go @@ -88,15 +88,12 @@ import ( "github.com/ava-labs/avalanchego/version" "github.com/ava-labs/avalanchego/vms" "github.com/ava-labs/avalanchego/vms/avm" - "github.com/ava-labs/avalanchego/vms/nftfx" "github.com/ava-labs/avalanchego/vms/platformvm" "github.com/ava-labs/avalanchego/vms/platformvm/block" "github.com/ava-labs/avalanchego/vms/platformvm/signer" "github.com/ava-labs/avalanchego/vms/platformvm/txs" - "github.com/ava-labs/avalanchego/vms/propertyfx" "github.com/ava-labs/avalanchego/vms/registry" "github.com/ava-labs/avalanchego/vms/rpcchainvm/runtime" - "github.com/ava-labs/avalanchego/vms/secp256k1fx" ipcsapi "github.com/ava-labs/avalanchego/api/ipcs" avmconfig "github.com/ava-labs/avalanchego/vms/avm/config" @@ -1244,9 +1241,6 @@ func (n *Node) initVMs() error { }, }), n.VMManager.RegisterFactory(context.TODO(), constants.EVMID, &coreth.Factory{}), - n.VMManager.RegisterFactory(context.TODO(), secp256k1fx.ID, &secp256k1fx.Factory{}), - 
n.VMManager.RegisterFactory(context.TODO(), nftfx.ID, &nftfx.Factory{}), - n.VMManager.RegisterFactory(context.TODO(), propertyfx.ID, &propertyfx.Factory{}), ) if err != nil { return err @@ -1351,6 +1345,7 @@ func (n *Node) initAdminAPI() error { admin.Config{ Secret: n.Config.AdminAPIEnabledSecret, Log: n.Log, + DB: n.DB, ChainManager: n.chainManager, HTTPServer: n.APIServer, ProfileDir: n.Config.ProfilerConfig.Dir, diff --git a/scripts/build.sh b/scripts/build.sh index f268def6ba5a..988ec0678008 100755 --- a/scripts/build.sh +++ b/scripts/build.sh @@ -6,7 +6,7 @@ set -euo pipefail CAMINOGO_PATH=$( cd "$( dirname "${BASH_SOURCE[0]}" )"; cd .. && pwd ) echo "Downloading dependencies..." -(cd $CAMINOGO_PATH && go mod download) +(cd "$CAMINOGO_PATH" && go mod download) # Build caminogo "$CAMINOGO_PATH"/scripts/build_camino.sh \ No newline at end of file diff --git a/scripts/build_avalanche.sh b/scripts/build_avalanche.sh index dcfaed4c420d..27e36d4913d7 100755 --- a/scripts/build_avalanche.sh +++ b/scripts/build_avalanche.sh @@ -1,4 +1,5 @@ #!/usr/bin/env bash +# shellcheck disable=all set -euo pipefail diff --git a/scripts/build_camino.sh b/scripts/build_camino.sh index 280dcd936ca9..f9f7d014b03c 100755 --- a/scripts/build_camino.sh +++ b/scripts/build_camino.sh @@ -51,11 +51,11 @@ LDFLAGS="$LDFLAGS $static_ld_flags" go build -ldflags "$LDFLAGS" -o "$CAMINOGO_BIN_PATH" "$CAMINOGO_PATH/main/"*.go # Make plugin folder -mkdir -p $plugin_dir +mkdir -p "$plugin_dir" # Exit build successfully if the binaries are created if [[ -f "$CAMINOGO_BIN_PATH" ]]; then - ln -sf caminogo $camino_node_symlink_path + ln -sf caminogo "$camino_node_symlink_path" echo "Build Successful" exit 0 else diff --git a/scripts/build_fuzz.sh b/scripts/build_fuzz.sh index 3884cd6d1818..25a1eff8cbf3 100755 --- a/scripts/build_fuzz.sh +++ b/scripts/build_fuzz.sh @@ -18,18 +18,17 @@ source "$CAMINOGO_PATH"/scripts/constants.sh fuzzTime=${1:-1} fuzzDir=${2:-.} -files=$(grep -r --include='**_test.go' --files-with-matches 'func Fuzz' $fuzzDir) +files=$(grep -r --include='**_test.go' --files-with-matches 'func Fuzz' "$fuzzDir") failed=false for file in ${files} do - funcs=$(grep -oP 'func \K(Fuzz\w*)' $file) + funcs=$(grep -oP 'func \K(Fuzz\w*)' "$file") for func in ${funcs} do echo "Fuzzing $func in $file" - parentDir=$(dirname $file) - go test $parentDir -run=$func -fuzz=$func -fuzztime=${fuzzTime}s + parentDir=$(dirname "$file") # If any of the fuzz tests fail, return exit code 1 - if [ $? -ne 0 ]; then + if ! go test "$parentDir" -run="$func" -fuzz="$func" -fuzztime="${fuzzTime}"s; then failed=true fi done diff --git a/scripts/build_genesis_generator.sh b/scripts/build_genesis_generator.sh index 9c548be69056..4f837849dc55 100755 --- a/scripts/build_genesis_generator.sh +++ b/scripts/build_genesis_generator.sh @@ -13,11 +13,11 @@ source "$CAMINOGO_PATH"/scripts/constants.sh source "$CAMINOGO_PATH"/scripts/constants.sh echo "Downloading dependencies..." 
-(cd $CAMINOGO_PATH && go mod download) +(cd "$CAMINOGO_PATH" && go mod download) # Create tools directory tools_dir=$build_dir/tools/ -mkdir -p $tools_dir +mkdir -p "$tools_dir" target="$tools_dir/genesis-generator" go build -ldflags="-s -w" -o "$target" "$CAMINOGO_PATH/tools/genesis/"*.go diff --git a/scripts/build_image.sh b/scripts/build_image.sh index 0a82c5342b12..9fb42964ae81 100755 --- a/scripts/build_image.sh +++ b/scripts/build_image.sh @@ -1,4 +1,6 @@ #!/usr/bin/env bash +# shellcheck disable=all +# TODO: re-assess shellcheck disable=all after workflow cleanup set -euo pipefail diff --git a/scripts/build_publish_image.sh b/scripts/build_publish_image.sh index 40c3cfc952e8..5341beb8cc92 100755 --- a/scripts/build_publish_image.sh +++ b/scripts/build_publish_image.sh @@ -23,4 +23,4 @@ echo "Pushing: $camino_node_dockerhub_repo:$current_branch" echo "$DOCKER_PASS" | docker login --username "$DOCKER_USERNAME" --password-stdin ## pushing image with tags -docker image push -a $camino_node_dockerhub_repo +docker image push -a "$camino_node_dockerhub_repo" diff --git a/scripts/build_releases.sh b/scripts/build_releases.sh index 9a79ad383890..46b7ab6281a8 100755 --- a/scripts/build_releases.sh +++ b/scripts/build_releases.sh @@ -39,19 +39,20 @@ publish () { --upload-file "$1" \ --header "Content-Type:application/octet-stream" \ --write-out "%{http_code}" \ - --output $logOut \ + --output "$logOut" \ "${UPLOAD_URL}") - if [ "$?" -ne 0 ]; then - echo "err: curl command failed!!!" - rm $logOut - return 1 - fi + curl_success=$? + if [ $curl_success -ne 0 ]; then + echo "err: curl command failed!!!" + rm "$logOut" + return 1 + fi - cat $logOut && echo "" - rm $logOut + cat "$logOut" && echo "" + rm "$logOut" - if [ $response -ge 400 ]; then + if [ "$response" -ge 400 ]; then echo "err: upload not successful ($response)!!!" 
return 1 fi @@ -59,35 +60,34 @@ publish () { if [ -n "$AUTH_HEADER" ]; then GH_API="https://api.github.com/repos/${GITHUB_REPOSITORY}" - GH_TAGS="${GH_API}/releases/tags/$RELEASE_TAG" # release the version - response=$(curl -sH "${AUTH_HEADER}" --data "{\"tag_name\":\"$RELEASE_TAG\", \"draft\":true, \"generate_release_notes\":true}" "$GH_API/releases") + response="$(curl -sH "${AUTH_HEADER}" --data "{\"tag_name\":\"$RELEASE_TAG\", \"draft\":true, \"generate_release_notes\":true}" "$GH_API/releases")" # extract id out of response - eval $(echo "$response" | grep -m 1 "id.:" | grep -w id | tr : = | tr -cd '[[:alnum:]]=') - [ "$id" ] || { echo "Error: Failed to get release id for tag: $tag"; echo "$response\n" >&2; exit 1; } + eval "$(echo "$response" | grep -m 1 "id.:" | grep -w id | tr : = | tr -cd '[[:alnum:]]=')" + [ "$id" ] || { echo "Error: Failed to get release id for tag: $RELEASE_TAG"; printf "%s\n" "$response" >&2; exit 1; } RELEASE_ID=$id fi echo "Building release OS=linux and ARCH=amd64 using GOAMD64 V2 for caminogo version $RELEASE_ID" -rm -rf $CAMINOGO_PATH/build/* +rm -rf "$CAMINOGO_PATH"/build/* DEST_PATH=$CAMINOGO_PATH/dist/ ARCHIVE_PATH=caminogo-$RELEASE_TAG # prepare a fresh dist folder -rm -rf $DEST_PATH && mkdir -p $DEST_PATH +rm -rf "$DEST_PATH" && mkdir -p "$DEST_PATH" # build executables into build dir -GOOS=linux GOARCH=amd64 GOAMD64=v2 $CAMINOGO_PATH/scripts/build.sh +GOOS=linux GOARCH=amd64 GOAMD64=v2 "$CAMINOGO_PATH"/scripts/build.sh # build tools into build dir -GOOS=linux GOARCH=amd64 GOAMD64=v2 $CAMINOGO_PATH/scripts/build_tools.sh +GOOS=linux GOARCH=amd64 GOAMD64=v2 "$CAMINOGO_PATH"/scripts/build_tools.sh # copy the license file -cp $CAMINOGO_PATH/LICENSE $CAMINOGO_PATH/build +cp "$CAMINOGO_PATH"/LICENSE "$CAMINOGO_PATH"/build # create the package echo "building artifact" ARTIFACT=$DEST_PATH/caminogo-linux-amd64-$RELEASE_TAG.tar.gz -tar -czf $ARTIFACT -C $CAMINOGO_PATH build --transform "s,build,$ARCHIVE_PATH," +tar -czf "$ARTIFACT" -C "$CAMINOGO_PATH" build --transform "s,build,$ARCHIVE_PATH," # publish the newly generated file -publish $ARTIFACT +publish "$ARTIFACT" diff --git a/scripts/build_test.sh b/scripts/build_test.sh index 0dc3135fd6fa..b05f3bc201a5 100755 --- a/scripts/build_test.sh +++ b/scripts/build_test.sh @@ -8,4 +8,5 @@ CAMINOGO_PATH=$( cd "$( dirname "${BASH_SOURCE[0]}" )"; cd .. && pwd ) source "$CAMINOGO_PATH"/scripts/constants.sh # Ensure execution of fixture unit tests under tests/ but exclude ginkgo tests in tests/e2e and tests/upgrade -go test -shuffle=on -race -timeout=${TIMEOUT:-"120s"} -coverprofile="coverage.out" -covermode="atomic" $(go list ./... | grep -v /mocks | grep -v proto | grep -v tests/e2e | grep -v tests/upgrade) +# shellcheck disable=SC2046 +go test -shuffle=on -race -timeout="${TIMEOUT:-120s}" -coverprofile="coverage.out" -covermode="atomic" $(go list ./... | grep -v /mocks | grep -v proto | grep -v tests/e2e | grep -v tests/upgrade) diff --git a/scripts/build_tools.sh b/scripts/build_tools.sh index ef9e375ea494..ab469c480c6e 100755 --- a/scripts/build_tools.sh +++ b/scripts/build_tools.sh @@ -13,11 +13,11 @@ CAMINOGO_PATH=$( cd "$( dirname "${BASH_SOURCE[0]}" )"; cd .. && pwd ) source "$CAMINOGO_PATH"/scripts/constants.sh echo "Downloading dependencies..." -(cd $CAMINOGO_PATH && go mod download) +(cd "$CAMINOGO_PATH" && go mod download) # Create tools directory tools_dir=$build_dir/tools/ -mkdir -p $tools_dir +mkdir -p "$tools_dir" echo "Building cert tool..." 
go build -ldflags="-s -w" -o "$tools_dir/cert" "$CAMINOGO_PATH/tools/cert/"*.go \ No newline at end of file diff --git a/scripts/constants.sh b/scripts/constants.sh index 2760462bc227..782b378c71e3 100755 --- a/scripts/constants.sh +++ b/scripts/constants.sh @@ -1,5 +1,10 @@ #!/usr/bin/env bash -# + +# Ignore warnings about variables appearing unused since this file is not the consumer of the variables it defines. +# shellcheck disable=SC2034 + +set -euo pipefail + # Use lower_case variables in the scripts and UPPER_CASE variables for override # Use the constants.sh for env overrides diff --git a/scripts/lint.sh b/scripts/lint.sh index 0e82af4a9940..110a719d38d6 100755 --- a/scripts/lint.sh +++ b/scripts/lint.sh @@ -36,31 +36,19 @@ function test_golangci_lint { golangci-lint run --config .golangci.yml } -# find_go_files [package] -# all go files except generated ones -function find_go_files { - local target="${1}" - go fmt -n "${target}" | grep -Eo "([^ ]*)$" | grep -vE "(\\.pb\\.go|\\.pb\\.gw.go)" -} - # automatically checks license headers -# to modify the file headers (if missing), remove "--check" flag -# TESTS='license_header' ADDLICENSE_FLAGS="-v" ./scripts/lint.sh -_addlicense_flags=${ADDLICENSE_FLAGS:-"--check -v"} +# to modify the file headers (if missing), remove "--verify" flag +# TESTS='license_header' ADDLICENSE_FLAGS="--debug" ./scripts/lint.sh +_addlicense_flags=${ADDLICENSE_FLAGS:-"--verify --debug"} function test_license_header { - go install -v github.com/google/addlicense@latest - local target="${1}" + go install -v github.com/palantir/go-license@v1.25.0 local files=() - while IFS= read -r line; do files+=("$line"); done < <(find_go_files "${target}") + while IFS= read -r line; do files+=("$line"); done < <(find . -type f -name '*.go' ! -name '*.pb.go' ! -name 'mock_*.go') - # ignore 3rd party code - addlicense \ - -f ./LICENSE.header \ + # shellcheck disable=SC2086 + go-license \ + --config=./header.yml \ ${_addlicense_flags} \ - --ignore 'utils/ip_test.go' \ - --ignore 'utils/logging/highlight.go' \ - --ignore 'utils/ulimit/ulimit_non_unix.go.go' \ - --ignore 'utils/ulimit/ulimit_unix.go' \ "${files[@]}" } diff --git a/scripts/mock.gen.sh b/scripts/mock.gen.sh index aa86bd21fff6..39eac0d9196d 100755 --- a/scripts/mock.gen.sh +++ b/scripts/mock.gen.sh @@ -18,40 +18,40 @@ outputted_files=() input="scripts/mocks.mockgen.txt" while IFS= read -r line do - IFS='=' read src_import_path interface_name output_path <<< "${line}" - package_name=$(basename $(dirname $output_path)) + IFS='=' read -r src_import_path interface_name output_path <<< "${line}" + package_name="$(basename "$(dirname "$output_path")")" echo "Generating ${output_path}..." - outputted_files+=(${output_path}) - mockgen -package=${package_name} -destination=${output_path} ${src_import_path} ${interface_name} - + outputted_files+=("${output_path}") + mockgen -package="${package_name}" -destination="${output_path}" "${src_import_path}" "${interface_name}" + done < "$input" # tuples of (source import path, comma-separated interface names to exclude, output file path) input="scripts/mocks.mockgen.source.txt" while IFS= read -r line do - IFS='=' read source_path exclude_interfaces output_path <<< "${line}" - package_name=$(basename $(dirname $output_path)) - outputted_files+=(${output_path}) + IFS='=' read -r source_path exclude_interfaces output_path <<< "${line}" + package_name=$(basename "$(dirname "$output_path")") + outputted_files+=("${output_path}") echo "Generating ${output_path}..." 
mockgen \ - -source=${source_path} \ - -destination=${output_path} \ - -package=${package_name} \ - -exclude_interfaces=${exclude_interfaces} - + -source="${source_path}" \ + -destination="${output_path}" \ + -package="${package_name}" \ + -exclude_interfaces="${exclude_interfaces}" + done < "$input" -all_generated_files=( $(grep -Rl 'Code generated by MockGen. DO NOT EDIT.') ) +mapfile -t all_generated_files < <(grep -Rl 'Code generated by MockGen. DO NOT EDIT.') # Exclude certain files outputted_files+=('scripts/mock.gen.sh') # This file -outputted_files+=('vms/components/avax/mock_transferable_out.go') # Embedded verify.IsState +outputted_files+=('vms/components/avax/mock_transferable_out.go') # Embedded verify.IsState outputted_files+=('vms/platformvm/fx/mock_fx.go') # Embedded verify.IsNotState outputted_files+=('vms/platformvm/state/mock_state.go') # Can't use 2 (or 3) different files to generate one mock file in source mode -diff_files=(`echo ${all_generated_files[@]} ${outputted_files[@]} | tr ' ' '\n' | sort | uniq -u`) +mapfile -t diff_files < <(echo "${all_generated_files[@]}" "${outputted_files[@]}" | tr ' ' '\n' | sort | uniq -u) if (( ${#diff_files[@]} )); then printf "\nFAILURE\n" diff --git a/scripts/protobuf_codegen.sh b/scripts/protobuf_codegen.sh index 291833a6e31a..7a02a72f8cdc 100755 --- a/scripts/protobuf_codegen.sh +++ b/scripts/protobuf_codegen.sh @@ -46,23 +46,19 @@ if [ -n "${1:-}" ]; then fi # move to api directory -cd $TARGET +cd "$TARGET" echo "Running protobuf fmt..." buf format -w echo "Running protobuf lint check..." -buf lint - -if [[ $? -ne 0 ]]; then +if ! buf lint; then echo "ERROR: protobuf linter failed" exit 1 fi echo "Re-generating protobuf..." -buf generate - -if [[ $? -ne 0 ]]; then +if ! buf generate; then echo "ERROR: protobuf generation failed" exit 1 fi diff --git a/scripts/shellcheck.sh b/scripts/shellcheck.sh new file mode 100755 index 000000000000..61fc09f90ba8 --- /dev/null +++ b/scripts/shellcheck.sh @@ -0,0 +1,39 @@ +#!/usr/bin/env bash + +set -euo pipefail + +VERSION="v0.9.0" + +function get_version { + local target_path=$1 + if command -v "${target_path}" > /dev/null; then + echo "v$("${target_path}" --version | grep version: | awk '{print $2}')" + fi +} + +REPO_ROOT=$( cd "$( dirname "${BASH_SOURCE[0]}" )"; cd .. 
&& pwd ) + +SYSTEM_VERSION="$(get_version shellcheck)" +if [[ "${SYSTEM_VERSION}" == "${VERSION}" ]]; then + SHELLCHECK=shellcheck +else + # Try to install a local version + SHELLCHECK="${REPO_ROOT}/bin/shellcheck" + LOCAL_VERSION="$(get_version "${SHELLCHECK}")" + if [[ -z "${LOCAL_VERSION}" || "${LOCAL_VERSION}" != "${VERSION}" ]]; then + if which sw_vers &> /dev/null; then + echo "on macos, only x86_64 binaries are available so rosetta is required" + echo "to avoid using rosetta, install via homebrew: brew install shellcheck" + DIST=darwin.x86_64 + else + # Linux - binaries for common arches *should* be available + arch="$(uname -i)" + DIST="linux.${arch}" + fi + curl -s -L "https://github.com/koalaman/shellcheck/releases/download/${VERSION}/shellcheck-${VERSION}.${DIST}.tar.xz" | tar Jxv -C /tmp > /dev/null + mkdir -p "$(dirname "${SHELLCHECK}")" + cp /tmp/shellcheck-"${VERSION}"/shellcheck "${SHELLCHECK}" + fi +fi + +find "${REPO_ROOT}" -name "*.sh" -type f -print0 | xargs -0 "${SHELLCHECK}" "${@}" diff --git a/scripts/test.sh b/scripts/test.sh index 11b22531853e..7da252702d35 100755 --- a/scripts/test.sh +++ b/scripts/test.sh @@ -13,4 +13,4 @@ export CGO_CFLAGS="-O -D__BLST_PORTABLE__" # clear error due to the default value change in go1.20. export CGO_ENABLED=1 -go test ${1-} -timeout="120s" -coverprofile="coverage.out" -covermode="atomic" $(go list ./... | grep -v /mocks | grep -v proto | grep -v tests) +go test "${1-}" -timeout="120s" -coverprofile="coverage.out" -covermode="atomic" "$(go list ./... | grep -v /mocks | grep -v proto | grep -v tests)" diff --git a/scripts/tests.e2e.existing.sh b/scripts/tests.e2e.existing.sh index 2cdb05b2f48e..98f5e3f79482 100755 --- a/scripts/tests.e2e.existing.sh +++ b/scripts/tests.e2e.existing.sh @@ -22,7 +22,8 @@ fi # Ensure an absolute path to avoid dependency on the working directory # of script execution. 
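# Annotation: assigning and exporting in separate statements (as below) avoids
# shellcheck SC2155, where `export VAR="$(cmd)"` would mask the command's
# exit status.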
-export CAMINOGO_BIN_PATH="$(realpath ${CAMINOGO_BIN_PATH:-./build/caminogo})" +CAMINOGO_BIN_PATH="$(realpath "${CAMINOGO_BIN_PATH:-./build/caminogo}")" +export CAMINOGO_BIN_PATH # Provide visual separation between testing and setup/teardown function print_separator { @@ -47,7 +48,8 @@ print_separator # Determine the network configuration path from the latest symlink LATEST_SYMLINK_PATH="${HOME}/.tmpnet/networks/latest" if [[ -h "${LATEST_SYMLINK_PATH}" ]]; then - export TMPNET_NETWORK_DIR="$(realpath ${LATEST_SYMLINK_PATH})" + TMPNET_NETWORK_DIR="$(realpath "${LATEST_SYMLINK_PATH}")" + export TMPNET_NETWORK_DIR else echo "failed to find configuration path: ${LATEST_SYMLINK_PATH} symlink not found" exit 255 diff --git a/scripts/tests.e2e.sh b/scripts/tests.e2e.sh index 8fdda8187745..89d37e9942cc 100755 --- a/scripts/tests.e2e.sh +++ b/scripts/tests.e2e.sh @@ -34,7 +34,7 @@ ACK_GINKGO_RC=true ginkgo build ./tests/e2e if [[ -n "${E2E_USE_EXISTING_NETWORK:-}" && -n "${TMPNET_NETWORK_DIR:-}" ]]; then E2E_ARGS="--use-existing-network" else - CAMINOGO_BIN_PATH="$(realpath ${CAMINOGO_BIN_PATH:-./build/caminogo})" + CAMINOGO_BIN_PATH="$(realpath "${CAMINOGO_BIN_PATH:-./build/caminogo}")" E2E_ARGS="--avalanchego-path=${CAMINOGO_BIN_PATH}" fi @@ -60,4 +60,4 @@ fi ################################# # - Execute in random order to identify unwanted dependency -ginkgo -p -v --randomize-all ./tests/e2e/e2e.test -- ${E2E_ARGS} "${@}" +ginkgo ${GINKGO_ARGS} -v --randomize-all ./tests/e2e/e2e.test -- "${E2E_ARGS[@]}" "${@}" diff --git a/scripts/tests.upgrade.sh b/scripts/tests.upgrade.sh index 49c402da29a1..c85940473e78 100755 --- a/scripts/tests.upgrade.sh +++ b/scripts/tests.upgrade.sh @@ -4,21 +4,21 @@ set -euo pipefail # e.g., # ./scripts/tests.upgrade.sh # Use default version -# ./scripts/tests.upgrade.sh 1.10.18 # Specify a version -# AVALANCHEGO_PATH=./path/to/avalanchego ./scripts/tests.upgrade.sh 1.10.18 # Customization of avalanchego path +# ./scripts/tests.upgrade.sh 1.1.18 # Specify a version +# CAMINOGO_BIN_PATH=./path/to/caminogo ./scripts/tests.upgrade.sh 1.1.18 # Customization of caminogo path if ! [[ "$0" =~ scripts/tests.upgrade.sh ]]; then echo "must be run from repository root" exit 255 fi -# The AvalancheGo local network does not support long-lived +# The CaminoGo local network does not support long-lived # backwards-compatible networks. When a breaking change is made to the # local network, this flag must be updated to the last compatible # version with the latest code. # -# v1.10.17 includes the AWM activation on the C-Chain local network -# and the inclusion of BLS Public Keys in the network genesis. -DEFAULT_VERSION="1.10.17" +# v1.1.18 includes restrictions on ports sent over the p2p network along with +# proposervm and P-chain rule changes on the local network. 
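# Annotation: when making such a breaking change, bump DEFAULT_VERSION to the
# most recent release the new code can still sync with; otherwise this script
# exercises an upgrade path that is already known to be incompatible.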
+DEFAULT_VERSION="1.1.18" VERSION="${1:-${DEFAULT_VERSION}}" if [[ -z "${VERSION}" ]]; then @@ -27,35 +27,35 @@ if [[ -z "${VERSION}" ]]; then exit 255 fi -AVALANCHEGO_PATH="$(realpath ${AVALANCHEGO_PATH:-./build/avalanchego})" +CAMINOGO_BIN_PATH="$(realpath "${CAMINOGO_BIN_PATH:-./build/caminogo}")" ################################# -# download avalanchego -# https://github.com/ava-labs/avalanchego/releases +# download caminogo +# https://github.com/ava-labs/caminogo/releases GOARCH=$(go env GOARCH) GOOS=$(go env GOOS) -DOWNLOAD_URL=https://github.com/ava-labs/avalanchego/releases/download/v${VERSION}/avalanchego-linux-${GOARCH}-v${VERSION}.tar.gz -DOWNLOAD_PATH=/tmp/avalanchego.tar.gz +DOWNLOAD_URL=https://github.com/ava-labs/caminogo/releases/download/v${VERSION}/caminogo-linux-${GOARCH}-v${VERSION}.tar.gz +DOWNLOAD_PATH=/tmp/caminogo.tar.gz if [[ ${GOOS} == "darwin" ]]; then - DOWNLOAD_URL=https://github.com/ava-labs/avalanchego/releases/download/v${VERSION}/avalanchego-macos-v${VERSION}.zip - DOWNLOAD_PATH=/tmp/avalanchego.zip + DOWNLOAD_URL=https://github.com/ava-labs/caminogo/releases/download/v${VERSION}/caminogo-macos-v${VERSION}.zip + DOWNLOAD_PATH=/tmp/caminogo.zip fi rm -f ${DOWNLOAD_PATH} -rm -rf /tmp/avalanchego-v${VERSION} -rm -rf /tmp/avalanchego-build +rm -rf "/tmp/caminogo-v${VERSION}" +rm -rf /tmp/caminogo-build -echo "downloading avalanchego ${VERSION} at ${DOWNLOAD_URL}" -curl -L ${DOWNLOAD_URL} -o ${DOWNLOAD_PATH} +echo "downloading caminogo ${VERSION} at ${DOWNLOAD_URL}" +curl -L "${DOWNLOAD_URL}" -o "${DOWNLOAD_PATH}" -echo "extracting downloaded avalanchego" +echo "extracting downloaded caminogo" if [[ ${GOOS} == "linux" ]]; then tar xzvf ${DOWNLOAD_PATH} -C /tmp elif [[ ${GOOS} == "darwin" ]]; then - unzip ${DOWNLOAD_PATH} -d /tmp/avalanchego-build - mv /tmp/avalanchego-build/build /tmp/avalanchego-v${VERSION} + unzip ${DOWNLOAD_PATH} -d /tmp/caminogo-build + mv /tmp/caminogo-build/build "/tmp/caminogo-v${VERSION}" fi -find /tmp/avalanchego-v${VERSION} +find "/tmp/caminogo-v${VERSION}" # Sourcing constants.sh ensures that the necessary CGO flags are set to # build the portable version of BLST. Without this, ginkgo may fail to @@ -72,8 +72,8 @@ ACK_GINKGO_RC=true ginkgo build ./tests/upgrade ################################# # By default, it runs all upgrade test cases! -echo "running upgrade tests against the local cluster with ${AVALANCHEGO_PATH}" +echo "running upgrade tests against the local cluster with ${CAMINOGO_BIN_PATH}" ./tests/upgrade/upgrade.test \ --ginkgo.v \ - --avalanchego-path=/tmp/avalanchego-v${VERSION}/avalanchego \ - --avalanchego-path-to-upgrade-to=${AVALANCHEGO_PATH} + --avalanchego-path="/tmp/caminogo-v${VERSION}/caminogo" \ + --avalanchego-path-to-upgrade-to="${CAMINOGO_BIN_PATH}" diff --git a/snow/consensus/snowman/oracle_block.go b/snow/consensus/snowman/oracle_block.go index 4688927e566e..2ca81680ae78 100644 --- a/snow/consensus/snowman/oracle_block.go +++ b/snow/consensus/snowman/oracle_block.go @@ -18,6 +18,5 @@ var ErrNotOracle = errors.New("block isn't an oracle") type OracleBlock interface { // Options returns the possible children of this block in the order this // validator prefers the blocks. - // Options is guaranteed to only be called on a verified block. 
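	//
	// Annotation, a minimal caller sketch (hypothetical; ErrNotOracle is the
	// sentinel defined above):
	//
	//	options, err := blk.Options(ctx)
	//	if errors.Is(err, ErrNotOracle) {
	//		// blk is not an oracle block; there are no options to choose.
	//	}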
Options(context.Context) ([2]Block, error) } diff --git a/snow/engine/common/queue/state.go b/snow/engine/common/queue/state.go index 68ba67ce38c1..76bce7c838c1 100644 --- a/snow/engine/common/queue/state.go +++ b/snow/engine/common/queue/state.go @@ -16,6 +16,7 @@ import ( "github.com/ava-labs/avalanchego/database/prefixdb" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils" + "github.com/ava-labs/avalanchego/utils/metric" "github.com/ava-labs/avalanchego/utils/set" ) @@ -61,7 +62,7 @@ func newState( metricsNamespace string, metricsRegisterer prometheus.Registerer, ) (*state, error) { - jobsCacheMetricsNamespace := fmt.Sprintf("%s_jobs_cache", metricsNamespace) + jobsCacheMetricsNamespace := metric.AppendNamespace(metricsNamespace, "jobs_cache") jobsCache, err := metercacher.New[ids.ID, Job]( jobsCacheMetricsNamespace, metricsRegisterer, diff --git a/snow/engine/snowman/transitive.go b/snow/engine/snowman/transitive.go index bf76970b5295..6fe2b05351d6 100644 --- a/snow/engine/snowman/transitive.go +++ b/snow/engine/snowman/transitive.go @@ -1121,12 +1121,14 @@ func (t *Transitive) addUnverifiedBlockToConsensus( issuedMetric prometheus.Counter, ) (bool, error) { blkID := blk.ID() + blkHeight := blk.Height() // make sure this block is valid if err := blk.Verify(ctx); err != nil { t.Ctx.Log.Debug("block verification failed", zap.Stringer("nodeID", nodeID), zap.Stringer("blkID", blkID), + zap.Uint64("height", blkHeight), zap.Error(err), ) @@ -1143,6 +1145,7 @@ func (t *Transitive) addUnverifiedBlockToConsensus( t.Ctx.Log.Verbo("adding block to consensus", zap.Stringer("nodeID", nodeID), zap.Stringer("blkID", blkID), + zap.Uint64("height", blkHeight), ) return true, t.Consensus.Add(ctx, &memoryBlock{ Block: blk, diff --git a/snow/networking/handler/message_queue_metrics.go b/snow/networking/handler/message_queue_metrics.go index 429295ae04cb..20bc4c7766f9 100644 --- a/snow/networking/handler/message_queue_metrics.go +++ b/snow/networking/handler/message_queue_metrics.go @@ -9,6 +9,7 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/ava-labs/avalanchego/message" + "github.com/ava-labs/avalanchego/utils/metric" "github.com/ava-labs/avalanchego/utils/wrappers" ) @@ -24,7 +25,7 @@ func (m *messageQueueMetrics) initialize( metricsRegisterer prometheus.Registerer, ops []message.Op, ) error { - namespace := fmt.Sprintf("%s_%s", metricsNamespace, "unprocessed_msgs") + namespace := metric.AppendNamespace(metricsNamespace, "unprocessed_msgs") m.len = prometheus.NewGauge(prometheus.GaugeOpts{ Namespace: namespace, Name: "len", diff --git a/snow/validators/gvalidators/validator_state_test.go b/snow/validators/gvalidators/validator_state_test.go index 9b6e692d8645..0dbf9ebe8952 100644 --- a/snow/validators/gvalidators/validator_state_test.go +++ b/snow/validators/gvalidators/validator_state_test.go @@ -24,9 +24,8 @@ import ( var errCustom = errors.New("custom") type testState struct { - client *Client - server *validators.MockState - closeFn func() + client *Client + server *validators.MockState } func setupState(t testing.TB, ctrl *gomock.Controller) *testState { @@ -52,11 +51,13 @@ func setupState(t testing.TB, ctrl *gomock.Controller) *testState { require.NoError(err) state.client = NewClient(pb.NewValidatorStateClient(conn)) - state.closeFn = func() { + + t.Cleanup(func() { serverCloser.Stop() _ = conn.Close() _ = listener.Close() - } + }) + return state } @@ -65,7 +66,6 @@ func TestGetMinimumHeight(t *testing.T) { ctrl := gomock.NewController(t) state 
:= setupState(t, ctrl) - defer state.closeFn() // Happy path expectedHeight := uint64(1337) @@ -88,7 +88,6 @@ func TestGetCurrentHeight(t *testing.T) { ctrl := gomock.NewController(t) state := setupState(t, ctrl) - defer state.closeFn() // Happy path expectedHeight := uint64(1337) @@ -111,7 +110,6 @@ func TestGetSubnetID(t *testing.T) { ctrl := gomock.NewController(t) state := setupState(t, ctrl) - defer state.closeFn() // Happy path chainID := ids.GenerateTestID() @@ -135,7 +133,6 @@ func TestGetValidatorSet(t *testing.T) { ctrl := gomock.NewController(t) state := setupState(t, ctrl) - defer state.closeFn() // Happy path sk0, err := bls.NewSecretKey() @@ -209,9 +206,6 @@ func benchmarkGetValidatorSet(b *testing.B, vs map[ids.NodeID]*validators.GetVal require := require.New(b) ctrl := gomock.NewController(b) state := setupState(b, ctrl) - defer func() { - state.closeFn() - }() height := uint64(1337) subnetID := ids.GenerateTestID() diff --git a/tests/e2e/p/validator_sets.go b/tests/e2e/p/validator_sets.go new file mode 100644 index 000000000000..84df98979938 --- /dev/null +++ b/tests/e2e/p/validator_sets.go @@ -0,0 +1,113 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package p + +import ( + "fmt" + "time" + + ginkgo "github.com/onsi/ginkgo/v2" + + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/genesis" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow/validators" + "github.com/ava-labs/avalanchego/tests" + "github.com/ava-labs/avalanchego/tests/fixture/e2e" + "github.com/ava-labs/avalanchego/tests/fixture/tmpnet" + "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" + "github.com/ava-labs/avalanchego/vms/platformvm" + "github.com/ava-labs/avalanchego/vms/platformvm/txs" + "github.com/ava-labs/avalanchego/vms/secp256k1fx" +) + +var _ = e2e.DescribePChain("[Validator Sets]", func() { + require := require.New(ginkgo.GinkgoT()) + + ginkgo.It("should be identical for every height for all nodes in the network", func() { + network := e2e.Env.GetNetwork() + + ginkgo.By("creating wallet with a funded key to source delegated funds from") + keychain := e2e.Env.NewKeychain(1) + nodeURI := e2e.Env.GetRandomNodeURI() + baseWallet := e2e.NewWallet(keychain, nodeURI) + pWallet := baseWallet.P() + + const delegatorCount = 15 + ginkgo.By(fmt.Sprintf("adding %d delegators", delegatorCount), func() { + rewardKey, err := secp256k1.NewPrivateKey() + require.NoError(err) + avaxAssetID := pWallet.AVAXAssetID() + startTime := time.Now().Add(tmpnet.DefaultValidatorStartTimeDiff) + endTime := startTime.Add(time.Second * 360) + // This is the default flag value for MinDelegatorStake. 
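			// Annotation: staking the minimum keeps the funds drawn from the
			// test keychain small while still adding weight to the validator
			// set at each delegator's activation height.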
+ weight := genesis.LocalParams.StakingConfig.MinDelegatorStake + + for i := 0; i < delegatorCount; i++ { + _, err = pWallet.IssueAddPermissionlessDelegatorTx( + &txs.SubnetValidator{ + Validator: txs.Validator{ + NodeID: nodeURI.NodeID, + Start: uint64(startTime.Unix()), + End: uint64(endTime.Unix()), + Wght: weight, + }, + Subnet: constants.PrimaryNetworkID, + }, + avaxAssetID, + &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{rewardKey.Address()}, + }, + e2e.WithDefaultContext(), + ) + require.NoError(err) + } + }) + + ginkgo.By("getting the current P-Chain height from the wallet") + currentPChainHeight, err := platformvm.NewClient(nodeURI.URI).GetHeight(e2e.DefaultContext()) + require.NoError(err) + + ginkgo.By("checking that validator sets are equal across all heights for all nodes", func() { + pvmClients := make([]platformvm.Client, len(e2e.Env.URIs)) + for i, nodeURI := range e2e.Env.URIs { + pvmClients[i] = platformvm.NewClient(nodeURI.URI) + // Ensure that the height of the target node is at least the expected height + e2e.Eventually( + func() bool { + pChainHeight, err := pvmClients[i].GetHeight(e2e.DefaultContext()) + require.NoError(err) + return pChainHeight >= currentPChainHeight + }, + e2e.DefaultTimeout, + e2e.DefaultPollingInterval, + fmt.Sprintf("failed to see expected height %d for %s before timeout", currentPChainHeight, nodeURI.NodeID), + ) + } + + for height := uint64(0); height <= currentPChainHeight; height++ { + tests.Outf(" checked validator sets for height %d\n", height) + var observedValidatorSet map[ids.NodeID]*validators.GetValidatorOutput + for _, pvmClient := range pvmClients { + validatorSet, err := pvmClient.GetValidatorsAt( + e2e.DefaultContext(), + constants.PrimaryNetworkID, + height, + ) + require.NoError(err) + if observedValidatorSet == nil { + observedValidatorSet = validatorSet + continue + } + require.Equal(observedValidatorSet, validatorSet) + } + } + }) + + e2e.CheckBootstrapIsPossible(network) + }) +}) diff --git a/utils/bloom/metrics.go b/utils/bloom/metrics.go new file mode 100644 index 000000000000..7e33edc5c069 --- /dev/null +++ b/utils/bloom/metrics.go @@ -0,0 +1,70 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package bloom + +import ( + "github.com/prometheus/client_golang/prometheus" + + "github.com/ava-labs/avalanchego/utils" +) + +// Metrics is a collection of commonly useful metrics when using a long-lived +// bloom filter. 
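// Annotation: the gauges mirror the filter's current shape (additions
// performed, number of hash seeds, entry bytes, and the reset threshold),
// while ResetCount counts how many times the filter has been rebuilt via
// Reset.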
+type Metrics struct {
+	Count      prometheus.Gauge
+	NumHashes  prometheus.Gauge
+	NumEntries prometheus.Gauge
+	MaxCount   prometheus.Gauge
+	ResetCount prometheus.Counter
+}
+
+func NewMetrics(
+	namespace string,
+	registerer prometheus.Registerer,
+) (*Metrics, error) {
+	m := &Metrics{
+		Count: prometheus.NewGauge(prometheus.GaugeOpts{
+			Namespace: namespace,
+			Name:      "count",
+			Help:      "Number of additions that have been performed to the bloom",
+		}),
+		NumHashes: prometheus.NewGauge(prometheus.GaugeOpts{
+			Namespace: namespace,
+			Name:      "hashes",
+			Help:      "Number of hashes in the bloom",
+		}),
+		NumEntries: prometheus.NewGauge(prometheus.GaugeOpts{
+			Namespace: namespace,
+			Name:      "entries",
+			Help:      "Number of bytes allocated to slots in the bloom",
+		}),
+		MaxCount: prometheus.NewGauge(prometheus.GaugeOpts{
+			Namespace: namespace,
+			Name:      "max_count",
+			Help:      "Maximum number of additions that should be performed to the bloom before resetting",
+		}),
+		ResetCount: prometheus.NewCounter(prometheus.CounterOpts{
+			Namespace: namespace,
+			Name:      "reset_count",
+			Help:      "Number of times the bloom has been reset",
+		}),
+	}
+	err := utils.Err(
+		registerer.Register(m.Count),
+		registerer.Register(m.NumHashes),
+		registerer.Register(m.NumEntries),
+		registerer.Register(m.MaxCount),
+		registerer.Register(m.ResetCount),
+	)
+	return m, err
+}
+
+// Reset the metrics to align with the provided bloom filter and max count.
+func (m *Metrics) Reset(newFilter *Filter, maxCount int) {
+	m.Count.Set(float64(newFilter.Count()))
+	m.NumHashes.Set(float64(len(newFilter.hashSeeds)))
+	m.NumEntries.Set(float64(len(newFilter.entries)))
+	m.MaxCount.Set(float64(maxCount))
+	m.ResetCount.Inc()
+}
diff --git a/utils/metric/namespace.go b/utils/metric/namespace.go
new file mode 100644
index 000000000000..4371bb1dc077
--- /dev/null
+++ b/utils/metric/namespace.go
@@ -0,0 +1,17 @@
+// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved.
+// See the file LICENSE for licensing terms.
+
+package metric
+
+import "strings"
+
+func AppendNamespace(prefix, suffix string) string {
+	switch {
+	case len(prefix) == 0:
+		return suffix
+	case len(suffix) == 0:
+		return prefix
+	default:
+		return strings.Join([]string{prefix, suffix}, "_")
+	}
+}
diff --git a/utils/metric/namespace_test.go b/utils/metric/namespace_test.go
new file mode 100644
index 000000000000..b1daf8ec11b1
--- /dev/null
+++ b/utils/metric/namespace_test.go
@@ -0,0 +1,46 @@
+// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved.
+// See the file LICENSE for licensing terms.
+ +package metric + +import ( + "strings" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestAppendNamespace(t *testing.T) { + tests := []struct { + prefix string + suffix string + expected string + }{ + { + prefix: "avalanchego", + suffix: "isgreat", + expected: "avalanchego_isgreat", + }, + { + prefix: "", + suffix: "sucks", + expected: "sucks", + }, + { + prefix: "sucks", + suffix: "", + expected: "sucks", + }, + { + prefix: "", + suffix: "", + expected: "", + }, + } + for _, test := range tests { + t.Run(strings.Join([]string{test.prefix, test.suffix}, "_"), func(t *testing.T) { + namespace := AppendNamespace(test.prefix, test.suffix) + require.Equal(t, test.expected, namespace) + }) + } +} diff --git a/version/compatibility.json b/version/compatibility.json index 620bcb57215d..88be72adc7c0 100644 --- a/version/compatibility.json +++ b/version/compatibility.json @@ -1,5 +1,6 @@ { "31": [ + "v1.1.19", "v1.1.18" ], "30": [ diff --git a/version/constants.go b/version/constants.go index 757e4339158a..57b2a69b677f 100644 --- a/version/constants.go +++ b/version/constants.go @@ -40,7 +40,7 @@ var ( Current = &Semantic{ Major: 1, Minor: 1, - Patch: 18, + Patch: 19, } CurrentApp = &Application{ Name: Client, @@ -84,6 +84,16 @@ var ( DefaultUpgradeTime = time.Date(2020, time.December, 5, 5, 0, 0, 0, time.UTC) + ApricotPhase1Times = map[uint32]time.Time{ + constants.MainnetID: time.Date(2021, time.March, 31, 14, 0, 0, 0, time.UTC), + constants.FujiID: time.Date(2021, time.March, 26, 14, 0, 0, 0, time.UTC), + } + + ApricotPhase2Times = map[uint32]time.Time{ + constants.MainnetID: time.Date(2021, time.May, 10, 11, 0, 0, 0, time.UTC), + constants.FujiID: time.Date(2021, time.May, 5, 14, 0, 0, 0, time.UTC), + } + ApricotPhase3Times = map[uint32]time.Time{ constants.MainnetID: time.Date(2021, time.August, 24, 14, 0, 0, 0, time.UTC), constants.FujiID: time.Date(2021, time.August, 16, 19, 0, 0, 0, time.UTC), @@ -103,6 +113,11 @@ var ( constants.FujiID: time.Date(2021, time.November, 24, 15, 0, 0, 0, time.UTC), } + ApricotPhasePre6Times = map[uint32]time.Time{ + constants.MainnetID: time.Date(2022, time.September, 5, 1, 30, 0, 0, time.UTC), + constants.FujiID: time.Date(2022, time.September, 6, 20, 0, 0, 0, time.UTC), + } + SunrisePhase0Times = map[uint32]time.Time{} SunrisePhase0DefaultTime = time.Date(2022, time.May, 16, 8, 0, 0, 0, time.UTC) @@ -111,6 +126,11 @@ var ( constants.FujiID: time.Date(2022, time.September, 6, 20, 0, 0, 0, time.UTC), } + ApricotPhasePost6Times = map[uint32]time.Time{ + constants.MainnetID: time.Date(2022, time.September, 7, 3, 0, 0, 0, time.UTC), + constants.FujiID: time.Date(2022, time.September, 7, 6, 0, 0, 0, time.UTC), + } + BanffTimes = map[uint32]time.Time{ constants.MainnetID: time.Date(2022, time.October, 18, 16, 0, 0, 0, time.UTC), constants.FujiID: time.Date(2022, time.October, 3, 14, 0, 0, 0, time.UTC), @@ -189,6 +209,20 @@ func init() { } } +func GetApricotPhase1Time(networkID uint32) time.Time { + if upgradeTime, exists := ApricotPhase1Times[networkID]; exists { + return upgradeTime + } + return DefaultUpgradeTime +} + +func GetApricotPhase2Time(networkID uint32) time.Time { + if upgradeTime, exists := ApricotPhase2Times[networkID]; exists { + return upgradeTime + } + return DefaultUpgradeTime +} + func GetApricotPhase3Time(networkID uint32) time.Time { if upgradeTime, exists := ApricotPhase3Times[networkID]; exists { return upgradeTime @@ -210,6 +244,13 @@ func GetApricotPhase5Time(networkID uint32) time.Time { return DefaultUpgradeTime } 
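// Annotation: each Get*Time helper below follows the same lookup pattern:
// return the network-specific activation time when one is configured, falling
// back to DefaultUpgradeTime (e.g. for local and custom networks) otherwise.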
+func GetApricotPhasePre6Time(networkID uint32) time.Time { + if upgradeTime, exists := ApricotPhasePre6Times[networkID]; exists { + return upgradeTime + } + return DefaultUpgradeTime +} + func GetSunrisePhase0Time(networkID uint32) time.Time { if upgradeTime, exists := SunrisePhase0Times[networkID]; exists { return upgradeTime @@ -224,6 +265,13 @@ func GetApricotPhase6Time(networkID uint32) time.Time { return DefaultUpgradeTime } +func GetApricotPhasePost6Time(networkID uint32) time.Time { + if upgradeTime, exists := ApricotPhasePost6Times[networkID]; exists { + return upgradeTime + } + return DefaultUpgradeTime +} + func GetBanffTime(networkID uint32) time.Time { if upgradeTime, exists := BanffTimes[networkID]; exists { return upgradeTime diff --git a/vms/avm/network/gossip.go b/vms/avm/network/gossip.go index adfe9aa6831f..0876f122c660 100644 --- a/vms/avm/network/gossip.go +++ b/vms/avm/network/gossip.go @@ -9,6 +9,8 @@ import ( "sync" "time" + "github.com/prometheus/client_golang/prometheus" + "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/network/p2p" "github.com/ava-labs/avalanchego/network/p2p/gossip" @@ -65,6 +67,7 @@ func (g *txParser) UnmarshalGossip(bytes []byte) (*txs.Tx, error) { func newGossipMempool( mempool mempool.Mempool, + registerer prometheus.Registerer, log logging.Logger, txVerifier TxVerifier, parser txs.Parser, @@ -72,7 +75,7 @@ func newGossipMempool( targetFalsePositiveProbability, resetFalsePositiveProbability float64, ) (*gossipMempool, error) { - bloom, err := gossip.NewBloomFilter(minTargetElements, targetFalsePositiveProbability, resetFalsePositiveProbability) + bloom, err := gossip.NewBloomFilter(registerer, "mempool_bloom_filter", minTargetElements, targetFalsePositiveProbability, resetFalsePositiveProbability) return &gossipMempool{ Mempool: mempool, log: log, diff --git a/vms/avm/network/gossip_test.go b/vms/avm/network/gossip_test.go index 8f9df5ac7c86..2eb00dad15dc 100644 --- a/vms/avm/network/gossip_test.go +++ b/vms/avm/network/gossip_test.go @@ -71,6 +71,7 @@ func TestGossipMempoolAdd(t *testing.T) { mempool, err := newGossipMempool( baseMempool, + metrics, logging.NoLog{}, testVerifier{}, parser, @@ -107,6 +108,7 @@ func TestGossipMempoolAddVerified(t *testing.T) { mempool, err := newGossipMempool( baseMempool, + metrics, logging.NoLog{}, testVerifier{ err: errTest, // We shouldn't be attempting to verify the tx in this flow diff --git a/vms/avm/network/network.go b/vms/avm/network/network.go index fbeed92a95a1..d88599d928a4 100644 --- a/vms/avm/network/network.go +++ b/vms/avm/network/network.go @@ -90,6 +90,7 @@ func New( gossipMempool, err := newGossipMempool( mempool, + registerer, ctx.Log, txVerifier, parser, diff --git a/vms/components/avax/base_tx.go b/vms/components/avax/base_tx.go index 10561ae0ad52..5afed5f3a16d 100644 --- a/vms/components/avax/base_tx.go +++ b/vms/components/avax/base_tx.go @@ -65,3 +65,21 @@ func (t *BaseTx) Verify(ctx *snow.Context) error { return nil } } + +func VerifyMemoFieldLength(memo types.JSONByteSlice, isDurangoActive bool) error { + if !isDurangoActive { + // SyntacticVerify validates this field pre-Durango + return nil + } + + if len(memo) != 0 { + return fmt.Errorf( + "%w: %d > %d", + ErrMemoTooLarge, + len(memo), + 0, + ) + } + + return nil +} diff --git a/vms/fx/factory.go b/vms/fx/factory.go new file mode 100644 index 000000000000..a2c957a5bf60 --- /dev/null +++ b/vms/fx/factory.go @@ -0,0 +1,9 @@ +// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. 
+// See the file LICENSE for licensing terms. + +package fx + +// Factory returns an instance of a feature extension +type Factory interface { + New() any +} diff --git a/vms/nftfx/factory.go b/vms/nftfx/factory.go index a111bc0ed7e2..c8be03661bb2 100644 --- a/vms/nftfx/factory.go +++ b/vms/nftfx/factory.go @@ -5,12 +5,13 @@ package nftfx import ( "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/logging" - "github.com/ava-labs/avalanchego/vms" + "github.com/ava-labs/avalanchego/vms/fx" ) +const Name = "nftfx" + var ( - _ vms.Factory = (*Factory)(nil) + _ fx.Factory = (*Factory)(nil) // ID that this Fx uses when labeled ID = ids.ID{'n', 'f', 't', 'f', 'x'} @@ -18,6 +19,6 @@ var ( type Factory struct{} -func (*Factory) New(logging.Logger) (interface{}, error) { - return &Fx{}, nil +func (*Factory) New() any { + return &Fx{} } diff --git a/vms/nftfx/factory_test.go b/vms/nftfx/factory_test.go index 6b4fba9861ac..6b5ecafbeece 100644 --- a/vms/nftfx/factory_test.go +++ b/vms/nftfx/factory_test.go @@ -7,15 +7,11 @@ import ( "testing" "github.com/stretchr/testify/require" - - "github.com/ava-labs/avalanchego/utils/logging" ) func TestFactory(t *testing.T) { require := require.New(t) factory := Factory{} - fx, err := factory.New(logging.NoLog{}) - require.NoError(err) - require.NotNil(fx) + require.Equal(&Fx{}, factory.New()) } diff --git a/vms/platformvm/block/builder/builder.go b/vms/platformvm/block/builder/builder.go index db62b6552b06..eb7a6ab86945 100644 --- a/vms/platformvm/block/builder/builder.go +++ b/vms/platformvm/block/builder/builder.go @@ -363,12 +363,9 @@ func packBlockTxs( return nil, err } - changes, err := txexecutor.AdvanceTimeTo(backend, stateDiff, timestamp) - if err != nil { + if _, err := txexecutor.AdvanceTimeTo(backend, stateDiff, timestamp); err != nil { return nil, err } - changes.Apply(stateDiff) - stateDiff.SetTimestamp(timestamp) var ( blockTxs []*txs.Tx diff --git a/vms/platformvm/block/builder/builder_test.go b/vms/platformvm/block/builder/builder_test.go index e25ffb68e039..893eaae718af 100644 --- a/vms/platformvm/block/builder/builder_test.go +++ b/vms/platformvm/block/builder/builder_test.go @@ -43,10 +43,7 @@ func TestBuildBlockBasic(t *testing.T) { env := newEnvironment(t) env.ctx.Lock.Lock() - defer func() { - require.NoError(shutdownEnvironment(env)) - env.ctx.Lock.Unlock() - }() + defer env.ctx.Lock.Unlock() // Create a valid transaction tx, err := env.txBuilder.NewCreateChainTx( @@ -88,10 +85,7 @@ func TestBuildBlockDoesNotBuildWithEmptyMempool(t *testing.T) { env := newEnvironment(t) env.ctx.Lock.Lock() - defer func() { - require.NoError(shutdownEnvironment(env)) - env.ctx.Lock.Unlock() - }() + defer env.ctx.Lock.Unlock() tx, exists := env.mempool.Peek() require.False(exists) @@ -108,10 +102,7 @@ func TestBuildBlockShouldReward(t *testing.T) { env := newEnvironment(t) env.ctx.Lock.Lock() - defer func() { - require.NoError(shutdownEnvironment(env)) - env.ctx.Lock.Unlock() - }() + defer env.ctx.Lock.Unlock() var ( now = env.backend.Clk.Time() @@ -208,10 +199,7 @@ func TestBuildBlockAdvanceTime(t *testing.T) { env := newEnvironment(t) env.ctx.Lock.Lock() - defer func() { - require.NoError(shutdownEnvironment(env)) - env.ctx.Lock.Unlock() - }() + defer env.ctx.Lock.Unlock() var ( now = env.backend.Clk.Time() @@ -244,10 +232,7 @@ func TestBuildBlockForceAdvanceTime(t *testing.T) { env := newEnvironment(t) env.ctx.Lock.Lock() - defer func() { - require.NoError(shutdownEnvironment(env)) - env.ctx.Lock.Unlock() - }() + defer 
env.ctx.Lock.Unlock() // Create a valid transaction tx, err := env.txBuilder.NewCreateChainTx( @@ -301,10 +286,7 @@ func TestBuildBlockDropExpiredStakerTxs(t *testing.T) { env := newEnvironment(t) env.ctx.Lock.Lock() - defer func() { - require.NoError(shutdownEnvironment(env)) - env.ctx.Lock.Unlock() - }() + defer env.ctx.Lock.Unlock() // The [StartTime] in a staker tx is only validated pre-Durango. // TODO: Delete this test post-Durango activation. @@ -407,10 +389,7 @@ func TestBuildBlockInvalidStakingDurations(t *testing.T) { env := newEnvironment(t) env.ctx.Lock.Lock() - defer func() { - require.NoError(shutdownEnvironment(env)) - env.ctx.Lock.Unlock() - }() + defer env.ctx.Lock.Unlock() // Post-Durango, [StartTime] is no longer validated. Staking durations are // based on the current chain timestamp and must be validated. @@ -486,11 +465,7 @@ func TestPreviouslyDroppedTxsCannotBeReAddedToMempool(t *testing.T) { env := newEnvironment(t) env.ctx.Lock.Lock() - defer func() { - env.ctx.Lock.Lock() - require.NoError(shutdownEnvironment(env)) - env.ctx.Lock.Unlock() - }() + defer env.ctx.Lock.Unlock() // Create a valid transaction tx, err := env.txBuilder.NewCreateChainTx( @@ -519,6 +494,7 @@ func TestPreviouslyDroppedTxsCannotBeReAddedToMempool(t *testing.T) { env.ctx.Lock.Unlock() err = env.network.IssueTx(context.Background(), tx) require.ErrorIs(err, errTestingDropped) + env.ctx.Lock.Lock() _, ok := env.mempool.Get(txID) require.False(ok) @@ -532,11 +508,9 @@ func TestNoErrorOnUnexpectedSetPreferenceDuringBootstrapping(t *testing.T) { env := newEnvironment(t) env.ctx.Lock.Lock() + defer env.ctx.Lock.Unlock() + env.isBootstrapped.Set(false) - defer func() { - require.NoError(shutdownEnvironment(env)) - env.ctx.Lock.Unlock() - }() require.True(env.blkManager.SetPreference(ids.GenerateTestID())) // should not panic } diff --git a/vms/platformvm/block/builder/helpers_test.go b/vms/platformvm/block/builder/helpers_test.go index fa7339be6fdb..f2d15195ad07 100644 --- a/vms/platformvm/block/builder/helpers_test.go +++ b/vms/platformvm/block/builder/helpers_test.go @@ -211,6 +211,24 @@ func newEnvironment(t *testing.T) *environment { res.blkManager.SetPreference(genesisID) addSubnet(t, res) + t.Cleanup(func() { + res.ctx.Lock.Lock() + defer res.ctx.Lock.Unlock() + + res.Builder.ShutdownBlockTimer() + + if res.isBootstrapped.Get() { + validatorIDs := res.config.Validators.GetValidatorIDs(constants.PrimaryNetworkID) + + require.NoError(res.uptimes.StopTracking(validatorIDs, constants.PrimaryNetworkID)) + + require.NoError(res.state.Commit()) + } + + require.NoError(res.state.Close()) + require.NoError(res.baseDB.Close()) + }) + return res } @@ -398,23 +416,3 @@ func buildGenesisTest(t *testing.T, ctx *snow.Context) []byte { return genesisBytes } - -func shutdownEnvironment(env *environment) error { - env.Builder.ShutdownBlockTimer() - - if env.isBootstrapped.Get() { - validatorIDs := env.config.Validators.GetValidatorIDs(constants.PrimaryNetworkID) - - if err := env.uptimes.StopTracking(validatorIDs, constants.PrimaryNetworkID); err != nil { - return err - } - if err := env.state.Commit(); err != nil { - return err - } - } - - return utils.Err( - env.state.Close(), - env.baseDB.Close(), - ) -} diff --git a/vms/platformvm/block/builder/standard_block_test.go b/vms/platformvm/block/builder/standard_block_test.go index 61dd0d5e2ca1..6064b2153113 100644 --- a/vms/platformvm/block/builder/standard_block_test.go +++ b/vms/platformvm/block/builder/standard_block_test.go @@ -24,10 +24,7 @@ func 
TestAtomicTxImports(t *testing.T) { env := newEnvironment(t) env.ctx.Lock.Lock() - defer func() { - require.NoError(shutdownEnvironment(env)) - env.ctx.Lock.Unlock() - }() + defer env.ctx.Lock.Unlock() utxoID := avax.UTXOID{ TxID: ids.Empty.Prefix(1), diff --git a/vms/platformvm/block/executor/acceptor.go b/vms/platformvm/block/executor/acceptor.go index 8ee4f6e4a04f..cc2bcef0521f 100644 --- a/vms/platformvm/block/executor/acceptor.go +++ b/vms/platformvm/block/executor/acceptor.go @@ -33,11 +33,11 @@ type acceptor struct { } func (a *acceptor) BanffAbortBlock(b *block.BanffAbortBlock) error { - return a.abortBlock(b, "banff abort") + return a.optionBlock(b, "banff abort") } func (a *acceptor) BanffCommitBlock(b *block.BanffCommitBlock) error { - return a.commitBlock(b, "apricot commit") + return a.optionBlock(b, "banff commit") } func (a *acceptor) BanffProposalBlock(b *block.BanffProposalBlock) error { @@ -50,11 +50,11 @@ func (a *acceptor) BanffStandardBlock(b *block.BanffStandardBlock) error { } func (a *acceptor) ApricotAbortBlock(b *block.ApricotAbortBlock) error { - return a.abortBlock(b, "apricot abort") + return a.optionBlock(b, "apricot abort") } func (a *acceptor) ApricotCommitBlock(b *block.ApricotCommitBlock) error { - return a.commitBlock(b, "apricot commit") + return a.optionBlock(b, "apricot commit") } func (a *acceptor) ApricotProposalBlock(b *block.ApricotProposalBlock) error { @@ -116,46 +116,14 @@ func (a *acceptor) ApricotAtomicBlock(b *block.ApricotAtomicBlock) error { return nil } -func (a *acceptor) abortBlock(b block.Block, blockType string) error { +func (a *acceptor) optionBlock(b block.Block, blockType string) error { parentID := b.Parent() parentState, ok := a.blkIDToState[parentID] if !ok { return fmt.Errorf("%w: %s", state.ErrMissingParentState, parentID) } - if a.bootstrapped.Get() { - if parentState.initiallyPreferCommit { - a.metrics.MarkOptionVoteLost() - } else { - a.metrics.MarkOptionVoteWon() - } - } - - return a.optionBlock(b, parentState.statelessBlock, blockType) -} - -func (a *acceptor) commitBlock(b block.Block, blockType string) error { - parentID := b.Parent() - parentState, ok := a.blkIDToState[parentID] - if !ok { - return fmt.Errorf("%w: %s", state.ErrMissingParentState, parentID) - } - - if a.bootstrapped.Get() { - if parentState.initiallyPreferCommit { - a.metrics.MarkOptionVoteWon() - } else { - a.metrics.MarkOptionVoteLost() - } - } - - return a.optionBlock(b, parentState.statelessBlock, blockType) -} - -func (a *acceptor) optionBlock(b, parent block.Block, blockType string) error { blkID := b.ID() - parentID := parent.ID() - defer func() { // Note: we assume this block's sibling doesn't // need the parent's state when it's rejected. @@ -164,7 +132,7 @@ func (a *acceptor) optionBlock(b, parent block.Block, blockType string) error { }() // Note that the parent must be accepted first. 
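	// Annotation: a proposal block is not written as accepted when it is
	// first visited; its acceptance is committed here, together with
	// whichever of its options won.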
- if err := a.commonAccept(parent); err != nil { + if err := a.commonAccept(parentState.statelessBlock); err != nil { return err } @@ -172,10 +140,6 @@ func (a *acceptor) optionBlock(b, parent block.Block, blockType string) error { return err } - parentState, ok := a.blkIDToState[parentID] - if !ok { - return fmt.Errorf("%w %s", errMissingBlockState, parentID) - } if parentState.onDecisionState != nil { if err := parentState.onDecisionState.Apply(a.state); err != nil { return err diff --git a/vms/platformvm/block/executor/acceptor_test.go b/vms/platformvm/block/executor/acceptor_test.go index fb5fcd5f88af..45fd1d54d189 100644 --- a/vms/platformvm/block/executor/acceptor_test.go +++ b/vms/platformvm/block/executor/acceptor_test.go @@ -303,7 +303,7 @@ func TestAcceptorVisitCommitBlock(t *testing.T) { // Set expected calls on dependencies. // Make sure the parent is accepted first. gomock.InOrder( - parentStatelessBlk.EXPECT().ID().Return(parentID).Times(2), + parentStatelessBlk.EXPECT().ID().Return(parentID).Times(1), s.EXPECT().SetLastAccepted(parentID).Times(1), parentStatelessBlk.EXPECT().Height().Return(blk.Height()-1).Times(1), s.EXPECT().SetHeight(blk.Height()-1).Times(1), @@ -335,7 +335,7 @@ func TestAcceptorVisitCommitBlock(t *testing.T) { // Set expected calls on dependencies. // Make sure the parent is accepted first. gomock.InOrder( - parentStatelessBlk.EXPECT().ID().Return(parentID).Times(2), + parentStatelessBlk.EXPECT().ID().Return(parentID).Times(1), s.EXPECT().SetLastAccepted(parentID).Times(1), parentStatelessBlk.EXPECT().Height().Return(blk.Height()-1).Times(1), s.EXPECT().SetHeight(blk.Height()-1).Times(1), @@ -413,7 +413,7 @@ func TestAcceptorVisitAbortBlock(t *testing.T) { // Set expected calls on dependencies. // Make sure the parent is accepted first. gomock.InOrder( - parentStatelessBlk.EXPECT().ID().Return(parentID).Times(2), + parentStatelessBlk.EXPECT().ID().Return(parentID).Times(1), s.EXPECT().SetLastAccepted(parentID).Times(1), parentStatelessBlk.EXPECT().Height().Return(blk.Height()-1).Times(1), s.EXPECT().SetHeight(blk.Height()-1).Times(1), @@ -445,7 +445,7 @@ func TestAcceptorVisitAbortBlock(t *testing.T) { // Set expected calls on dependencies. // Make sure the parent is accepted first. 
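	// Annotation: ID() is now expected once rather than twice, since
	// optionBlock reads the parent's stateless block out of blkIDToState
	// instead of receiving it as an argument and re-deriving its ID.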
gomock.InOrder( - parentStatelessBlk.EXPECT().ID().Return(parentID).Times(2), + parentStatelessBlk.EXPECT().ID().Return(parentID).Times(1), s.EXPECT().SetLastAccepted(parentID).Times(1), parentStatelessBlk.EXPECT().Height().Return(blk.Height()-1).Times(1), s.EXPECT().SetHeight(blk.Height()-1).Times(1), diff --git a/vms/platformvm/block/executor/block.go b/vms/platformvm/block/executor/block.go index ef05f38442c5..5cd5a02f709c 100644 --- a/vms/platformvm/block/executor/block.go +++ b/vms/platformvm/block/executor/block.go @@ -5,7 +5,6 @@ package executor import ( "context" - "fmt" "time" "go.uber.org/zap" @@ -83,22 +82,18 @@ func (b *Block) Timestamp() time.Time { } func (b *Block) Options(context.Context) ([2]snowman.Block, error) { - options := options{} + options := options{ + log: b.manager.ctx.Log, + primaryUptimePercentage: b.manager.txExecutorBackend.Config.UptimePercentage, + uptimes: b.manager.txExecutorBackend.Uptimes, + state: b.manager.backend.state, + } if err := b.Block.Visit(&options); err != nil { return [2]snowman.Block{}, err } - commitBlock := b.manager.NewBlock(options.commitBlock) - abortBlock := b.manager.NewBlock(options.abortBlock) - - blkID := b.ID() - blkState, ok := b.manager.blkIDToState[blkID] - if !ok { - return [2]snowman.Block{}, fmt.Errorf("block %s state not found", blkID) - } - - if blkState.initiallyPreferCommit { - return [2]snowman.Block{commitBlock, abortBlock}, nil - } - return [2]snowman.Block{abortBlock, commitBlock}, nil + return [2]snowman.Block{ + b.manager.NewBlock(options.preferredBlock), + b.manager.NewBlock(options.alternateBlock), + }, nil } diff --git a/vms/platformvm/block/executor/block_state.go b/vms/platformvm/block/executor/block_state.go index ffc6f679b8f6..9d6b377c2644 100644 --- a/vms/platformvm/block/executor/block_state.go +++ b/vms/platformvm/block/executor/block_state.go @@ -14,10 +14,9 @@ import ( ) type proposalBlockState struct { - initiallyPreferCommit bool - onDecisionState state.Diff - onCommitState state.Diff - onAbortState state.Diff + onDecisionState state.Diff + onCommitState state.Diff + onAbortState state.Diff } // The state of a block. 
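With initiallyPreferCommit removed from proposalBlockState above, the commit/abort preference is no longer cached when a proposal block is verified; the options visitor (see options.go further down) recomputes it when Options is called, from the staker's measured uptime. A rough sketch of that decision under stated assumptions — prefersCommit and its parameter list here are illustrative, not the exact implementation:

```go
package executor

import (
	"time"

	"github.com/ava-labs/avalanchego/ids"
	"github.com/ava-labs/avalanchego/snow/uptime"
	"github.com/ava-labs/avalanchego/utils/constants"
)

// Illustrative sketch: commit (reward the staker) is preferred when the
// validator's measured primary-network uptime meets the required fraction.
// Callers fall back to preferring commit on error, erring toward
// over-rewarding rather than under-rewarding.
func prefersCommit(
	calc uptime.Calculator,
	nodeID ids.NodeID,
	startTime time.Time,
	requiredUptime float64,
) (bool, error) {
	measured, err := calc.CalculateUptimePercentFrom(nodeID, constants.PrimaryNetworkID, startTime)
	if err != nil {
		return true, err
	}
	return measured >= requiredUptime, nil
}
```

The reworked test table in block_test.go below exercises exactly these branches: missing staker tx, unexpected tx types, and uptime-calculation failures (all falling back to preferring commit), plus the two genuine uptime comparisons that prefer commit and abort respectively.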
diff --git a/vms/platformvm/block/executor/block_test.go b/vms/platformvm/block/executor/block_test.go index b1d73cefb7f1..c26d71705857 100644 --- a/vms/platformvm/block/executor/block_test.go +++ b/vms/platformvm/block/executor/block_test.go @@ -6,6 +6,7 @@ package executor import ( "context" "testing" + "time" "github.com/stretchr/testify/require" @@ -14,9 +15,16 @@ import ( "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/choices" - "github.com/ava-labs/avalanchego/snow/consensus/snowman" + "github.com/ava-labs/avalanchego/snow/snowtest" + "github.com/ava-labs/avalanchego/snow/uptime" + "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/vms/platformvm/block" + "github.com/ava-labs/avalanchego/vms/platformvm/config" + "github.com/ava-labs/avalanchego/vms/platformvm/reward" "github.com/ava-labs/avalanchego/vms/platformvm/state" + "github.com/ava-labs/avalanchego/vms/platformvm/status" + "github.com/ava-labs/avalanchego/vms/platformvm/txs" + "github.com/ava-labs/avalanchego/vms/platformvm/txs/executor" ) func TestStatus(t *testing.T) { @@ -127,126 +135,485 @@ func TestStatus(t *testing.T) { func TestBlockOptions(t *testing.T) { type test struct { name string - blkF func() *Block + blkF func(*gomock.Controller) *Block expectedPreferenceType block.Block - expectedErr error } tests := []test{ { name: "apricot proposal block; commit preferred", - blkF: func() *Block { - innerBlk := &block.ApricotProposalBlock{} - blkID := innerBlk.ID() + blkF: func(ctrl *gomock.Controller) *Block { + state := state.NewMockState(ctrl) + + uptimes := uptime.NewMockCalculator(ctrl) manager := &manager{ backend: &backend{ - blkIDToState: map[ids.ID]*blockState{ - blkID: { - proposalBlockState: proposalBlockState{ - initiallyPreferCommit: true, + state: state, + ctx: snowtest.Context(t, snowtest.PChainID), + }, + txExecutorBackend: &executor.Backend{ + Config: &config.Config{ + UptimePercentage: 0, + }, + Uptimes: uptimes, + }, + } + + return &Block{ + Block: &block.ApricotProposalBlock{}, + manager: manager, + } + }, + expectedPreferenceType: &block.ApricotCommitBlock{}, + }, + { + name: "banff proposal block; invalid proposal tx", + blkF: func(ctrl *gomock.Controller) *Block { + state := state.NewMockState(ctrl) + + uptimes := uptime.NewMockCalculator(ctrl) + + manager := &manager{ + backend: &backend{ + state: state, + ctx: snowtest.Context(t, snowtest.PChainID), + }, + txExecutorBackend: &executor.Backend{ + Config: &config.Config{ + UptimePercentage: 0, + }, + Uptimes: uptimes, + }, + } + + return &Block{ + Block: &block.BanffProposalBlock{ + ApricotProposalBlock: block.ApricotProposalBlock{ + Tx: &txs.Tx{ + Unsigned: &txs.CreateChainTx{}, + }, + }, + }, + manager: manager, + } + }, + expectedPreferenceType: &block.BanffCommitBlock{}, + }, + { + name: "banff proposal block; missing tx", + blkF: func(ctrl *gomock.Controller) *Block { + stakerTxID := ids.GenerateTestID() + + state := state.NewMockState(ctrl) + state.EXPECT().GetTx(stakerTxID).Return(nil, status.Unknown, database.ErrNotFound) + + uptimes := uptime.NewMockCalculator(ctrl) + + manager := &manager{ + backend: &backend{ + state: state, + ctx: snowtest.Context(t, snowtest.PChainID), + }, + txExecutorBackend: &executor.Backend{ + Config: &config.Config{ + UptimePercentage: 0, + }, + Uptimes: uptimes, + }, + } + + return &Block{ + Block: &block.BanffProposalBlock{ + ApricotProposalBlock: block.ApricotProposalBlock{ + Tx: &txs.Tx{ + Unsigned: 
&txs.RewardValidatorTx{ + TxID: stakerTxID, }, }, }, }, + manager: manager, + } + }, + expectedPreferenceType: &block.BanffCommitBlock{}, + }, + { + name: "banff proposal block; error fetching staker tx", + blkF: func(ctrl *gomock.Controller) *Block { + stakerTxID := ids.GenerateTestID() + + state := state.NewMockState(ctrl) + state.EXPECT().GetTx(stakerTxID).Return(nil, status.Unknown, database.ErrClosed) + + uptimes := uptime.NewMockCalculator(ctrl) + + manager := &manager{ + backend: &backend{ + state: state, + ctx: snowtest.Context(t, snowtest.PChainID), + }, + txExecutorBackend: &executor.Backend{ + Config: &config.Config{ + UptimePercentage: 0, + }, + Uptimes: uptimes, + }, } return &Block{ - Block: innerBlk, + Block: &block.BanffProposalBlock{ + ApricotProposalBlock: block.ApricotProposalBlock{ + Tx: &txs.Tx{ + Unsigned: &txs.RewardValidatorTx{ + TxID: stakerTxID, + }, + }, + }, + }, manager: manager, } }, - expectedPreferenceType: &block.ApricotCommitBlock{}, + expectedPreferenceType: &block.BanffCommitBlock{}, }, { - name: "apricot proposal block; abort preferred", - blkF: func() *Block { - innerBlk := &block.ApricotProposalBlock{} - blkID := innerBlk.ID() + name: "banff proposal block; unexpected staker tx type", + blkF: func(ctrl *gomock.Controller) *Block { + stakerTxID := ids.GenerateTestID() + stakerTx := &txs.Tx{ + Unsigned: &txs.CreateChainTx{}, + } + + state := state.NewMockState(ctrl) + state.EXPECT().GetTx(stakerTxID).Return(stakerTx, status.Committed, nil) + + uptimes := uptime.NewMockCalculator(ctrl) manager := &manager{ backend: &backend{ - blkIDToState: map[ids.ID]*blockState{ - blkID: {}, + state: state, + ctx: snowtest.Context(t, snowtest.PChainID), + }, + txExecutorBackend: &executor.Backend{ + Config: &config.Config{ + UptimePercentage: 0, }, + Uptimes: uptimes, }, } return &Block{ - Block: innerBlk, + Block: &block.BanffProposalBlock{ + ApricotProposalBlock: block.ApricotProposalBlock{ + Tx: &txs.Tx{ + Unsigned: &txs.RewardValidatorTx{ + TxID: stakerTxID, + }, + }, + }, + }, manager: manager, } }, - expectedPreferenceType: &block.ApricotAbortBlock{}, + expectedPreferenceType: &block.BanffCommitBlock{}, }, { - name: "banff proposal block; commit preferred", - blkF: func() *Block { - innerBlk := &block.BanffProposalBlock{} - blkID := innerBlk.ID() + name: "banff proposal block; missing primary network validator", + blkF: func(ctrl *gomock.Controller) *Block { + var ( + stakerTxID = ids.GenerateTestID() + nodeID = ids.GenerateTestNodeID() + subnetID = ids.GenerateTestID() + stakerTx = &txs.Tx{ + Unsigned: &txs.AddPermissionlessValidatorTx{ + Validator: txs.Validator{ + NodeID: nodeID, + }, + Subnet: subnetID, + }, + } + ) + + state := state.NewMockState(ctrl) + state.EXPECT().GetTx(stakerTxID).Return(stakerTx, status.Committed, nil) + state.EXPECT().GetCurrentValidator(constants.PrimaryNetworkID, nodeID).Return(nil, database.ErrNotFound) + + uptimes := uptime.NewMockCalculator(ctrl) manager := &manager{ backend: &backend{ - blkIDToState: map[ids.ID]*blockState{ - blkID: { - proposalBlockState: proposalBlockState{ - initiallyPreferCommit: true, + state: state, + ctx: snowtest.Context(t, snowtest.PChainID), + }, + txExecutorBackend: &executor.Backend{ + Config: &config.Config{ + UptimePercentage: 0, + }, + Uptimes: uptimes, + }, + } + + return &Block{ + Block: &block.BanffProposalBlock{ + ApricotProposalBlock: block.ApricotProposalBlock{ + Tx: &txs.Tx{ + Unsigned: &txs.RewardValidatorTx{ + TxID: stakerTxID, }, }, }, }, + manager: manager, + } + }, + 
expectedPreferenceType: &block.BanffCommitBlock{}, + }, + { + name: "banff proposal block; failed calculating primary network uptime", + blkF: func(ctrl *gomock.Controller) *Block { + var ( + stakerTxID = ids.GenerateTestID() + nodeID = ids.GenerateTestNodeID() + subnetID = constants.PrimaryNetworkID + stakerTx = &txs.Tx{ + Unsigned: &txs.AddPermissionlessValidatorTx{ + Validator: txs.Validator{ + NodeID: nodeID, + }, + Subnet: subnetID, + }, + } + primaryNetworkValidatorStartTime = time.Now() + staker = &state.Staker{ + StartTime: primaryNetworkValidatorStartTime, + } + ) + + state := state.NewMockState(ctrl) + state.EXPECT().GetTx(stakerTxID).Return(stakerTx, status.Committed, nil) + state.EXPECT().GetCurrentValidator(constants.PrimaryNetworkID, nodeID).Return(staker, nil) + + uptimes := uptime.NewMockCalculator(ctrl) + uptimes.EXPECT().CalculateUptimePercentFrom(nodeID, constants.PrimaryNetworkID, primaryNetworkValidatorStartTime).Return(0.0, database.ErrNotFound) + + manager := &manager{ + backend: &backend{ + state: state, + ctx: snowtest.Context(t, snowtest.PChainID), + }, + txExecutorBackend: &executor.Backend{ + Config: &config.Config{ + UptimePercentage: 0, + }, + Uptimes: uptimes, + }, } return &Block{ - Block: innerBlk, + Block: &block.BanffProposalBlock{ + ApricotProposalBlock: block.ApricotProposalBlock{ + Tx: &txs.Tx{ + Unsigned: &txs.RewardValidatorTx{ + TxID: stakerTxID, + }, + }, + }, + }, manager: manager, } }, expectedPreferenceType: &block.BanffCommitBlock{}, }, { - name: "banff proposal block; abort preferred", - blkF: func() *Block { - innerBlk := &block.BanffProposalBlock{} - blkID := innerBlk.ID() + name: "banff proposal block; failed fetching subnet transformation", + blkF: func(ctrl *gomock.Controller) *Block { + var ( + stakerTxID = ids.GenerateTestID() + nodeID = ids.GenerateTestNodeID() + subnetID = ids.GenerateTestID() + stakerTx = &txs.Tx{ + Unsigned: &txs.AddPermissionlessValidatorTx{ + Validator: txs.Validator{ + NodeID: nodeID, + }, + Subnet: subnetID, + }, + } + primaryNetworkValidatorStartTime = time.Now() + staker = &state.Staker{ + StartTime: primaryNetworkValidatorStartTime, + } + ) + + state := state.NewMockState(ctrl) + state.EXPECT().GetTx(stakerTxID).Return(stakerTx, status.Committed, nil) + state.EXPECT().GetCurrentValidator(constants.PrimaryNetworkID, nodeID).Return(staker, nil) + state.EXPECT().GetSubnetTransformation(subnetID).Return(nil, database.ErrNotFound) + + uptimes := uptime.NewMockCalculator(ctrl) manager := &manager{ backend: &backend{ - blkIDToState: map[ids.ID]*blockState{ - blkID: {}, + state: state, + ctx: snowtest.Context(t, snowtest.PChainID), + }, + txExecutorBackend: &executor.Backend{ + Config: &config.Config{ + UptimePercentage: 0, }, + Uptimes: uptimes, }, } return &Block{ - Block: innerBlk, + Block: &block.BanffProposalBlock{ + ApricotProposalBlock: block.ApricotProposalBlock{ + Tx: &txs.Tx{ + Unsigned: &txs.RewardValidatorTx{ + TxID: stakerTxID, + }, + }, + }, + }, manager: manager, } }, - expectedPreferenceType: &block.BanffAbortBlock{}, + expectedPreferenceType: &block.BanffCommitBlock{}, }, { - name: "non oracle block", - blkF: func() *Block { + name: "banff proposal block; prefers commit", + blkF: func(ctrl *gomock.Controller) *Block { + var ( + stakerTxID = ids.GenerateTestID() + nodeID = ids.GenerateTestNodeID() + subnetID = ids.GenerateTestID() + stakerTx = &txs.Tx{ + Unsigned: &txs.AddPermissionlessValidatorTx{ + Validator: txs.Validator{ + NodeID: nodeID, + }, + Subnet: subnetID, + }, + } + 
primaryNetworkValidatorStartTime = time.Now() + staker = &state.Staker{ + StartTime: primaryNetworkValidatorStartTime, + } + transformSubnetTx = &txs.Tx{ + Unsigned: &txs.TransformSubnetTx{ + UptimeRequirement: .2 * reward.PercentDenominator, + }, + } + ) + + state := state.NewMockState(ctrl) + state.EXPECT().GetTx(stakerTxID).Return(stakerTx, status.Committed, nil) + state.EXPECT().GetCurrentValidator(constants.PrimaryNetworkID, nodeID).Return(staker, nil) + state.EXPECT().GetSubnetTransformation(subnetID).Return(transformSubnetTx, nil) + + uptimes := uptime.NewMockCalculator(ctrl) + uptimes.EXPECT().CalculateUptimePercentFrom(nodeID, constants.PrimaryNetworkID, primaryNetworkValidatorStartTime).Return(.5, nil) + + manager := &manager{ + backend: &backend{ + state: state, + ctx: snowtest.Context(t, snowtest.PChainID), + }, + txExecutorBackend: &executor.Backend{ + Config: &config.Config{ + UptimePercentage: .8, + }, + Uptimes: uptimes, + }, + } + return &Block{ - Block: &block.BanffStandardBlock{}, - manager: &manager{}, + Block: &block.BanffProposalBlock{ + ApricotProposalBlock: block.ApricotProposalBlock{ + Tx: &txs.Tx{ + Unsigned: &txs.RewardValidatorTx{ + TxID: stakerTxID, + }, + }, + }, + }, + manager: manager, } }, - expectedErr: snowman.ErrNotOracle, + expectedPreferenceType: &block.BanffCommitBlock{}, + }, + { + name: "banff proposal block; prefers abort", + blkF: func(ctrl *gomock.Controller) *Block { + var ( + stakerTxID = ids.GenerateTestID() + nodeID = ids.GenerateTestNodeID() + subnetID = ids.GenerateTestID() + stakerTx = &txs.Tx{ + Unsigned: &txs.AddPermissionlessValidatorTx{ + Validator: txs.Validator{ + NodeID: nodeID, + }, + Subnet: subnetID, + }, + } + primaryNetworkValidatorStartTime = time.Now() + staker = &state.Staker{ + StartTime: primaryNetworkValidatorStartTime, + } + transformSubnetTx = &txs.Tx{ + Unsigned: &txs.TransformSubnetTx{ + UptimeRequirement: .6 * reward.PercentDenominator, + }, + } + ) + + state := state.NewMockState(ctrl) + state.EXPECT().GetTx(stakerTxID).Return(stakerTx, status.Committed, nil) + state.EXPECT().GetCurrentValidator(constants.PrimaryNetworkID, nodeID).Return(staker, nil) + state.EXPECT().GetSubnetTransformation(subnetID).Return(transformSubnetTx, nil) + + uptimes := uptime.NewMockCalculator(ctrl) + uptimes.EXPECT().CalculateUptimePercentFrom(nodeID, constants.PrimaryNetworkID, primaryNetworkValidatorStartTime).Return(.5, nil) + + manager := &manager{ + backend: &backend{ + state: state, + ctx: snowtest.Context(t, snowtest.PChainID), + }, + txExecutorBackend: &executor.Backend{ + Config: &config.Config{ + UptimePercentage: .8, + }, + Uptimes: uptimes, + }, + } + + return &Block{ + Block: &block.BanffProposalBlock{ + ApricotProposalBlock: block.ApricotProposalBlock{ + Tx: &txs.Tx{ + Unsigned: &txs.RewardValidatorTx{ + TxID: stakerTxID, + }, + }, + }, + }, + manager: manager, + } + }, + expectedPreferenceType: &block.BanffAbortBlock{}, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { + ctrl := gomock.NewController(t) require := require.New(t) - blk := tt.blkF() + blk := tt.blkF(ctrl) options, err := blk.Options(context.Background()) - require.ErrorIs(err, tt.expectedErr) - if tt.expectedErr != nil { - return - } + require.NoError(err) require.IsType(tt.expectedPreferenceType, options[0].(*Block).Block) }) } diff --git a/vms/platformvm/block/executor/helpers_test.go b/vms/platformvm/block/executor/helpers_test.go index 5e7b5a3fce1e..1a3d2993328b 100644 --- a/vms/platformvm/block/executor/helpers_test.go +++ 
b/vms/platformvm/block/executor/helpers_test.go @@ -10,6 +10,8 @@ import ( "github.com/prometheus/client_golang/prometheus" + "github.com/stretchr/testify/require" + "go.uber.org/mock/gomock" "github.com/ava-labs/avalanchego/chains" @@ -220,6 +222,31 @@ func newEnvironment(t *testing.T, ctrl *gomock.Controller) *environment { // whatever we need } + t.Cleanup(func() { + res.ctx.Lock.Lock() + defer res.ctx.Lock.Unlock() + + if res.mockedState != nil { + // state is mocked, nothing to do here + return + } + + require := require.New(t) + + if res.isBootstrapped.Get() { + validatorIDs := res.config.Validators.GetValidatorIDs(constants.PrimaryNetworkID) + + require.NoError(res.uptimes.StopTracking(validatorIDs, constants.PrimaryNetworkID)) + require.NoError(res.state.Commit()) + } + + if res.state != nil { + require.NoError(res.state.Close()) + } + + require.NoError(res.baseDB.Close()) + }) + return res } @@ -424,33 +451,6 @@ func buildGenesisTest(ctx *snow.Context) []byte { return genesisBytes } -func shutdownEnvironment(t *environment) error { - if t.mockedState != nil { - // state is mocked, nothing to do here - return nil - } - - if t.isBootstrapped.Get() { - validatorIDs := t.config.Validators.GetValidatorIDs(constants.PrimaryNetworkID) - - if err := t.uptimes.StopTracking(validatorIDs, constants.PrimaryNetworkID); err != nil { - return err - } - if err := t.state.Commit(); err != nil { - return err - } - } - - var err error - if t.state != nil { - err = t.state.Close() - } - return utils.Err( - err, - t.baseDB.Close(), - ) -} - func addPendingValidator( env *environment, startTime time.Time, diff --git a/vms/platformvm/block/executor/manager.go b/vms/platformvm/block/executor/manager.go index ed82197a3568..ee29684808f8 100644 --- a/vms/platformvm/block/executor/manager.go +++ b/vms/platformvm/block/executor/manager.go @@ -127,12 +127,36 @@ func (m *manager) VerifyTx(tx *txs.Tx) error { return ErrChainNotSynced } - return tx.Unsigned.Visit(&executor.MempoolTxVerifier{ - Backend: m.txExecutorBackend, - ParentID: m.preferred, - StateVersions: m, - Tx: tx, + stateDiff, err := state.NewDiff(m.preferred, m) + if err != nil { + return err + } + + nextBlkTime, _, err := executor.NextBlockTime(stateDiff, m.txExecutorBackend.Clk) + if err != nil { + return err + } + + _, err = executor.AdvanceTimeTo(m.txExecutorBackend, stateDiff, nextBlkTime) + if err != nil { + return err + } + + err = tx.Unsigned.Visit(&executor.CaminoStandardTxExecutor{ + StandardTxExecutor: executor.StandardTxExecutor{ + Backend: m.txExecutorBackend, + State: stateDiff, + Tx: tx, + }, }) + // We ignore [ErrFutureStakeTime] here because the time will be advanced + // when this transaction is issued. + // + // TODO: Remove this check post-Durango.
+ if errors.Is(err, executor.ErrFutureStakeTime) { + return nil + } + return err } func (m *manager) VerifyUniqueInputs(blkID ids.ID, inputs set.Set[ids.ID]) error { diff --git a/vms/platformvm/block/executor/options.go b/vms/platformvm/block/executor/options.go index a8ccea315b0f..f349caa661e4 100644 --- a/vms/platformvm/block/executor/options.go +++ b/vms/platformvm/block/executor/options.go @@ -4,19 +4,44 @@ package executor import ( + "errors" "fmt" + + "go.uber.org/zap" + "github.com/ava-labs/avalanchego/snow/consensus/snowman" + "github.com/ava-labs/avalanchego/snow/uptime" + "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/utils/logging" "github.com/ava-labs/avalanchego/vms/platformvm/block" + "github.com/ava-labs/avalanchego/vms/platformvm/reward" + "github.com/ava-labs/avalanchego/vms/platformvm/state" + "github.com/ava-labs/avalanchego/vms/platformvm/txs" + "github.com/ava-labs/avalanchego/vms/platformvm/txs/executor" ) -var _ block.Visitor = (*verifier)(nil) +var ( + _ block.Visitor = (*options)(nil) + + errUnexpectedProposalTxType = errors.New("unexpected proposal transaction type") + errFailedFetchingStakerTx = errors.New("failed fetching staker transaction") + errUnexpectedStakerTxType = errors.New("unexpected staker transaction type") + errFailedFetchingPrimaryStaker = errors.New("failed fetching primary staker") + errFailedFetchingSubnetTransformation = errors.New("failed fetching subnet transformation") + errFailedCalculatingUptime = errors.New("failed calculating uptime") +) // options supports build new option blocks type options struct { + // inputs populated before calling this struct's methods: + log logging.Logger + primaryUptimePercentage float64 + uptimes uptime.Calculator + state state.Chain + + // outputs populated by this struct's methods: - commitBlock block.Block - abortBlock block.Block + preferredBlock block.Block + alternateBlock block.Block } func (*options) BanffAbortBlock(*block.BanffAbortBlock) error { @@ -32,8 +57,7 @@ func (o *options) BanffProposalBlock(b *block.BanffProposalBlock) error { blkID := b.ID() nextHeight := b.Height() + 1 - var err error - o.commitBlock, err = block.NewBanffCommitBlock(timestamp, blkID, nextHeight) + commitBlock, err := block.NewBanffCommitBlock(timestamp, blkID, nextHeight) if err != nil { return fmt.Errorf( "failed to create commit block: %w", @@ -41,13 +65,35 @@ ) } - o.abortBlock, err = block.NewBanffAbortBlock(timestamp, blkID, nextHeight) + abortBlock, err := block.NewBanffAbortBlock(timestamp, blkID, nextHeight) if err != nil { return fmt.Errorf( "failed to create abort block: %w", err, ) } + + prefersCommit, err := o.prefersCommit(b.Tx) + if err != nil { + o.log.Debug("falling back to prefer commit", + zap.Error(err), + ) + // We fall back to commit here to err on the side of over-rewarding + // rather than under-rewarding. + // + // Invariant: We must not return the error here, because the error would + // be treated as fatal. Errors can occur here due to a malicious block + // proposer or even in unusual virtuous cases.
+ prefersCommit = true + } + + if prefersCommit { + o.preferredBlock = commitBlock + o.alternateBlock = abortBlock + } else { + o.preferredBlock = abortBlock + o.alternateBlock = commitBlock + } return nil } @@ -68,7 +114,7 @@ func (o *options) ApricotProposalBlock(b *block.ApricotProposalBlock) error { nextHeight := b.Height() + 1 var err error - o.commitBlock, err = block.NewApricotCommitBlock(blkID, nextHeight) + o.preferredBlock, err = block.NewApricotCommitBlock(blkID, nextHeight) if err != nil { return fmt.Errorf( "failed to create commit block: %w", @@ -76,7 +122,7 @@ ) } - o.abortBlock, err = block.NewApricotAbortBlock(blkID, nextHeight) + o.alternateBlock, err = block.NewApricotAbortBlock(blkID, nextHeight) if err != nil { return fmt.Errorf( "failed to create abort block: %w", @@ -93,3 +139,58 @@ func (*options) ApricotStandardBlock(*block.ApricotStandardBlock) error { return snowman.ErrNotOracle } func (*options) ApricotAtomicBlock(*block.ApricotAtomicBlock) error { return snowman.ErrNotOracle } + +func (o *options) prefersCommit(tx *txs.Tx) (bool, error) { + var unsignedTx *txs.RewardValidatorTx + switch utx := tx.Unsigned.(type) { + case *txs.RewardValidatorTx: + unsignedTx = utx + case *txs.CaminoRewardValidatorTx: + // CaminoRewardValidatorTx doesn't have any difference + // between commit and abort states, so we always prefer commit. + return true, nil + default: + return false, fmt.Errorf("%w: %T", errUnexpectedProposalTxType, tx.Unsigned) + } + + stakerTx, _, err := o.state.GetTx(unsignedTx.TxID) + if err != nil { + return false, fmt.Errorf("%w: %w", errFailedFetchingStakerTx, err) + } + + staker, ok := stakerTx.Unsigned.(txs.Staker) + if !ok { + return false, fmt.Errorf("%w: %T", errUnexpectedStakerTxType, stakerTx.Unsigned) + } + + nodeID := staker.NodeID() + primaryNetworkValidator, err := o.state.GetCurrentValidator( + constants.PrimaryNetworkID, + nodeID, + ) + if err != nil { + return false, fmt.Errorf("%w: %w", errFailedFetchingPrimaryStaker, err) + } + + expectedUptimePercentage := o.primaryUptimePercentage + if subnetID := staker.SubnetID(); subnetID != constants.PrimaryNetworkID { + transformSubnet, err := executor.GetTransformSubnetTx(o.state, subnetID) + if err != nil { + return false, fmt.Errorf("%w: %w", errFailedFetchingSubnetTransformation, err) + } + + expectedUptimePercentage = float64(transformSubnet.UptimeRequirement) / reward.PercentDenominator + } + + // TODO: calculate subnet uptimes + uptime, err := o.uptimes.CalculateUptimePercentFrom( + nodeID, + constants.PrimaryNetworkID, + primaryNetworkValidator.StartTime, + ) + if err != nil { + return false, fmt.Errorf("%w: %w", errFailedCalculatingUptime, err) + } + + return uptime >= expectedUptimePercentage, nil +} diff --git a/vms/platformvm/block/executor/proposal_block_test.go b/vms/platformvm/block/executor/proposal_block_test.go index 3031aeea5c8f..dfc9ac7c05af 100644 --- a/vms/platformvm/block/executor/proposal_block_test.go +++ b/vms/platformvm/block/executor/proposal_block_test.go @@ -44,9 +44,6 @@ func TestApricotProposalBlockTimeVerification(t *testing.T) { ctrl := gomock.NewController(t) env := newEnvironment(t, ctrl) - defer func() { - require.NoError(shutdownEnvironment(env)) - }() // create apricotParentBlk.
It's a standard one for simplicity parentHeight := uint64(2022) @@ -111,14 +108,6 @@ func TestApricotProposalBlockTimeVerification(t *testing.T) { }).Times(2) currentStakersIt.EXPECT().Release() onParentAccept.EXPECT().GetCurrentStakerIterator().Return(currentStakersIt, nil) - onParentAccept.EXPECT().GetCurrentValidator(utx.SubnetID(), utx.NodeID()).Return(&state.Staker{ - TxID: addValTx.ID(), - NodeID: utx.NodeID(), - SubnetID: utx.SubnetID(), - StartTime: utx.StartTime(), - NextTime: chainTime, - EndTime: chainTime, - }, nil) onParentAccept.EXPECT().GetTx(addValTx.ID()).Return(addValTx, status.Committed, nil) onParentAccept.EXPECT().GetCurrentSupply(constants.PrimaryNetworkID).Return(uint64(1000), nil).AnyTimes() onParentAccept.EXPECT().GetDelegateeReward(constants.PrimaryNetworkID, utx.NodeID()).Return(uint64(0), nil).AnyTimes() @@ -159,9 +148,6 @@ func TestBanffProposalBlockTimeVerification(t *testing.T) { ctrl := gomock.NewController(t) env := newEnvironment(t, ctrl) - defer func() { - require.NoError(shutdownEnvironment(env)) - }() env.clk.Set(defaultGenesisTime) env.config.BanffTime = time.Time{} // activate Banff env.config.DurangoTime = mockable.MaxTime // deactivate Durango @@ -224,13 +210,6 @@ func TestBanffProposalBlockTimeVerification(t *testing.T) { require.NoError(nextStakerTx.Initialize(txs.Codec)) nextStakerTxID := nextStakerTx.ID() - onParentAccept.EXPECT().GetCurrentValidator(unsignedNextStakerTx.SubnetID(), unsignedNextStakerTx.NodeID()).Return(&state.Staker{ - TxID: nextStakerTxID, - NodeID: unsignedNextStakerTx.NodeID(), - SubnetID: unsignedNextStakerTx.SubnetID(), - StartTime: unsignedNextStakerTx.StartTime(), - EndTime: chainTime, - }, nil) onParentAccept.EXPECT().GetTx(nextStakerTxID).Return(nextStakerTx, status.Processing, nil) currentStakersIt := state.NewMockStakerIterator(ctrl) @@ -590,9 +569,6 @@ func TestBanffProposalBlockUpdateStakers(t *testing.T) { t.Run(test.description, func(t *testing.T) { require := require.New(t) env := newEnvironment(t, nil) - defer func() { - require.NoError(shutdownEnvironment(env)) - }() env.config.BanffTime = time.Time{} // activate Banff subnetID := testSubnet1.ID() @@ -743,9 +719,6 @@ func TestBanffProposalBlockUpdateStakers(t *testing.T) { func TestBanffProposalBlockRemoveSubnetValidator(t *testing.T) { require := require.New(t) env := newEnvironment(t, nil) - defer func() { - require.NoError(shutdownEnvironment(env)) - }() env.config.BanffTime = time.Time{} // activate Banff subnetID := testSubnet1.ID() @@ -886,9 +859,6 @@ func TestBanffProposalBlockTrackedSubnet(t *testing.T) { t.Run(fmt.Sprintf("tracked %t", tracked), func(ts *testing.T) { require := require.New(t) env := newEnvironment(t, nil) - defer func() { - require.NoError(shutdownEnvironment(env)) - }() env.config.BanffTime = time.Time{} // activate Banff subnetID := testSubnet1.ID() @@ -992,9 +962,6 @@ func TestBanffProposalBlockTrackedSubnet(t *testing.T) { func TestBanffProposalBlockDelegatorStakerWeight(t *testing.T) { require := require.New(t) env := newEnvironment(t, nil) - defer func() { - require.NoError(shutdownEnvironment(env)) - }() env.config.BanffTime = time.Time{} // activate Banff // Case: Timestamp is after next validator start time @@ -1177,9 +1144,6 @@ func TestBanffProposalBlockDelegatorStakerWeight(t *testing.T) { func TestBanffProposalBlockDelegatorStakers(t *testing.T) { require := require.New(t) env := newEnvironment(t, nil) - defer func() { - require.NoError(shutdownEnvironment(env)) - }() env.config.BanffTime = time.Time{} // activate Banff 
// Case: Timestamp is after next validator start time @@ -1362,9 +1326,6 @@ func TestBanffProposalBlockDelegatorStakers(t *testing.T) { func TestAddValidatorProposalBlock(t *testing.T) { require := require.New(t) env := newEnvironment(t, nil) - defer func() { - require.NoError(shutdownEnvironment(env)) - }() env.config.BanffTime = time.Time{} // activate Banff env.config.DurangoTime = time.Time{} // activate Durango diff --git a/vms/platformvm/block/executor/standard_block_test.go b/vms/platformvm/block/executor/standard_block_test.go index 171646cab662..fa146b4e9625 100644 --- a/vms/platformvm/block/executor/standard_block_test.go +++ b/vms/platformvm/block/executor/standard_block_test.go @@ -41,9 +41,6 @@ func TestApricotStandardBlockTimeVerification(t *testing.T) { ctrl := gomock.NewController(t) env := newEnvironment(t, ctrl) - defer func() { - require.NoError(shutdownEnvironment(env)) - }() // setup and store parent block // it's a standard block for simplicity @@ -97,9 +94,6 @@ func TestBanffStandardBlockTimeVerification(t *testing.T) { ctrl := gomock.NewController(t) env := newEnvironment(t, ctrl) - defer func() { - require.NoError(shutdownEnvironment(env)) - }() now := env.clk.Time() env.clk.Set(now) env.config.BanffTime = time.Time{} // activate Banff @@ -318,9 +312,6 @@ func TestBanffStandardBlockUpdatePrimaryNetworkStakers(t *testing.T) { require := require.New(t) env := newEnvironment(t, nil) - defer func() { - require.NoError(shutdownEnvironment(env)) - }() env.config.BanffTime = time.Time{} // activate Banff // Case: Timestamp is after next validator start time @@ -523,9 +514,6 @@ func TestBanffStandardBlockUpdateStakers(t *testing.T) { t.Run(test.description, func(t *testing.T) { require := require.New(t) env := newEnvironment(t, nil) - defer func() { - require.NoError(shutdownEnvironment(env)) - }() env.config.BanffTime = time.Time{} // activate Banff subnetID := testSubnet1.ID() @@ -625,9 +613,6 @@ func TestBanffStandardBlockUpdateStakers(t *testing.T) { func TestBanffStandardBlockRemoveSubnetValidator(t *testing.T) { require := require.New(t) env := newEnvironment(t, nil) - defer func() { - require.NoError(shutdownEnvironment(env)) - }() env.config.BanffTime = time.Time{} // activate Banff subnetID := testSubnet1.ID() @@ -724,9 +709,6 @@ func TestBanffStandardBlockTrackedSubnet(t *testing.T) { t.Run(fmt.Sprintf("tracked %t", tracked), func(t *testing.T) { require := require.New(t) env := newEnvironment(t, nil) - defer func() { - require.NoError(shutdownEnvironment(env)) - }() env.config.BanffTime = time.Time{} // activate Banff subnetID := testSubnet1.ID() @@ -787,9 +769,6 @@ func TestBanffStandardBlockTrackedSubnet(t *testing.T) { func TestBanffStandardBlockDelegatorStakerWeight(t *testing.T) { require := require.New(t) env := newEnvironment(t, nil) - defer func() { - require.NoError(shutdownEnvironment(env)) - }() env.config.BanffTime = time.Time{} // activate Banff // Case: Timestamp is after next validator start time diff --git a/vms/platformvm/block/executor/verifier.go b/vms/platformvm/block/executor/verifier.go index 61fa50baf6fe..83fed80024f7 100644 --- a/vms/platformvm/block/executor/verifier.go +++ b/vms/platformvm/block/executor/verifier.go @@ -76,19 +76,11 @@ func (v *verifier) BanffProposalBlock(b *block.BanffProposalBlock) error { return err } - // Apply the changes, if any, from advancing the chain time. 
- changes, err := executor.AdvanceTimeTo( - v.txExecutorBackend, - onDecisionState, - nextChainTime, - ) - if err != nil { + // Advance the time to [nextChainTime]. + if _, err := executor.AdvanceTimeTo(v.txExecutorBackend, onDecisionState, nextChainTime); err != nil { return err } - onDecisionState.SetTimestamp(nextChainTime) - changes.Apply(onDecisionState) - inputs, atomicRequests, onAcceptFunc, err := v.processStandardTxs(b.Transactions, onDecisionState, b.Parent()) if err != nil { return err @@ -126,12 +118,11 @@ func (v *verifier) BanffStandardBlock(b *block.BanffStandardBlock) error { return err } - // Apply the changes, if any, from advancing the chain time. - nextChainTime := b.Timestamp() - changes, err := executor.AdvanceTimeTo( + // Advance the time to [b.Timestamp()]. + changed, err := executor.AdvanceTimeTo( v.txExecutorBackend, onAcceptState, - nextChainTime, + b.Timestamp(), ) if err != nil { return err @@ -139,13 +130,10 @@ func (v *verifier) BanffStandardBlock(b *block.BanffStandardBlock) error { // If this block doesn't perform any changes, then it should never have been // issued. - if changes.Len() == 0 && len(b.Transactions) == 0 { + if !changed && len(b.Transactions) == 0 { return errBanffStandardBlockWithoutChanges } - onAcceptState.SetTimestamp(nextChainTime) - changes.Apply(onAcceptState) - return v.standardBlock(&b.ApricotStandardBlock, onAcceptState) } @@ -410,10 +398,9 @@ func (v *verifier) proposalBlock( blkID := b.ID() v.blkIDToState[blkID] = &blockState{ proposalBlockState: proposalBlockState{ - onDecisionState: onDecisionState, - onCommitState: onCommitState, - onAbortState: onAbortState, - initiallyPreferCommit: txExecutor.PrefersCommit, + onDecisionState: onDecisionState, + onCommitState: onCommitState, + onAbortState: onAbortState, }, statelessBlock: b, diff --git a/vms/platformvm/camino_helpers_test.go b/vms/platformvm/camino_helpers_test.go index 4cbd831192b7..e8e7d877d86c 100644 --- a/vms/platformvm/camino_helpers_test.go +++ b/vms/platformvm/camino_helpers_test.go @@ -168,8 +168,13 @@ func newCaminoVM(t *testing.T, genesisConfig api.Camino, genesisUTXOs []api.UTXO require.NoError(blk.Accept(context.Background())) require.NoError(vm.SetPreference(context.Background(), vm.manager.LastAccepted())) + t.Cleanup(func() { + vm.ctx.Lock.Lock() + defer vm.ctx.Lock.Unlock() + require.NoError(vm.Shutdown(context.Background())) + }) + return vm - // return vm, baseDBManager.Current().Database, msm } func defaultCaminoConfig(t *testing.T) config.Config { @@ -303,15 +308,6 @@ func generateKeyAndOwner(t *testing.T) (*secp256k1.PrivateKey, ids.ShortID, secp } } -func stopVM(t *testing.T, vm *VM, doLock bool) { - t.Helper() - if doLock { - vm.ctx.Lock.Lock() - } - require.NoError(t, vm.Shutdown(context.TODO())) - vm.ctx.Lock.Unlock() -} - func generateTestUTXO(txID ids.ID, assetID ids.ID, amount uint64, outputOwners secp256k1fx.OutputOwners, depositTxID, bondTxID ids.ID) *avax.UTXO { var out avax.TransferableOut = &secp256k1fx.TransferOutput{ Amt: amount, diff --git a/vms/platformvm/camino_service.go b/vms/platformvm/camino_service.go index 9a829ce5af05..9bc920fd4819 100644 --- a/vms/platformvm/camino_service.go +++ b/vms/platformvm/camino_service.go @@ -1062,8 +1062,21 @@ type GetValidatorsAtReply2 struct { Validators map[ids.NodeID]ConsortiumMemberValidator `json:"validators"` } +type GetValidatorsAtResponseWrapper struct { + LockModeBondDeposit bool + avax GetValidatorsAtReply + camino GetValidatorsAtReply2 +} + +func (response GetValidatorsAtResponseWrapper) 
MarshalJSON() ([]byte, error) { + if !response.LockModeBondDeposit { + return response.avax.MarshalJSON() + } + return stdjson.Marshal(response.camino) +} + // Overrides avax service GetValidatorsAt -func (s *CaminoService) GetValidatorsAt(r *http.Request, args *GetValidatorsAtArgs, reply *GetValidatorsAtReply2) error { +func (s *CaminoService) GetValidatorsAt(r *http.Request, args *GetValidatorsAtArgs, reply *GetValidatorsAtResponseWrapper) error { height := uint64(args.Height) s.vm.ctx.Log.Debug("API called", zap.String("service", "platform"), @@ -1073,13 +1086,23 @@ func (s *CaminoService) GetValidatorsAt(r *http.Request, args *GetValidatorsAtAr ) s.vm.ctx.Lock.Lock() - defer s.vm.ctx.Lock.Unlock() + caminoConfig, err := s.vm.state.CaminoConfig() + s.vm.ctx.Lock.Unlock() + if err != nil { + return err + } + reply.LockModeBondDeposit = caminoConfig.LockModeBondDeposit + if !caminoConfig.LockModeBondDeposit { + return s.Service.GetValidatorsAt(r, args, &reply.avax) + } + s.vm.ctx.Lock.Lock() + defer s.vm.ctx.Lock.Unlock() vdrs, err := s.vm.GetValidatorSet(r.Context(), height, args.SubnetID) if err != nil { return fmt.Errorf("failed to get validator set: %w", err) } - reply.Validators = make(map[ids.NodeID]ConsortiumMemberValidator, len(vdrs)) + reply.camino.Validators = make(map[ids.NodeID]ConsortiumMemberValidator, len(vdrs)) for _, vdr := range vdrs { cMemberAddr, err := s.vm.state.GetShortIDLink(ids.ShortID(vdr.NodeID), state.ShortLinkKeyRegisterNode) if err != nil { @@ -1091,7 +1114,7 @@ func (s *CaminoService) GetValidatorsAt(r *http.Request, args *GetValidatorsAtAr return fmt.Errorf("failed to format consortium member address: %w", err) } - reply.Validators[vdr.NodeID] = ConsortiumMemberValidator{ + reply.camino.Validators[vdr.NodeID] = ConsortiumMemberValidator{ ValidatorWeight: json.Uint64(vdr.Weight), ConsortiumMemberAddress: addrStr, } diff --git a/vms/platformvm/camino_service_test.go b/vms/platformvm/camino_service_test.go index 0f82f6c04625..32e4563c2a61 100644 --- a/vms/platformvm/camino_service_test.go +++ b/vms/platformvm/camino_service_test.go @@ -101,7 +101,6 @@ func TestGetCaminoBalance(t *testing.T) { for name, tt := range tests { t.Run(name, func(t *testing.T) { service := defaultCaminoService(t, tt.camino, tt.genesisUTXOs) - defer stopVM(t, service.vm, true) request := GetBalanceRequest{ Addresses: []string{ @@ -241,7 +240,6 @@ func TestCaminoService_GetAllDepositOffers(t *testing.T) { for name, tt := range tests { t.Run(name, func(t *testing.T) { s := defaultCaminoService(t, api.Camino{}, []api.UTXO{}) - defer stopVM(t, s.vm, true) tt.prepare(s) @@ -285,7 +283,7 @@ func TestGetKeystoreKeys(t *testing.T) { s, _ := defaultService(t) defaultAddress(t, s) // Insert testAddress into keystore s.vm.ctx.Lock.Lock() - defer stopVM(t, s.vm, false) + defer s.vm.ctx.Lock.Unlock() keys, err := s.getKeystoreKeys(&userPass, &tt.from) //nolint:gosec require.ErrorIs(t, err, tt.expectedError) @@ -303,7 +301,6 @@ func TestGetKeystoreKeys(t *testing.T) { func TestGetFakeKeys(t *testing.T) { s, _ := defaultService(t) - defer stopVM(t, s.vm, true) _, _, testAddressBytes, _ := address.Parse(testAddress) testAddressID, _ := ids.ToShortID(testAddressBytes) @@ -360,7 +357,6 @@ func TestSpend(t *testing.T) { Message: "", }}, ) - defer stopVM(t, service.vm, true) spendArgs := SpendArgs{ JSONFromAddrs: json_api.JSONFromAddrs{ diff --git a/vms/platformvm/camino_vm_test.go b/vms/platformvm/camino_vm_test.go index b3626b64aa98..5f43fa009f67 100644 --- a/vms/platformvm/camino_vm_test.go +++ 
b/vms/platformvm/camino_vm_test.go @@ -60,7 +60,7 @@ func TestRemoveDeferredValidator(t *testing.T) { vm := newCaminoVM(t, caminoGenesisConf, genesisUTXOs) vm.ctx.Lock.Lock() - defer stopVM(t, vm, false) + defer vm.ctx.Lock.Unlock() // Set consortium member // add admin proposer role to root admin @@ -152,11 +152,11 @@ func TestRemoveDeferredValidator(t *testing.T) { options, err := blk.(snowman.OracleBlock).Options(context.Background()) require.NoError(err) - commit := options[1].(*blockexecutor.Block) + commit := options[0].(*blockexecutor.Block) _, ok := commit.Block.(*block.BanffCommitBlock) require.True(ok) - abort := options[0].(*blockexecutor.Block) + abort := options[1].(*blockexecutor.Block) _, ok = abort.Block.(*block.BanffAbortBlock) require.True(ok) @@ -217,7 +217,7 @@ func TestRemoveReactivatedValidator(t *testing.T) { vm := newCaminoVM(t, caminoGenesisConf, genesisUTXOs) vm.ctx.Lock.Lock() - defer stopVM(t, vm, false) + defer vm.ctx.Lock.Unlock() // Set consortium member // add admin proposer role to root admin @@ -322,11 +322,11 @@ func TestRemoveReactivatedValidator(t *testing.T) { options, err := blk.(snowman.OracleBlock).Options(context.Background()) require.NoError(err) - commit := options[1].(*blockexecutor.Block) + commit := options[0].(*blockexecutor.Block) _, ok := commit.Block.(*block.BanffCommitBlock) require.True(ok) - abort := options[0].(*blockexecutor.Block) + abort := options[1].(*blockexecutor.Block) _, ok = abort.Block.(*block.BanffAbortBlock) require.True(ok) @@ -388,7 +388,7 @@ func TestDepositsAutoUnlock(t *testing.T) { Address: depositOwnerAddrBech32, }}) vm.ctx.Lock.Lock() - defer stopVM(t, vm, false) + defer vm.ctx.Lock.Unlock() // Add deposit depositTx, err := vm.txBuilder.NewDepositTx( @@ -527,7 +527,7 @@ func TestProposals(t *testing.T) { }, }) vm.ctx.Lock.Lock() - defer stopVM(t, vm, false) + defer vm.ctx.Lock.Unlock() checkBalance(t, vm.state, proposerAddr, balance, // total 0, 0, 0, balance, // unlocked @@ -669,7 +669,7 @@ func TestAdminProposals(t *testing.T) { }, }) vm.ctx.Lock.Lock() - defer stopVM(t, vm, false) + defer vm.ctx.Lock.Unlock() checkBalance(t, vm.state, proposerAddr, balance, // total 0, 0, 0, balance, // unlocked @@ -860,7 +860,7 @@ func TestExcludeMemberProposals(t *testing.T) { InitialAdmin: rootAdminKey.Address(), }, []api.UTXO{{Amount: json.Uint64(initialBalance - defaultCaminoValidatorWeight), Address: fundsKeyAddrStr}}) vm.ctx.Lock.Lock() - defer stopVM(t, vm, false) + defer vm.ctx.Lock.Unlock() height, err := vm.GetCurrentHeight(context.Background()) require.NoError(err) require.Equal(expectedHeight, height) diff --git a/vms/platformvm/metrics/metrics.go b/vms/platformvm/metrics/metrics.go index 5357721d9bd6..98b611a017ed 100644 --- a/vms/platformvm/metrics/metrics.go +++ b/vms/platformvm/metrics/metrics.go @@ -19,10 +19,6 @@ var _ Metrics = (*metrics)(nil) type Metrics interface { metric.APIInterceptor - // Mark that an option vote that we initially preferred was accepted. - MarkOptionVoteWon() - // Mark that an option vote that we initially preferred was rejected. - MarkOptionVoteLost() // Mark that the given block was accepted. MarkAccepted(block.Block) error // Mark that a validator set was created. 
@@ -75,17 +71,6 @@ func New( Help: "Amount (in nAVAX) of AVAX staked on the Primary Network", }), - numVotesWon: prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Name: "votes_won", - Help: "Total number of votes this node has won", - }), - numVotesLost: prometheus.NewCounter(prometheus.CounterOpts{ - Namespace: namespace, - Name: "votes_lost", - Help: "Total number of votes this node has lost", - }), - validatorSetsCached: prometheus.NewCounter(prometheus.CounterOpts{ Namespace: namespace, Name: "validator_sets_cached", @@ -118,9 +103,6 @@ func New( registerer.Register(m.localStake), registerer.Register(m.totalStake), - registerer.Register(m.numVotesWon), - registerer.Register(m.numVotesLost), - registerer.Register(m.validatorSetsCreated), registerer.Register(m.validatorSetsCached), registerer.Register(m.validatorSetsHeightDiff), @@ -140,22 +122,12 @@ type metrics struct { localStake prometheus.Gauge totalStake prometheus.Gauge - numVotesWon, numVotesLost prometheus.Counter - validatorSetsCached prometheus.Counter validatorSetsCreated prometheus.Counter validatorSetsHeightDiff prometheus.Gauge validatorSetsDuration prometheus.Gauge } -func (m *metrics) MarkOptionVoteWon() { - m.numVotesWon.Inc() -} - -func (m *metrics) MarkOptionVoteLost() { - m.numVotesLost.Inc() -} - func (m *metrics) MarkAccepted(b block.Block) error { return b.Visit(m.blockMetrics) } diff --git a/vms/platformvm/network/gossip.go b/vms/platformvm/network/gossip.go index 877d6336e5be..0cca1ab4e3f6 100644 --- a/vms/platformvm/network/gossip.go +++ b/vms/platformvm/network/gossip.go @@ -10,6 +10,8 @@ import ( "sync" "time" + "github.com/prometheus/client_golang/prometheus" + "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/network/p2p" "github.com/ava-labs/avalanchego/network/p2p/gossip" @@ -65,13 +67,14 @@ func (txMarshaller) UnmarshalGossip(bytes []byte) (*txs.Tx, error) { func newGossipMempool( mempool mempool.Mempool, + registerer prometheus.Registerer, log logging.Logger, txVerifier TxVerifier, minTargetElements int, targetFalsePositiveProbability, resetFalsePositiveProbability float64, ) (*gossipMempool, error) { - bloom, err := gossip.NewBloomFilter(minTargetElements, targetFalsePositiveProbability, resetFalsePositiveProbability) + bloom, err := gossip.NewBloomFilter(registerer, "mempool_bloom_filter", minTargetElements, targetFalsePositiveProbability, resetFalsePositiveProbability) return &gossipMempool{ Mempool: mempool, log: log, diff --git a/vms/platformvm/network/gossip_test.go b/vms/platformvm/network/gossip_test.go index dcfca4ed2eed..a393515716b6 100644 --- a/vms/platformvm/network/gossip_test.go +++ b/vms/platformvm/network/gossip_test.go @@ -7,6 +7,8 @@ import ( "errors" "testing" + "github.com/prometheus/client_golang/prometheus" + "github.com/stretchr/testify/require" "go.uber.org/mock/gomock" @@ -38,6 +40,7 @@ func TestGossipMempoolAddVerificationError(t *testing.T) { gossipMempool, err := newGossipMempool( mempool, + prometheus.NewRegistry(), logging.NoLog{}, txVerifier, testConfig.ExpectedBloomFilterElements, @@ -71,6 +74,7 @@ func TestGossipMempoolAddError(t *testing.T) { gossipMempool, err := newGossipMempool( mempool, + prometheus.NewRegistry(), logging.NoLog{}, txVerifier, testConfig.ExpectedBloomFilterElements, @@ -101,6 +105,7 @@ func TestMempoolDuplicate(t *testing.T) { gossipMempool, err := newGossipMempool( testMempool, + prometheus.NewRegistry(), logging.NoLog{}, txVerifier, testConfig.ExpectedBloomFilterElements, @@ -135,6 +140,7 @@ func 
TestGossipAddBloomFilter(t *testing.T) { gossipMempool, err := newGossipMempool( mempool, + prometheus.NewRegistry(), logging.NoLog{}, txVerifier, testConfig.ExpectedBloomFilterElements, diff --git a/vms/platformvm/network/network.go b/vms/platformvm/network/network.go index 6844654a41c1..39f6ee1dea0e 100644 --- a/vms/platformvm/network/network.go +++ b/vms/platformvm/network/network.go @@ -24,7 +24,7 @@ import ( "github.com/ava-labs/avalanchego/vms/platformvm/txs/mempool" ) -const txGossipHandlerID = 0 +const TxGossipHandlerID = 0 type Network interface { common.AppHandler @@ -80,7 +80,7 @@ func New( config.MaxValidatorSetStaleness, ) txGossipClient := p2pNetwork.NewClient( - txGossipHandlerID, + TxGossipHandlerID, p2p.WithValidatorSampling(validators), ) txGossipMetrics, err := gossip.NewMetrics(registerer, "tx") @@ -97,6 +97,7 @@ func New( gossipMempool, err := newGossipMempool( mempool, + registerer, log, txVerifier, config.ExpectedBloomFilterElements, @@ -153,7 +154,7 @@ func New( appRequestHandler: validatorHandler, } - if err := p2pNetwork.AddHandler(txGossipHandlerID, txGossipHandler); err != nil { + if err := p2pNetwork.AddHandler(TxGossipHandlerID, txGossipHandler); err != nil { return nil, err } diff --git a/vms/platformvm/service_test.go b/vms/platformvm/service_test.go index 7a6cb3f1b9c6..520834297798 100644 --- a/vms/platformvm/service_test.go +++ b/vms/platformvm/service_test.go @@ -144,11 +144,6 @@ func TestExportKey(t *testing.T) { service, _ := defaultService(t) defaultAddress(t, service) - defer func() { - service.vm.ctx.Lock.Lock() - require.NoError(service.vm.Shutdown(context.Background())) - service.vm.ctx.Lock.Unlock() - }() reply := ExportKeyReply{} require.NoError(service.ExportKey(nil, &args, &reply)) @@ -163,11 +158,6 @@ func TestImportKey(t *testing.T) { require.NoError(stdjson.Unmarshal([]byte(jsonString), &args)) service, _ := defaultService(t) - defer func() { - service.vm.ctx.Lock.Lock() - require.NoError(service.vm.Shutdown(context.Background())) - service.vm.ctx.Lock.Unlock() - }() reply := api.JSONAddress{} require.NoError(service.ImportKey(nil, &args, &reply)) @@ -180,11 +170,6 @@ func TestGetTxStatus(t *testing.T) { service, mutableSharedMemory := defaultService(t) defaultAddress(t, service) service.vm.ctx.Lock.Lock() - defer func() { - service.vm.ctx.Lock.Lock() - require.NoError(service.vm.Shutdown(context.Background())) - service.vm.ctx.Lock.Unlock() - }() recipientKey, err := secp256k1.NewPrivateKey() require.NoError(err) @@ -385,10 +370,6 @@ func TestGetTx(t *testing.T) { require.NoError(err) require.Equal(expectedTxJSON, []byte(response.Tx)) } - - service.vm.ctx.Lock.Lock() - require.NoError(service.vm.Shutdown(context.Background())) - service.vm.ctx.Lock.Unlock() }) } } @@ -398,11 +379,6 @@ func TestGetBalance(t *testing.T) { require := require.New(t) service, _ := defaultService(t) defaultAddress(t, service) - defer func() { - service.vm.ctx.Lock.Lock() - require.NoError(service.vm.Shutdown(context.Background())) - service.vm.ctx.Lock.Unlock() - }() // Ensure GetStake is correct for each of the genesis validators genesis, _ := defaultGenesis(t, service.vm.ctx.AVAXAssetID) @@ -432,11 +408,6 @@ func TestGetStake(t *testing.T) { require := require.New(t) service, _ := defaultService(t) defaultAddress(t, service) - defer func() { - service.vm.ctx.Lock.Lock() - require.NoError(service.vm.Shutdown(context.Background())) - service.vm.ctx.Lock.Unlock() - }() // Ensure GetStake is correct for each of the genesis validators genesis, _ := defaultGenesis(t, 
service.vm.ctx.AVAXAssetID) @@ -608,11 +579,6 @@ func TestGetCurrentValidators(t *testing.T) { require := require.New(t) service, _ := defaultService(t) defaultAddress(t, service) - defer func() { - service.vm.ctx.Lock.Lock() - require.NoError(service.vm.Shutdown(context.Background())) - service.vm.ctx.Lock.Unlock() - }() genesis, _ := defaultGenesis(t, service.vm.ctx.AVAXAssetID) @@ -738,11 +704,6 @@ func TestGetCurrentValidators(t *testing.T) { func TestGetTimestamp(t *testing.T) { require := require.New(t) service, _ := defaultService(t) - defer func() { - service.vm.ctx.Lock.Lock() - require.NoError(service.vm.Shutdown(context.Background())) - service.vm.ctx.Lock.Unlock() - }() reply := GetTimestampReply{} require.NoError(service.GetTimestamp(nil, nil, &reply)) @@ -780,11 +741,6 @@ func TestGetBlock(t *testing.T) { require := require.New(t) service, _ := defaultService(t) service.vm.ctx.Lock.Lock() - defer func() { - service.vm.ctx.Lock.Lock() - require.NoError(service.vm.Shutdown(context.Background())) - service.vm.ctx.Lock.Unlock() - }() service.vm.Config.CreateAssetTxFee = 100 * defaultTxFee diff --git a/vms/platformvm/state/camino_test.go b/vms/platformvm/state/camino_test.go index b74c4bf1bd46..58c4a3c036b4 100644 --- a/vms/platformvm/state/camino_test.go +++ b/vms/platformvm/state/camino_test.go @@ -165,7 +165,7 @@ func TestSyncGenesis(t *testing.T) { require := require.New(t) s := newInitializedState(require) db := memdb.New() - validatorsDB := prefixdb.New(validatorsPrefix, db) + validatorsDB := prefixdb.New(ValidatorsPrefix, db) var ( id = ids.GenerateTestID() diff --git a/vms/platformvm/state/state.go b/vms/platformvm/state/state.go index 855678bb4aae..1d5ef89e7084 100644 --- a/vms/platformvm/state/state.go +++ b/vms/platformvm/state/state.go @@ -71,35 +71,35 @@ var ( errValidatorSetAlreadyPopulated = errors.New("validator set already populated") errIsNotSubnet = errors.New("is not a subnet") - blockIDPrefix = []byte("blockID") - blockPrefix = []byte("block") - validatorsPrefix = []byte("validators") - currentPrefix = []byte("current") - pendingPrefix = []byte("pending") - validatorPrefix = []byte("validator") - delegatorPrefix = []byte("delegator") - subnetValidatorPrefix = []byte("subnetValidator") - subnetDelegatorPrefix = []byte("subnetDelegator") - nestedValidatorWeightDiffsPrefix = []byte("validatorDiffs") - nestedValidatorPublicKeyDiffsPrefix = []byte("publicKeyDiffs") - flatValidatorWeightDiffsPrefix = []byte("flatValidatorDiffs") - flatValidatorPublicKeyDiffsPrefix = []byte("flatPublicKeyDiffs") - txPrefix = []byte("tx") - rewardUTXOsPrefix = []byte("rewardUTXOs") - utxoPrefix = []byte("utxo") - subnetPrefix = []byte("subnet") - subnetOwnerPrefix = []byte("subnetOwner") - transformedSubnetPrefix = []byte("transformedSubnet") - supplyPrefix = []byte("supply") - chainPrefix = []byte("chain") - singletonPrefix = []byte("singleton") - - timestampKey = []byte("timestamp") - currentSupplyKey = []byte("current supply") - lastAcceptedKey = []byte("last accepted") - heightsIndexedKey = []byte("heights indexed") - initializedKey = []byte("initialized") - prunedKey = []byte("pruned") + BlockIDPrefix = []byte("blockID") + BlockPrefix = []byte("block") + ValidatorsPrefix = []byte("validators") + CurrentPrefix = []byte("current") + PendingPrefix = []byte("pending") + ValidatorPrefix = []byte("validator") + DelegatorPrefix = []byte("delegator") + SubnetValidatorPrefix = []byte("subnetValidator") + SubnetDelegatorPrefix = []byte("subnetDelegator") + NestedValidatorWeightDiffsPrefix 
= []byte("validatorDiffs") + NestedValidatorPublicKeyDiffsPrefix = []byte("publicKeyDiffs") + FlatValidatorWeightDiffsPrefix = []byte("flatValidatorDiffs") + FlatValidatorPublicKeyDiffsPrefix = []byte("flatPublicKeyDiffs") + TxPrefix = []byte("tx") + RewardUTXOsPrefix = []byte("rewardUTXOs") + UTXOPrefix = []byte("utxo") + SubnetPrefix = []byte("subnet") + SubnetOwnerPrefix = []byte("subnetOwner") + TransformedSubnetPrefix = []byte("transformedSubnet") + SupplyPrefix = []byte("supply") + ChainPrefix = []byte("chain") + SingletonPrefix = []byte("singleton") + + TimestampKey = []byte("timestamp") + CurrentSupplyKey = []byte("current supply") + LastAcceptedKey = []byte("last accepted") + HeightsIndexedKey = []byte("heights indexed") + InitializedKey = []byte("initialized") + PrunedKey = []byte("pruned") ) // Chain collects all methods to manage the state of the chain for block @@ -504,7 +504,7 @@ func New( // If the pruned key is on disk, we must delete it to ensure our disk // can't get into a partially pruned state if the node restarts mid-way // through pruning. - if err := s.singletonDB.Delete(prunedKey); err != nil { + if err := s.singletonDB.Delete(PrunedKey); err != nil { return nil, fmt.Errorf("failed to remove prunedKey from singletonDB: %w", err) } @@ -545,24 +545,24 @@ func newState( baseDB := versiondb.New(db) - validatorsDB := prefixdb.New(validatorsPrefix, baseDB) + validatorsDB := prefixdb.New(ValidatorsPrefix, baseDB) - currentValidatorsDB := prefixdb.New(currentPrefix, validatorsDB) - currentValidatorBaseDB := prefixdb.New(validatorPrefix, currentValidatorsDB) - currentDelegatorBaseDB := prefixdb.New(delegatorPrefix, currentValidatorsDB) - currentSubnetValidatorBaseDB := prefixdb.New(subnetValidatorPrefix, currentValidatorsDB) - currentSubnetDelegatorBaseDB := prefixdb.New(subnetDelegatorPrefix, currentValidatorsDB) + currentValidatorsDB := prefixdb.New(CurrentPrefix, validatorsDB) + currentValidatorBaseDB := prefixdb.New(ValidatorPrefix, currentValidatorsDB) + currentDelegatorBaseDB := prefixdb.New(DelegatorPrefix, currentValidatorsDB) + currentSubnetValidatorBaseDB := prefixdb.New(SubnetValidatorPrefix, currentValidatorsDB) + currentSubnetDelegatorBaseDB := prefixdb.New(SubnetDelegatorPrefix, currentValidatorsDB) - pendingValidatorsDB := prefixdb.New(pendingPrefix, validatorsDB) - pendingValidatorBaseDB := prefixdb.New(validatorPrefix, pendingValidatorsDB) - pendingDelegatorBaseDB := prefixdb.New(delegatorPrefix, pendingValidatorsDB) - pendingSubnetValidatorBaseDB := prefixdb.New(subnetValidatorPrefix, pendingValidatorsDB) - pendingSubnetDelegatorBaseDB := prefixdb.New(subnetDelegatorPrefix, pendingValidatorsDB) + pendingValidatorsDB := prefixdb.New(PendingPrefix, validatorsDB) + pendingValidatorBaseDB := prefixdb.New(ValidatorPrefix, pendingValidatorsDB) + pendingDelegatorBaseDB := prefixdb.New(DelegatorPrefix, pendingValidatorsDB) + pendingSubnetValidatorBaseDB := prefixdb.New(SubnetValidatorPrefix, pendingValidatorsDB) + pendingSubnetDelegatorBaseDB := prefixdb.New(SubnetDelegatorPrefix, pendingValidatorsDB) - nestedValidatorWeightDiffsDB := prefixdb.New(nestedValidatorWeightDiffsPrefix, validatorsDB) - nestedValidatorPublicKeyDiffsDB := prefixdb.New(nestedValidatorPublicKeyDiffsPrefix, validatorsDB) - flatValidatorWeightDiffsDB := prefixdb.New(flatValidatorWeightDiffsPrefix, validatorsDB) - flatValidatorPublicKeyDiffsDB := prefixdb.New(flatValidatorPublicKeyDiffsPrefix, validatorsDB) + nestedValidatorWeightDiffsDB := prefixdb.New(NestedValidatorWeightDiffsPrefix, 
validatorsDB) + nestedValidatorPublicKeyDiffsDB := prefixdb.New(NestedValidatorPublicKeyDiffsPrefix, validatorsDB) + flatValidatorWeightDiffsDB := prefixdb.New(FlatValidatorWeightDiffsPrefix, validatorsDB) + flatValidatorPublicKeyDiffsDB := prefixdb.New(FlatValidatorPublicKeyDiffsPrefix, validatorsDB) txCache, err := metercacher.New( "tx_cache", @@ -573,7 +573,7 @@ func newState( return nil, err } - rewardUTXODB := prefixdb.New(rewardUTXOsPrefix, baseDB) + rewardUTXODB := prefixdb.New(RewardUTXOsPrefix, baseDB) rewardUTXOsCache, err := metercacher.New[ids.ID, []*avax.UTXO]( "reward_utxos_cache", metricsReg, @@ -583,15 +583,15 @@ func newState( return nil, err } - utxoDB := prefixdb.New(utxoPrefix, baseDB) + utxoDB := prefixdb.New(UTXOPrefix, baseDB) utxoState, err := avax.NewMeteredUTXOState(utxoDB, txs.GenesisCodec, metricsReg, execCfg.ChecksumsEnabled) if err != nil { return nil, err } - subnetBaseDB := prefixdb.New(subnetPrefix, baseDB) + subnetBaseDB := prefixdb.New(SubnetPrefix, baseDB) - subnetOwnerDB := prefixdb.New(subnetOwnerPrefix, baseDB) + subnetOwnerDB := prefixdb.New(SubnetOwnerPrefix, baseDB) subnetOwnerCache, err := metercacher.New[ids.ID, fxOwnerAndSize]( "subnet_owner_cache", metricsReg, @@ -656,11 +656,11 @@ func newState( addedBlockIDs: make(map[uint64]ids.ID), blockIDCache: blockIDCache, - blockIDDB: prefixdb.New(blockIDPrefix, baseDB), + blockIDDB: prefixdb.New(BlockIDPrefix, baseDB), addedBlocks: make(map[ids.ID]block.Block), blockCache: blockCache, - blockDB: prefixdb.New(blockPrefix, baseDB), + blockDB: prefixdb.New(BlockPrefix, baseDB), currentStakers: newBaseStakers(), pendingStakers: newBaseStakers(), @@ -692,7 +692,7 @@ func newState( flatValidatorPublicKeyDiffsDB: flatValidatorPublicKeyDiffsDB, addedTxs: make(map[ids.ID]*txAndStatus), - txDB: prefixdb.New(txPrefix, baseDB), + txDB: prefixdb.New(TxPrefix, baseDB), txCache: txCache, addedRewardUTXOs: make(map[ids.ID][]*avax.UTXO), @@ -712,18 +712,18 @@ func newState( transformedSubnets: make(map[ids.ID]*txs.Tx), transformedSubnetCache: transformedSubnetCache, - transformedSubnetDB: prefixdb.New(transformedSubnetPrefix, baseDB), + transformedSubnetDB: prefixdb.New(TransformedSubnetPrefix, baseDB), modifiedSupplies: make(map[ids.ID]uint64), supplyCache: supplyCache, - supplyDB: prefixdb.New(supplyPrefix, baseDB), + supplyDB: prefixdb.New(SupplyPrefix, baseDB), addedChains: make(map[ids.ID][]*txs.Tx), - chainDB: prefixdb.New(chainPrefix, baseDB), + chainDB: prefixdb.New(ChainPrefix, baseDB), chainCache: chainCache, chainDBCache: chainDBCache, - singletonDB: prefixdb.New(singletonPrefix, baseDB), + singletonDB: prefixdb.New(SingletonPrefix, baseDB), }, nil } @@ -784,16 +784,16 @@ func (s *state) GetPendingStakerIterator() (StakerIterator, error) { } func (s *state) shouldInit() (bool, error) { - has, err := s.singletonDB.Has(initializedKey) + has, err := s.singletonDB.Has(InitializedKey) return !has, err } func (s *state) doneInit() error { - return s.singletonDB.Put(initializedKey, nil) + return s.singletonDB.Put(InitializedKey, nil) } func (s *state) ShouldPrune() (bool, error) { - has, err := s.singletonDB.Has(prunedKey) + has, err := s.singletonDB.Has(PrunedKey) if err != nil { return true, err } @@ -820,7 +820,7 @@ func (s *state) ShouldPrune() (bool, error) { } func (s *state) donePrune() error { - return s.singletonDB.Put(prunedKey, nil) + return s.singletonDB.Put(PrunedKey, nil) } func (s *state) GetSubnets() ([]*txs.Tx, error) { @@ -1178,6 +1178,18 @@ func (s *state) ApplyValidatorWeightDiffs( if err != nil 
{ return err } + + if parsedHeight > prevHeight { + s.ctx.Log.Error("unexpected parsed height", + zap.Stringer("subnetID", subnetID), + zap.Uint64("parsedHeight", parsedHeight), + zap.Stringer("nodeID", nodeID), + zap.Uint64("prevHeight", prevHeight), + zap.Uint64("startHeight", startHeight), + zap.Uint64("endHeight", endHeight), + ) + } + // If the parsedHeight is less than our target endHeight, then we have // fully processed the diffs from startHeight through endHeight. if parsedHeight < endHeight { @@ -1415,21 +1427,21 @@ func (s *state) load() error { } func (s *state) loadMetadata() error { - timestamp, err := database.GetTimestamp(s.singletonDB, timestampKey) + timestamp, err := database.GetTimestamp(s.singletonDB, TimestampKey) if err != nil { return err } s.persistedTimestamp = timestamp s.SetTimestamp(timestamp) - currentSupply, err := database.GetUInt64(s.singletonDB, currentSupplyKey) + currentSupply, err := database.GetUInt64(s.singletonDB, CurrentSupplyKey) if err != nil { return err } s.persistedCurrentSupply = currentSupply s.SetCurrentSupply(constants.PrimaryNetworkID, currentSupply) - lastAccepted, err := database.GetID(s.singletonDB, lastAcceptedKey) + lastAccepted, err := database.GetID(s.singletonDB, LastAcceptedKey) if err != nil { return err } @@ -1438,7 +1450,7 @@ func (s *state) loadMetadata() error { // Lookup the most recently indexed range on disk. If we haven't started // indexing the weights, then we keep the indexed heights as nil. - indexedHeightsBytes, err := s.singletonDB.Get(heightsIndexedKey) + indexedHeightsBytes, err := s.singletonDB.Get(HeightsIndexedKey) if err == database.ErrNotFound { return nil } @@ -2437,19 +2449,19 @@ func (s *state) writeChains() error { func (s *state) writeMetadata() error { if !s.persistedTimestamp.Equal(s.timestamp) { - if err := database.PutTimestamp(s.singletonDB, timestampKey, s.timestamp); err != nil { + if err := database.PutTimestamp(s.singletonDB, TimestampKey, s.timestamp); err != nil { return fmt.Errorf("failed to write timestamp: %w", err) } s.persistedTimestamp = s.timestamp } if s.persistedCurrentSupply != s.currentSupply { - if err := database.PutUInt64(s.singletonDB, currentSupplyKey, s.currentSupply); err != nil { + if err := database.PutUInt64(s.singletonDB, CurrentSupplyKey, s.currentSupply); err != nil { return fmt.Errorf("failed to write current supply: %w", err) } s.persistedCurrentSupply = s.currentSupply } if s.persistedLastAccepted != s.lastAccepted { - if err := database.PutID(s.singletonDB, lastAcceptedKey, s.lastAccepted); err != nil { + if err := database.PutID(s.singletonDB, LastAcceptedKey, s.lastAccepted); err != nil { return fmt.Errorf("failed to write last accepted: %w", err) } s.persistedLastAccepted = s.lastAccepted @@ -2460,7 +2472,7 @@ func (s *state) writeMetadata() error { if err != nil { return err } - if err := s.singletonDB.Put(heightsIndexedKey, indexedHeightsBytes); err != nil { + if err := s.singletonDB.Put(HeightsIndexedKey, indexedHeightsBytes); err != nil { return fmt.Errorf("failed to write indexed range: %w", err) } } diff --git a/vms/platformvm/txs/builder/builder.go b/vms/platformvm/txs/builder/builder.go index 3427bf2b1015..665342dab5b7 100644 --- a/vms/platformvm/txs/builder/builder.go +++ b/vms/platformvm/txs/builder/builder.go @@ -6,7 +6,6 @@ package builder import ( "errors" "fmt" - "time" "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow" @@ -183,10 +182,6 @@ type ProposalTxBuilder interface { changeAddr ids.ShortID, ) (*txs.Tx, error) - // 
NewAdvanceTimeTx creates a new tx that, if it is accepted and followed by a - // Commit block, will set the chain's timestamp to [timestamp]. - NewAdvanceTimeTx(timestamp time.Time) (*txs.Tx, error) - // RewardStakerTx creates a new transaction that proposes to remove the staker // [validatorID] from the default validator set. NewRewardValidatorTx(txID ids.ID) (*txs.Tx, error) @@ -615,15 +610,6 @@ func (b *builder) NewRemoveSubnetValidatorTx( return tx, tx.SyntacticVerify(b.ctx) } -func (b *builder) NewAdvanceTimeTx(timestamp time.Time) (*txs.Tx, error) { - utx := &txs.AdvanceTimeTx{Time: uint64(timestamp.Unix())} - tx, err := txs.NewSigned(utx, txs.Codec, nil) - if err != nil { - return nil, err - } - return tx, tx.SyntacticVerify(b.ctx) -} - func (b *builder) NewRewardValidatorTx(txID ids.ID) (*txs.Tx, error) { utx := &txs.RewardValidatorTx{TxID: txID} tx, err := txs.NewSigned(utx, txs.Codec, nil) diff --git a/vms/platformvm/txs/executor/advance_time_test.go b/vms/platformvm/txs/executor/advance_time_test.go index bb66c7ef7f82..b7b636ad6dbe 100644 --- a/vms/platformvm/txs/executor/advance_time_test.go +++ b/vms/platformvm/txs/executor/advance_time_test.go @@ -12,6 +12,7 @@ import ( "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow/snowtest" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/vms/platformvm/reward" @@ -20,15 +21,22 @@ import ( "github.com/ava-labs/avalanchego/vms/platformvm/txs" ) +func newAdvanceTimeTx(t testing.TB, timestamp time.Time) (*txs.Tx, error) { + utx := &txs.AdvanceTimeTx{Time: uint64(timestamp.Unix())} + tx, err := txs.NewSigned(utx, txs.Codec, nil) + if err != nil { + return nil, err + } + return tx, tx.SyntacticVerify(snowtest.Context(t, snowtest.PChainID)) +} + // Ensure semantic verification updates the current and pending staker set // for the primary network func TestAdvanceTimeTxUpdatePrimaryNetworkStakers(t *testing.T) { require := require.New(t) env := newEnvironment(t, false /*=postBanff*/, false /*=postCortina*/, false /*=postDurango*/) env.ctx.Lock.Lock() - defer func() { - require.NoError(shutdownEnvironment(env)) - }() + defer env.ctx.Lock.Unlock() dummyHeight := uint64(1) // Case: Timestamp is after next validator start time @@ -45,7 +53,7 @@ func TestAdvanceTimeTxUpdatePrimaryNetworkStakers(t *testing.T) { ) require.NoError(err) - tx, err := env.txBuilder.NewAdvanceTimeTx(pendingValidatorStartTime) + tx, err := newAdvanceTimeTx(t, pendingValidatorStartTime) require.NoError(err) onCommitState, err := state.NewDiff(lastAcceptedID, env) @@ -90,11 +98,8 @@ func TestAdvanceTimeTxUpdatePrimaryNetworkStakers(t *testing.T) { func TestAdvanceTimeTxTimestampTooEarly(t *testing.T) { require := require.New(t) env := newEnvironment(t, false /*=postBanff*/, false /*=postCortina*/, false /*=postDurango*/) - defer func() { - require.NoError(shutdownEnvironment(env)) - }() - tx, err := env.txBuilder.NewAdvanceTimeTx(env.state.GetTimestamp()) + tx, err := newAdvanceTimeTx(t, env.state.GetTimestamp()) require.NoError(err) onCommitState, err := state.NewDiff(lastAcceptedID, env) @@ -118,6 +123,7 @@ func TestAdvanceTimeTxTimestampTooLate(t *testing.T) { require := require.New(t) env := newEnvironment(t, false /*=postBanff*/, false /*=postCortina*/, false /*=postDurango*/) env.ctx.Lock.Lock() + defer env.ctx.Lock.Unlock() // Case: Timestamp is after next validator start time // Add a pending validator @@
-128,7 +134,7 @@ func TestAdvanceTimeTxTimestampTooLate(t *testing.T) { require.NoError(err) { - tx, err := env.txBuilder.NewAdvanceTimeTx(pendingValidatorStartTime.Add(1 * time.Second)) + tx, err := newAdvanceTimeTx(t, pendingValidatorStartTime.Add(1*time.Second)) require.NoError(err) onCommitState, err := state.NewDiff(lastAcceptedID, env) @@ -147,21 +153,17 @@ func TestAdvanceTimeTxTimestampTooLate(t *testing.T) { require.ErrorIs(err, ErrChildBlockAfterStakerChangeTime) } - require.NoError(shutdownEnvironment(env)) - // Case: Timestamp is after next validator end time env = newEnvironment(t, false /*=postBanff*/, false /*=postCortina*/, false /*=postDurango*/) env.ctx.Lock.Lock() - defer func() { - require.NoError(shutdownEnvironment(env)) - }() + defer env.ctx.Lock.Unlock() // fast forward clock to 10 seconds before genesis validators stop validating env.clk.Set(defaultValidateEndTime.Add(-10 * time.Second)) { // Proposes advancing timestamp to 1 second after genesis validators stop validating - tx, err := env.txBuilder.NewAdvanceTimeTx(defaultValidateEndTime.Add(1 * time.Second)) + tx, err := newAdvanceTimeTx(t, defaultValidateEndTime.Add(1*time.Second)) require.NoError(err) onCommitState, err := state.NewDiff(lastAcceptedID, env) @@ -354,9 +356,7 @@ func TestAdvanceTimeTxUpdateStakers(t *testing.T) { require := require.New(t) env := newEnvironment(t, false /*=postBanff*/, false /*=postCortina*/, false /*=postDurango*/) env.ctx.Lock.Lock() - defer func() { - require.NoError(shutdownEnvironment(env)) - }() + defer env.ctx.Lock.Unlock() dummyHeight := uint64(1) @@ -400,7 +400,7 @@ func TestAdvanceTimeTxUpdateStakers(t *testing.T) { for _, newTime := range test.advanceTimeTo { env.clk.Set(newTime) - tx, err := env.txBuilder.NewAdvanceTimeTx(newTime) + tx, err := newAdvanceTimeTx(t, newTime) require.NoError(err) onCommitState, err := state.NewDiff(lastAcceptedID, env) @@ -459,9 +459,7 @@ func TestAdvanceTimeTxRemoveSubnetValidator(t *testing.T) { require := require.New(t) env := newEnvironment(t, false /*=postBanff*/, false /*=postCortina*/, false /*=postDurango*/) env.ctx.Lock.Lock() - defer func() { - require.NoError(shutdownEnvironment(env)) - }() + defer env.ctx.Lock.Unlock() subnetID := testSubnet1.ID() env.config.TrackedSubnets.Add(subnetID) @@ -526,7 +524,7 @@ func TestAdvanceTimeTxRemoveSubnetValidator(t *testing.T) { // Advance time to the first staker's end time. env.clk.Set(subnetVdr1EndTime) - tx, err = env.txBuilder.NewAdvanceTimeTx(subnetVdr1EndTime) + tx, err = newAdvanceTimeTx(t, subnetVdr1EndTime) require.NoError(err) onCommitState, err := state.NewDiff(lastAcceptedID, env) @@ -563,9 +561,7 @@ func TestTrackedSubnet(t *testing.T) { require := require.New(t) env := newEnvironment(t, false /*=postBanff*/, false /*=postCortina*/, false /*=postDurango*/) env.ctx.Lock.Lock() - defer func() { - require.NoError(shutdownEnvironment(env)) - }() + defer env.ctx.Lock.Unlock() dummyHeight := uint64(1) subnetID := testSubnet1.ID() @@ -602,7 +598,7 @@ func TestTrackedSubnet(t *testing.T) { // Advance time to the staker's start time. 
env.clk.Set(subnetVdr1StartTime) - tx, err = env.txBuilder.NewAdvanceTimeTx(subnetVdr1StartTime) + tx, err = newAdvanceTimeTx(t, subnetVdr1StartTime) require.NoError(err) onCommitState, err := state.NewDiff(lastAcceptedID, env) @@ -633,9 +629,7 @@ func TestAdvanceTimeTxDelegatorStakerWeight(t *testing.T) { require := require.New(t) env := newEnvironment(t, false /*=postBanff*/, false /*=postCortina*/, false /*=postDurango*/) env.ctx.Lock.Lock() - defer func() { - require.NoError(shutdownEnvironment(env)) - }() + defer env.ctx.Lock.Unlock() dummyHeight := uint64(1) // Case: Timestamp is after next validator start time @@ -652,7 +646,7 @@ func TestAdvanceTimeTxDelegatorStakerWeight(t *testing.T) { ) require.NoError(err) - tx, err := env.txBuilder.NewAdvanceTimeTx(pendingValidatorStartTime) + tx, err := newAdvanceTimeTx(t, pendingValidatorStartTime) require.NoError(err) onCommitState, err := state.NewDiff(lastAcceptedID, env) @@ -709,7 +703,7 @@ func TestAdvanceTimeTxDelegatorStakerWeight(t *testing.T) { require.NoError(env.state.Commit()) // Advance Time - tx, err = env.txBuilder.NewAdvanceTimeTx(pendingDelegatorStartTime) + tx, err = newAdvanceTimeTx(t, pendingDelegatorStartTime) require.NoError(err) onCommitState, err = state.NewDiff(lastAcceptedID, env) @@ -740,9 +734,7 @@ func TestAdvanceTimeTxDelegatorStakers(t *testing.T) { require := require.New(t) env := newEnvironment(t, false /*=postBanff*/, false /*=postCortina*/, false /*=postDurango*/) env.ctx.Lock.Lock() - defer func() { - require.NoError(shutdownEnvironment(env)) - }() + defer env.ctx.Lock.Unlock() dummyHeight := uint64(1) // Case: Timestamp is after next validator start time @@ -753,7 +745,7 @@ func TestAdvanceTimeTxDelegatorStakers(t *testing.T) { _, err := addPendingValidator(env, pendingValidatorStartTime, pendingValidatorEndTime, nodeID, []*secp256k1.PrivateKey{preFundedKeys[0]}) require.NoError(err) - tx, err := env.txBuilder.NewAdvanceTimeTx(pendingValidatorStartTime) + tx, err := newAdvanceTimeTx(t, pendingValidatorStartTime) require.NoError(err) onCommitState, err := state.NewDiff(lastAcceptedID, env) @@ -805,7 +797,7 @@ func TestAdvanceTimeTxDelegatorStakers(t *testing.T) { require.NoError(env.state.Commit()) // Advance Time - tx, err = env.txBuilder.NewAdvanceTimeTx(pendingDelegatorStartTime) + tx, err = newAdvanceTimeTx(t, pendingDelegatorStartTime) require.NoError(err) onCommitState, err = state.NewDiff(lastAcceptedID, env) @@ -832,44 +824,11 @@ func TestAdvanceTimeTxDelegatorStakers(t *testing.T) { require.Equal(env.config.MinDelegatorStake+env.config.MinValidatorStake, vdrWeight) } -// Test method InitiallyPrefersCommit -func TestAdvanceTimeTxInitiallyPrefersCommit(t *testing.T) { - require := require.New(t) - env := newEnvironment(t, false /*=postBanff*/, false /*=postCortina*/, false /*=postDurango*/) - env.ctx.Lock.Lock() - defer func() { - require.NoError(shutdownEnvironment(env)) - }() - now := env.clk.Time() - - // Proposed advancing timestamp to 1 second after sync bound - tx, err := env.txBuilder.NewAdvanceTimeTx(now.Add(SyncBound)) - require.NoError(err) - - onCommitState, err := state.NewDiff(lastAcceptedID, env) - require.NoError(err) - - onAbortState, err := state.NewDiff(lastAcceptedID, env) - require.NoError(err) - - executor := ProposalTxExecutor{ - OnCommitState: onCommitState, - OnAbortState: onAbortState, - Backend: &env.backend, - Tx: tx, - } - require.NoError(tx.Unsigned.Visit(&executor)) - - require.True(executor.PrefersCommit, "should prefer to commit this tx because its proposed timestamp 
it's within sync bound") -} - func TestAdvanceTimeTxAfterBanff(t *testing.T) { require := require.New(t) env := newEnvironment(t, false /*=postBanff*/, false /*=postCortina*/, false /*=postDurango*/) env.ctx.Lock.Lock() - defer func() { - require.NoError(shutdownEnvironment(env)) - }() + defer env.ctx.Lock.Unlock() env.clk.Set(defaultGenesisTime) // VM's clock reads the genesis time upgradeTime := env.clk.Time().Add(SyncBound) env.config.BanffTime = upgradeTime @@ -877,7 +836,7 @@ func TestAdvanceTimeTxAfterBanff(t *testing.T) { env.config.DurangoTime = upgradeTime // Proposed advancing timestamp to the banff timestamp - tx, err := env.txBuilder.NewAdvanceTimeTx(upgradeTime) + tx, err := newAdvanceTimeTx(t, upgradeTime) require.NoError(err) onCommitState, err := state.NewDiff(lastAcceptedID, env) @@ -901,12 +860,10 @@ func TestAdvanceTimeTxUnmarshal(t *testing.T) { require := require.New(t) env := newEnvironment(t, false /*=postBanff*/, false /*=postCortina*/, false /*=postDurango*/) env.ctx.Lock.Lock() - defer func() { - require.NoError(shutdownEnvironment(env)) - }() + defer env.ctx.Lock.Unlock() chainTime := env.state.GetTimestamp() - tx, err := env.txBuilder.NewAdvanceTimeTx(chainTime.Add(time.Second)) + tx, err := newAdvanceTimeTx(t, chainTime.Add(time.Second)) require.NoError(err) bytes, err := txs.Codec.Marshal(txs.CodecVersion, tx) diff --git a/vms/platformvm/txs/executor/backend.go b/vms/platformvm/txs/executor/backend.go index a5e017090701..847aefc16499 100644 --- a/vms/platformvm/txs/executor/backend.go +++ b/vms/platformvm/txs/executor/backend.go @@ -20,7 +20,7 @@ type Backend struct { Clk *mockable.Clock Fx fx.Fx FlowChecker utxo.Verifier - Uptimes uptime.Manager + Uptimes uptime.Calculator Rewards reward.Calculator Bootstrapped *utils.Atomic[bool] } diff --git a/vms/platformvm/txs/executor/camino_advance_time_test.go b/vms/platformvm/txs/executor/camino_advance_time_test.go index 983239310452..5f2c550498d6 100644 --- a/vms/platformvm/txs/executor/camino_advance_time_test.go +++ b/vms/platformvm/txs/executor/camino_advance_time_test.go @@ -246,7 +246,7 @@ func TestDeferredStakers(t *testing.T) { for _, newTime := range test.advanceTimeTo { env.clk.Set(newTime) - tx, err := env.txBuilder.NewAdvanceTimeTx(newTime) + tx, err := newAdvanceTimeTx(t, newTime) require.NoError(err) onCommitState, err := state.NewDiff(lastAcceptedID, env) diff --git a/vms/platformvm/txs/executor/camino_state_changes.go b/vms/platformvm/txs/executor/camino_state_changes.go deleted file mode 100644 index d0a4d6270a65..000000000000 --- a/vms/platformvm/txs/executor/camino_state_changes.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright (C) 2022-2023, Chain4Travel AG. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package executor - -import ( - "time" - - "github.com/ava-labs/avalanchego/vms/platformvm/state" -) - -type caminoStateChanges struct{} - -func (*caminoStateChanges) Apply(_ state.Diff) { -} - -func (*caminoStateChanges) Len() int { - return 0 -} - -func caminoAdvanceTimeTo( - _ *Backend, - _ state.Chain, - _ time.Time, - _ *stateChanges, -) error { - return nil -} diff --git a/vms/platformvm/txs/executor/camino_visitor.go b/vms/platformvm/txs/executor/camino_visitor.go index ea04fb46c808..20feace424a1 100644 --- a/vms/platformvm/txs/executor/camino_visitor.go +++ b/vms/platformvm/txs/executor/camino_visitor.go @@ -144,49 +144,3 @@ func (*AtomicTxExecutor) AddVoteTx(*txs.AddVoteTx) error { func (*AtomicTxExecutor) FinishProposalsTx(*txs.FinishProposalsTx) error { return ErrWrongTxType } - -// MemPool - -func (v *MempoolTxVerifier) AddressStateTx(tx *txs.AddressStateTx) error { - return v.standardTx(tx) -} - -func (v *MempoolTxVerifier) DepositTx(tx *txs.DepositTx) error { - return v.standardTx(tx) -} - -func (v *MempoolTxVerifier) UnlockDepositTx(tx *txs.UnlockDepositTx) error { - return v.standardTx(tx) -} - -func (v *MempoolTxVerifier) ClaimTx(tx *txs.ClaimTx) error { - return v.standardTx(tx) -} - -func (v *MempoolTxVerifier) RegisterNodeTx(tx *txs.RegisterNodeTx) error { - return v.standardTx(tx) -} - -func (v *MempoolTxVerifier) RewardsImportTx(tx *txs.RewardsImportTx) error { - return v.standardTx(tx) -} - -func (v *MempoolTxVerifier) MultisigAliasTx(tx *txs.MultisigAliasTx) error { - return v.standardTx(tx) -} - -func (v *MempoolTxVerifier) AddDepositOfferTx(tx *txs.AddDepositOfferTx) error { - return v.standardTx(tx) -} - -func (v *MempoolTxVerifier) AddProposalTx(tx *txs.AddProposalTx) error { - return v.standardTx(tx) -} - -func (v *MempoolTxVerifier) AddVoteTx(tx *txs.AddVoteTx) error { - return v.standardTx(tx) -} - -func (*MempoolTxVerifier) FinishProposalsTx(*txs.FinishProposalsTx) error { - return ErrWrongTxType -} diff --git a/vms/platformvm/txs/executor/create_chain_test.go b/vms/platformvm/txs/executor/create_chain_test.go index 3b0502616473..0342c8dc1cfe 100644 --- a/vms/platformvm/txs/executor/create_chain_test.go +++ b/vms/platformvm/txs/executor/create_chain_test.go @@ -27,9 +27,7 @@ func TestCreateChainTxInsufficientControlSigs(t *testing.T) { require := require.New(t) env := newEnvironment(t, true /*=postBanff*/, false /*=postCortina*/, false /*=postDurango*/) env.ctx.Lock.Lock() - defer func() { - require.NoError(shutdownEnvironment(env)) - }() + defer env.ctx.Lock.Unlock() tx, err := env.txBuilder.NewCreateChainTx( testSubnet1.ID(), @@ -62,9 +60,7 @@ func TestCreateChainTxWrongControlSig(t *testing.T) { require := require.New(t) env := newEnvironment(t, true /*=postBanff*/, false /*=postCortina*/, false /*=postDurango*/) env.ctx.Lock.Lock() - defer func() { - require.NoError(shutdownEnvironment(env)) - }() + defer env.ctx.Lock.Unlock() tx, err := env.txBuilder.NewCreateChainTx( testSubnet1.ID(), @@ -104,9 +100,7 @@ func TestCreateChainTxNoSuchSubnet(t *testing.T) { require := require.New(t) env := newEnvironment(t, true /*=postBanff*/, false /*=postCortina*/, false /*=postDurango*/) env.ctx.Lock.Lock() - defer func() { - require.NoError(shutdownEnvironment(env)) - }() + defer env.ctx.Lock.Unlock() tx, err := env.txBuilder.NewCreateChainTx( testSubnet1.ID(), @@ -138,9 +132,7 @@ func TestCreateChainTxValid(t *testing.T) { require := require.New(t) env := newEnvironment(t, true /*=postBanff*/, false /*=postCortina*/, false /*=postDurango*/) env.ctx.Lock.Lock() - 
defer func() { - require.NoError(shutdownEnvironment(env)) - }() + defer env.ctx.Lock.Unlock() tx, err := env.txBuilder.NewCreateChainTx( testSubnet1.ID(), @@ -198,9 +190,6 @@ func TestCreateChainTxAP3FeeChange(t *testing.T) { env := newEnvironment(t, true /*=postBanff*/, false /*=postCortina*/, false /*=postDurango*/) env.config.ApricotPhase3Time = ap3Time - defer func() { - require.NoError(shutdownEnvironment(env)) - }() ins, outs, _, signers, err := env.utxosHandler.Spend(env.state, preFundedKeys, 0, test.fee, ids.ShortEmpty) require.NoError(err) diff --git a/vms/platformvm/txs/executor/create_subnet_test.go b/vms/platformvm/txs/executor/create_subnet_test.go index 158eded74867..6d968daa4df0 100644 --- a/vms/platformvm/txs/executor/create_subnet_test.go +++ b/vms/platformvm/txs/executor/create_subnet_test.go @@ -52,9 +52,7 @@ func TestCreateSubnetTxAP3FeeChange(t *testing.T) { env := newEnvironment(t, false /*=postBanff*/, false /*=postCortina*/, false /*=postDurango*/) env.config.ApricotPhase3Time = ap3Time env.ctx.Lock.Lock() - defer func() { - require.NoError(shutdownEnvironment(env)) - }() + defer env.ctx.Lock.Unlock() ins, outs, _, signers, err := env.utxosHandler.Spend(env.state, preFundedKeys, 0, test.fee, ids.ShortEmpty) require.NoError(err) diff --git a/vms/platformvm/txs/executor/export_test.go b/vms/platformvm/txs/executor/export_test.go index 1272d2815142..d9e0ce071008 100644 --- a/vms/platformvm/txs/executor/export_test.go +++ b/vms/platformvm/txs/executor/export_test.go @@ -17,9 +17,7 @@ import ( func TestNewExportTx(t *testing.T) { env := newEnvironment(t, true /*=postBanff*/, false /*=postCortina*/, false /*=postDurango*/) env.ctx.Lock.Lock() - defer func() { - require.NoError(t, shutdownEnvironment(env)) - }() + defer env.ctx.Lock.Unlock() type test struct { description string @@ -59,19 +57,15 @@ func TestNewExportTx(t *testing.T) { ) require.NoError(err) - fakedState, err := state.NewDiff(lastAcceptedID, env) + stateDiff, err := state.NewDiff(lastAcceptedID, env) require.NoError(err) - fakedState.SetTimestamp(tt.timestamp) + stateDiff.SetTimestamp(tt.timestamp) - fakedParent := ids.GenerateTestID() - env.SetState(fakedParent, fakedState) - - verifier := MempoolTxVerifier{ - Backend: &env.backend, - ParentID: fakedParent, - StateVersions: env, - Tx: tx, + verifier := StandardTxExecutor{ + Backend: &env.backend, + State: stateDiff, + Tx: tx, } require.NoError(tx.Unsigned.Visit(&verifier)) }) diff --git a/vms/platformvm/txs/executor/helpers_test.go b/vms/platformvm/txs/executor/helpers_test.go index 1ea645efcfb8..b2654ec7c8c9 100644 --- a/vms/platformvm/txs/executor/helpers_test.go +++ b/vms/platformvm/txs/executor/helpers_test.go @@ -176,6 +176,30 @@ func newEnvironment(t *testing.T, postBanff, postCortina, postDurango bool) *env addSubnet(t, env, txBuilder) + t.Cleanup(func() { + env.ctx.Lock.Lock() + defer env.ctx.Lock.Unlock() + + require := require.New(t) + + if env.isBootstrapped.Get() { + validatorIDs := env.config.Validators.GetValidatorIDs(constants.PrimaryNetworkID) + + require.NoError(env.uptimes.StopTracking(validatorIDs, constants.PrimaryNetworkID)) + + for subnetID := range env.config.TrackedSubnets { + validatorIDs := env.config.Validators.GetValidatorIDs(subnetID) + + require.NoError(env.uptimes.StopTracking(validatorIDs, subnetID)) + } + env.state.SetHeight(math.MaxUint64) + require.NoError(env.state.Commit()) + } + + require.NoError(env.state.Close()) + require.NoError(env.baseDB.Close()) + }) + return env } @@ -396,30 +420,3 @@ func buildGenesisTest(ctx 
*snow.Context) []byte { return genesisBytes } - -func shutdownEnvironment(env *environment) error { - if env.isBootstrapped.Get() { - validatorIDs := env.config.Validators.GetValidatorIDs(constants.PrimaryNetworkID) - - if err := env.uptimes.StopTracking(validatorIDs, constants.PrimaryNetworkID); err != nil { - return err - } - - for subnetID := range env.config.TrackedSubnets { - validatorIDs := env.config.Validators.GetValidatorIDs(subnetID) - - if err := env.uptimes.StopTracking(validatorIDs, subnetID); err != nil { - return err - } - } - env.state.SetHeight( /*height*/ math.MaxUint64) - if err := env.state.Commit(); err != nil { - return err - } - } - - return utils.Err( - env.state.Close(), - env.baseDB.Close(), - ) -} diff --git a/vms/platformvm/txs/executor/import_test.go b/vms/platformvm/txs/executor/import_test.go index 5d281da57185..fed09bd882ce 100644 --- a/vms/platformvm/txs/executor/import_test.go +++ b/vms/platformvm/txs/executor/import_test.go @@ -23,9 +23,6 @@ import ( func TestNewImportTx(t *testing.T) { env := newEnvironment(t, false /*=postBanff*/, false /*=postCortina*/, false /*=postDurango*/) - defer func() { - require.NoError(t, shutdownEnvironment(env)) - }() type test struct { description string @@ -182,19 +179,15 @@ func TestNewImportTx(t *testing.T) { require.Equal(env.config.TxFee, totalIn-totalOut) - fakedState, err := state.NewDiff(lastAcceptedID, env) + stateDiff, err := state.NewDiff(lastAcceptedID, env) require.NoError(err) - fakedState.SetTimestamp(tt.timestamp) + stateDiff.SetTimestamp(tt.timestamp) - fakedParent := ids.GenerateTestID() - env.SetState(fakedParent, fakedState) - - verifier := MempoolTxVerifier{ - Backend: &env.backend, - ParentID: fakedParent, - StateVersions: env, - Tx: tx, + verifier := StandardTxExecutor{ + Backend: &env.backend, + State: stateDiff, + Tx: tx, } require.NoError(tx.Unsigned.Visit(&verifier)) }) diff --git a/vms/platformvm/txs/executor/proposal_tx_executor.go b/vms/platformvm/txs/executor/proposal_tx_executor.go index 9a8a159e3646..375e359429eb 100644 --- a/vms/platformvm/txs/executor/proposal_tx_executor.go +++ b/vms/platformvm/txs/executor/proposal_tx_executor.go @@ -20,7 +20,6 @@ import ( "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/math" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/components/verify" @@ -67,12 +66,6 @@ type ProposalTxExecutor struct { // [OnAbortState] is modified by this struct's methods to // reflect changes made to the state if the proposal is aborted. OnAbortState state.Diff - - // outputs populated by this struct's methods: - // - // [PrefersCommit] is true iff this node initially prefers to - // commit this block transaction. 
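The helpers_test.go hunk above folds environment teardown into newEnvironment itself via t.Cleanup, which is why every test below can drop its `defer require.NoError(shutdownEnvironment(env))` boilerplate. A minimal self-contained sketch of that pattern follows; `env`, `newEnv`, and `Close` are hypothetical stand-ins for illustration, not the repository's actual helpers:

package example

import (
	"testing"

	"github.com/stretchr/testify/require"
)

// env is a hypothetical test environment used only for this sketch.
type env struct{ closed bool }

func (e *env) Close() error { e.closed = true; return nil }

// newEnv registers teardown at construction time via t.Cleanup, so the
// caller never needs a per-test defer; cleanup runs after the test (and
// its subtests) finish, in LIFO order with other registered cleanups.
func newEnv(t *testing.T) *env {
	t.Helper()
	e := &env{}
	t.Cleanup(func() {
		require.NoError(t, e.Close())
	})
	return e
}

func TestUsesEnv(t *testing.T) {
	e := newEnv(t) // no defer needed; cleanup runs automatically
	_ = e
}

One consequence of this design, visible in the diff: the remaining `defer env.ctx.Lock.Unlock()` in each test only releases the lock, while shutdown ordering is owned by the constructor.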
- PrefersCommit bool } func (*ProposalTxExecutor) CreateChainTx(*txs.CreateChainTx) error { @@ -159,8 +152,6 @@ func (e *ProposalTxExecutor) AddValidatorTx(tx *txs.AddValidatorTx) error { avax.Consume(e.OnAbortState, tx.Ins) // Produce the UTXOs avax.Produce(e.OnAbortState, txID, onAbortOuts) - - e.PrefersCommit = tx.StartTime().After(e.Clk.Time()) return nil } @@ -207,8 +198,6 @@ func (e *ProposalTxExecutor) AddSubnetValidatorTx(tx *txs.AddSubnetValidatorTx) avax.Consume(e.OnAbortState, tx.Ins) // Produce the UTXOs avax.Produce(e.OnAbortState, txID, tx.Outs) - - e.PrefersCommit = tx.StartTime().After(e.Clk.Time()) return nil } @@ -256,8 +245,6 @@ func (e *ProposalTxExecutor) AddDelegatorTx(tx *txs.AddDelegatorTx) error { avax.Consume(e.OnAbortState, tx.Ins) // Produce the UTXOs avax.Produce(e.OnAbortState, txID, onAbortOuts) - - e.PrefersCommit = tx.StartTime().After(e.Clk.Time()) return nil } @@ -306,19 +293,9 @@ func (e *ProposalTxExecutor) AdvanceTimeTx(tx *txs.AdvanceTimeTx) error { return err } - changes, err := AdvanceTimeTo(e.Backend, e.OnCommitState, newChainTime) - if err != nil { - return err - } - - // Update the state if this tx is committed - e.OnCommitState.SetTimestamp(newChainTime) - changes.Apply(e.OnCommitState) - - e.PrefersCommit = !newChainTime.After(now.Add(SyncBound)) - // Note that state doesn't change if this proposal is aborted - return nil + _, err = AdvanceTimeTo(e.Backend, e.OnCommitState, newChainTime) + return err } func (e *ProposalTxExecutor) RewardValidatorTx(tx *txs.RewardValidatorTx) error { @@ -362,17 +339,6 @@ func (e *ProposalTxExecutor) RewardValidatorTx(tx *txs.RewardValidatorTx) error ) } - // retrieve primaryNetworkValidator before possibly removing it. - primaryNetworkValidator, err := e.OnCommitState.GetCurrentValidator( - constants.PrimaryNetworkID, - stakerToReward.NodeID, - ) - if err != nil { - // This should never error because the staker set is in memory and - // primary network validators are removed last. 
- return err - } - stakerTx, _, err := e.OnCommitState.GetTx(stakerToReward.TxID) if err != nil { return fmt.Errorf("failed to get next removed staker tx: %w", err) @@ -415,10 +381,7 @@ func (e *ProposalTxExecutor) RewardValidatorTx(tx *txs.RewardValidatorTx) error return err } e.OnAbortState.SetCurrentSupply(stakerToReward.SubnetID, newSupply) - - // handle option preference - e.PrefersCommit, err = e.shouldBeRewarded(stakerToReward, primaryNetworkValidator) - return err + return nil } func (e *ProposalTxExecutor) rewardValidatorTx(uValidatorTx txs.ValidatorTx, validator *state.Staker) error { @@ -655,26 +618,3 @@ func (e *ProposalTxExecutor) rewardDelegatorTx(uDelegatorTx txs.DelegatorTx, del } return nil } - -func (e *ProposalTxExecutor) shouldBeRewarded(stakerToReward, primaryNetworkValidator *state.Staker) (bool, error) { - expectedUptimePercentage := e.Config.UptimePercentage - if stakerToReward.SubnetID != constants.PrimaryNetworkID { - transformSubnet, err := GetTransformSubnetTx(e.OnCommitState, stakerToReward.SubnetID) - if err != nil { - return false, fmt.Errorf("failed to calculate uptime: %w", err) - } - - expectedUptimePercentage = float64(transformSubnet.UptimeRequirement) / reward.PercentDenominator - } - - // TODO: calculate subnet uptimes - uptime, err := e.Uptimes.CalculateUptimePercentFrom( - primaryNetworkValidator.NodeID, - constants.PrimaryNetworkID, - primaryNetworkValidator.StartTime, - ) - if err != nil { - return false, fmt.Errorf("failed to calculate uptime: %w", err) - } - return uptime >= expectedUptimePercentage, nil -} diff --git a/vms/platformvm/txs/executor/proposal_tx_executor_test.go b/vms/platformvm/txs/executor/proposal_tx_executor_test.go index 930c0d9c253c..e01bd267a024 100644 --- a/vms/platformvm/txs/executor/proposal_tx_executor_test.go +++ b/vms/platformvm/txs/executor/proposal_tx_executor_test.go @@ -247,9 +247,6 @@ func TestProposalTxExecuteAddDelegator(t *testing.T) { require := require.New(t) freshTH := newEnvironment(t, false /*=postBanff*/, false /*=postCortina*/, false /*=postDurango*/) freshTH.config.ApricotPhase3Time = tt.AP3Time - defer func() { - require.NoError(shutdownEnvironment(freshTH)) - }() tx, err := freshTH.txBuilder.NewAddDelegatorTx( tt.stakeAmount, @@ -288,9 +285,7 @@ func TestProposalTxExecuteAddSubnetValidator(t *testing.T) { require := require.New(t) env := newEnvironment(t, false /*=postBanff*/, false /*=postCortina*/, false /*=postDurango*/) env.ctx.Lock.Lock() - defer func() { - require.NoError(shutdownEnvironment(env)) - }() + defer env.ctx.Lock.Unlock() nodeID := genesisNodeIDs[0] { @@ -723,9 +718,7 @@ func TestProposalTxExecuteAddValidator(t *testing.T) { require := require.New(t) env := newEnvironment(t, false /*=postBanff*/, false /*=postCortina*/, false /*=postDurango*/) env.ctx.Lock.Lock() - defer func() { - require.NoError(shutdownEnvironment(env)) - }() + defer env.ctx.Lock.Unlock() nodeID := ids.GenerateTestNodeID() chainTime := env.state.GetTimestamp() diff --git a/vms/platformvm/txs/executor/reward_validator_test.go b/vms/platformvm/txs/executor/reward_validator_test.go index 973dea4de9be..3ee34aeb672d 100644 --- a/vms/platformvm/txs/executor/reward_validator_test.go +++ b/vms/platformvm/txs/executor/reward_validator_test.go @@ -26,9 +26,6 @@ import ( func TestRewardValidatorTxExecuteOnCommit(t *testing.T) { require := require.New(t) env := newEnvironment(t, false /*=postBanff*/, false /*=postCortina*/, false /*=postDurango*/) - defer func() { - require.NoError(shutdownEnvironment(env)) - }() dummyHeight := 
uint64(1) currentStakerIterator, err := env.state.GetCurrentStakerIterator() @@ -129,9 +126,6 @@ func TestRewardValidatorTxExecuteOnCommit(t *testing.T) { func TestRewardValidatorTxExecuteOnAbort(t *testing.T) { require := require.New(t) env := newEnvironment(t, false /*=postBanff*/, false /*=postCortina*/, false /*=postDurango*/) - defer func() { - require.NoError(shutdownEnvironment(env)) - }() dummyHeight := uint64(1) currentStakerIterator, err := env.state.GetCurrentStakerIterator() @@ -226,9 +220,6 @@ func TestRewardValidatorTxExecuteOnAbort(t *testing.T) { func TestRewardDelegatorTxExecuteOnCommitPreDelegateeDeferral(t *testing.T) { require := require.New(t) env := newEnvironment(t, false /*=postBanff*/, false /*=postCortina*/, false /*=postDurango*/) - defer func() { - require.NoError(shutdownEnvironment(env)) - }() dummyHeight := uint64(1) vdrRewardAddress := ids.GenerateTestShortID() @@ -350,9 +341,6 @@ func TestRewardDelegatorTxExecuteOnCommitPreDelegateeDeferral(t *testing.T) { func TestRewardDelegatorTxExecuteOnCommitPostDelegateeDeferral(t *testing.T) { require := require.New(t) env := newEnvironment(t, true /*=postBanff*/, true /*=postCortina*/, false /*=postDurango*/) - defer func() { - require.NoError(shutdownEnvironment(env)) - }() dummyHeight := uint64(1) vdrRewardAddress := ids.GenerateTestShortID() @@ -569,9 +557,6 @@ func TestRewardDelegatorTxExecuteOnCommitPostDelegateeDeferral(t *testing.T) { func TestRewardDelegatorTxAndValidatorTxExecuteOnCommitPostDelegateeDeferral(t *testing.T) { require := require.New(t) env := newEnvironment(t, true /*=postBanff*/, true /*=postCortina*/, false /*=postDurango*/) - defer func() { - require.NoError(shutdownEnvironment(env)) - }() dummyHeight := uint64(1) vdrRewardAddress := ids.GenerateTestShortID() @@ -731,9 +716,6 @@ func TestRewardDelegatorTxAndValidatorTxExecuteOnCommitPostDelegateeDeferral(t * func TestRewardDelegatorTxExecuteOnAbort(t *testing.T) { require := require.New(t) env := newEnvironment(t, false /*=postBanff*/, false /*=postCortina*/, false /*=postDurango*/) - defer func() { - require.NoError(shutdownEnvironment(env)) - }() dummyHeight := uint64(1) initialSupply, err := env.state.GetCurrentSupply(constants.PrimaryNetworkID) diff --git a/vms/platformvm/txs/executor/staker_tx_verification.go b/vms/platformvm/txs/executor/staker_tx_verification.go index 9c80fcddb850..e467c8346404 100644 --- a/vms/platformvm/txs/executor/staker_tx_verification.go +++ b/vms/platformvm/txs/executor/staker_tx_verification.go @@ -113,11 +113,16 @@ func verifyAddValidatorTx( var ( currentTimestamp = chainState.GetTimestamp() isDurangoActive = backend.Config.IsDurangoActivated(currentTimestamp) - startTime = currentTimestamp ) + if err := avax.VerifyMemoFieldLength(tx.Memo, isDurangoActive); err != nil { + return nil, err + } + + startTime := currentTimestamp if !isDurangoActive { startTime = tx.StartTime() } + duration := tx.EndTime().Sub(startTime) switch { case tx.Validator.Wght < backend.Config.MinValidatorStake: @@ -204,8 +209,12 @@ func verifyAddSubnetValidatorTx( var ( currentTimestamp = chainState.GetTimestamp() isDurangoActive = backend.Config.IsDurangoActivated(currentTimestamp) - startTime = currentTimestamp ) + if err := avax.VerifyMemoFieldLength(tx.Memo, isDurangoActive); err != nil { + return err + } + + startTime := currentTimestamp if !isDurangoActive { startTime = tx.StartTime() } @@ -293,6 +302,14 @@ func verifyRemoveSubnetValidatorTx( return nil, false, err } + var ( + currentTimestamp = chainState.GetTimestamp() + 
isDurangoActive = backend.Config.IsDurangoActivated(currentTimestamp) + ) + if err := avax.VerifyMemoFieldLength(tx.Memo, isDurangoActive); err != nil { + return nil, false, err + } + isCurrentValidator := true vdr, err := chainState.GetCurrentValidator(tx.Subnet, tx.NodeID) if err == database.ErrNotFound { @@ -366,8 +383,14 @@ func verifyAddDelegatorTx( var ( currentTimestamp = chainState.GetTimestamp() isDurangoActive = backend.Config.IsDurangoActivated(currentTimestamp) - endTime = tx.EndTime() - startTime = currentTimestamp + ) + if err := avax.VerifyMemoFieldLength(tx.Memo, isDurangoActive); err != nil { + return nil, err + } + + var ( + endTime = tx.EndTime() + startTime = currentTimestamp ) if !isDurangoActive { startTime = tx.StartTime() @@ -473,15 +496,19 @@ func verifyAddPermissionlessValidatorTx( return err } - if !backend.Bootstrapped.Get() { - return nil - } - var ( currentTimestamp = chainState.GetTimestamp() isDurangoActive = backend.Config.IsDurangoActivated(currentTimestamp) - startTime = currentTimestamp ) + if err := avax.VerifyMemoFieldLength(tx.Memo, isDurangoActive); err != nil { + return err + } + + if !backend.Bootstrapped.Get() { + return nil + } + + startTime := currentTimestamp if !isDurangoActive { startTime = tx.StartTime() } @@ -593,15 +620,21 @@ func verifyAddPermissionlessDelegatorTx( return err } + var ( + currentTimestamp = chainState.GetTimestamp() + isDurangoActive = backend.Config.IsDurangoActivated(currentTimestamp) + ) + if err := avax.VerifyMemoFieldLength(tx.Memo, isDurangoActive); err != nil { + return err + } + if !backend.Bootstrapped.Get() { return nil } var ( - currentTimestamp = chainState.GetTimestamp() - isDurangoActive = backend.Config.IsDurangoActivated(currentTimestamp) - endTime = tx.EndTime() - startTime = currentTimestamp + endTime = tx.EndTime() + startTime = currentTimestamp ) if !isDurangoActive { startTime = tx.StartTime() @@ -743,6 +776,10 @@ func verifyTransferSubnetOwnershipTx( return err } + if err := avax.VerifyMemoFieldLength(tx.Memo, true /*=isDurangoActive*/); err != nil { + return err + } + if !backend.Bootstrapped.Get() { // Not bootstrapped yet -- don't need to do full verification. 
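Each staker-tx verifier above now opens with the same Durango memo gate: read the chain timestamp, derive isDurangoActive, and call avax.VerifyMemoFieldLength before any other work. Note that in the permissionless-validator and permissionless-delegator paths the gate runs before the `Bootstrapped` early-return, so it applies even while the node is still bootstrapping. The sketch below shows the call pattern; it assumes only the VerifyMemoFieldLength signature and the avax.ErrMemoTooLarge sentinel visible in this diff, and `verifyMemo`/`gate` are hypothetical local stand-ins (the real length rules live in the avax package):

package example

import (
	"errors"
	"time"
)

// errMemoTooLarge mirrors avax.ErrMemoTooLarge for this self-contained sketch.
var errMemoTooLarge = errors.New("memo too large")

// verifyMemo is a stand-in for avax.VerifyMemoFieldLength as used in this
// diff: pre-Durango the memo passes through (subject to codec limits not
// modeled here), post-Durango a non-empty memo is rejected.
func verifyMemo(memo []byte, isDurangoActive bool) error {
	if isDurangoActive && len(memo) > 0 {
		return errMemoTooLarge
	}
	return nil
}

// gate shows the shape each executor method adopts: derive the fork flag
// from chain time, then check the memo before doing any heavier work.
func gate(chainTime, durangoTime time.Time, memo []byte) error {
	isDurangoActive := !chainTime.Before(durangoTime) // assumed activation rule
	return verifyMemo(memo, isDurangoActive)
}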
return nil diff --git a/vms/platformvm/txs/executor/staker_tx_verification_test.go b/vms/platformvm/txs/executor/staker_tx_verification_test.go index b59daf0da2b0..1431f32e56f5 100644 --- a/vms/platformvm/txs/executor/staker_tx_verification_test.go +++ b/vms/platformvm/txs/executor/staker_tx_verification_test.go @@ -139,13 +139,15 @@ func TestVerifyAddPermissionlessValidatorTx(t *testing.T) { } }, stateF: func(ctrl *gomock.Controller) state.Chain { - return nil + mockState := state.NewMockChain(ctrl) + mockState.EXPECT().GetTimestamp().Return(now) // chain time is after Durango fork activation since now.After(activeForkTime) + return mockState }, sTxF: func() *txs.Tx { return &verifiedSignedTx }, txF: func() *txs.AddPermissionlessValidatorTx { - return nil + return &txs.AddPermissionlessValidatorTx{} }, expectedErr: nil, }, diff --git a/vms/platformvm/txs/executor/standard_tx_executor.go b/vms/platformvm/txs/executor/standard_tx_executor.go index 6fc008c04616..18db8988c2c8 100644 --- a/vms/platformvm/txs/executor/standard_tx_executor.go +++ b/vms/platformvm/txs/executor/standard_tx_executor.go @@ -64,14 +64,21 @@ func (e *StandardTxExecutor) CreateChainTx(tx *txs.CreateChainTx) error { return err } + var ( + currentTimestamp = e.State.GetTimestamp() + isDurangoActive = e.Config.IsDurangoActivated(currentTimestamp) + ) + if err := avax.VerifyMemoFieldLength(tx.Memo, isDurangoActive); err != nil { + return err + } + baseTxCreds, err := verifyPoASubnetAuthorization(e.Backend, e.State, e.Tx, tx.SubnetID, tx.SubnetAuth) if err != nil { return err } // Verify the flowcheck - timestamp := e.State.GetTimestamp() - createBlockchainTxFee := e.Config.GetCreateBlockchainTxFee(timestamp) + createBlockchainTxFee := e.Config.GetCreateBlockchainTxFee(currentTimestamp) if err := e.FlowChecker.VerifySpend( tx, e.State, @@ -108,9 +115,16 @@ func (e *StandardTxExecutor) CreateSubnetTx(tx *txs.CreateSubnetTx) error { return err } + var ( + currentTimestamp = e.State.GetTimestamp() + isDurangoActive = e.Config.IsDurangoActivated(currentTimestamp) + ) + if err := avax.VerifyMemoFieldLength(tx.Memo, isDurangoActive); err != nil { + return err + } + // Verify the flowcheck - timestamp := e.State.GetTimestamp() - createSubnetTxFee := e.Config.GetCreateSubnetTxFee(timestamp) + createSubnetTxFee := e.Config.GetCreateSubnetTxFee(currentTimestamp) if err := e.FlowChecker.VerifySpend( tx, e.State, @@ -141,6 +155,14 @@ func (e *StandardTxExecutor) ImportTx(tx *txs.ImportTx) error { return err } + var ( + currentTimestamp = e.State.GetTimestamp() + isDurangoActive = e.Config.IsDurangoActivated(currentTimestamp) + ) + if err := avax.VerifyMemoFieldLength(tx.Memo, isDurangoActive); err != nil { + return err + } + e.Inputs = set.NewSet[ids.ID](len(tx.ImportedInputs)) utxoIDs := make([][]byte, len(tx.ImportedInputs)) for i, in := range tx.ImportedInputs { @@ -225,6 +247,14 @@ func (e *StandardTxExecutor) ExportTx(tx *txs.ExportTx) error { return err } + var ( + currentTimestamp = e.State.GetTimestamp() + isDurangoActive = e.Config.IsDurangoActivated(currentTimestamp) + ) + if err := avax.VerifyMemoFieldLength(tx.Memo, isDurangoActive); err != nil { + return err + } + outs := make([]*avax.TransferableOutput, len(tx.Outs)+len(tx.ExportedOutputs)) copy(outs, tx.Outs) copy(outs[len(tx.Outs):], tx.ExportedOutputs) @@ -407,6 +437,14 @@ func (e *StandardTxExecutor) TransformSubnetTx(tx *txs.TransformSubnetTx) error return err } + var ( + currentTimestamp = e.State.GetTimestamp() + isDurangoActive = 
e.Config.IsDurangoActivated(currentTimestamp) + ) + if err := avax.VerifyMemoFieldLength(tx.Memo, isDurangoActive); err != nil { + return err + } + // Note: math.MaxInt32 * time.Second < math.MaxInt64 - so this can never // overflow. if time.Duration(tx.MaxStakeDuration)*time.Second > e.Backend.Config.MaxStakeDuration { @@ -533,6 +571,10 @@ func (e *StandardTxExecutor) BaseTx(tx *txs.BaseTx) error { return err } + if err := avax.VerifyMemoFieldLength(tx.Memo, true /*=isDurangoActive*/); err != nil { + return err + } + // Verify the flowcheck if err := e.FlowChecker.VerifySpend( tx, diff --git a/vms/platformvm/txs/executor/standard_tx_executor_test.go b/vms/platformvm/txs/executor/standard_tx_executor_test.go index e26d0c55836c..1bd12d00e502 100644 --- a/vms/platformvm/txs/executor/standard_tx_executor_test.go +++ b/vms/platformvm/txs/executor/standard_tx_executor_test.go @@ -29,18 +29,22 @@ import ( "github.com/ava-labs/avalanchego/snow" "github.com/ava-labs/avalanchego/utils" "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/utils/crypto/bls" "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/hashing" + "github.com/ava-labs/avalanchego/utils/units" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/components/verify" "github.com/ava-labs/avalanchego/vms/platformvm/config" "github.com/ava-labs/avalanchego/vms/platformvm/fx" "github.com/ava-labs/avalanchego/vms/platformvm/reward" + "github.com/ava-labs/avalanchego/vms/platformvm/signer" "github.com/ava-labs/avalanchego/vms/platformvm/state" "github.com/ava-labs/avalanchego/vms/platformvm/status" "github.com/ava-labs/avalanchego/vms/platformvm/txs" "github.com/ava-labs/avalanchego/vms/platformvm/utxo" "github.com/ava-labs/avalanchego/vms/secp256k1fx" + "github.com/ava-labs/avalanchego/vms/types" ) // This tests that the math performed during TransformSubnetTx execution can @@ -53,9 +57,7 @@ func TestStandardTxExecutorAddValidatorTxEmptyID(t *testing.T) { require := require.New(t) env := newEnvironment(t, false /*=postBanff*/, false /*=postCortina*/, false /*=postDurango*/) env.ctx.Lock.Lock() - defer func() { - require.NoError(shutdownEnvironment(env)) - }() + defer env.ctx.Lock.Unlock() chainTime := env.state.GetTimestamp() startTime := defaultValidateStartTime.Add(1 * time.Second) @@ -189,7 +191,6 @@ func TestStandardTxExecutorAddDelegator(t *testing.T) { setup func(*environment) AP3Time time.Time expectedExecutionErr error - expectedMempoolErr error } tests := []test{ @@ -204,7 +205,6 @@ func TestStandardTxExecutorAddDelegator(t *testing.T) { setup: nil, AP3Time: defaultGenesisTime, expectedExecutionErr: ErrPeriodMismatch, - expectedMempoolErr: ErrPeriodMismatch, }, { description: fmt.Sprintf("delegator should not be added more than (%s) in the future", MaxFutureStartTime), @@ -217,7 +217,6 @@ func TestStandardTxExecutorAddDelegator(t *testing.T) { setup: nil, AP3Time: defaultGenesisTime, expectedExecutionErr: ErrFutureStakeTime, - expectedMempoolErr: nil, }, { description: "validator not in the current or pending validator sets", @@ -230,7 +229,6 @@ func TestStandardTxExecutorAddDelegator(t *testing.T) { setup: nil, AP3Time: defaultGenesisTime, expectedExecutionErr: database.ErrNotFound, - expectedMempoolErr: database.ErrNotFound, }, { description: "delegator starts before validator", @@ -243,7 +241,6 @@ func TestStandardTxExecutorAddDelegator(t *testing.T) { setup: addMinStakeValidator, AP3Time: defaultGenesisTime, 
expectedExecutionErr: ErrPeriodMismatch, - expectedMempoolErr: ErrPeriodMismatch, }, { description: "delegator stops before validator", @@ -256,7 +253,6 @@ func TestStandardTxExecutorAddDelegator(t *testing.T) { setup: addMinStakeValidator, AP3Time: defaultGenesisTime, expectedExecutionErr: ErrPeriodMismatch, - expectedMempoolErr: ErrPeriodMismatch, }, { description: "valid", @@ -269,7 +265,6 @@ func TestStandardTxExecutorAddDelegator(t *testing.T) { setup: addMinStakeValidator, AP3Time: defaultGenesisTime, expectedExecutionErr: nil, - expectedMempoolErr: nil, }, { description: "starts delegating at current timestamp", @@ -282,7 +277,6 @@ func TestStandardTxExecutorAddDelegator(t *testing.T) { setup: nil, AP3Time: defaultGenesisTime, expectedExecutionErr: ErrTimestampNotBeforeStartTime, - expectedMempoolErr: ErrTimestampNotBeforeStartTime, }, { description: "tx fee paying key has no funds", @@ -307,7 +301,6 @@ func TestStandardTxExecutorAddDelegator(t *testing.T) { }, AP3Time: defaultGenesisTime, expectedExecutionErr: ErrFlowCheckFailed, - expectedMempoolErr: ErrFlowCheckFailed, }, { description: "over delegation before AP3", @@ -320,7 +313,6 @@ func TestStandardTxExecutorAddDelegator(t *testing.T) { setup: addMaxStakeValidator, AP3Time: defaultValidateEndTime, expectedExecutionErr: nil, - expectedMempoolErr: nil, }, { description: "over delegation after AP3", @@ -333,7 +325,6 @@ func TestStandardTxExecutorAddDelegator(t *testing.T) { setup: addMaxStakeValidator, AP3Time: defaultGenesisTime, expectedExecutionErr: ErrOverDelegated, - expectedMempoolErr: ErrOverDelegated, }, } @@ -342,9 +333,6 @@ func TestStandardTxExecutorAddDelegator(t *testing.T) { require := require.New(t) freshTH := newEnvironment(t, false /*=postBanff*/, false /*=postCortina*/, false /*=postDurango*/) freshTH.config.ApricotPhase3Time = tt.AP3Time - defer func() { - require.NoError(shutdownEnvironment(freshTH)) - }() tx, err := freshTH.txBuilder.NewAddDelegatorTx( tt.stakeAmount, @@ -373,26 +361,15 @@ func TestStandardTxExecutorAddDelegator(t *testing.T) { } err = tx.Unsigned.Visit(&executor) require.ErrorIs(err, tt.expectedExecutionErr) - - mempoolExecutor := MempoolTxVerifier{ - Backend: &freshTH.backend, - ParentID: lastAcceptedID, - StateVersions: freshTH, - Tx: tx, - } - err = tx.Unsigned.Visit(&mempoolExecutor) - require.ErrorIs(err, tt.expectedMempoolErr) }) } } -func TestStandardTxExecutorAddSubnetValidator(t *testing.T) { +func TestApricotStandardTxExecutorAddSubnetValidator(t *testing.T) { require := require.New(t) env := newEnvironment(t, false /*=postBanff*/, false /*=postCortina*/, false /*=postDurango*/) env.ctx.Lock.Lock() - defer func() { - require.NoError(shutdownEnvironment(env)) - }() + defer env.ctx.Lock.Unlock() nodeID := genesisNodeIDs[0] @@ -815,13 +792,11 @@ func TestStandardTxExecutorAddSubnetValidator(t *testing.T) { } } -func TestStandardTxExecutorBanffAddValidator(t *testing.T) { +func TestBanffStandardTxExecutorAddValidator(t *testing.T) { require := require.New(t) env := newEnvironment(t, true /*=postBanff*/, false /*=postCortina*/, false /*=postDurango*/) env.ctx.Lock.Lock() - defer func() { - require.NoError(shutdownEnvironment(env)) - }() + defer env.ctx.Lock.Unlock() nodeID := ids.GenerateTestNodeID() @@ -988,52 +963,640 @@ func TestStandardTxExecutorBanffAddValidator(t *testing.T) { } } -func TestStandardTxExecutorDurangoAddValidator(t *testing.T) { - require := require.New(t) - env := newEnvironment(t, true /*=postBanff*/, true /*=postCortina*/, true /*=postDurango*/) - 
env.ctx.Lock.Lock() - defer func() { - require.NoError(shutdownEnvironment(env)) - env.ctx.Lock.Unlock() - }() - - var ( - nodeID = ids.GenerateTestNodeID() - chainTime = env.state.GetTimestamp() - endTime = chainTime.Add(defaultMaxStakingDuration) - ) +// Verifies that the Memo field is required to be empty post-Durango +func TestDurangoMemoField(t *testing.T) { + type test struct { + name string + setupTest func(*environment) (txs.UnsignedTx, [][]*secp256k1.PrivateKey, state.Diff, *types.JSONByteSlice) + } - addValTx, err := env.txBuilder.NewAddValidatorTx( - env.config.MinValidatorStake, - 0, - uint64(endTime.Unix()), - nodeID, - ids.ShortEmpty, - reward.PercentDenominator, - []*secp256k1.PrivateKey{preFundedKeys[0]}, - ids.ShortEmpty, // change addr - ) - require.NoError(err) + tests := []test{ + { + name: "AddValidatorTx", + setupTest: func(env *environment) (txs.UnsignedTx, [][]*secp256k1.PrivateKey, state.Diff, *types.JSONByteSlice) { + ins, unstakedOuts, stakedOuts, signers, err := env.utxosHandler.Spend( + env.state, + preFundedKeys, + defaultMinValidatorStake, + env.config.AddPrimaryNetworkValidatorFee, + ids.ShortEmpty, + ) + require.NoError(t, err) - onAcceptState, err := state.NewDiff(env.state.GetLastAccepted(), env) - require.NoError(err) + var ( + nodeID = ids.GenerateTestNodeID() + chainTime = env.state.GetTimestamp() + endTime = chainTime.Add(defaultMaxStakingDuration) + ) - require.NoError(addValTx.Unsigned.Visit(&StandardTxExecutor{ - Backend: &env.backend, - State: onAcceptState, - Tx: addValTx, - })) + onAcceptState, err := state.NewDiff(env.state.GetLastAccepted(), env) + require.NoError(t, err) - // Check that a current validator is added - val, err := onAcceptState.GetCurrentValidator(constants.PrimaryNetworkID, nodeID) - require.NoError(err) + tx := &txs.AddValidatorTx{ + BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ + NetworkID: env.ctx.NetworkID, + BlockchainID: env.ctx.ChainID, + Ins: ins, + Outs: unstakedOuts, + }}, + Validator: txs.Validator{ + NodeID: nodeID, + Start: 0, + End: uint64(endTime.Unix()), + Wght: env.config.MinValidatorStake, + }, + StakeOuts: stakedOuts, + RewardsOwner: &secp256k1fx.OutputOwners{ + Locktime: 0, + Threshold: 1, + Addrs: []ids.ShortID{ids.ShortEmpty}, + }, + DelegationShares: reward.PercentDenominator, + } + return tx, signers, onAcceptState, &tx.Memo + }, + }, + { + name: "AddSubnetValidatorTx", + setupTest: func(env *environment) (txs.UnsignedTx, [][]*secp256k1.PrivateKey, state.Diff, *types.JSONByteSlice) { + var primaryValidator *state.Staker + it, err := env.state.GetCurrentStakerIterator() + require.NoError(t, err) + for it.Next() { + staker := it.Value() + if staker.Priority != txs.PrimaryNetworkValidatorCurrentPriority { + continue + } + primaryValidator = staker + break + } + it.Release() + + ins, unstakedOuts, _, signers, err := env.utxosHandler.Spend( + env.state, + preFundedKeys, + defaultMinValidatorStake, + env.config.TxFee, + ids.ShortEmpty, + ) + require.NoError(t, err) + + subnetAuth, subnetSigners, err := env.utxosHandler.Authorize(env.state, testSubnet1.TxID, preFundedKeys) + require.NoError(t, err) + signers = append(signers, subnetSigners) + + onAcceptState, err := state.NewDiff(env.state.GetLastAccepted(), env) + require.NoError(t, err) + + tx := &txs.AddSubnetValidatorTx{ + BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ + NetworkID: env.ctx.NetworkID, + BlockchainID: env.ctx.ChainID, + Ins: ins, + Outs: unstakedOuts, + }}, + SubnetValidator: txs.SubnetValidator{ + Validator: txs.Validator{ + NodeID: 
primaryValidator.NodeID, + End: uint64(primaryValidator.EndTime.Unix()), + Wght: defaultMinValidatorStake, + }, + Subnet: testSubnet1.TxID, + }, + SubnetAuth: subnetAuth, + } + return tx, signers, onAcceptState, &tx.Memo + }, + }, + { + name: "AddDelegatorTx", + setupTest: func(env *environment) (txs.UnsignedTx, [][]*secp256k1.PrivateKey, state.Diff, *types.JSONByteSlice) { + var primaryValidator *state.Staker + it, err := env.state.GetCurrentStakerIterator() + require.NoError(t, err) + for it.Next() { + staker := it.Value() + if staker.Priority != txs.PrimaryNetworkValidatorCurrentPriority { + continue + } + primaryValidator = staker + break + } + it.Release() + + ins, unstakedOuts, stakedOuts, signers, err := env.utxosHandler.Spend( + env.state, + preFundedKeys, + defaultMinValidatorStake, + env.config.AddPrimaryNetworkDelegatorFee, + ids.ShortEmpty, + ) + require.NoError(t, err) + + onAcceptState, err := state.NewDiff(env.state.GetLastAccepted(), env) + require.NoError(t, err) + + tx := &txs.AddDelegatorTx{ + BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ + NetworkID: env.ctx.NetworkID, + BlockchainID: env.ctx.ChainID, + Ins: ins, + Outs: unstakedOuts, + }}, + Validator: txs.Validator{ + NodeID: primaryValidator.NodeID, + End: uint64(primaryValidator.EndTime.Unix()), + Wght: defaultMinValidatorStake, + }, + StakeOuts: stakedOuts, + DelegationRewardsOwner: &secp256k1fx.OutputOwners{ + Locktime: 0, + Threshold: 1, + Addrs: []ids.ShortID{ids.ShortEmpty}, + }, + } + return tx, signers, onAcceptState, &tx.Memo + }, + }, + { + name: "CreateChainTx", + setupTest: func(env *environment) (txs.UnsignedTx, [][]*secp256k1.PrivateKey, state.Diff, *types.JSONByteSlice) { + chainTime := env.state.GetTimestamp() + createBlockchainTxFee := env.config.GetCreateBlockchainTxFee(chainTime) + + ins, unstakedOuts, _, signers, err := env.utxosHandler.Spend( + env.state, + preFundedKeys, + defaultMinValidatorStake, + createBlockchainTxFee, + ids.ShortEmpty, + ) + require.NoError(t, err) + + subnetAuth, subnetSigners, err := env.utxosHandler.Authorize(env.state, testSubnet1.TxID, preFundedKeys) + require.NoError(t, err) + signers = append(signers, subnetSigners) + + onAcceptState, err := state.NewDiff(env.state.GetLastAccepted(), env) + require.NoError(t, err) + + tx := &txs.CreateChainTx{ + BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ + NetworkID: env.ctx.NetworkID, + BlockchainID: env.ctx.ChainID, + Ins: ins, + Outs: unstakedOuts, + }}, + SubnetID: testSubnet1.TxID, + ChainName: "aaa", + VMID: ids.GenerateTestID(), + FxIDs: []ids.ID{}, + GenesisData: []byte{}, + SubnetAuth: subnetAuth, + } + return tx, signers, onAcceptState, &tx.Memo + }, + }, + { + name: "CreateSubnetTx", + setupTest: func(env *environment) (txs.UnsignedTx, [][]*secp256k1.PrivateKey, state.Diff, *types.JSONByteSlice) { + chainTime := env.state.GetTimestamp() + createSubnetTxFee := env.config.GetCreateSubnetTxFee(chainTime) + + ins, unstakedOuts, _, signers, err := env.utxosHandler.Spend( + env.state, + preFundedKeys, + defaultMinValidatorStake, + createSubnetTxFee, + ids.ShortEmpty, + ) + require.NoError(t, err) + + onAcceptState, err := state.NewDiff(env.state.GetLastAccepted(), env) + require.NoError(t, err) + + tx := &txs.CreateSubnetTx{ + BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ + NetworkID: env.ctx.NetworkID, + BlockchainID: env.ctx.ChainID, + Ins: ins, + Outs: unstakedOuts, + }}, + Owner: &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ids.ShortEmpty}, + }, + } + return tx, signers, onAcceptState, &tx.Memo + }, + }, + { + name: 
"ImportTx", + setupTest: func(env *environment) (txs.UnsignedTx, [][]*secp256k1.PrivateKey, state.Diff, *types.JSONByteSlice) { + // Skip shared memory checks + env.backend.Bootstrapped.Set(false) + + utxoID := avax.UTXOID{ + TxID: ids.Empty.Prefix(1), + OutputIndex: 1, + } + amount := uint64(50000) + recipientKey := preFundedKeys[1] + + utxo := &avax.UTXO{ + UTXOID: utxoID, + Asset: avax.Asset{ID: env.ctx.AVAXAssetID}, + Out: &secp256k1fx.TransferOutput{ + Amt: amount, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{recipientKey.PublicKey().Address()}, + }, + }, + } + + signers := [][]*secp256k1.PrivateKey{{recipientKey}} + + onAcceptState, err := state.NewDiff(env.state.GetLastAccepted(), env) + require.NoError(t, err) + + tx := &txs.ImportTx{ + BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ + NetworkID: env.ctx.NetworkID, + BlockchainID: env.ctx.ChainID, + }}, + SourceChain: env.ctx.XChainID, + ImportedInputs: []*avax.TransferableInput{ + { + UTXOID: utxo.UTXOID, + Asset: utxo.Asset, + In: &secp256k1fx.TransferInput{ + Amt: env.config.TxFee, + }, + }, + }, + } + return tx, signers, onAcceptState, &tx.Memo + }, + }, + { + name: "ExportTx", + setupTest: func(env *environment) (txs.UnsignedTx, [][]*secp256k1.PrivateKey, state.Diff, *types.JSONByteSlice) { + amount := units.Avax + ins, unstakedOuts, _, signers, err := env.utxosHandler.Spend( + env.state, + preFundedKeys, + amount, + env.config.TxFee, + ids.ShortEmpty, + ) + require.NoError(t, err) + + onAcceptState, err := state.NewDiff(env.state.GetLastAccepted(), env) + require.NoError(t, err) + + tx := &txs.ExportTx{ + BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ + NetworkID: env.ctx.NetworkID, + BlockchainID: env.ctx.ChainID, + Ins: ins, + Outs: unstakedOuts, + }}, + DestinationChain: env.ctx.XChainID, + ExportedOutputs: []*avax.TransferableOutput{{ + Asset: avax.Asset{ID: env.ctx.AVAXAssetID}, + Out: &secp256k1fx.TransferOutput{ + Amt: amount, + OutputOwners: secp256k1fx.OutputOwners{ + Locktime: 0, + Threshold: 1, + Addrs: []ids.ShortID{ids.GenerateTestShortID()}, + }, + }, + }}, + } + return tx, signers, onAcceptState, &tx.Memo + }, + }, + { + name: "RemoveSubnetValidatorTx", + setupTest: func(env *environment) (txs.UnsignedTx, [][]*secp256k1.PrivateKey, state.Diff, *types.JSONByteSlice) { + var primaryValidator *state.Staker + it, err := env.state.GetCurrentStakerIterator() + require.NoError(t, err) + for it.Next() { + staker := it.Value() + if staker.Priority != txs.PrimaryNetworkValidatorCurrentPriority { + continue + } + primaryValidator = staker + break + } + it.Release() + + endTime := primaryValidator.EndTime + subnetValTx, err := env.txBuilder.NewAddSubnetValidatorTx( + defaultWeight, + 0, + uint64(endTime.Unix()), + primaryValidator.NodeID, + testSubnet1.ID(), + []*secp256k1.PrivateKey{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, + ids.ShortEmpty, + ) + require.NoError(t, err) + + onAcceptState, err := state.NewDiff(env.state.GetLastAccepted(), env) + require.NoError(t, err) + + require.NoError(t, subnetValTx.Unsigned.Visit(&StandardTxExecutor{ + Backend: &env.backend, + State: onAcceptState, + Tx: subnetValTx, + })) + + ins, unstakedOuts, _, signers, err := env.utxosHandler.Spend( + env.state, + preFundedKeys, + defaultMinValidatorStake, + env.config.TxFee, + ids.ShortEmpty, + ) + require.NoError(t, err) + + subnetAuth, subnetSigners, err := env.utxosHandler.Authorize(env.state, testSubnet1.TxID, preFundedKeys) + require.NoError(t, err) + signers = append(signers, subnetSigners) + + tx := 
&txs.RemoveSubnetValidatorTx{ + BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ + NetworkID: env.ctx.NetworkID, + BlockchainID: env.ctx.ChainID, + Ins: ins, + Outs: unstakedOuts, + }}, + Subnet: testSubnet1.ID(), + NodeID: primaryValidator.NodeID, + SubnetAuth: subnetAuth, + } + return tx, signers, onAcceptState, &tx.Memo + }, + }, + { + name: "TransformSubnetTx", + setupTest: func(env *environment) (txs.UnsignedTx, [][]*secp256k1.PrivateKey, state.Diff, *types.JSONByteSlice) { + ins, unstakedOuts, _, signers, err := env.utxosHandler.Spend( + env.state, + preFundedKeys, + defaultMinValidatorStake, + env.config.TxFee, + ids.ShortEmpty, + ) + require.NoError(t, err) + + subnetAuth, subnetSigners, err := env.utxosHandler.Authorize(env.state, testSubnet1.TxID, preFundedKeys) + require.NoError(t, err) + signers = append(signers, subnetSigners) + + onAcceptState, err := state.NewDiff(env.state.GetLastAccepted(), env) + require.NoError(t, err) + + tx := &txs.TransformSubnetTx{ + BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ + NetworkID: env.ctx.NetworkID, + BlockchainID: env.ctx.ChainID, + Ins: ins, + Outs: unstakedOuts, + }}, + Subnet: testSubnet1.TxID, + AssetID: ids.GenerateTestID(), + InitialSupply: 10, + MaximumSupply: 10, + MinConsumptionRate: 0, + MaxConsumptionRate: reward.PercentDenominator, + MinValidatorStake: 2, + MaxValidatorStake: 10, + MinStakeDuration: 1, + MaxStakeDuration: 2, + MinDelegationFee: reward.PercentDenominator, + MinDelegatorStake: 1, + MaxValidatorWeightFactor: 1, + UptimeRequirement: reward.PercentDenominator, + SubnetAuth: subnetAuth, + } + return tx, signers, onAcceptState, &tx.Memo + }, + }, + { + name: "AddPermissionlessValidatorTx", + setupTest: func(env *environment) (txs.UnsignedTx, [][]*secp256k1.PrivateKey, state.Diff, *types.JSONByteSlice) { + ins, unstakedOuts, stakedOuts, signers, err := env.utxosHandler.Spend( + env.state, + preFundedKeys, + defaultMinValidatorStake, + env.config.AddPrimaryNetworkValidatorFee, + ids.ShortEmpty, + ) + require.NoError(t, err) + + sk, err := bls.NewSecretKey() + require.NoError(t, err) + + var ( + nodeID = ids.GenerateTestNodeID() + chainTime = env.state.GetTimestamp() + endTime = chainTime.Add(defaultMaxStakingDuration) + ) + + onAcceptState, err := state.NewDiff(env.state.GetLastAccepted(), env) + require.NoError(t, err) + + tx := &txs.AddPermissionlessValidatorTx{ + BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ + NetworkID: env.ctx.NetworkID, + BlockchainID: env.ctx.ChainID, + Ins: ins, + Outs: unstakedOuts, + }}, + Validator: txs.Validator{ + NodeID: nodeID, + End: uint64(endTime.Unix()), + Wght: env.config.MinValidatorStake, + }, + Subnet: constants.PrimaryNetworkID, + Signer: signer.NewProofOfPossession(sk), + StakeOuts: stakedOuts, + ValidatorRewardsOwner: &secp256k1fx.OutputOwners{ + Locktime: 0, + Threshold: 1, + Addrs: []ids.ShortID{ + ids.ShortEmpty, + }, + }, + DelegatorRewardsOwner: &secp256k1fx.OutputOwners{ + Locktime: 0, + Threshold: 1, + Addrs: []ids.ShortID{ + ids.ShortEmpty, + }, + }, + DelegationShares: reward.PercentDenominator, + } + return tx, signers, onAcceptState, &tx.Memo + }, + }, + { + name: "AddPermissionlessDelegatorTx", + setupTest: func(env *environment) (txs.UnsignedTx, [][]*secp256k1.PrivateKey, state.Diff, *types.JSONByteSlice) { + var primaryValidator *state.Staker + it, err := env.state.GetCurrentStakerIterator() + require.NoError(t, err) + for it.Next() { + staker := it.Value() + if staker.Priority != txs.PrimaryNetworkValidatorCurrentPriority { + continue + } + primaryValidator = staker + break + } + 
it.Release() + + ins, unstakedOuts, stakedOuts, signers, err := env.utxosHandler.Spend( + env.state, + preFundedKeys, + defaultMinValidatorStake, + env.config.AddPrimaryNetworkDelegatorFee, + ids.ShortEmpty, + ) + require.NoError(t, err) - require.Equal(addValTx.ID(), val.TxID) - require.Equal(chainTime, val.StartTime) - require.Equal(endTime, val.EndTime) + onAcceptState, err := state.NewDiff(env.state.GetLastAccepted(), env) + require.NoError(t, err) + + tx := &txs.AddPermissionlessDelegatorTx{ + BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ + NetworkID: env.ctx.NetworkID, + BlockchainID: env.ctx.ChainID, + Ins: ins, + Outs: unstakedOuts, + }}, + Validator: txs.Validator{ + NodeID: primaryValidator.NodeID, + End: uint64(primaryValidator.EndTime.Unix()), + Wght: defaultMinValidatorStake, + }, + StakeOuts: stakedOuts, + DelegationRewardsOwner: &secp256k1fx.OutputOwners{ + Locktime: 0, + Threshold: 1, + Addrs: []ids.ShortID{ids.ShortEmpty}, + }, + } + return tx, signers, onAcceptState, &tx.Memo + }, + }, + { + name: "TransferSubnetOwnershipTx", + setupTest: func(env *environment) (txs.UnsignedTx, [][]*secp256k1.PrivateKey, state.Diff, *types.JSONByteSlice) { + ins, unstakedOuts, _, signers, err := env.utxosHandler.Spend( + env.state, + preFundedKeys, + defaultMinValidatorStake, + env.config.TxFee, + ids.ShortEmpty, + ) + require.NoError(t, err) + + subnetAuth, subnetSigners, err := env.utxosHandler.Authorize(env.state, testSubnet1.TxID, preFundedKeys) + require.NoError(t, err) + signers = append(signers, subnetSigners) + + onAcceptState, err := state.NewDiff(env.state.GetLastAccepted(), env) + require.NoError(t, err) + + tx := &txs.TransferSubnetOwnershipTx{ + BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ + NetworkID: env.ctx.NetworkID, + BlockchainID: env.ctx.ChainID, + Ins: ins, + Outs: unstakedOuts, + }}, + Subnet: testSubnet1.TxID, + SubnetAuth: subnetAuth, + Owner: &secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ids.ShortEmpty}, + }, + } + return tx, signers, onAcceptState, &tx.Memo + }, + }, + { + name: "BaseTx", + setupTest: func(env *environment) (txs.UnsignedTx, [][]*secp256k1.PrivateKey, state.Diff, *types.JSONByteSlice) { + ins, unstakedOuts, _, signers, err := env.utxosHandler.Spend( + env.state, + preFundedKeys, + defaultMinValidatorStake, + env.config.TxFee, + ids.ShortEmpty, + ) + require.NoError(t, err) + + onAcceptState, err := state.NewDiff(env.state.GetLastAccepted(), env) + require.NoError(t, err) + + tx := &txs.BaseTx{ + BaseTx: avax.BaseTx{ + NetworkID: env.ctx.NetworkID, + BlockchainID: env.ctx.ChainID, + Ins: ins, + Outs: unstakedOuts, + }, + } + return tx, signers, onAcceptState, &tx.Memo + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + require := require.New(t) + + env := newEnvironment(t, true /*=postBanff*/, true /*=postCortina*/, true /*=postDurango*/) + env.ctx.Lock.Lock() + defer env.ctx.Lock.Unlock() + + utx, signers, onAcceptState, memo := tt.setupTest(env) + + // Populated memo field should error + *memo = []byte{'m', 'e', 'm', 'o'} + tx, err := txs.NewSigned(utx, txs.Codec, signers) + require.NoError(err) + + err = tx.Unsigned.Visit(&StandardTxExecutor{ + Backend: &env.backend, + State: onAcceptState, + Tx: tx, + }) + require.ErrorIs(err, avax.ErrMemoTooLarge) + + // Empty memo field should not error + *memo = []byte{} + tx, err = txs.NewSigned(utx, txs.Codec, signers) + require.NoError(err) + + require.NoError(tx.Unsigned.Visit(&StandardTxExecutor{ + Backend: &env.backend, + State: onAcceptState, + Tx: tx, + })) + 
}) + } } // Returns a RemoveSubnetValidatorTx that passes syntactic verification. +// Memo field is empty as required post Durango activation func newRemoveSubnetValidatorTx(t *testing.T) (*txs.RemoveSubnetValidatorTx, *txs.Tx) { t.Helper() @@ -1076,7 +1639,6 @@ func newRemoveSubnetValidatorTx(t *testing.T) (*txs.RemoveSubnetValidatorTx, *tx }, }, }, - Memo: []byte("hi"), }, }, Subnet: ids.GenerateTestID(), @@ -1144,6 +1706,7 @@ func TestStandardExecutorRemoveSubnetValidatorTx(t *testing.T) { env := newValidRemoveSubnetValidatorTxVerifyEnv(t, ctrl) // Set dependency expectations. + env.state.EXPECT().GetTimestamp().Return(env.latestForkTime) env.state.EXPECT().GetCurrentValidator(env.unsignedTx.Subnet, env.unsignedTx.NodeID).Return(env.staker, nil).Times(1) subnetOwner := fx.NewMockOwner(ctrl) env.state.EXPECT().GetSubnetOwner(env.unsignedTx.Subnet).Return(subnetOwner, nil).Times(1) @@ -1207,6 +1770,7 @@ func TestStandardExecutorRemoveSubnetValidatorTx(t *testing.T) { newExecutor: func(ctrl *gomock.Controller) (*txs.RemoveSubnetValidatorTx, *StandardTxExecutor) { env := newValidRemoveSubnetValidatorTxVerifyEnv(t, ctrl) env.state = state.NewMockDiff(ctrl) + env.state.EXPECT().GetTimestamp().Return(env.latestForkTime) env.state.EXPECT().GetCurrentValidator(env.unsignedTx.Subnet, env.unsignedTx.NodeID).Return(nil, database.ErrNotFound) env.state.EXPECT().GetPendingValidator(env.unsignedTx.Subnet, env.unsignedTx.NodeID).Return(nil, database.ErrNotFound) e := &StandardTxExecutor{ @@ -1238,6 +1802,7 @@ func TestStandardExecutorRemoveSubnetValidatorTx(t *testing.T) { staker.Priority = txs.SubnetPermissionlessValidatorCurrentPriority // Set dependency expectations. + env.state.EXPECT().GetTimestamp().Return(env.latestForkTime) env.state.EXPECT().GetCurrentValidator(env.unsignedTx.Subnet, env.unsignedTx.NodeID).Return(&staker, nil).Times(1) e := &StandardTxExecutor{ Backend: &Backend{ @@ -1266,6 +1831,7 @@ func TestStandardExecutorRemoveSubnetValidatorTx(t *testing.T) { // Remove credentials env.tx.Creds = nil env.state = state.NewMockDiff(ctrl) + env.state.EXPECT().GetTimestamp().Return(env.latestForkTime) env.state.EXPECT().GetCurrentValidator(env.unsignedTx.Subnet, env.unsignedTx.NodeID).Return(env.staker, nil) e := &StandardTxExecutor{ Backend: &Backend{ @@ -1292,6 +1858,7 @@ func TestStandardExecutorRemoveSubnetValidatorTx(t *testing.T) { newExecutor: func(ctrl *gomock.Controller) (*txs.RemoveSubnetValidatorTx, *StandardTxExecutor) { env := newValidRemoveSubnetValidatorTxVerifyEnv(t, ctrl) env.state = state.NewMockDiff(ctrl) + env.state.EXPECT().GetTimestamp().Return(env.latestForkTime) env.state.EXPECT().GetCurrentValidator(env.unsignedTx.Subnet, env.unsignedTx.NodeID).Return(env.staker, nil) env.state.EXPECT().GetSubnetOwner(env.unsignedTx.Subnet).Return(nil, database.ErrNotFound) e := &StandardTxExecutor{ @@ -1319,6 +1886,7 @@ func TestStandardExecutorRemoveSubnetValidatorTx(t *testing.T) { newExecutor: func(ctrl *gomock.Controller) (*txs.RemoveSubnetValidatorTx, *StandardTxExecutor) { env := newValidRemoveSubnetValidatorTxVerifyEnv(t, ctrl) env.state = state.NewMockDiff(ctrl) + env.state.EXPECT().GetTimestamp().Return(env.latestForkTime) env.state.EXPECT().GetCurrentValidator(env.unsignedTx.Subnet, env.unsignedTx.NodeID).Return(env.staker, nil) subnetOwner := fx.NewMockOwner(ctrl) env.state.EXPECT().GetSubnetOwner(env.unsignedTx.Subnet).Return(subnetOwner, nil) @@ -1348,6 +1916,7 @@ func TestStandardExecutorRemoveSubnetValidatorTx(t *testing.T) { newExecutor: func(ctrl *gomock.Controller) 
(*txs.RemoveSubnetValidatorTx, *StandardTxExecutor) { env := newValidRemoveSubnetValidatorTxVerifyEnv(t, ctrl) env.state = state.NewMockDiff(ctrl) + env.state.EXPECT().GetTimestamp().Return(env.latestForkTime) env.state.EXPECT().GetCurrentValidator(env.unsignedTx.Subnet, env.unsignedTx.NodeID).Return(env.staker, nil) subnetOwner := fx.NewMockOwner(ctrl) env.state.EXPECT().GetSubnetOwner(env.unsignedTx.Subnet).Return(subnetOwner, nil) @@ -1391,6 +1960,7 @@ func TestStandardExecutorRemoveSubnetValidatorTx(t *testing.T) { } // Returns a TransformSubnetTx that passes syntactic verification. +// Memo field is empty as required post Durango activation func newTransformSubnetTx(t *testing.T) (*txs.TransformSubnetTx, *txs.Tx) { t.Helper() @@ -1433,7 +2003,6 @@ func newTransformSubnetTx(t *testing.T) (*txs.TransformSubnetTx, *txs.Tx) { }, }, }, - Memo: []byte("hi"), }, }, Subnet: ids.GenerateTestID(), @@ -1539,6 +2108,7 @@ func TestStandardExecutorTransformSubnetTx(t *testing.T) { env := newValidTransformSubnetTxVerifyEnv(t, ctrl) env.unsignedTx.MaxStakeDuration = math.MaxUint32 env.state = state.NewMockDiff(ctrl) + env.state.EXPECT().GetTimestamp().Return(env.latestForkTime) e := &StandardTxExecutor{ Backend: &Backend{ Config: &config.Config{ @@ -1566,6 +2136,7 @@ func TestStandardExecutorTransformSubnetTx(t *testing.T) { // Remove credentials env.tx.Creds = nil env.state = state.NewMockDiff(ctrl) + env.state.EXPECT().GetTimestamp().Return(env.latestForkTime) e := &StandardTxExecutor{ Backend: &Backend{ Config: &config.Config{ @@ -1593,6 +2164,7 @@ func TestStandardExecutorTransformSubnetTx(t *testing.T) { env := newValidTransformSubnetTxVerifyEnv(t, ctrl) env.state = state.NewMockDiff(ctrl) subnetOwner := fx.NewMockOwner(ctrl) + env.state.EXPECT().GetTimestamp().Return(env.latestForkTime) env.state.EXPECT().GetSubnetOwner(env.unsignedTx.Subnet).Return(subnetOwner, nil) env.state.EXPECT().GetSubnetTransformation(env.unsignedTx.Subnet).Return(nil, database.ErrNotFound).Times(1) env.fx.EXPECT().VerifyPermission(gomock.Any(), env.unsignedTx.SubnetAuth, env.tx.Creds[len(env.tx.Creds)-1], subnetOwner).Return(nil) @@ -1627,6 +2199,7 @@ func TestStandardExecutorTransformSubnetTx(t *testing.T) { // Set dependency expectations. 
subnetOwner := fx.NewMockOwner(ctrl) + env.state.EXPECT().GetTimestamp().Return(env.latestForkTime) env.state.EXPECT().GetSubnetOwner(env.unsignedTx.Subnet).Return(subnetOwner, nil).Times(1) env.state.EXPECT().GetSubnetTransformation(env.unsignedTx.Subnet).Return(nil, database.ErrNotFound).Times(1) env.fx.EXPECT().VerifyPermission(env.unsignedTx, env.unsignedTx.SubnetAuth, env.tx.Creds[len(env.tx.Creds)-1], subnetOwner).Return(nil).Times(1) diff --git a/vms/platformvm/txs/executor/state_changes.go b/vms/platformvm/txs/executor/state_changes.go index 66363578e877..06302a1c0a79 100644 --- a/vms/platformvm/txs/executor/state_changes.go +++ b/vms/platformvm/txs/executor/state_changes.go @@ -20,6 +20,7 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/utils/timer/mockable" "github.com/ava-labs/avalanchego/vms/platformvm/reward" "github.com/ava-labs/avalanchego/vms/platformvm/state" "github.com/ava-labs/avalanchego/vms/platformvm/txs" @@ -67,82 +68,59 @@ func VerifyNewChainTime( return nil } -type StateChanges interface { - Apply(onAccept state.Diff) - Len() int -} - -type stateChanges struct { - updatedSupplies map[ids.ID]uint64 - currentValidatorsToAdd []*state.Staker - currentDelegatorsToAdd []*state.Staker - pendingValidatorsToRemove []*state.Staker - pendingDelegatorsToRemove []*state.Staker - currentValidatorsToRemove []*state.Staker - - caminoStateChanges -} - -func (s *stateChanges) Apply(stateDiff state.Diff) { - for subnetID, supply := range s.updatedSupplies { - stateDiff.SetCurrentSupply(subnetID, supply) +func NextBlockTime(state state.Chain, clk *mockable.Clock) (time.Time, bool, error) { + var ( + timestamp = clk.Time() + parentTime = state.GetTimestamp() + ) + if parentTime.After(timestamp) { + timestamp = parentTime } + // [timestamp] = max(now, parentTime) - for _, currentValidatorToAdd := range s.currentValidatorsToAdd { - stateDiff.PutCurrentValidator(currentValidatorToAdd) - } - for _, pendingValidatorToRemove := range s.pendingValidatorsToRemove { - stateDiff.DeletePendingValidator(pendingValidatorToRemove) - } - for _, currentDelegatorToAdd := range s.currentDelegatorsToAdd { - stateDiff.PutCurrentDelegator(currentDelegatorToAdd) - } - for _, pendingDelegatorToRemove := range s.pendingDelegatorsToRemove { - stateDiff.DeletePendingDelegator(pendingDelegatorToRemove) - } - for _, currentValidatorToRemove := range s.currentValidatorsToRemove { - stateDiff.DeleteCurrentValidator(currentValidatorToRemove) + nextStakerChangeTime, err := GetNextStakerChangeTime(state) + if err != nil { + return time.Time{}, false, fmt.Errorf("failed getting next staker change time: %w", err) } - s.caminoStateChanges.Apply(stateDiff) -} - -func (s *stateChanges) Len() int { - return len(s.currentValidatorsToAdd) + len(s.currentDelegatorsToAdd) + - len(s.pendingValidatorsToRemove) + len(s.pendingDelegatorsToRemove) + - len(s.currentValidatorsToRemove) + s.caminoStateChanges.Len() + // timeWasCapped means that [timestamp] was reduced to [nextStakerChangeTime] + timeWasCapped := !timestamp.Before(nextStakerChangeTime) + if timeWasCapped { + timestamp = nextStakerChangeTime + } + // [timestamp] = min(max(now, parentTime), nextStakerChangeTime) + return timestamp, timeWasCapped, nil } -// AdvanceTimeTo does not modify [parentState]. -// Instead it returns all the StateChanges caused by advancing the chain time to -// the [newChainTime]. 
+// AdvanceTimeTo applies all state changes to [parentState] resulting from +// advancing the chain time to [newChainTime]. +// Returns true iff the validator set changed. func AdvanceTimeTo( backend *Backend, parentState state.Chain, newChainTime time.Time, -) (StateChanges, error) { - pendingStakerIterator, err := parentState.GetPendingStakerIterator() +) (bool, error) { + // We promote pending stakers to current stakers first and remove + // completed stakers from the current staker set. We assume that any + // promoted staker will not immediately be removed from the current staker + // set. This is guaranteed by the following invariants. + // + // Invariant: MinStakeDuration > 0 => guarantees [StartTime] != [EndTime] + // Invariant: [newChainTime] <= nextStakerChangeTime. + + changes, err := state.NewDiffOn(parentState) if err != nil { - return nil, err + return false, err } - defer pendingStakerIterator.Release() - changes := &stateChanges{ - updatedSupplies: make(map[ids.ID]uint64), + pendingStakerIterator, err := parentState.GetPendingStakerIterator() + if err != nil { + return false, err } + defer pendingStakerIterator.Release() - // Add to the staker set any pending stakers whose start time is at or - // before the new timestamp - - // Note: we process pending stakers ready to be promoted to current ones and - // then we process current stakers to be demoted out of stakers set. It is - // guaranteed that no promoted stakers would be demoted immediately. A - // failure of this invariant would cause a staker to be added to - // StateChanges and be persisted among current stakers even if it already - // expired. The following invariants ensure this does not happens: - // Invariant: minimum stake duration is > 0, so staker.StartTime != staker.EndTime. - // Invariant: [newChainTime] does not skip stakers set change times. - + var changed bool + // Promote any pending stakers to current if [StartTime] <= [newChainTime]. for pendingStakerIterator.Next() { stakerToRemove := pendingStakerIterator.Value() if stakerToRemove.StartTime.After(newChainTime) { @@ -157,22 +135,20 @@ func AdvanceTimeTo( stakerToAdd.Priority = txs.PendingToCurrentPriorities[stakerToRemove.Priority] if stakerToRemove.Priority == txs.SubnetPermissionedValidatorPendingPriority { - changes.currentValidatorsToAdd = append(changes.currentValidatorsToAdd, &stakerToAdd) - changes.pendingValidatorsToRemove = append(changes.pendingValidatorsToRemove, stakerToRemove) + changes.PutCurrentValidator(&stakerToAdd) + changes.DeletePendingValidator(stakerToRemove) + changed = true continue } - supply, ok := changes.updatedSupplies[stakerToRemove.SubnetID] - if !ok { - supply, err = parentState.GetCurrentSupply(stakerToRemove.SubnetID) - if err != nil { - return nil, err - } + supply, err := changes.GetCurrentSupply(stakerToRemove.SubnetID) + if err != nil { + return false, err } rewards, err := GetRewardsCalculator(backend, parentState, stakerToRemove.SubnetID) if err != nil { - return nil, err + return false, err } potentialReward := rewards.Calculate( @@ -184,25 +160,28 @@ func AdvanceTimeTo( // Invariant: [rewards.Calculate] can never return a [potentialReward] // such that [supply + potentialReward > maximumSupply]. 
- changes.updatedSupplies[stakerToRemove.SubnetID] = supply + potentialReward + changes.SetCurrentSupply(stakerToRemove.SubnetID, supply+potentialReward) switch stakerToRemove.Priority { case txs.PrimaryNetworkValidatorPendingPriority, txs.SubnetPermissionlessValidatorPendingPriority: - changes.currentValidatorsToAdd = append(changes.currentValidatorsToAdd, &stakerToAdd) - changes.pendingValidatorsToRemove = append(changes.pendingValidatorsToRemove, stakerToRemove) + changes.PutCurrentValidator(&stakerToAdd) + changes.DeletePendingValidator(stakerToRemove) case txs.PrimaryNetworkDelegatorApricotPendingPriority, txs.PrimaryNetworkDelegatorBanffPendingPriority, txs.SubnetPermissionlessDelegatorPendingPriority: - changes.currentDelegatorsToAdd = append(changes.currentDelegatorsToAdd, &stakerToAdd) - changes.pendingDelegatorsToRemove = append(changes.pendingDelegatorsToRemove, stakerToRemove) + changes.PutCurrentDelegator(&stakerToAdd) + changes.DeletePendingDelegator(stakerToRemove) default: - return nil, fmt.Errorf("expected staker priority got %d", stakerToRemove.Priority) + return false, fmt.Errorf("expected staker priority got %d", stakerToRemove.Priority) } + + changed = true } + // Remove any current stakers whose [EndTime] <= [newChainTime]. currentStakerIterator, err := parentState.GetCurrentStakerIterator() if err != nil { - return nil, err + return false, err } defer currentStakerIterator.Release() @@ -220,14 +199,16 @@ func AdvanceTimeTo( break } - changes.currentValidatorsToRemove = append(changes.currentValidatorsToRemove, stakerToRemove) + changes.DeleteCurrentValidator(stakerToRemove) + changed = true } - if err := caminoAdvanceTimeTo(backend, parentState, newChainTime, changes); err != nil { - return nil, err + if err := changes.Apply(parentState); err != nil { + return false, err } - return changes, nil + parentState.SetTimestamp(newChainTime) + return changed, nil } func GetRewardsCalculator( diff --git a/vms/platformvm/txs/executor/tx_mempool_verifier.go b/vms/platformvm/txs/executor/tx_mempool_verifier.go deleted file mode 100644 index 2b65a5cd4234..000000000000 --- a/vms/platformvm/txs/executor/tx_mempool_verifier.go +++ /dev/null @@ -1,161 +0,0 @@ -// Copyright (C) 2022, Chain4Travel AG. All rights reserved. -// -// This file is a derived work, based on ava-labs code whose -// original notices appear below. -// -// It is distributed under the same license conditions as the -// original code from which it is derived. -// -// Much love to the original authors for their work. -// ********************************************************** -// Copyright (C) 2019-2024, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package executor - -import ( - "errors" - "fmt" - "time" - - "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/timer/mockable" - "github.com/ava-labs/avalanchego/vms/platformvm/state" - "github.com/ava-labs/avalanchego/vms/platformvm/txs" -) - -var _ txs.Visitor = (*MempoolTxVerifier)(nil) - -type MempoolTxVerifier struct { - *Backend - ParentID ids.ID - StateVersions state.Versions - Tx *txs.Tx -} - -func (*MempoolTxVerifier) AdvanceTimeTx(*txs.AdvanceTimeTx) error { - return ErrWrongTxType -} - -func (*MempoolTxVerifier) RewardValidatorTx(*txs.RewardValidatorTx) error { - return ErrWrongTxType -} - -func (v *MempoolTxVerifier) AddValidatorTx(tx *txs.AddValidatorTx) error { - return v.standardTx(tx) -} - -func (v *MempoolTxVerifier) AddSubnetValidatorTx(tx *txs.AddSubnetValidatorTx) error { - return v.standardTx(tx) -} - -func (v *MempoolTxVerifier) AddDelegatorTx(tx *txs.AddDelegatorTx) error { - return v.standardTx(tx) -} - -func (v *MempoolTxVerifier) CreateChainTx(tx *txs.CreateChainTx) error { - return v.standardTx(tx) -} - -func (v *MempoolTxVerifier) CreateSubnetTx(tx *txs.CreateSubnetTx) error { - return v.standardTx(tx) -} - -func (v *MempoolTxVerifier) ImportTx(tx *txs.ImportTx) error { - return v.standardTx(tx) -} - -func (v *MempoolTxVerifier) ExportTx(tx *txs.ExportTx) error { - return v.standardTx(tx) -} - -func (v *MempoolTxVerifier) RemoveSubnetValidatorTx(tx *txs.RemoveSubnetValidatorTx) error { - return v.standardTx(tx) -} - -func (v *MempoolTxVerifier) TransformSubnetTx(tx *txs.TransformSubnetTx) error { - return v.standardTx(tx) -} - -func (v *MempoolTxVerifier) AddPermissionlessValidatorTx(tx *txs.AddPermissionlessValidatorTx) error { - return v.standardTx(tx) -} - -func (v *MempoolTxVerifier) AddPermissionlessDelegatorTx(tx *txs.AddPermissionlessDelegatorTx) error { - return v.standardTx(tx) -} - -func (v *MempoolTxVerifier) TransferSubnetOwnershipTx(tx *txs.TransferSubnetOwnershipTx) error { - return v.standardTx(tx) -} - -func (v *MempoolTxVerifier) BaseTx(tx *txs.BaseTx) error { - return v.standardTx(tx) -} - -func (v *MempoolTxVerifier) standardTx(tx txs.UnsignedTx) error { - baseState, err := v.standardBaseState() - if err != nil { - return err - } - - executor := CaminoStandardTxExecutor{ - StandardTxExecutor{ - Backend: v.Backend, - State: baseState, - Tx: v.Tx, - }, - } - err = tx.Visit(&executor) - // We ignore [errFutureStakeTime] here because the time will be advanced - // when this transaction is issued. 
- if errors.Is(err, ErrFutureStakeTime) { - return nil - } - return err -} - -func (v *MempoolTxVerifier) standardBaseState() (state.Diff, error) { - state, err := state.NewDiff(v.ParentID, v.StateVersions) - if err != nil { - return nil, err - } - - nextBlkTime, _, err := NextBlockTime(state, v.Clk) - if err != nil { - return nil, err - } - - changes, err := AdvanceTimeTo(v.Backend, state, nextBlkTime) - if err != nil { - return nil, err - } - changes.Apply(state) - state.SetTimestamp(nextBlkTime) - - return state, nil -} - -func NextBlockTime(state state.Chain, clk *mockable.Clock) (time.Time, bool, error) { - var ( - timestamp = clk.Time() - parentTime = state.GetTimestamp() - ) - if parentTime.After(timestamp) { - timestamp = parentTime - } - // [timestamp] = max(now, parentTime) - - nextStakerChangeTime, err := GetNextStakerChangeTime(state) - if err != nil { - return time.Time{}, false, fmt.Errorf("failed getting next staker change time: %w", err) - } - - // timeWasCapped means that [timestamp] was reduced to [nextStakerChangeTime] - timeWasCapped := !timestamp.Before(nextStakerChangeTime) - if timeWasCapped { - timestamp = nextStakerChangeTime - } - // [timestamp] = min(max(now, parentTime), nextStakerChangeTime) - return timestamp, timeWasCapped, nil -} diff --git a/vms/platformvm/txs/reward_validator_tx.go b/vms/platformvm/txs/reward_validator_tx.go index 01b1e34bde46..85129af4695c 100644 --- a/vms/platformvm/txs/reward_validator_tx.go +++ b/vms/platformvm/txs/reward_validator_tx.go @@ -26,9 +26,6 @@ type RewardValidatorTx struct { // ID of the tx that created the delegator/validator being removed/rewarded TxID ids.ID `serialize:"true" json:"txID"` - // Marks if this validator should be rewarded according to this node. - ShouldPreferCommit bool `json:"-"` - unsignedBytes []byte // Unsigned byte representation of this data } diff --git a/vms/platformvm/vm.go b/vms/platformvm/vm.go index 22a38acdd9f2..d59fe140fe49 100644 --- a/vms/platformvm/vm.go +++ b/vms/platformvm/vm.go @@ -216,7 +216,10 @@ func (vm *VM) Initialize( chainCtx.Log, chainCtx.NodeID, chainCtx.SubnetID, - chainCtx.ValidatorState, + validators.NewLockedState( + &chainCtx.Lock, + validatorManager, + ), txVerifier, mempool, txExecutorBackend.Config.PartialSyncPrimaryNetwork, diff --git a/vms/platformvm/vm_regression_test.go b/vms/platformvm/vm_regression_test.go index 36186ec32ae0..f4e84d0776ae 100644 --- a/vms/platformvm/vm_regression_test.go +++ b/vms/platformvm/vm_regression_test.go @@ -14,26 +14,33 @@ import ( "github.com/stretchr/testify/require" + "golang.org/x/sync/errgroup" + "github.com/ava-labs/avalanchego/chains" "github.com/ava-labs/avalanchego/chains/atomic" "github.com/ava-labs/avalanchego/database" "github.com/ava-labs/avalanchego/database/memdb" "github.com/ava-labs/avalanchego/database/prefixdb" "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/network/p2p" + "github.com/ava-labs/avalanchego/network/p2p/gossip" "github.com/ava-labs/avalanchego/snow/choices" "github.com/ava-labs/avalanchego/snow/consensus/snowman" "github.com/ava-labs/avalanchego/snow/engine/common" "github.com/ava-labs/avalanchego/snow/snowtest" "github.com/ava-labs/avalanchego/snow/uptime" "github.com/ava-labs/avalanchego/snow/validators" + "github.com/ava-labs/avalanchego/utils/bloom" "github.com/ava-labs/avalanchego/utils/constants" "github.com/ava-labs/avalanchego/utils/crypto/bls" "github.com/ava-labs/avalanchego/utils/crypto/secp256k1" "github.com/ava-labs/avalanchego/utils/timer/mockable" + 
"github.com/ava-labs/avalanchego/version" "github.com/ava-labs/avalanchego/vms/components/avax" "github.com/ava-labs/avalanchego/vms/platformvm/block" "github.com/ava-labs/avalanchego/vms/platformvm/config" "github.com/ava-labs/avalanchego/vms/platformvm/metrics" + "github.com/ava-labs/avalanchego/vms/platformvm/network" "github.com/ava-labs/avalanchego/vms/platformvm/reward" "github.com/ava-labs/avalanchego/vms/platformvm/signer" "github.com/ava-labs/avalanchego/vms/platformvm/state" @@ -49,11 +56,7 @@ func TestAddDelegatorTxOverDelegatedRegression(t *testing.T) { require := require.New(t) vm, _, _ := defaultVM(t, cortinaFork) vm.ctx.Lock.Lock() - defer func() { - vm.ctx.Lock.Lock() - require.NoError(vm.Shutdown(context.Background())) - vm.ctx.Lock.Unlock() - }() + defer vm.ctx.Lock.Unlock() validatorStartTime := vm.clock.Time().Add(executor.SyncBound).Add(1 * time.Second) validatorEndTime := validatorStartTime.Add(360 * 24 * time.Hour) @@ -174,6 +177,7 @@ func TestAddDelegatorTxOverDelegatedRegression(t *testing.T) { vm.ctx.Lock.Unlock() err = vm.issueTx(context.Background(), addThirdDelegatorTx) require.ErrorIs(err, executor.ErrOverDelegated) + vm.ctx.Lock.Lock() } func TestAddDelegatorTxHeapCorruption(t *testing.T) { @@ -219,10 +223,7 @@ func TestAddDelegatorTxHeapCorruption(t *testing.T) { vm.ApricotPhase3Time = test.ap3Time vm.ctx.Lock.Lock() - defer func() { - require.NoError(vm.Shutdown(context.Background())) - vm.ctx.Lock.Unlock() - }() + defer vm.ctx.Lock.Unlock() key, err := secp256k1.NewPrivateKey() require.NoError(err) @@ -487,10 +488,7 @@ func TestRejectedStateRegressionInvalidValidatorTimestamp(t *testing.T) { vm, baseDB, mutableSharedMemory := defaultVM(t, cortinaFork) vm.ctx.Lock.Lock() - defer func() { - require.NoError(vm.Shutdown(context.Background())) - vm.ctx.Lock.Unlock() - }() + defer vm.ctx.Lock.Unlock() nodeID := ids.GenerateTestNodeID() newValidatorStartTime := vm.clock.Time().Add(executor.SyncBound).Add(1 * time.Second) @@ -695,10 +693,7 @@ func TestRejectedStateRegressionInvalidValidatorReward(t *testing.T) { vm, baseDB, mutableSharedMemory := defaultVM(t, cortinaFork) vm.ctx.Lock.Lock() - defer func() { - require.NoError(vm.Shutdown(context.Background())) - vm.ctx.Lock.Unlock() - }() + defer vm.ctx.Lock.Unlock() vm.state.SetCurrentSupply(constants.PrimaryNetworkID, defaultRewardConfig.SupplyCap/2) @@ -1010,10 +1005,7 @@ func TestValidatorSetAtCacheOverwriteRegression(t *testing.T) { vm, _, _ := defaultVM(t, cortinaFork) vm.ctx.Lock.Lock() - defer func() { - require.NoError(vm.Shutdown(context.Background())) - vm.ctx.Lock.Unlock() - }() + defer vm.ctx.Lock.Unlock() currentHeight, err := vm.GetCurrentHeight(context.Background()) require.NoError(err) @@ -1148,14 +1140,8 @@ func TestAddDelegatorTxAddBeforeRemove(t *testing.T) { delegator2Stake := defaultMaxValidatorStake - validatorStake vm, _, _ := defaultVM(t, cortinaFork) - vm.ctx.Lock.Lock() - defer func() { - vm.ctx.Lock.Lock() - require.NoError(vm.Shutdown(context.Background())) - - vm.ctx.Lock.Unlock() - }() + defer vm.ctx.Lock.Unlock() key, err := secp256k1.NewPrivateKey() require.NoError(err) @@ -1230,6 +1216,7 @@ func TestAddDelegatorTxAddBeforeRemove(t *testing.T) { vm.ctx.Lock.Unlock() err = vm.issueTx(context.Background(), addSecondDelegatorTx) require.ErrorIs(err, executor.ErrOverDelegated) + vm.ctx.Lock.Lock() } func TestRemovePermissionedValidatorDuringPendingToCurrentTransitionNotTracked(t *testing.T) { @@ -1239,12 +1226,8 @@ func 
TestRemovePermissionedValidatorDuringPendingToCurrentTransitionNotTracked(t validatorEndTime := validatorStartTime.Add(360 * 24 * time.Hour) vm, _, _ := defaultVM(t, cortinaFork) - vm.ctx.Lock.Lock() - defer func() { - require.NoError(vm.Shutdown(context.Background())) - vm.ctx.Lock.Unlock() - }() + defer vm.ctx.Lock.Unlock() key, err := secp256k1.NewPrivateKey() require.NoError(err) @@ -1364,13 +1347,8 @@ func TestRemovePermissionedValidatorDuringPendingToCurrentTransitionTracked(t *t validatorEndTime := validatorStartTime.Add(360 * 24 * time.Hour) vm, _, _ := defaultVM(t, cortinaFork) - vm.ctx.Lock.Lock() - defer func() { - require.NoError(vm.Shutdown(context.Background())) - - vm.ctx.Lock.Unlock() - }() + defer vm.ctx.Lock.Unlock() key, err := secp256k1.NewPrivateKey() require.NoError(err) @@ -1474,10 +1452,7 @@ func TestSubnetValidatorBLSKeyDiffAfterExpiry(t *testing.T) { require := require.New(t) vm, _, _ := defaultVM(t, cortinaFork) vm.ctx.Lock.Lock() - defer func() { - require.NoError(vm.Shutdown(context.Background())) - vm.ctx.Lock.Unlock() - }() + defer vm.ctx.Lock.Unlock() subnetID := testSubnet1.TxID @@ -1762,10 +1737,7 @@ func TestPrimaryNetworkValidatorPopulatedToEmptyBLSKeyDiff(t *testing.T) { require := require.New(t) vm, _, _ := defaultVM(t, cortinaFork) vm.ctx.Lock.Lock() - defer func() { - require.NoError(vm.Shutdown(context.Background())) - vm.ctx.Lock.Unlock() - }() + defer vm.ctx.Lock.Unlock() // setup time currentTime := defaultGenesisTime @@ -1925,10 +1897,7 @@ func TestSubnetValidatorPopulatedToEmptyBLSKeyDiff(t *testing.T) { require := require.New(t) vm, _, _ := defaultVM(t, cortinaFork) vm.ctx.Lock.Lock() - defer func() { - require.NoError(vm.Shutdown(context.Background())) - vm.ctx.Lock.Unlock() - }() + defer vm.ctx.Lock.Unlock() subnetID := testSubnet1.TxID @@ -2142,10 +2111,7 @@ func TestSubnetValidatorSetAfterPrimaryNetworkValidatorRemoval(t *testing.T) { require := require.New(t) vm, _, _ := defaultVM(t, cortinaFork) vm.ctx.Lock.Lock() - defer func() { - require.NoError(vm.Shutdown(context.Background())) - vm.ctx.Lock.Unlock() - }() + defer vm.ctx.Lock.Unlock() subnetID := testSubnet1.TxID @@ -2259,6 +2225,72 @@ func TestSubnetValidatorSetAfterPrimaryNetworkValidatorRemoval(t *testing.T) { require.NoError(err) } +func TestValidatorSetRaceCondition(t *testing.T) { + require := require.New(t) + vm, _, _ := defaultVM(t, cortinaFork) + vm.ctx.Lock.Lock() + defer vm.ctx.Lock.Unlock() + + nodeID := ids.GenerateTestNodeID() + require.NoError(vm.Connected(context.Background(), nodeID, version.CurrentApp)) + + protocolAppRequestBytes, err := gossip.MarshalAppRequest( + bloom.EmptyFilter.Marshal(), + ids.Empty[:], + ) + require.NoError(err) + + appRequestBytes := p2p.PrefixMessage( + p2p.ProtocolPrefix(network.TxGossipHandlerID), + protocolAppRequestBytes, + ) + + var ( + eg errgroup.Group + ctx, cancel = context.WithCancel(context.Background()) + ) + // keep 10 workers running + for i := 0; i < 10; i++ { + eg.Go(func() error { + for ctx.Err() == nil { + err := vm.AppRequest( + context.Background(), + nodeID, + 0, + time.Now().Add(time.Hour), + appRequestBytes, + ) + if err != nil { + return err + } + } + return nil + }) + } + + // If the validator set lock isn't held, the race detector should fail here.
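+ // Each iteration below mutates the last accepted block and height that the
+ // AppRequest goroutines above read through the VM's validator state.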
+ for i := uint64(0); i < 1000; i++ { + blk, err := block.NewBanffStandardBlock( + time.Now(), + vm.state.GetLastAccepted(), + i, + nil, + ) + require.NoError(err) + + vm.state.SetLastAccepted(blk.ID()) + vm.state.SetHeight(blk.Height()) + vm.state.AddStatelessBlock(blk) + } + + // If the validator set lock is grabbed, we need to make sure to release the + // lock to avoid a deadlock. + vm.ctx.Lock.Unlock() + cancel() // stop and wait for workers + require.NoError(eg.Wait()) + vm.ctx.Lock.Lock() +} + func buildAndAcceptStandardBlock(vm *VM) error { blk, err := vm.Builder.BuildBlock(context.Background()) if err != nil { diff --git a/vms/platformvm/vm_test.go b/vms/platformvm/vm_test.go index 23e88a646368..7cc53bb2320d 100644 --- a/vms/platformvm/vm_test.go +++ b/vms/platformvm/vm_test.go @@ -277,13 +277,14 @@ func defaultVM(t *testing.T, fork activeFork) (*VM, database.Database, *mutableS return nil } + dynamicConfigBytes := []byte(`{"network":{"max-validator-set-staleness":0}}`) require.NoError(vm.Initialize( context.Background(), ctx, chainDB, genesisBytes, nil, - nil, + dynamicConfigBytes, msgChan, nil, appSender, @@ -315,6 +316,13 @@ func defaultVM(t *testing.T, fork activeFork) (*VM, database.Database, *mutableS require.NoError(blk.Accept(context.Background())) require.NoError(vm.SetPreference(context.Background(), vm.manager.LastAccepted())) + t.Cleanup(func() { + vm.ctx.Lock.Lock() + defer vm.ctx.Lock.Unlock() + + require.NoError(vm.Shutdown(context.Background())) + }) + return vm, db, msm } @@ -323,10 +331,7 @@ func TestGenesis(t *testing.T) { require := require.New(t) vm, _, _ := defaultVM(t, latestFork) vm.ctx.Lock.Lock() - defer func() { - require.NoError(vm.Shutdown(context.Background())) - vm.ctx.Lock.Unlock() - }() + defer vm.ctx.Lock.Unlock() // Ensure the genesis block has been accepted and stored genesisBlockID, err := vm.LastAccepted(context.Background()) // lastAccepted should be ID of genesis block @@ -379,10 +384,7 @@ func TestAddValidatorCommit(t *testing.T) { require := require.New(t) vm, _, _ := defaultVM(t, latestFork) vm.ctx.Lock.Lock() - defer func() { - require.NoError(vm.Shutdown(context.Background())) - vm.ctx.Lock.Unlock() - }() + defer vm.ctx.Lock.Unlock() var ( startTime = vm.clock.Time().Add(txexecutor.SyncBound).Add(1 * time.Second) @@ -429,10 +431,7 @@ func TestInvalidAddValidatorCommit(t *testing.T) { require := require.New(t) vm, _, _ := defaultVM(t, cortinaFork) vm.ctx.Lock.Lock() - defer func() { - require.NoError(vm.Shutdown(context.Background())) - vm.ctx.Lock.Unlock() - }() + defer vm.ctx.Lock.Unlock() nodeID := ids.GenerateTestNodeID() startTime := defaultGenesisTime.Add(-txexecutor.SyncBound).Add(-1 * time.Second) @@ -482,10 +481,7 @@ func TestAddValidatorReject(t *testing.T) { require := require.New(t) vm, _, _ := defaultVM(t, cortinaFork) vm.ctx.Lock.Lock() - defer func() { - require.NoError(vm.Shutdown(context.Background())) - vm.ctx.Lock.Unlock() - }() + defer vm.ctx.Lock.Unlock() var ( startTime = vm.clock.Time().Add(txexecutor.SyncBound).Add(1 * time.Second) @@ -530,11 +526,7 @@ func TestAddValidatorInvalidNotReissued(t *testing.T) { require := require.New(t) vm, _, _ := defaultVM(t, latestFork) vm.ctx.Lock.Lock() - defer func() { - vm.ctx.Lock.Lock() - require.NoError(vm.Shutdown(context.Background())) - vm.ctx.Lock.Unlock() - }() + defer vm.ctx.Lock.Unlock() // Use nodeID that is already in the genesis repeatNodeID := genesisNodeIDs[0] @@ -559,6 +551,7 @@ func TestAddValidatorInvalidNotReissued(t *testing.T) { vm.ctx.Lock.Unlock() err = 
vm.issueTx(context.Background(), tx) require.ErrorIs(err, txexecutor.ErrAlreadyValidator) + vm.ctx.Lock.Lock() } // Accept proposal to add validator to subnet @@ -566,10 +559,7 @@ func TestAddSubnetValidatorAccept(t *testing.T) { require := require.New(t) vm, _, _ := defaultVM(t, latestFork) vm.ctx.Lock.Lock() - defer func() { - require.NoError(vm.Shutdown(context.Background())) - vm.ctx.Lock.Unlock() - }() + defer vm.ctx.Lock.Unlock() var ( startTime = vm.clock.Time().Add(txexecutor.SyncBound).Add(1 * time.Second) @@ -616,10 +606,7 @@ func TestAddSubnetValidatorReject(t *testing.T) { require := require.New(t) vm, _, _ := defaultVM(t, latestFork) vm.ctx.Lock.Lock() - defer func() { - require.NoError(vm.Shutdown(context.Background())) - vm.ctx.Lock.Unlock() - }() + defer vm.ctx.Lock.Unlock() var ( startTime = vm.clock.Time().Add(txexecutor.SyncBound).Add(1 * time.Second) @@ -665,10 +652,7 @@ func TestRewardValidatorAccept(t *testing.T) { require := require.New(t) vm, _, _ := defaultVM(t, latestFork) vm.ctx.Lock.Lock() - defer func() { - require.NoError(vm.Shutdown(context.Background())) - vm.ctx.Lock.Unlock() - }() + defer vm.ctx.Lock.Unlock() // Fast forward clock to time for genesis validators to leave vm.clock.Set(defaultValidateEndTime) @@ -736,10 +720,7 @@ func TestRewardValidatorReject(t *testing.T) { require := require.New(t) vm, _, _ := defaultVM(t, latestFork) vm.ctx.Lock.Lock() - defer func() { - require.NoError(vm.Shutdown(context.Background())) - vm.ctx.Lock.Unlock() - }() + defer vm.ctx.Lock.Unlock() // Fast forward clock to time for genesis validators to leave vm.clock.Set(defaultValidateEndTime) @@ -809,10 +790,8 @@ func TestUnneededBuildBlock(t *testing.T) { require := require.New(t) vm, _, _ := defaultVM(t, latestFork) vm.ctx.Lock.Lock() - defer func() { - require.NoError(vm.Shutdown(context.Background())) - vm.ctx.Lock.Unlock() - }() + defer vm.ctx.Lock.Unlock() + _, err := vm.Builder.BuildBlock(context.Background()) require.ErrorIs(err, blockbuilder.ErrNoPendingBlocks) } @@ -822,10 +801,7 @@ func TestCreateChain(t *testing.T) { require := require.New(t) vm, _, _ := defaultVM(t, latestFork) vm.ctx.Lock.Lock() - defer func() { - require.NoError(vm.Shutdown(context.Background())) - vm.ctx.Lock.Unlock() - }() + defer vm.ctx.Lock.Unlock() tx, err := vm.txBuilder.NewCreateChainTx( testSubnet1.ID(), @@ -874,10 +850,7 @@ func TestCreateSubnet(t *testing.T) { require := require.New(t) vm, _, _ := defaultVM(t, latestFork) vm.ctx.Lock.Lock() - defer func() { - require.NoError(vm.Shutdown(context.Background())) - vm.ctx.Lock.Unlock() - }() + defer vm.ctx.Lock.Unlock() nodeID := genesisNodeIDs[0] createSubnetTx, err := vm.txBuilder.NewCreateSubnetTx( @@ -975,10 +948,7 @@ func TestAtomicImport(t *testing.T) { require := require.New(t) vm, baseDB, mutableSharedMemory := defaultVM(t, latestFork) vm.ctx.Lock.Lock() - defer func() { - require.NoError(vm.Shutdown(context.Background())) - vm.ctx.Lock.Unlock() - }() + defer vm.ctx.Lock.Unlock() utxoID := avax.UTXOID{ TxID: ids.Empty.Prefix(1), @@ -1064,10 +1034,7 @@ func TestOptimisticAtomicImport(t *testing.T) { require := require.New(t) vm, _, _ := defaultVM(t, apricotPhase3) vm.ctx.Lock.Lock() - defer func() { - require.NoError(vm.Shutdown(context.Background())) - vm.ctx.Lock.Unlock() - }() + defer vm.ctx.Lock.Unlock() tx := &txs.Tx{Unsigned: &txs.ImportTx{ BaseTx: txs.BaseTx{BaseTx: avax.BaseTx{ @@ -1257,8 +1224,8 @@ func TestBootstrapPartiallyAccepted(t *testing.T) { require := require.New(t) baseDB := memdb.New() - vmDB := 
prefixdb.New([]byte("vm"), baseDB) - bootstrappingDB := prefixdb.New([]byte("bootstrapping"), baseDB) + vmDB := prefixdb.New(chains.VMDBPrefix, baseDB) + bootstrappingDB := prefixdb.New(chains.ChainBootstrappingDBPrefix, baseDB) blocked, err := queue.NewWithMissing(bootstrappingDB, "", prometheus.NewRegistry()) require.NoError(err) @@ -1713,10 +1680,7 @@ func TestUnverifiedParent(t *testing.T) { func TestMaxStakeAmount(t *testing.T) { vm, _, _ := defaultVM(t, latestFork) vm.ctx.Lock.Lock() - defer func() { - require.NoError(t, vm.Shutdown(context.Background())) - vm.ctx.Lock.Unlock() - }() + defer vm.ctx.Lock.Unlock() nodeID := genesisNodeIDs[0] @@ -2019,13 +1983,8 @@ func TestRemovePermissionedValidatorDuringAddPending(t *testing.T) { validatorEndTime := validatorStartTime.Add(360 * 24 * time.Hour) vm, _, _ := defaultVM(t, latestFork) - vm.ctx.Lock.Lock() - defer func() { - require.NoError(vm.Shutdown(context.Background())) - - vm.ctx.Lock.Unlock() - }() + defer vm.ctx.Lock.Unlock() key, err := secp256k1.NewPrivateKey() require.NoError(err) @@ -2120,10 +2079,7 @@ func TestTransferSubnetOwnershipTx(t *testing.T) { require := require.New(t) vm, _, _ := defaultVM(t, latestFork) vm.ctx.Lock.Lock() - defer func() { - require.NoError(vm.Shutdown(context.Background())) - vm.ctx.Lock.Unlock() - }() + defer vm.ctx.Lock.Unlock() // Create a subnet createSubnetTx, err := vm.txBuilder.NewCreateSubnetTx( @@ -2199,10 +2155,7 @@ func TestBaseTx(t *testing.T) { require := require.New(t) vm, _, _ := defaultVM(t, latestFork) vm.ctx.Lock.Lock() - defer func() { - require.NoError(vm.Shutdown(context.Background())) - vm.ctx.Lock.Unlock() - }() + defer vm.ctx.Lock.Unlock() sendAmt := uint64(100000) changeAddr := ids.ShortEmpty @@ -2276,10 +2229,7 @@ func TestPruneMempool(t *testing.T) { require := require.New(t) vm, _, _ := defaultVM(t, latestFork) vm.ctx.Lock.Lock() - defer func() { - require.NoError(vm.Shutdown(context.Background())) - vm.ctx.Lock.Unlock() - }() + defer vm.ctx.Lock.Unlock() // Create a tx that will be valid regardless of timestamp. 
sendAmt := uint64(100000) diff --git a/vms/platformvm/warp/gwarp/signer_test.go b/vms/platformvm/warp/gwarp/signer_test.go index 306067dc883d..31c7b3e993d4 100644 --- a/vms/platformvm/warp/gwarp/signer_test.go +++ b/vms/platformvm/warp/gwarp/signer_test.go @@ -23,7 +23,6 @@ type testSigner struct { sk *bls.SecretKey networkID uint32 chainID ids.ID - closeFn func() } func setupSigner(t testing.TB) *testSigner { @@ -55,18 +54,21 @@ func setupSigner(t testing.TB) *testSigner { require.NoError(err) s.client = NewClient(pb.NewSignerClient(conn)) - s.closeFn = func() { + + t.Cleanup(func() { serverCloser.Stop() _ = conn.Close() _ = listener.Close() - } + }) + return s } func TestInterface(t *testing.T) { - for _, test := range warp.SignerTests { - s := setupSigner(t) - test(t, s.client, s.sk, s.networkID, s.chainID) - s.closeFn() + for name, test := range warp.SignerTests { + t.Run(name, func(t *testing.T) { + s := setupSigner(t) + test(t, s.client, s.sk, s.networkID, s.chainID) + }) } } diff --git a/vms/platformvm/warp/signer_test.go b/vms/platformvm/warp/signer_test.go index 1bc177872d87..84b51f6574fa 100644 --- a/vms/platformvm/warp/signer_test.go +++ b/vms/platformvm/warp/signer_test.go @@ -14,13 +14,15 @@ import ( ) func TestSigner(t *testing.T) { - for _, test := range SignerTests { - sk, err := bls.NewSecretKey() - require.NoError(t, err) + for name, test := range SignerTests { + t.Run(name, func(t *testing.T) { + sk, err := bls.NewSecretKey() + require.NoError(t, err) - chainID := ids.GenerateTestID() - s := NewSigner(sk, constants.UnitTestID, chainID) + chainID := ids.GenerateTestID() + s := NewSigner(sk, constants.UnitTestID, chainID) - test(t, s, sk, constants.UnitTestID, chainID) + test(t, s, sk, constants.UnitTestID, chainID) + }) } } diff --git a/vms/platformvm/warp/test_signer.go b/vms/platformvm/warp/test_signer.go index c17b15b215e2..e30423edf1ed 100644 --- a/vms/platformvm/warp/test_signer.go +++ b/vms/platformvm/warp/test_signer.go @@ -14,13 +14,14 @@ import ( ) // SignerTests is a list of all signer tests -var SignerTests = []func(t *testing.T, s Signer, sk *bls.SecretKey, networkID uint32, chainID ids.ID){ - TestSignerWrongChainID, - TestSignerVerifies, +var SignerTests = map[string]func(t *testing.T, s Signer, sk *bls.SecretKey, networkID uint32, chainID ids.ID){ + "WrongChainID": TestWrongChainID, + "WrongNetworkID": TestWrongNetworkID, + "Verifies": TestVerifies, } // Test that using a random SourceChainID results in an error -func TestSignerWrongChainID(t *testing.T, s Signer, _ *bls.SecretKey, _ uint32, _ ids.ID) { +func TestWrongChainID(t *testing.T, s Signer, _ *bls.SecretKey, _ uint32, _ ids.ID) { require := require.New(t) msg, err := NewUnsignedMessage( @@ -36,7 +37,7 @@ func TestSignerWrongChainID(t *testing.T, s Signer, _ *bls.SecretKey, _ uint32, } // Test that using a different networkID results in an error -func TestSignerWrongNetworkID(t *testing.T, s Signer, _ *bls.SecretKey, networkID uint32, blockchainID ids.ID) { +func TestWrongNetworkID(t *testing.T, s Signer, _ *bls.SecretKey, networkID uint32, blockchainID ids.ID) { require := require.New(t) msg, err := NewUnsignedMessage( @@ -52,7 +53,7 @@ func TestSignerWrongNetworkID(t *testing.T, s Signer, _ *bls.SecretKey, networkI } // Test that a signature generated with the signer verifies correctly -func TestSignerVerifies(t *testing.T, s Signer, sk *bls.SecretKey, networkID uint32, chainID ids.ID) { +func TestVerifies(t *testing.T, s Signer, sk *bls.SecretKey, networkID uint32, chainID ids.ID) { require := 
require.New(t) msg, err := NewUnsignedMessage( diff --git a/vms/propertyfx/factory.go b/vms/propertyfx/factory.go index c42b92c84c5f..53d6101b1306 100644 --- a/vms/propertyfx/factory.go +++ b/vms/propertyfx/factory.go @@ -5,12 +5,13 @@ package propertyfx import ( "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/logging" - "github.com/ava-labs/avalanchego/vms" + "github.com/ava-labs/avalanchego/vms/fx" ) +const Name = "propertyfx" + var ( - _ vms.Factory = (*Factory)(nil) + _ fx.Factory = (*Factory)(nil) // ID that this Fx uses when labeled ID = ids.ID{'p', 'r', 'o', 'p', 'e', 'r', 't', 'y', 'f', 'x'} @@ -18,6 +19,6 @@ var ( type Factory struct{} -func (*Factory) New(logging.Logger) (interface{}, error) { - return &Fx{}, nil +func (*Factory) New() any { + return &Fx{} } diff --git a/vms/propertyfx/factory_test.go b/vms/propertyfx/factory_test.go index f40cb2610a80..9aa461921e42 100644 --- a/vms/propertyfx/factory_test.go +++ b/vms/propertyfx/factory_test.go @@ -7,15 +7,11 @@ import ( "testing" "github.com/stretchr/testify/require" - - "github.com/ava-labs/avalanchego/utils/logging" ) func TestFactory(t *testing.T) { require := require.New(t) factory := Factory{} - fx, err := factory.New(logging.NoLog{}) - require.NoError(err) - require.NotNil(fx) + require.Equal(&Fx{}, factory.New()) } diff --git a/vms/proposervm/block.go b/vms/proposervm/block.go index 45a67b41bce0..14108cd3dba1 100644 --- a/vms/proposervm/block.go +++ b/vms/proposervm/block.go @@ -359,7 +359,7 @@ func (p *postForkCommonComponents) verifyPreDurangoBlockDelay( delay := blkTimestamp.Sub(parentTimestamp) if delay < minDelay { - return false, errProposerWindowNotStarted + return false, fmt.Errorf("%w: delay %s < minDelay %s", errProposerWindowNotStarted, delay, minDelay) } return delay < proposer.MaxVerifyDelay, nil @@ -374,6 +374,7 @@ func (p *postForkCommonComponents) verifyPostDurangoBlockDelay( var ( blkTimestamp = blk.Timestamp() blkHeight = blk.Height() + currentSlot = proposer.TimeToSlot(parentTimestamp, blkTimestamp) proposerID = blk.Proposer() ) @@ -381,7 +382,7 @@ func (p *postForkCommonComponents) verifyPostDurangoBlockDelay( ctx, blkHeight, parentPChainHeight, - proposer.TimeToSlot(parentTimestamp, blkTimestamp), + currentSlot, ) switch { case errors.Is(err, proposer.ErrAnyoneCanPropose): @@ -396,7 +397,7 @@ func (p *postForkCommonComponents) verifyPostDurangoBlockDelay( case expectedProposerID == proposerID: return true, nil // block should be signed default: - return false, errUnexpectedProposer + return false, fmt.Errorf("%w: slot %d expects %s", errUnexpectedProposer, currentSlot, expectedProposerID) } } @@ -464,7 +465,7 @@ func (p *postForkCommonComponents) shouldBuildSignedBlockPostDurango( // In case the inner VM only issued one pendingTxs message, we should // attempt to re-handle that once it is our turn to build the block. p.vm.notifyInnerBlockReady() - return false, errProposerWindowNotStarted + return false, fmt.Errorf("%w: slot %d expects %s", errUnexpectedProposer, currentSlot, expectedProposerID) } func (p *postForkCommonComponents) shouldBuildSignedBlockPreDurango( @@ -509,5 +510,5 @@ func (p *postForkCommonComponents) shouldBuildSignedBlockPreDurango( // In case the inner VM only issued one pendingTxs message, we should // attempt to re-handle that once it is our turn to build the block. 
p.vm.notifyInnerBlockReady() - return false, errProposerWindowNotStarted + return false, fmt.Errorf("%w: delay %s < minDelay %s", errProposerWindowNotStarted, delay, minDelay) } diff --git a/vms/proposervm/block_test.go b/vms/proposervm/block_test.go index fea216120811..7f81f4e70175 100644 --- a/vms/proposervm/block_test.go +++ b/vms/proposervm/block_test.go @@ -336,7 +336,7 @@ func TestPreDurangoNonValidatorNodeBlockBuiltDelaysTests(t *testing.T) { proVM.Set(localTime) _, err := proVM.BuildBlock(ctx) - require.ErrorIs(errProposerWindowNotStarted, err) + require.ErrorIs(err, errProposerWindowNotStarted) } { @@ -346,7 +346,7 @@ func TestPreDurangoNonValidatorNodeBlockBuiltDelaysTests(t *testing.T) { proVM.Set(localTime) _, err := proVM.BuildBlock(ctx) - require.ErrorIs(errProposerWindowNotStarted, err) + require.ErrorIs(err, errProposerWindowNotStarted) } { @@ -356,7 +356,7 @@ func TestPreDurangoNonValidatorNodeBlockBuiltDelaysTests(t *testing.T) { proVM.Set(localTime) _, err := proVM.BuildBlock(ctx) - require.ErrorIs(errProposerWindowNotStarted, err) + require.ErrorIs(err, errProposerWindowNotStarted) } { @@ -446,6 +446,6 @@ func TestPostDurangoBuildChildResetScheduler(t *testing.T) { parentTimestamp, pChainHeight-1, ) - require.ErrorIs(err, errProposerWindowNotStarted) + require.ErrorIs(err, errUnexpectedProposer) } } diff --git a/vms/proposervm/state/block_state.go b/vms/proposervm/state/block_state.go index 862d492b925b..0c5e210a8d81 100644 --- a/vms/proposervm/state/block_state.go +++ b/vms/proposervm/state/block_state.go @@ -5,7 +5,6 @@ package state import ( "errors" - "fmt" "github.com/prometheus/client_golang/prometheus" @@ -15,6 +14,7 @@ import ( "github.com/ava-labs/avalanchego/ids" "github.com/ava-labs/avalanchego/snow/choices" "github.com/ava-labs/avalanchego/utils/constants" + "github.com/ava-labs/avalanchego/utils/metric" "github.com/ava-labs/avalanchego/utils/units" "github.com/ava-labs/avalanchego/utils/wrappers" "github.com/ava-labs/avalanchego/version" @@ -69,7 +69,7 @@ func NewBlockState(db database.Database) BlockState { func NewMeteredBlockState(db database.Database, namespace string, metrics prometheus.Registerer) (BlockState, error) { blkCache, err := metercacher.New[ids.ID, *blockWrapper]( - fmt.Sprintf("%s_block_cache", namespace), + metric.AppendNamespace(namespace, "block_cache"), metrics, cache.NewSizedLRU[ids.ID, *blockWrapper]( blockCacheSize, diff --git a/vms/proposervm/vm_regression_test.go b/vms/proposervm/vm_regression_test.go index 168dd913d22a..ac34df120641 100644 --- a/vms/proposervm/vm_regression_test.go +++ b/vms/proposervm/vm_regression_test.go @@ -77,5 +77,5 @@ func TestProposerVMInitializeShouldFailIfInnerVMCantVerifyItsHeightIndex(t *test nil, nil, ) - require.ErrorIs(customError, err) + require.ErrorIs(err, customError) } diff --git a/vms/proposervm/vm_test.go b/vms/proposervm/vm_test.go index 39e9a46b6995..072634956e4a 100644 --- a/vms/proposervm/vm_test.go +++ b/vms/proposervm/vm_test.go @@ -1035,16 +1035,15 @@ func TestExpiredBuildBlock(t *testing.T) { coreVM.InitializeF = nil require.NoError(proVM.SetState(context.Background(), snow.NormalOp)) - require.NoError(proVM.SetPreference(context.Background(), coreGenBlk.IDV)) - // Make sure that passing a message works - toScheduler <- common.PendingTxs - <-toEngine - // Notify the proposer VM of a new block on the inner block side toScheduler <- common.PendingTxs + // The first notification will be read from the consensus engine + <-toEngine + // Before calling BuildBlock, verify a remote block and 
set it as the + // preferred block. coreBlk := &snowman.TestBlock{ TestDecidable: choices.TestDecidable{ IDV: ids.GenerateTestID(), @@ -1089,7 +1088,6 @@ func TestExpiredBuildBlock(t *testing.T) { require.NoError(err) require.NoError(parsedBlock.Verify(context.Background())) - require.NoError(proVM.SetPreference(context.Background(), parsedBlock.ID())) coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { @@ -1097,17 +1095,18 @@ func TestExpiredBuildBlock(t *testing.T) { return nil, errUnexpectedCall } - // The first notification will be read from the consensus engine - <-toEngine - + // Because we are now building on a different block, the proposer window + // shouldn't have started. _, err = proVM.BuildBlock(context.Background()) require.ErrorIs(err, errProposerWindowNotStarted) - proVM.Set(statelessBlock.Timestamp().Add(proposer.MaxVerifyDelay)) + proVM.Set(statelessBlock.Timestamp().Add(proposer.MaxBuildDelay)) proVM.Scheduler.SetBuildBlockTime(time.Now()) // The engine should have been notified to attempt to build a block now that - // the window has started again + // the window has started again. This is to guarantee that the inner VM has + // BuildBlock called after it sent a pendingTxs message on its internal + // engine channel. <-toEngine } diff --git a/vms/secp256k1fx/factory.go b/vms/secp256k1fx/factory.go index fd52fe79a6fe..9630795ea378 100644 --- a/vms/secp256k1fx/factory.go +++ b/vms/secp256k1fx/factory.go @@ -5,12 +5,13 @@ package secp256k1fx import ( "github.com/ava-labs/avalanchego/ids" - "github.com/ava-labs/avalanchego/utils/logging" - "github.com/ava-labs/avalanchego/vms" + "github.com/ava-labs/avalanchego/vms/fx" ) +const Name = "secp256k1fx" + var ( - _ vms.Factory = (*Factory)(nil) + _ fx.Factory = (*Factory)(nil) // ID that this Fx uses when labeled ID = ids.ID{'s', 'e', 'c', 'p', '2', '5', '6', 'k', '1', 'f', 'x'} @@ -18,6 +19,6 @@ var ( type Factory struct{} -func (*Factory) New(logging.Logger) (interface{}, error) { - return &Fx{}, nil +func (*Factory) New() any { + return &Fx{} } diff --git a/vms/secp256k1fx/factory_test.go b/vms/secp256k1fx/factory_test.go index 2b1fa184474d..d7653d361f59 100644 --- a/vms/secp256k1fx/factory_test.go +++ b/vms/secp256k1fx/factory_test.go @@ -7,14 +7,10 @@ import ( "testing" "github.com/stretchr/testify/require" - - "github.com/ava-labs/avalanchego/utils/logging" ) func TestFactory(t *testing.T) { require := require.New(t) factory := Factory{} - fx, err := factory.New(logging.NoLog{}) - require.NoError(err) - require.NotNil(fx) + require.Equal(&Fx{}, factory.New()) } diff --git a/x/merkledb/db_test.go b/x/merkledb/db_test.go index 244858aeccc8..48703556b72a 100644 --- a/x/merkledb/db_test.go +++ b/x/merkledb/db_test.go @@ -99,10 +99,12 @@ func Test_MerkleDB_GetValues_Safety(t *testing.T) { func Test_MerkleDB_DB_Interface(t *testing.T) { for _, bf := range validBranchFactors { - for _, test := range database.Tests { - db, err := getBasicDBWithBranchFactor(bf) - require.NoError(t, err) - test(t, db) + for name, test := range database.Tests { + t.Run(fmt.Sprintf("%s_%d", name, bf), func(t *testing.T) { + db, err := getBasicDBWithBranchFactor(bf) + require.NoError(t, err) + test(t, db) + }) } } } @@ -111,10 +113,12 @@ func Benchmark_MerkleDB_DBInterface(b *testing.B) { for _, size := range database.BenchmarkSizes { keys, values := database.SetupBenchmark(b, size[0], size[1], size[2]) for _, bf := range validBranchFactors { - for _, bench := range database.Benchmarks { - db, err := getBasicDBWithBranchFactor(bf) -
require.NoError(b, err) - bench(b, db, fmt.Sprintf("merkledb_%d", bf), keys, values) + for name, bench := range database.Benchmarks { + b.Run(fmt.Sprintf("merkledb_%d_%d_pairs_%d_keys_%d_values_%s", bf, size[0], size[1], size[2], name), func(b *testing.B) { + db, err := getBasicDBWithBranchFactor(bf) + require.NoError(b, err) + bench(b, db, keys, values) + }) } } }
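
Editor's note: two test-suite patterns recur in the hunks above. First, warp.SignerTests, database.Tests, and database.Benchmarks move from anonymous slices of functions to named maps driven by t.Run/b.Run, so each case is reported by name and can be filtered with `go test -run`. Second, gwarp's setupSigner replaces a hand-rolled closeFn with t.Cleanup. A minimal self-contained sketch of both, using illustrative names (Tests, setup, fixture) that are not identifiers from this repo; save as example_test.go and run with `go test`:

```go
package example

import "testing"

// Tests maps a case name to a subtest body that receives a per-case fixture,
// mirroring the map[string]func(...) shape this diff gives warp.SignerTests.
var Tests = map[string]func(t *testing.T, fixture string){
	"Empty":    func(t *testing.T, fixture string) { _ = fixture },
	"NonEmpty": func(t *testing.T, fixture string) { _ = fixture },
}

// setup builds a fresh fixture for one subtest and registers its teardown
// with t.Cleanup rather than returning a close function to the caller.
func setup(t *testing.T) string {
	t.Cleanup(func() {
		// release the fixture here
	})
	return "fixture"
}

func TestSuite(t *testing.T) {
	for name, test := range Tests {
		t.Run(name, func(t *testing.T) {
			test(t, setup(t)) // selectable via: go test -run 'TestSuite/Empty'
		})
	}
}
```

Separately, the NextBlockTime helper added to state_changes.go clamps the proposed block time to min(max(now, parentTime), nextStakerChangeTime). A standalone illustration of that clamp on plain time.Time values (the real function reads the parent time and next staker change time from chain state; this copy is for exposition only):

```go
package main

import (
	"fmt"
	"time"
)

// nextBlockTime mirrors the clamp in NextBlockTime above: take
// max(now, parentTime), then cap it at the next staker change time.
// The bool reports whether the cap was applied.
func nextBlockTime(now, parentTime, nextStakerChangeTime time.Time) (time.Time, bool) {
	timestamp := now
	if parentTime.After(timestamp) {
		timestamp = parentTime
	}
	// [timestamp] = max(now, parentTime)

	timeWasCapped := !timestamp.Before(nextStakerChangeTime)
	if timeWasCapped {
		timestamp = nextStakerChangeTime
	}
	// [timestamp] = min(max(now, parentTime), nextStakerChangeTime)
	return timestamp, timeWasCapped
}

func main() {
	parent := time.Date(2024, 3, 1, 0, 0, 0, 0, time.UTC)
	change := parent.Add(10 * time.Minute)

	fmt.Println(nextBlockTime(parent.Add(time.Minute), parent, change)) // not capped
	fmt.Println(nextBlockTime(parent.Add(time.Hour), parent, change))  // capped to [change]
}
```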